21 files changed, 323 insertions, 818 deletions
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
new file mode 100644
index 00000000..87ebb1d7
--- /dev/null
+++ b/environments/hyperconverged-ceph.yaml
@@ -0,0 +1,12 @@
+# If using an isolated StorageMgmt network, this will have to be uncommented to
+# plug the network on the compute nodes as well.
+#resource_registry:
+#  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+# Should match the default list of services for the compute node plus CephOSD
+parameter_defaults:
+  ComputeServices:
+    - OS::TripleO::Services::CephOSD
+
+parameter_merge_strategies:
+  ComputeServices: merge
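Reviewer note: a usage sketch for the new environment file. Because of the merge strategy above, CephOSD is appended to the Compute role's default ComputeServices list rather than replacing it. The extra overrides file named here is a placeholder, not part of this change.

```bash
# Hypothetical deploy invocation including the new hyperconverged environment.
# If the StorageMgmt network is isolated, the commented resource_registry entry
# above must also be uncommented so compute nodes get a StorageMgmt port.
openstack overcloud deploy \
  --templates \
  -e environments/hyperconverged-ceph.yaml \
  -e my-site-overrides.yaml   # placeholder for site-specific parameters
```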
\ No newline at end of file diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh index b76dd7c3..21a2b5bc 100755 --- a/extraconfig/tasks/major_upgrade_ceph_mon.sh +++ b/extraconfig/tasks/major_upgrade_ceph_mon.sh @@ -18,13 +18,13 @@ if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then fi CEPH_STATUS=$(ceph health | awk '{print $1}') -if [ ${CEPH_STATUS} = HEALTH_ERR ]; do +if [ ${CEPH_STATUS} = HEALTH_ERR ]; then echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded exit 1 fi # Useful when upgrading with OSDs num < replica size -if [ $ignore_ceph_upgrade_warnings != "true" ]; then +if [ ${ignore_ceph_upgrade_warnings:-false} != "true" ]; then timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK; sleep 30; @@ -44,7 +44,7 @@ timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do done" # Update to Jewel -yum -y -q update ceph-mon +yum -y -q update ceph-mon ceph # Restart/Exit if not on Jewel, only in that case we need the changes UPDATED_VERSION=$(ceph --version | awk '{print $3}') @@ -54,7 +54,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then # RPM could own some of these but we can't take risks on the pre-existing files for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do - chown -R ceph:ceph $d + chown -R ceph:ceph $d || echo WARNING: chown of $d failed done # Replay udev events with newer rules diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh index 03a1c1c2..dc80a724 100644 --- a/extraconfig/tasks/major_upgrade_ceph_storage.sh +++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh @@ -63,7 +63,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then # RPM could own some of these but we can't take risks on the pre-existing files for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do - chown -R ceph:ceph $d + chown -R ceph:ceph $d || echo WARNING: chown of $d failed done # Replay udev events with newer rules diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh new file mode 100755 index 00000000..dc7ec71a --- /dev/null +++ b/extraconfig/tasks/major_upgrade_check.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +set -eu + +check_cluster() +{ + if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then + echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" + exit 1 + fi +} + +check_pcsd() +{ + if pcs status 2>&1 | grep -E 'Offline'; then + echo_error "ERROR: upgrade cannot start with some pcsd daemon offline" + exit 1 + fi +} + +check_disk_for_mysql_dump() +{ + # Where to backup current database if mysql need to be upgraded + MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp + MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup + # Spare disk ratio for extra safety + MYSQL_BACKUP_SIZE_RATIO=1.2 + + # Shall we upgrade mysql data directory during the stack upgrade? 
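Reviewer note on the two shell fixes above (the new major_upgrade_check.sh continues right after this note): `${ignore_ceph_upgrade_warnings:-false}` keeps the test from blowing up when the flag was never set, and `|| echo WARNING` stops a non-fatal chown failure from aborting a script run under `set -e`. A minimal sketch of both patterns; variable and directory names mirror the diff but the script itself is illustrative:

```bash
#!/bin/bash
set -eu

# Default an optional flag so 'set -u' does not abort when it was never exported.
ignore_ceph_upgrade_warnings=${ignore_ceph_upgrade_warnings:-false}
if [ "${ignore_ceph_upgrade_warnings}" != "true" ]; then
    echo "INFO: will wait for the Ceph cluster to report HEALTH_OK"
fi

# Downgrade a non-fatal failure to a warning so 'set -e' does not stop the run.
for d in /var/lib/ceph/mon /var/log/ceph; do
    chown -R ceph:ceph "$d" || echo "WARNING: chown of $d failed"
done
```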
+ if [ "$mariadb_do_major_upgrade" = "auto" ]; then + ret=$(is_mysql_upgrade_needed) + if [ $ret = "1" ]; then + DO_MYSQL_UPGRADE=1 + else + DO_MYSQL_UPGRADE=0 + fi + echo "mysql upgrade required: $DO_MYSQL_UPGRADE" + elif [ "$mariadb_do_major_upgrade" = "no" ]; then + DO_MYSQL_UPGRADE=0 + else + DO_MYSQL_UPGRADE=1 + fi + + if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + + if [ -d "$MYSQL_BACKUP_DIR" ]; then + echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously" + exit 1 + fi + mkdir "$MYSQL_BACKUP_DIR" + if [ $? -ne 0 ]; then + echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR" + exit 1 + fi + + # the /root/.my.cnf is needed because we set the mysql root + # password from liberty onwards + backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" + # While not ideal, this step allows us to calculate exactly how much space the dump + # will need. Our main goal here is avoiding any chance of corruption due to disk space + # exhaustion + backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c) + database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }') + free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1) + + # we need at least space for a new mysql database + dump of the existing one, + # times a small factor for additional safety room + # note: bash doesn't do floating point math or floats in if statements, + # so use python to apply the ratio and cast it back to integer + required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))") + if [ $required_space -ge $free_space ]; then + echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)" + exit 1 + fi + fi + fi +} + +check_python_rpm() +{ + # If for some reason rpm-python are missing we want to error out early enough + if ! rpm -q rpm-python &> /dev/null; then + echo_error "ERROR: upgrade cannot start without rpm-python installed" + exit 1 + fi +} + +check_clean_cluster() +{ + if crm_mon -1 | grep -A3 Failed; then + echo_error "ERROR: upgrade cannot start with failed resources on the cluster. Clean them up before starting: pcs resource cleanup." + exit 1 + fi +} + +check_galera_root_password() +{ + # BZ: 1357112 + if [ ! -e /root/.my.cnf ]; then + echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update." 
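Reviewer note (check_galera_root_password continues below): the space check added above boils down to "free bytes in the backup directory must exceed (datadir size + dump size) x 1.2". A standalone sketch of that calculation for a quick manual check on a controller; paths and the mysqldump defaults file are the same ones the script assumes, and the backup directory is expected to exist (or use its parent) before calling df:

```bash
#!/bin/bash
set -eu

MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
MYSQL_BACKUP_SIZE_RATIO=1.2
mkdir -p "$MYSQL_BACKUP_DIR"

# Size of a dump of all databases, in bytes (the dump output is discarded here).
backup_size=$(mysqldump --defaults-extra-file=/root/.my.cnf -u root \
    --flush-privileges --all-databases --single-transaction 2>/dev/null | wc -c)
# On-disk size of the current datadir, in bytes.
database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
# Free bytes on the filesystem that will hold the backup.
free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)

# bash has no float arithmetic, hence the python helper for the 1.2 ratio.
# e.g. a 2.0 GB datadir plus a 1.5 GB dump requires about 4.2 GB free.
required_space=$(python -c "print(int(($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO))")
if [ "$required_space" -ge "$free_space" ]; then
    echo "ERROR: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
    exit 1
fi
echo "OK: $free_space bytes free, $required_space bytes required"
```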
+ exit 1 + fi +} diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh index 0b702630..e81ca086 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh @@ -4,11 +4,12 @@ set -eu cluster_sync_timeout=1800 -if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then - echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" - exit 1 -fi - +check_cluster +check_pcsd +check_clean_cluster +check_python_rpm +check_galera_root_password +check_disk_for_mysql_dump # We want to disable fencing during the cluster --stop as it might fence # nodes where a service fails to stop, which could be fatal during an upgrade @@ -17,12 +18,6 @@ fi STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') pcs property set stonith-enabled=false -# If for some reason rpm-python are missing we want to error out early enough -if ! rpm -q rpm-python &> /dev/null; then - echo_error "ERROR: upgrade cannot start without rpm-python installed" - exit 1 -fi - # In case the mysql package is updated, the database on disk must be # upgraded as well. This typically needs to happen during major # version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) @@ -35,59 +30,8 @@ fi # on mysql package versionning, but this can be overriden manually # to support specific upgrade scenario -# Where to backup current database if mysql need to be upgraded -MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp -MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup -# Spare disk ratio for extra safety -MYSQL_BACKUP_SIZE_RATIO=1.2 - -# Shall we upgrade mysql data directory during the stack upgrade? -if [ "$mariadb_do_major_upgrade" = "auto" ]; then - ret=$(is_mysql_upgrade_needed) - if [ $ret = "1" ]; then - DO_MYSQL_UPGRADE=1 - else - DO_MYSQL_UPGRADE=0 - fi - echo "mysql upgrade required: $DO_MYSQL_UPGRADE" -elif [ "$mariadb_do_major_upgrade" = "no" ]; then - DO_MYSQL_UPGRADE=0 -else - DO_MYSQL_UPGRADE=1 -fi - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - if [ -d "$MYSQL_BACKUP_DIR" ]; then - echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously" - exit 1 - fi - mkdir "$MYSQL_BACKUP_DIR" - if [ $? -ne 0 ]; then - echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR" - exit 1 - fi - - # the /root/.my.cnf is needed because we set the mysql root - # password from liberty onwards - backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" - # While not ideal, this step allows us to calculate exactly how much space the dump - # will need. 
Our main goal here is avoiding any chance of corruption due to disk space - # exhaustion - backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c) - database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }') - free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1) - - # we need at least space for a new mysql database + dump of the existing one, - # times a small factor for additional safety room - # note: bash doesn't do floating point math or floats in if statements, - # so use python to apply the ratio and cast it back to integer - required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))") - if [ $required_space -ge $free_space ]; then - echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)" - exit 1 - fi - mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" fi diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml index 598d22d0..7244f949 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -1,16 +1,8 @@ -heat_template_version: 2014-10-16 +heat_template_version: 2016-10-14 description: 'Upgrade for Pacemaker deployments' parameters: - controller_servers: - type: json - compute_servers: - type: json - blockstorage_servers: - type: json - objectstorage_servers: - type: json - cephstorage_servers: + servers: type: json input_values: type: json @@ -54,9 +46,10 @@ resources: CephMonUpgradeDeployment: type: OS::Heat::SoftwareDeploymentGroup properties: - servers: {get_param: controller_servers} + servers: {get_param: servers, Controller} config: {get_resource: CephMonUpgradeConfig} input_values: {get_param: input_values} + update_policy: batch_create: max_batch_size: 1 rolling_update: @@ -82,6 +75,7 @@ resources: params: MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_check.sh - get_file: major_upgrade_pacemaker_migrations.sh - get_file: major_upgrade_controller_pacemaker_1.sh @@ -89,7 +83,7 @@ resources: type: OS::Heat::SoftwareDeploymentGroup depends_on: CephMonUpgradeDeployment properties: - servers: {get_param: controller_servers} + servers: {get_param: servers, Controller} config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} input_values: {get_param: input_values} @@ -103,7 +97,7 @@ resources: BlockStorageUpgradeDeployment: type: OS::Heat::SoftwareDeploymentGroup properties: - servers: {get_param: blockstorage_servers} + servers: {get_param: servers, BlockStorage} config: {get_resource: BlockStorageUpgradeConfig} input_values: {get_param: input_values} @@ -122,7 +116,7 @@ resources: type: OS::Heat::SoftwareDeploymentGroup depends_on: BlockStorageUpgradeDeployment properties: - servers: {get_param: controller_servers} + servers: {get_param: servers, Controller} config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} input_values: {get_param: input_values} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml index 623549a0..f6aa3066 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml @@ -3,15 +3,7 @@ description: 'Upgrade for Pacemaker deployments' parameters: - controller_servers: - type: json - compute_servers: - type: json - 
blockstorage_servers: - type: json - objectstorage_servers: - type: json - cephstorage_servers: + servers: type: json input_values: type: json @@ -43,45 +35,12 @@ resources: - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand - UpgradeInitControllerDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: controller_servers} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - - UpgradeInitComputeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: compute_servers} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - - UpgradeInitBlockStorageDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: blockstorage_servers} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - - UpgradeInitObjectStorageDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: objectstorage_servers} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - - UpgradeInitCephStorageDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: cephstorage_servers} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - # TODO(jistr): for Mitaka->Newton upgrades and further we can use # map_merge with input_values instead of feeding params into scripts # via str_replace on bash snippets + # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here + # Would be better to have a common config for all roles ComputeDeliverUpgradeScriptConfig: type: OS::Heat::SoftwareConfig properties: @@ -97,35 +56,32 @@ resources: UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - get_file: major_upgrade_compute.sh - ComputeDeliverUpgradeScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: compute_servers} - config: {get_resource: ComputeDeliverUpgradeScriptConfig} - input_values: {get_param: input_values} - ObjectStorageDeliverUpgradeScriptConfig: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: major_upgrade_object_storage.sh} - ObjectStorageDeliverUpgradeScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: objectstorage_servers} - config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig} - input_values: {get_param: input_values} - CephStorageDeliverUpgradeScriptConfig: type: OS::Heat::SoftwareConfig properties: group: script config: {get_file: major_upgrade_ceph_storage.sh} - CephStorageDeliverUpgradeScriptDeployment: +{% for role in roles %} + UpgradeInit{{role.name}}Deployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + {% if not role.name in ['Controller', 'BlockStorage'] %} + {{role.name}}DeliverUpgradeScriptDeployment: type: OS::Heat::SoftwareDeploymentGroup properties: - servers: {get_param: cephstorage_servers} - config: {get_resource: CephStorageDeliverUpgradeScriptConfig} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig} input_values: {get_param: input_values} + {% endif %} +{% endfor %} diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml 
b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml index 9414ac19..91406fba 100644 --- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml +++ b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml @@ -4,15 +4,7 @@ description: > Software-config for performing aodh data migration parameters: - controller_servers: - type: json - compute_servers: - type: json - blockstorage_servers: - type: json - objectstorage_servers: - type: json - cephstorage_servers: + servers: type: json input_values: type: json @@ -28,6 +20,6 @@ resources: AodhMysqlMigrationScriptDeployment: type: OS::Heat::SoftwareDeploymentGroup properties: - servers: {get_param: controller_servers} + servers: {get_param: servers, Controller} config: {get_resource: AodhMysqlMigrationScriptConfig} input_values: {get_param: input_values} diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml index baa544e7..e541049d 100644 --- a/network/ports/external_from_pool_v6.yaml +++ b/network/ports/external_from_pool_v6.yaml @@ -49,4 +49,4 @@ outputs: - '' - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} - '/' - - {str_split: ['/', {get_attr: [ExternalPort, subnets, 0, cidr]}, 1]} + - {str_split: ['/', {get_param: ExternalNetCidr}, 1]} diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml index 8d0a91b6..afb144ba 100644 --- a/network/ports/internal_api_from_pool_v6.yaml +++ b/network/ports/internal_api_from_pool_v6.yaml @@ -49,4 +49,4 @@ outputs: - '' - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} - '/' - - {str_split: ['/', {get_attr: [InternalApiPort, subnets, 0, cidr]}, 1]} + - {str_split: ['/', {get_param: InternalApiNetCidr}, 1]} diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml index d9ac6046..4c1cc216 100644 --- a/network/ports/management_from_pool_v6.yaml +++ b/network/ports/management_from_pool_v6.yaml @@ -49,4 +49,4 @@ outputs: - '' - - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]} - '/' - - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]} + - {str_split: ['/', {get_param: ManagementNetCidr}, 1]} diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml index 328f8385..18faf1bd 100644 --- a/network/ports/storage_from_pool_v6.yaml +++ b/network/ports/storage_from_pool_v6.yaml @@ -49,4 +49,4 @@ outputs: - '' - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} - '/' - - {str_split: ['/', {get_attr: [StoragePort, subnets, 0, cidr]}, 1]} + - {str_split: ['/', {get_param: StorageNetCidr}, 1]} diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml index 50470c92..e1145a31 100644 --- a/network/ports/storage_mgmt_from_pool_v6.yaml +++ b/network/ports/storage_mgmt_from_pool_v6.yaml @@ -49,4 +49,4 @@ outputs: - '' - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} - '/' - - {str_split: ['/', {get_attr: [StorageMgmtPort, subnets, 0, cidr]}, 1]} + - {str_split: ['/', {get_param: StorageMgmtNetCidr}, 1]} diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml index bbe6f736..d4f0d29c 100644 --- a/network/ports/tenant_from_pool_v6.yaml +++ b/network/ports/tenant_from_pool_v6.yaml @@ -48,4 +48,4 @@ outputs: - '' - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: 
NodeIndex}]} - '/' - - {str_split: ['/', {get_attr: [TenantPort, subnets, 0, cidr]}, 1]} + - {str_split: ['/', {get_param: TenantNetCidr}, 1]} diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index fa160e4d..5c0d9148 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -428,11 +428,10 @@ resources: UpdateWorkflow: type: OS::TripleO::Tasks::UpdateWorkflow properties: - controller_servers: {get_attr: [Controller, attributes, nova_server_resource]} - compute_servers: {get_attr: [Compute, attributes, nova_server_resource]} - blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]} - objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} - cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + servers: +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} +{% endfor %} input_values: deploy_identifier: {get_param: DeployIdentifier} update_identifier: {get_param: UpdateIdentifier} @@ -460,17 +459,13 @@ resources: type: OS::TripleO::PostDeploySteps properties: servers: - Controller: {get_attr: [Controller, attributes, nova_server_resource]} - Compute: {get_attr: [Compute, attributes, nova_server_resource]} - BlockStorage: {get_attr: [BlockStorage, attributes, nova_server_resource]} - ObjectStorage: {get_attr: [ObjectStorage, attributes, nova_server_resource]} - CephStorage: {get_attr: [CephStorage, attributes, nova_server_resource]} +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} +{% endfor %} role_data: - Controller: {get_attr: [ControllerServiceChain, role_data]} - Compute: {get_attr: [ComputeServiceChain, role_data]} - BlockStorage: {get_attr: [BlockStorageServiceChain, role_data]} - ObjectStorage: {get_attr: [ObjectStorageServiceChain, role_data]} - CephStorage: {get_attr: [CephStorageServiceChain, role_data]} +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} +{% endfor %} outputs: ManagedEndpoints: diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml index e496553a..f5b1f0e6 100644 --- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml +++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml @@ -32,6 +32,18 @@ resources: contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"' + contrail::vnc_api::vnc_api_config: + 'auth/AUTHN_TYPE': + value: keystone + 'auth/AUTHN_PROTOCOL': + value: http + 'auth/AUTHN_SERVER': + value: "%{hiera('keystone_admin_api_vip')}" + 'auth/AUTHN_PORT': + value: 35357 + 'auth/AUTHN_URL': + value: '/v2.0/tokens' + ComputeContrailDeployment: type: OS::Heat::StructuredDeployment properties: diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml new file mode 100644 index 00000000..65c96ac2 --- /dev/null +++ b/puppet/post.j2.yaml @@ -0,0 +1,139 @@ +heat_template_version: 2016-10-14 + +description: > + Post-deploy configuration steps via puppet for all roles, + as defined in ../roles_data.yaml + +parameters: + servers: + type: json + description: Mapping of Role name e.g Controller to a list of servers + + role_data: + type: json + description: Mapping of Role name e.g Controller to the per-role data + + DeployIdentifier: + default: '' + type: string + description: > + Setting this to a unique value will re-run any 
deployment tasks which + perform configuration on a Heat stack-update. + +resources: + +{% for role in roles %} + # Post deployment steps for all roles + # A single config is re-applied with an incrementing step number + # {{role.name}} Role steps + {{role.name}}ArtifactsConfig: + type: deploy-artifacts.yaml + + {{role.name}}ArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ArtifactsConfig} + + {{role.name}}PreConfig: + type: OS::TripleO::Tasks::{{role.name}}PreConfig + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}Config: + type: OS::TripleO::{{role.name}}Config + properties: + StepConfig: {get_param: [role_data, {{role.name}}, step_config]} + + # Step through a series of configuration steps + {{role.name}}Deployment_Step1: + type: OS::Heat::StructuredDeploymentGroup + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] + properties: + name: {{role.name}}Deployment_Step1 + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: 1 + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}Deployment_Step2: + type: OS::Heat::StructuredDeploymentGroup + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step1 + {% endfor %} + properties: + name: {{role.name}}Deployment_Step2 + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: 2 + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}Deployment_Step3: + type: OS::Heat::StructuredDeploymentGroup + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step2 + {% endfor %} + properties: + name: {{role.name}}Deployment_Step3 + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: 3 + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}Deployment_Step4: + type: OS::Heat::StructuredDeploymentGroup + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step3 + {% endfor %} + properties: + name: {{role.name}}Deployment_Step4 + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: 4 + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}Deployment_Step5: + type: OS::Heat::StructuredDeploymentGroup + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step4 + {% endfor %} + properties: + name: {{role.name}}Deployment_Step5 + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: 5 + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}PostConfig: + type: OS::TripleO::Tasks::{{role.name}}PostConfig + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step5 + {% endfor %} + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: DeployIdentifier} + + # Note, this should come last, so use depends_on to ensure + # this is created after any other resources. 
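Reviewer note on the role loop above (the generated ExtraConfigPost resource follows below): all of the per-role Step1-Step5 resources that were spelled out by hand in puppet/post.yaml (deleted further down) are now generated from the roles list, with each role's step N depending on every role's step N-1. A hypothetical way to eyeball what the loop expands to, assuming Jinja2 and PyYAML are installed locally; TripleO's own rendering pipeline is what actually processes the *.j2.yaml files:

```bash
# Render puppet/post.j2.yaml with the roles defined in roles_data.yaml and
# print the result, so the generated *Deployment_Step* resources can be inspected.
python - <<'EOF'
import jinja2
import yaml

with open('roles_data.yaml') as f:
    roles = yaml.safe_load(f)          # list of dicts, each with a 'name' key

with open('puppet/post.j2.yaml') as f:
    template = jinja2.Template(f.read())

print(template.render(roles=roles))    # e.g. ControllerDeployment_Step1 ... CephStoragePostConfig
EOF
```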
+ {{role.name}}ExtraConfigPost: + depends_on: + {% for dep in roles %} + - {{dep.name}}PostConfig + {% endfor %} + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: [servers, {{role.name}}]} +{% endfor %} diff --git a/puppet/post.yaml b/puppet/post.yaml deleted file mode 100644 index 8f57b34e..00000000 --- a/puppet/post.yaml +++ /dev/null @@ -1,644 +0,0 @@ -heat_template_version: 2016-10-14 - -description: > - Post-deploy configuration steps via puppet for all roles, - Controller, Compute, BlockStorage, SwiftStorage and CephStorage. - -parameters: - servers: - type: json - description: Mapping of Role name e.g Controller to a list of servers - - role_data: - type: json - description: Mapping of Role name e.g Controller to the per-role data - - DeployIdentifier: - default: '' - type: string - description: > - Setting this to a unique value will re-run any deployment tasks which - perform configuration on a Heat stack-update. - -resources: - # Post deployment steps for all roles - # A single config is re-applied with an incrementing step number - # Controller Role steps - ControllerArtifactsConfig: - type: deploy-artifacts.yaml - - ControllerArtifactsDeploy: - type: OS::Heat::StructuredDeployments - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerArtifactsConfig} - - ControllerPreConfig: - type: OS::TripleO::Tasks::ControllerPreConfig - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - ControllerConfig: - type: OS::TripleO::ControllerConfig - properties: - StepConfig: {get_param: [role_data, Controller, step_config]} - - # Step through a series of configuration steps - ControllerDeployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [ControllerPreConfig, ControllerArtifactsDeploy] - properties: - name: ControllerDeployment_Step1 - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerConfig} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - ControllerDeployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step1 - - ComputeDeployment_Step1 - - BlockStorageDeployment_Step1 - - ObjectStorageDeployment_Step1 - - CephStorageDeployment_Step1 - properties: - name: ControllerDeployment_Step2 - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerConfig} - input_values: - step: 2 - update_identifier: {get_param: DeployIdentifier} - - ControllerDeployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step2 - - ComputeDeployment_Step2 - - BlockStorageDeployment_Step2 - - ObjectStorageDeployment_Step2 - - CephStorageDeployment_Step2 - properties: - name: ControllerDeployment_Step3 - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerConfig} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} - - ControllerDeployment_Step4: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step3 - - ComputeDeployment_Step3 - - BlockStorageDeployment_Step3 - - ObjectStorageDeployment_Step3 - - CephStorageDeployment_Step3 - properties: - name: ControllerDeployment_Step4 - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerConfig} - input_values: - step: 4 - update_identifier: {get_param: DeployIdentifier} - - ControllerDeployment_Step5: - type: 
OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step4 - - ComputeDeployment_Step4 - - BlockStorageDeployment_Step4 - - ObjectStorageDeployment_Step4 - - CephStorageDeployment_Step4 - properties: - name: ControllerDeployment_Step5 - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerConfig} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} - - ControllerPostConfig: - type: OS::TripleO::Tasks::ControllerPostConfig - depends_on: - - ControllerDeployment_Step5 - - ComputeDeployment_Step5 - - BlockStorageDeployment_Step5 - - ObjectStorageDeployment_Step5 - - CephStorageDeployment_Step5 - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - ControllerExtraConfigPost: - depends_on: - - ControllerPostConfig - - ComputePostConfig - - BlockStoragePostConfig - - ObjectStoragePostConfig - - CephStoragePostConfig - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, Controller]} - - # Compute Role steps - ComputeArtifactsConfig: - type: deploy-artifacts.yaml - - ComputeArtifactsDeploy: - type: OS::Heat::StructuredDeployments - properties: - servers: {get_param: [servers, Compute]} - config: {get_resource: ComputeArtifactsConfig} - - ComputePreConfig: - type: OS::TripleO::Tasks::ComputePreConfig - properties: - servers: {get_param: [servers, Compute]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - ComputeConfig: - type: OS::TripleO::ComputeConfig - properties: - StepConfig: {get_param: [role_data, Compute, step_config]} - - # Step through a series of configuration steps - ComputeDeployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [ComputePreConfig, ComputeArtifactsDeploy] - properties: - name: ComputeDeployment_Step1 - servers: {get_param: [servers, Compute]} - config: {get_resource: ComputeConfig} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - ComputeDeployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step1 - - ComputeDeployment_Step1 - - BlockStorageDeployment_Step1 - - ObjectStorageDeployment_Step1 - - CephStorageDeployment_Step1 - properties: - name: ComputeDeployment_Step2 - servers: {get_param: [servers, Compute]} - config: {get_resource: ComputeConfig} - input_values: - step: 2 - update_identifier: {get_param: DeployIdentifier} - - ComputeDeployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step2 - - ComputeDeployment_Step2 - - BlockStorageDeployment_Step2 - - ObjectStorageDeployment_Step2 - - CephStorageDeployment_Step2 - properties: - name: ComputeDeployment_Step3 - servers: {get_param: [servers, Compute]} - config: {get_resource: ComputeConfig} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} - - ComputeDeployment_Step4: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step3 - - ComputeDeployment_Step3 - - BlockStorageDeployment_Step3 - - ObjectStorageDeployment_Step3 - - CephStorageDeployment_Step3 - properties: - name: ComputeDeployment_Step4 - servers: {get_param: [servers, Compute]} - config: {get_resource: ComputeConfig} - input_values: - step: 4 - update_identifier: {get_param: DeployIdentifier} - - ComputeDeployment_Step5: - type: OS::Heat::StructuredDeploymentGroup - 
depends_on: - - ControllerDeployment_Step4 - - ComputeDeployment_Step4 - - BlockStorageDeployment_Step4 - - ObjectStorageDeployment_Step4 - - CephStorageDeployment_Step4 - properties: - name: ComputeDeployment_Step5 - servers: {get_param: [servers, Compute]} - config: {get_resource: ComputeConfig} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} - - ComputePostConfig: - type: OS::TripleO::Tasks::ComputePostConfig - depends_on: - - ControllerDeployment_Step5 - - ComputeDeployment_Step5 - - BlockStorageDeployment_Step5 - - ObjectStorageDeployment_Step5 - - CephStorageDeployment_Step5 - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - ComputeExtraConfigPost: - depends_on: - - ControllerPostConfig - - ComputePostConfig - - BlockStoragePostConfig - - ObjectStoragePostConfig - - CephStoragePostConfig - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, Compute]} - - # BlockStorage Role steps - BlockStorageArtifactsConfig: - type: deploy-artifacts.yaml - - BlockStorageArtifactsDeploy: - type: OS::Heat::StructuredDeployments - properties: - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageArtifactsConfig} - - BlockStoragePreConfig: - type: OS::TripleO::Tasks::BlockStoragePreConfig - properties: - servers: {get_param: [servers, BlockStorage]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - BlockStorageConfig: - type: OS::TripleO::BlockStorageConfig - properties: - StepConfig: {get_param: [role_data, BlockStorage, step_config]} - - # Step through a series of configuration steps - BlockStorageDeployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [BlockStoragePreConfig, BlockStorageArtifactsDeploy] - properties: - name: BlockStorageDeployment_Step1 - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageConfig} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - BlockStorageDeployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step1 - - ComputeDeployment_Step1 - - BlockStorageDeployment_Step1 - - ObjectStorageDeployment_Step1 - - CephStorageDeployment_Step1 - properties: - name: BlockStorageDeployment_Step2 - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageConfig} - input_values: - step: 2 - update_identifier: {get_param: DeployIdentifier} - - BlockStorageDeployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step2 - - ComputeDeployment_Step2 - - BlockStorageDeployment_Step2 - - ObjectStorageDeployment_Step2 - - CephStorageDeployment_Step2 - properties: - name: BlockStorageDeployment_Step3 - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageConfig} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} - - BlockStorageDeployment_Step4: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step3 - - ComputeDeployment_Step3 - - BlockStorageDeployment_Step3 - - ObjectStorageDeployment_Step3 - - CephStorageDeployment_Step3 - properties: - name: BlockStorageDeployment_Step4 - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageConfig} - input_values: - step: 4 - update_identifier: {get_param: 
DeployIdentifier} - - BlockStorageDeployment_Step5: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step4 - - ComputeDeployment_Step4 - - BlockStorageDeployment_Step4 - - ObjectStorageDeployment_Step4 - - CephStorageDeployment_Step4 - properties: - name: BlockStorageDeployment_Step5 - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageConfig} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} - - BlockStoragePostConfig: - type: OS::TripleO::Tasks::BlockStoragePostConfig - depends_on: - - ControllerDeployment_Step5 - - ComputeDeployment_Step5 - - BlockStorageDeployment_Step5 - - ObjectStorageDeployment_Step5 - - CephStorageDeployment_Step5 - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - BlockStorageExtraConfigPost: - depends_on: - - ControllerPostConfig - - ComputePostConfig - - BlockStoragePostConfig - - ObjectStoragePostConfig - - CephStoragePostConfig - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, BlockStorage]} - - # ObjectStorage Role steps - ObjectStorageArtifactsConfig: - type: deploy-artifacts.yaml - - ObjectStorageArtifactsDeploy: - type: OS::Heat::StructuredDeployments - properties: - servers: {get_param: [servers, ObjectStorage]} - config: {get_resource: ObjectStorageArtifactsConfig} - - ObjectStoragePreConfig: - type: OS::TripleO::Tasks::ObjectStoragePreConfig - properties: - servers: {get_param: [servers, ObjectStorage]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - ObjectStorageConfig: - type: OS::TripleO::ObjectStorageConfig - properties: - StepConfig: {get_param: [role_data, ObjectStorage, step_config]} - - # Step through a series of configuration steps - ObjectStorageDeployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [ObjectStoragePreConfig, ObjectStorageArtifactsDeploy] - properties: - name: ObjectStorageDeployment_Step1 - servers: {get_param: [servers, ObjectStorage]} - config: {get_resource: ObjectStorageConfig} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - ObjectStorageDeployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step1 - - ComputeDeployment_Step1 - - BlockStorageDeployment_Step1 - - ObjectStorageDeployment_Step1 - - CephStorageDeployment_Step1 - properties: - name: ObjectStorageDeployment_Step2 - servers: {get_param: [servers, ObjectStorage]} - config: {get_resource: ObjectStorageConfig} - input_values: - step: 2 - update_identifier: {get_param: DeployIdentifier} - - ObjectStorageDeployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step2 - - ComputeDeployment_Step2 - - BlockStorageDeployment_Step2 - - ObjectStorageDeployment_Step2 - - CephStorageDeployment_Step2 - properties: - name: ObjectStorageDeployment_Step3 - servers: {get_param: [servers, ObjectStorage]} - config: {get_resource: ObjectStorageConfig} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} - - ObjectStorageDeployment_Step4: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step3 - - ComputeDeployment_Step3 - - BlockStorageDeployment_Step3 - - ObjectStorageDeployment_Step3 - - CephStorageDeployment_Step3 - properties: - name: ObjectStorageDeployment_Step4 
- servers: {get_param: [servers, ObjectStorage]} - config: {get_resource: ObjectStorageConfig} - input_values: - step: 4 - update_identifier: {get_param: DeployIdentifier} - - ObjectStorageDeployment_Step5: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step4 - - ComputeDeployment_Step4 - - BlockStorageDeployment_Step4 - - ObjectStorageDeployment_Step4 - - CephStorageDeployment_Step4 - properties: - name: ObjectStorageDeployment_Step5 - servers: {get_param: [servers, ObjectStorage]} - config: {get_resource: ObjectStorageConfig} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} - - ObjectStoragePostConfig: - type: OS::TripleO::Tasks::ObjectStoragePostConfig - depends_on: - - ControllerDeployment_Step5 - - ComputeDeployment_Step5 - - BlockStorageDeployment_Step5 - - ObjectStorageDeployment_Step5 - - CephStorageDeployment_Step5 - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - ObjectStorageExtraConfigPost: - depends_on: - - ControllerPostConfig - - ComputePostConfig - - BlockStoragePostConfig - - ObjectStoragePostConfig - - CephStoragePostConfig - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, ObjectStorage]} - - # CephStorage Role steps - CephStorageArtifactsConfig: - type: deploy-artifacts.yaml - - CephStorageArtifactsDeploy: - type: OS::Heat::StructuredDeployments - properties: - servers: {get_param: [servers, CephStorage]} - config: {get_resource: CephStorageArtifactsConfig} - - CephStoragePreConfig: - type: OS::TripleO::Tasks::CephStoragePreConfig - properties: - servers: {get_param: [servers, CephStorage]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - CephStorageConfig: - type: OS::TripleO::CephStorageConfig - properties: - StepConfig: {get_param: [role_data, CephStorage, step_config]} - - # Step through a series of configuration steps - CephStorageDeployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [CephStoragePreConfig, CephStorageArtifactsDeploy] - properties: - name: CephStorageDeployment_Step1 - servers: {get_param: [servers, CephStorage]} - config: {get_resource: CephStorageConfig} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - CephStorageDeployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step1 - - ComputeDeployment_Step1 - - BlockStorageDeployment_Step1 - - ObjectStorageDeployment_Step1 - - CephStorageDeployment_Step1 - properties: - name: CephStorageDeployment_Step2 - servers: {get_param: [servers, CephStorage]} - config: {get_resource: CephStorageConfig} - input_values: - step: 2 - update_identifier: {get_param: DeployIdentifier} - - CephStorageDeployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step2 - - ComputeDeployment_Step2 - - BlockStorageDeployment_Step2 - - ObjectStorageDeployment_Step2 - - CephStorageDeployment_Step2 - properties: - name: CephStorageDeployment_Step3 - servers: {get_param: [servers, CephStorage]} - config: {get_resource: CephStorageConfig} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} - - CephStorageDeployment_Step4: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step3 - - ComputeDeployment_Step3 - - BlockStorageDeployment_Step3 - - 
ObjectStorageDeployment_Step3 - - CephStorageDeployment_Step3 - properties: - name: CephStorageDeployment_Step4 - servers: {get_param: [servers, CephStorage]} - config: {get_resource: CephStorageConfig} - input_values: - step: 4 - update_identifier: {get_param: DeployIdentifier} - - CephStorageDeployment_Step5: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - - ControllerDeployment_Step4 - - ComputeDeployment_Step4 - - BlockStorageDeployment_Step4 - - ObjectStorageDeployment_Step4 - - CephStorageDeployment_Step4 - properties: - name: CephStorageDeployment_Step5 - servers: {get_param: [servers, CephStorage]} - config: {get_resource: CephStorageConfig} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} - - CephStoragePostConfig: - type: OS::TripleO::Tasks::CephStoragePostConfig - depends_on: - - ControllerDeployment_Step5 - - ComputeDeployment_Step5 - - BlockStorageDeployment_Step5 - - ObjectStorageDeployment_Step5 - - CephStorageDeployment_Step5 - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - CephStorageExtraConfigPost: - depends_on: - - ControllerPostConfig - - ComputePostConfig - - BlockStoragePostConfig - - ObjectStoragePostConfig - - CephStoragePostConfig - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, CephStorage]} diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index adc1b4cb..03abe79b 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -101,6 +101,7 @@ outputs: template: "'REGISTRY_HOST'" params: REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]} + glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] } glance::api::authtoken::password: {get_param: GlancePassword} glance::api::enable_proxy_headers_parsing: true glance::api::debug: {get_param: Debug} diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index b321ecbe..18fc9158 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -148,7 +148,6 @@ outputs: keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} keystone::endpoint::region: {get_param: KeystoneRegion} keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} - keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]} keystone::db::mysql::user: keystone keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} keystone::db::mysql::dbname: keystone diff --git a/roles_data.yaml b/roles_data.yaml index db0004c3..be96cacd 100644 --- a/roles_data.yaml +++ b/roles_data.yaml @@ -5,6 +5,7 @@ - OS::TripleO::Services::CACerts - OS::TripleO::Services::CephMon - OS::TripleO::Services::CephExternal + - OS::TripleO::Services::CephRgw - OS::TripleO::Services::CinderApi - OS::TripleO::Services::CinderBackup - OS::TripleO::Services::CinderScheduler |
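Reviewer note: a quick post-deploy spot check for the hieradata changes above, run on a controller. The key names are the ones touched by this patch, and the invocation style matches the upgrade scripts in this change; CephRgw only ends up in the service catalog when a roles list containing it is deployed.

```bash
# Confirm the added and removed hiera keys on a deployed controller.
hiera -c /etc/puppet/hiera.yaml glance::api::registry_client_protocol   # expect http or https
hiera -c /etc/puppet/hiera.yaml keystone::public_endpoint               # typically nil once no longer set by the templates
```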