-rw-r--r--  environments/hyperconverged-ceph.yaml                            |  12
-rwxr-xr-x  extraconfig/tasks/major_upgrade_ceph_mon.sh                      |   8
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh                  |   2
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh                         | 104
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh       |  68
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                   |   4
-rw-r--r--  network/ports/external_from_pool_v6.yaml                         |   2
-rw-r--r--  network/ports/internal_api_from_pool_v6.yaml                     |   2
-rw-r--r--  network/ports/management_from_pool_v6.yaml                       |   2
-rw-r--r--  network/ports/storage_from_pool_v6.yaml                          |   2
-rw-r--r--  network/ports/storage_mgmt_from_pool_v6.yaml                     |   2
-rw-r--r--  network/ports/tenant_from_pool_v6.yaml                           |   2
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml |  12
-rw-r--r--  puppet/services/aodh-api.yaml                                    |   6
-rw-r--r--  puppet/services/glance-api.yaml                                  |   1
-rw-r--r--  puppet/services/keystone.yaml                                    |   1
-rw-r--r--  roles_data.yaml                                                  |   1
17 files changed, 156 insertions(+), 75 deletions(-)
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
new file mode 100644
index 00000000..87ebb1d7
--- /dev/null
+++ b/environments/hyperconverged-ceph.yaml
@@ -0,0 +1,12 @@
+# If using an isolated StorageMgmt network, this will have to be uncommented to
+# plug the network on the compute nodes as well.
+#resource_registry:
+# OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+# Should match the default list of services for the compute node plus CephOSD
+parameter_defaults:
+ ComputeServices:
+ - OS::TripleO::Services::CephOSD
+
+parameter_merge_strategies:
+ ComputeServices: merge
\ No newline at end of file
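
For context, this environment is meant to be layered onto an existing deploy
command so that CephOSD is merged into the compute role's service list rather
than replacing it. The exact command line and the companion storage environment
shown here are assumptions, not part of this change:

    # Hypothetical invocation; only the hyperconverged-ceph.yaml path comes
    # from this patch, the other arguments depend on the deployment.
    openstack overcloud deploy --templates \
      -e environments/storage-environment.yaml \
      -e environments/hyperconverged-ceph.yaml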
diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh
index b76dd7c3..21a2b5bc 100755
--- a/extraconfig/tasks/major_upgrade_ceph_mon.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_mon.sh
@@ -18,13 +18,13 @@ if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
fi
CEPH_STATUS=$(ceph health | awk '{print $1}')
-if [ ${CEPH_STATUS} = HEALTH_ERR ]; do
+if [ ${CEPH_STATUS} = HEALTH_ERR ]; then
echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded
exit 1
fi
# Useful when upgrading with OSDs num < replica size
-if [ $ignore_ceph_upgrade_warnings != "true" ]; then
+if [ ${ignore_ceph_upgrade_warnings:-false} != "true" ]; then
timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do
echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK;
sleep 30;
@@ -44,7 +44,7 @@ timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do
done"
# Update to Jewel
-yum -y -q update ceph-mon
+yum -y -q update ceph-mon ceph
# Restart/Exit if not on Jewel, only in that case we need the changes
UPDATED_VERSION=$(ceph --version | awk '{print $3}')
@@ -54,7 +54,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# RPM could own some of these but we can't take risks on the pre-existing files
for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -R ceph:ceph $d
+ chown -R ceph:ceph $d || echo WARNING: chown of $d failed
done
# Replay udev events with newer rules
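
For reference, the version gate in this script distinguishes Ceph Hammer
(0.94.x) from Jewel (10.2.x) by parsing "ceph --version". A standalone sketch
of the same check (the sample output format is illustrative):

    # "ceph --version" prints e.g. "ceph version 10.2.3 (<hash>)"; awk field 3
    # is the bare version number that the regexes above test.
    ver=$(ceph --version | awk '{print $3}')
    if [[ "$ver" =~ ^0\.94.* ]]; then
        echo "packages still on Hammer ($ver)"
    elif [[ "$ver" =~ ^10\.2.* ]]; then
        echo "packages upgraded to Jewel ($ver)"
    fi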
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index 03a1c1c2..dc80a724 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -63,7 +63,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# RPM could own some of these but we can't take risks on the pre-existing files
for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -R ceph:ceph $d
+ chown -R ceph:ceph $d || echo WARNING: chown of $d failed
done
# Replay udev events with newer rules
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
new file mode 100755
index 00000000..dc7ec71a
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -eu
+
+check_cluster()
+{
+ if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+ echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+ exit 1
+ fi
+}
+
+check_pcsd()
+{
+ if pcs status 2>&1 | grep -E 'Offline'; then
+ echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
+ exit 1
+ fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to back up the current database if mysql needs to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ # Shall we upgrade mysql data directory during the stack upgrade?
+ if [ "$mariadb_do_major_upgrade" = "auto" ]; then
+ ret=$(is_mysql_upgrade_needed)
+ if [ $ret = "1" ]; then
+ DO_MYSQL_UPGRADE=1
+ else
+ DO_MYSQL_UPGRADE=0
+ fi
+ echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
+ elif [ "$mariadb_do_major_upgrade" = "no" ]; then
+ DO_MYSQL_UPGRADE=0
+ else
+ DO_MYSQL_UPGRADE=1
+ fi
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+
+ if [ -d "$MYSQL_BACKUP_DIR" ]; then
+ echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
+ exit 1
+ fi
+ mkdir "$MYSQL_BACKUP_DIR"
+ if [ $? -ne 0 ]; then
+ echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
+ exit 1
+ fi
+
+ # the /root/.my.cnf is needed because we set the mysql root
+ # password from liberty onwards
+ backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
+ # While not ideal, this step allows us to calculate exactly how much space the dump
+ # will need. Our main goal here is avoiding any chance of corruption due to disk space
+ # exhaustion
+ backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
+ database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
+ free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
+
+ # we need at least space for a new mysql database + dump of the existing one,
+ # times a small factor for additional safety room
+ # note: bash doesn't do floating point math or floats in if statements,
+ # so use python to apply the ratio and cast it back to integer
+ required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
+ if [ $required_space -ge $free_space ]; then
+ echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
+ exit 1
+ fi
+ fi
+ fi
+}
+
+check_python_rpm()
+{
+ # If for some reason rpm-python is missing we want to error out early enough
+ if ! rpm -q rpm-python &> /dev/null; then
+ echo_error "ERROR: upgrade cannot start without rpm-python installed"
+ exit 1
+ fi
+}
+
+check_clean_cluster()
+{
+ if crm_mon -1 | grep -A3 Failed; then
+ echo_error "ERROR: upgrade cannot start with failed resources on the cluster. Clean them up before starting: pcs resource cleanup."
+ exit 1
+ fi
+}
+
+check_galera_root_password()
+{
+ # BZ: 1357112
+ if [ ! -e /root/.my.cnf ]; then
+ echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
+ exit 1
+ fi
+}
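
A worked sketch of the space check in check_disk_for_mysql_dump, using made-up
sizes (the backup directory and the 1.2 ratio come from the function above; the
numbers below are hypothetical):

    # Assume a ~2 GB mysql datadir and a ~500 MB dump: with the 1.2 safety
    # ratio, roughly 3 GB must be free where the backup directory lives.
    database_size=2000000000
    backup_size=500000000
    required_space=$(python -c "from __future__ import print_function; print('%d' % int(($database_size + $backup_size) * 1.2))")
    free_space=$(df -B1 --output=avail /var/tmp | tail -1)
    if [ "$required_space" -ge "$free_space" ]; then
        echo "not enough free space, the upgrade would abort at this point"
    fi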
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 0b702630..e81ca086 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -4,11 +4,12 @@ set -eu
cluster_sync_timeout=1800
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
-fi
-
+check_cluster
+check_pcsd
+check_clean_cluster
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
# We want to disable fencing during the cluster --stop as it might fence
# nodes where a service fails to stop, which could be fatal during an upgrade
@@ -17,12 +18,6 @@ fi
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
-# If for some reason rpm-python are missing we want to error out early enough
-if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
-fi
-
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -35,59 +30,8 @@ fi
# on mysql package versionning, but this can be overriden manually
# to support specific upgrade scenario
-# Where to backup current database if mysql need to be upgraded
-MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-# Spare disk ratio for extra safety
-MYSQL_BACKUP_SIZE_RATIO=1.2
-
-# Shall we upgrade mysql data directory during the stack upgrade?
-if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
-else
- DO_MYSQL_UPGRADE=1
-fi
-
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
-
mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
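
The check_* calls introduced above are not defined in this script; they come
from major_upgrade_check.sh, which the template below concatenates in front of
it. A rough local approximation, useful only to syntax-check the assembled step
(the output path is arbitrary and the str_replace header is omitted):

    # Concatenate the fragments in the same order as the get_file list in
    # major_upgrade_pacemaker.yaml, then syntax-check without executing.
    cat extraconfig/tasks/pacemaker_common_functions.sh \
        extraconfig/tasks/major_upgrade_check.sh \
        extraconfig/tasks/major_upgrade_pacemaker_migrations.sh \
        extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh \
        > /tmp/upgrade_controller_step1.sh
    bash -n /tmp/upgrade_controller_step1.sh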
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index ec36ba55..7244f949 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: 2016-10-14
description: 'Upgrade for Pacemaker deployments'
parameters:
@@ -49,6 +49,7 @@ resources:
servers: {get_param: [servers, Controller]}
config: {get_resource: CephMonUpgradeConfig}
input_values: {get_param: input_values}
+ update_policy:
batch_create:
max_batch_size: 1
rolling_update:
@@ -74,6 +75,7 @@ resources:
params:
MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_1.sh
diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml
index baa544e7..e541049d 100644
--- a/network/ports/external_from_pool_v6.yaml
+++ b/network/ports/external_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [ExternalPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: ExternalNetCidr}, 1]}
diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml
index 8d0a91b6..afb144ba 100644
--- a/network/ports/internal_api_from_pool_v6.yaml
+++ b/network/ports/internal_api_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [InternalApiPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: InternalApiNetCidr}, 1]}
diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml
index d9ac6046..4c1cc216 100644
--- a/network/ports/management_from_pool_v6.yaml
+++ b/network/ports/management_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: ManagementNetCidr}, 1]}
diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml
index 328f8385..18faf1bd 100644
--- a/network/ports/storage_from_pool_v6.yaml
+++ b/network/ports/storage_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [StoragePort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: StorageNetCidr}, 1]}
diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml
index 50470c92..e1145a31 100644
--- a/network/ports/storage_mgmt_from_pool_v6.yaml
+++ b/network/ports/storage_mgmt_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [StorageMgmtPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: StorageMgmtNetCidr}, 1]}
diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml
index bbe6f736..d4f0d29c 100644
--- a/network/ports/tenant_from_pool_v6.yaml
+++ b/network/ports/tenant_from_pool_v6.yaml
@@ -48,4 +48,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [TenantPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: TenantNetCidr}, 1]}
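
All six *_from_pool_v6.yaml changes above follow the same pattern: the prefix
length is now taken from the corresponding *NetCidr parameter instead of the
port's subnet attribute. A shell analogue of that str_split, with hypothetical
addresses:

    # str_split: ['/', <cidr>, 1] keeps the element after the first '/', i.e.
    # the prefix length appended to the address taken from IPPool.
    cidr="fd00:fd00:fd00:3000::/64"   # e.g. a TenantNetCidr value (made up)
    ip="fd00:fd00:fd00:3000::10"      # e.g. an IPPool entry (made up)
    echo "${ip}/${cidr#*/}"           # -> fd00:fd00:fd00:3000::10/64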
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
index e496553a..f5b1f0e6 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
@@ -32,6 +32,18 @@ resources:
contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin
contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"'
+ contrail::vnc_api::vnc_api_config:
+ 'auth/AUTHN_TYPE':
+ value: keystone
+ 'auth/AUTHN_PROTOCOL':
+ value: http
+ 'auth/AUTHN_SERVER':
+ value: "%{hiera('keystone_admin_api_vip')}"
+ 'auth/AUTHN_PORT':
+ value: 35357
+ 'auth/AUTHN_URL':
+ value: '/v2.0/tokens'
+
ComputeContrailDeployment:
type: OS::Heat::StructuredDeployment
properties:
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index 65afffad..c84614ba 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -21,6 +21,11 @@ parameters:
MonitoringSubscriptionAodhApi:
default: 'overcloud-ceilometer-aodh-api'
type: string
+ EnableCombinationAlarms:
+ default: false
+ description: Combination alarms are deprecated in Newton, hence disabled
+ by default. To enable, set this parameter to true.
+ type: boolean
resources:
AodhBase:
@@ -62,5 +67,6 @@ outputs:
# internal_api_subnet - > IP/CIDR
aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]}
+ tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms}
step_config: |
include tripleo::profile::base::aodh::api
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index adc1b4cb..03abe79b 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -101,6 +101,7 @@ outputs:
template: "'REGISTRY_HOST'"
params:
REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
+ glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] }
glance::api::authtoken::password: {get_param: GlancePassword}
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index b321ecbe..18fc9158 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -148,7 +148,6 @@ outputs:
keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
keystone::endpoint::region: {get_param: KeystoneRegion}
keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
- keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
keystone::db::mysql::user: keystone
keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
keystone::db::mysql::dbname: keystone
diff --git a/roles_data.yaml b/roles_data.yaml
index db0004c3..be96cacd 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -5,6 +5,7 @@
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRgw
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler