-rw-r--r--  environments/manila-cephfsnative-config.yaml              |    8
-rw-r--r--  environments/manila-generic-config.yaml                   |    8
-rw-r--r--  environments/manila-netapp-config.yaml                    |    8
-rw-r--r--  environments/neutron-nuage-config.yaml                    |    2
-rw-r--r--  environments/services/barbican.yaml                       |    4
-rw-r--r--  environments/storage-environment.yaml                     |   22
-rw-r--r--  environments/tls-endpoints-public-dns.yaml                |    3
-rw-r--r--  environments/tls-endpoints-public-ip.yaml                 |    3
-rw-r--r--  environments/tls-everywhere-endpoints-dns.yaml            |    3
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh          |    4
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh           |    6
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh                  |   19
-rw-r--r--  extraconfig/tasks/major_upgrade_compute.sh                |    9
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh |  187
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh |  222
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh |   76
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh |   17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh |    8
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh |   15
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh         |    4
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml            |   81
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh           |    2
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh           |   15
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                           |    8
-rw-r--r--  firstboot/userdata_heat_admin.yaml                        |    8
-rw-r--r--  network/endpoints/endpoint_data.yaml                      |    9
-rw-r--r--  network/endpoints/endpoint_map.yaml                       |  246
-rw-r--r--  network/service_net_map.j2.yaml                           |    1
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml                |   21
-rw-r--r--  overcloud.j2.yaml                                         |   67
-rw-r--r--  puppet/cephstorage-role.yaml                              |    1
-rw-r--r--  puppet/deploy-artifacts.sh                                |    2
-rw-r--r--  puppet/post.j2.yaml                                       |   21
-rw-r--r--  puppet/role.role.j2.yaml                                  |    1
-rw-r--r--  puppet/services/aodh-api.yaml                             |   13
-rw-r--r--  puppet/services/barbican-api.yaml                         |  127
-rw-r--r--  puppet/services/ceilometer-api.yaml                       |   14
-rw-r--r--  puppet/services/ceph-external.yaml                        |   26
-rw-r--r--  puppet/services/cinder-api.yaml                           |    1
-rw-r--r--  puppet/services/glance-base.yaml                          |   18
-rw-r--r--  puppet/services/gnocchi-api.yaml                          |   13
-rw-r--r--  puppet/services/gnocchi-statsd.yaml                       |    4
-rw-r--r--  puppet/services/heat-api-cfn.yaml                         |   14
-rw-r--r--  puppet/services/heat-api.yaml                             |   14
-rw-r--r--  puppet/services/heat-base.yaml                            |    5
-rw-r--r--  puppet/services/kernel.yaml                               |    6
-rw-r--r--  puppet/services/manila-api.yaml                           |   35
-rw-r--r--  puppet/services/manila-base.yaml                          |   22
-rw-r--r--  puppet/services/manila-scheduler.yaml                     |    9
-rw-r--r--  puppet/services/manila-share.yaml                         |   10
-rw-r--r--  puppet/services/nova-api.yaml                             |   20
-rw-r--r--  puppet/services/nova-libvirt.yaml                         |    3
-rw-r--r--  puppet/services/nova-scheduler.yaml                       |    2
-rw-r--r--  puppet/services/nova-vnc-proxy.yaml                       |    5
-rw-r--r--  puppet/services/opendaylight-api.yaml                     |    2
-rw-r--r--  puppet/services/rabbitmq.yaml                             |    2
-rw-r--r--  puppet/services/sahara-base.yaml                          |   12
-rw-r--r--  puppet/services/services.yaml                             |   21
-rw-r--r--  puppet/services/swift-proxy.yaml                          |    4
-rw-r--r--  puppet/services/swift-ringbuilder.yaml                    |    7
-rw-r--r--  puppet/services/swift-storage.yaml                        |    2
-rw-r--r--  roles_data.yaml                                           |   14
62 files changed, 1086 insertions(+), 450 deletions(-)
diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml
index 825a5066..5632d8d6 100644
--- a/environments/manila-cephfsnative-config.yaml
+++ b/environments/manila-cephfsnative-config.yaml
@@ -1,11 +1,11 @@
# A Heat environment file which can be used to enable a
# a Manila CephFS Native driver backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
parameter_defaults:
diff --git a/environments/manila-generic-config.yaml b/environments/manila-generic-config.yaml
index 9344bc6e..65884a94 100644
--- a/environments/manila-generic-config.yaml
+++ b/environments/manila-generic-config.yaml
@@ -1,10 +1,10 @@
# This environment file enables Manila with the Generic backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
parameter_defaults:
ManilaServiceInstanceUser: ''
diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml
index 3dadfe5d..7eb14941 100644
--- a/environments/manila-netapp-config.yaml
+++ b/environments/manila-netapp-config.yaml
@@ -1,10 +1,10 @@
# This environment file enables Manila with the Netapp backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
parameter_defaults:
ManilaNetappBackendName: tripleo_netapp
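
The three Manila environment files above fix the same typo: Heat resource names are case-sensitive, so the misspelled OS::Tripleo entries did not match the OS::TripleO names used elsewhere in the templates. A hedged usage sketch, assuming a standard TripleO deploy invocation (the deploy command itself is not part of this change):

    # Include one of the corrected environment files at deploy time;
    # -e files are merged into the resource_registry in order.
    openstack overcloud deploy --templates \
      -e environments/manila-netapp-config.yaml
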
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml
index e157ae35..74899246 100644
--- a/environments/neutron-nuage-config.yaml
+++ b/environments/neutron-nuage-config.yaml
@@ -19,7 +19,7 @@ parameter_defaults:
NeutronNuageBaseURIVersion: 'default_uri_version'
NeutronNuageCMSId: ''
UseForwardedFor: true
- NeutronCorePlugin: 'neutron.plugins.nuage.plugin.NuagePlugin'
+ NeutronCorePlugin: 'nuage_neutron.plugins.nuage.plugin.NuagePlugin'
NeutronEnableDHCPAgent: false
NeutronServicePlugins: []
NovaOVSBridge: 'alubr0'
diff --git a/environments/services/barbican.yaml b/environments/services/barbican.yaml
new file mode 100644
index 00000000..1735646a
--- /dev/null
+++ b/environments/services/barbican.yaml
@@ -0,0 +1,4 @@
+# A Heat environment file which can be used to enable
+# Barbican with the default secret store backend.
+resource_registry:
+ OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
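
The new barbican.yaml environment follows the convention used by the other optional services: the resource registry maps OS::TripleO::Services::BarbicanApi to OS::Heat::None by default (see the overcloud-resource-registry-puppet.j2.yaml change below), and this file swaps the real template back in. A usage sketch (deploy flags beyond -e are assumptions, not part of this change):

    openstack overcloud deploy --templates \
      -e environments/services/barbican.yaml
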
diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml
index 8cf34622..8e02c300 100644
--- a/environments/storage-environment.yaml
+++ b/environments/storage-environment.yaml
@@ -34,18 +34,18 @@ parameter_defaults:
# CinderNfsServers: ''
- #### GLANCE FILE BACKEND PACEMAKER SETTINGS (used for mounting NFS) ####
+ #### GLANCE NFS SETTINGS ####
- ## Whether to make Glance 'file' backend a mount managed by Pacemaker
- # GlanceFilePcmkManage: false
- ## File system type of the mount
- # GlanceFilePcmkFstype: nfs
- ## Pacemaker mount point, e.g. '192.168.122.1:/export/glance' for NFS
- ## (If using IPv6, use both double- and single-quotes,
- ## e.g. "'[fdd0::1]:/export/glance'")
- # GlanceFilePcmkDevice: ''
- ## Options for the mount managed by Pacemaker
- # GlanceFilePcmkOptions: ''
+ ## Make sure to set `GlanceBackend: file` when enabling NFS
+ ##
+ ## Whether to make Glance 'file' backend a NFS mount
+ # GlanceNfsEnabled: false
+ ## NFS share for image storage, e.g. '192.168.122.1:/export/glance'
+ ## (If using IPv6, use both double- and single-quotes,
+ ## e.g. "'[fdd0::1]:/export/glance'")
+ # GlanceNfsShare: ''
+ ## Mount options for the NFS image storage mount point
+ # GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
#### CEPH SETTINGS ####
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index f94a7726..79c7599f 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index eb2a23b4..a49ca343 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index c3fbaf49..cc1915fe 100644
--- a/environments/tls-everywhere-endpoints-dns.yaml
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
index f161c049..39861826 100644
--- a/extraconfig/tasks/major_upgrade_block_storage.sh
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -11,8 +11,8 @@ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "syst
pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
popd
else
echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index e690a383..d84cad45 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -50,14 +50,14 @@ timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do
done"
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
echo "Manual upgrade of openvswitch - restart in postun detected"
mkdir OVS_UPGRADE || true
pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
popd
else
echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
index b65f6915..8bdff5e7 100755
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -18,14 +18,8 @@ check_pcsd()
fi
}
-check_disk_for_mysql_dump()
+mysql_need_update()
{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
# Shall we upgrade mysql data directory during the stack upgrade?
if [ "$mariadb_do_major_upgrade" = "auto" ]; then
ret=$(is_mysql_upgrade_needed)
@@ -40,6 +34,17 @@ check_disk_for_mysql_dump()
else
DO_MYSQL_UPGRADE=1
fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to backup current database if mysql need to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ mysql_need_update
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
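
The refactor splits the decision ("does mysql need a major upgrade?") out of the disk-space check so other steps can reuse it: mysql_need_update sets DO_MYSQL_UPGRADE as a side effect, and check_disk_for_mysql_dump now calls it before sizing the backup. A sketch of the resulting call pattern (helper names come from this diff; is_mysql_upgrade_needed is defined in the shared migration functions):

    mysql_need_update            # sets DO_MYSQL_UPGRADE to 0 or 1
    if [ "$DO_MYSQL_UPGRADE" -eq 1 ]; then
        echo "mysql datadir will be dumped and re-imported"
    fi
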
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
index 950fe8d5..b0d42806 100644
--- a/extraconfig/tasks/major_upgrade_compute.sh
+++ b/extraconfig/tasks/major_upgrade_compute.sh
@@ -18,7 +18,6 @@ set -eu
crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
-
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
echo "Manual upgrade of openvswitch - restart in postun detected"
@@ -26,8 +25,8 @@ if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "sys
pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
popd
else
echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
@@ -36,6 +35,10 @@ fi
yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
+# Due to bug#1640177 we need to restart compute agent
+echo "Restarting openstack ceilometer agent compute"
+systemctl restart openstack-ceilometer-compute
+
ENDOFCAT
# ensure the permissions are OK
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 2690ee64..739c01dd 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -2,8 +2,6 @@
set -eu
-cluster_sync_timeout=1800
-
check_cluster
check_pcsd
if [[ -n $(is_bootstrap_node) ]]; then
@@ -17,8 +15,15 @@ check_disk_for_mysql_dump
# nodes where a service fails to stop, which could be fatal during an upgrade
# procedure. So we remember the stonith state. If it was enabled we reenable it
# at the end of this script
-STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
-pcs property set stonith-enabled=false
+if [[ -n $(is_bootstrap_node) ]]; then
+ STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
+ # We create this empty file if stonith was set to true so we can reenable stonith in step2
+ rm -f /var/tmp/stonith-true
+ if [ $STONITH_STATE == "true" ]; then
+ touch /var/tmp/stonith-true
+ fi
+ pcs property set stonith-enabled=false
+fi
# Migrate to HA NG and fix up rabbitmq queues
# We fix up the rabbitmq ha queues after the migration because it will
@@ -29,177 +34,3 @@ if [[ -n $(is_bootstrap_node) ]]; then
rabbitmq_mitaka_newton_upgrade
fi
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [ $STONITH_STATE == "true" ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
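
Because disabling and re-enabling stonith now spans two separately executed steps, the state is passed through a flag file on the bootstrap node. A condensed sketch of the handshake (paths and property names are taken from the diff):

    # step 1, bootstrap node only: remember the state, then disable stonith
    state=$(pcs property show stonith-enabled | awk '/stonith-enabled/ { print $2 }')
    rm -f /var/tmp/stonith-true
    [ "$state" = "true" ] && touch /var/tmp/stonith-true
    pcs property set stonith-enabled=false

    # step 2, bootstrap node only: restore it in the offline CIB before the
    # cluster comes back, then clean up the flag
    [ -f /var/tmp/stonith-true ] && \
        pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
    rm -f /var/tmp/stonith-true
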
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index b3a0098c..7cc6735f 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -2,68 +2,186 @@
set -eu
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
+cluster_sync_timeout=1800
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
+# After migrating the cluster to HA-NG the services not under pacemaker's control
+# are still up and running. We need to stop them explicitely otherwise during the yum
+# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
+# is going to take a long time because rabbit is down. By having the service stopped
+# systemctl try-restart is a noop
+for service in $(services_to_migrate); do
+ manage_systemd_service stop "${service%%-clone}"
+ # So the reason for not reusing check_resource_systemd is that
+ # I have observed systemctl is-active returning unknown with at least
+ # one service that was stopped (See LP 1627254)
+ timeout=600
tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
+ tend=$(( $tstart + $timeout ))
+ check_interval=3
+ while (( $(date +%s) < $tend )); do
+ if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
+ echo "$service still active, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ # we do not care if it is inactive, unknown or failed as long as it is
+ # not running
+ break
+ fi
+
done
+done
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
+# In case the mysql package is updated, the database on disk must be
+# upgraded as well. This typically needs to happen during major
+# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
+#
+# Because in-place upgrades are not supported across 2+ major versions
+# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
+# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
+#
+# The default is to determine automatically if upgrade is needed based
+# on mysql package versionning, but this can be overriden manually
+# to support specific upgrade scenario
+
+# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
+# later
+mysql_need_update
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
+ cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
+ pcs resource disable redis
+ check_resource redis stopped 600
+ pcs resource disable rabbitmq
+ check_resource rabbitmq stopped 600
+ pcs resource disable galera
+ check_resource galera stopped 600
+ pcs resource disable openstack-cinder-volume
+ check_resource openstack-cinder-volume stopped 600
+ # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
+ for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
+ pcs resource disable $vip
+ check_resource $vip stopped 60
done
+ pcs cluster stop --all
fi
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
+# Swift isn't controlled by pacemaker
+systemctl_swift stop
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
- cinder-manage db sync
- glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- gnocchi-upgrade
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
+tstart=$(date +%s)
+while systemctl is-active pacemaker; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_sync_timeout )) ; then
+ echo_error "ERROR: cluster shutdown timed out"
+ exit 1
+ fi
+done
+
+# The reason we do an sql dump *and* we move the old dir out of
+# the way is because it gives us an extra level of safety in case
+# something goes wrong during the upgrade. Once the restore is
+# successful we go ahead and remove it. If the directory exists
+# we bail out as it means the upgrade process had issues in the last
+# run.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
+ echo_error "ERROR: mysql backup dir already exist"
+ exit 1
+ fi
+ mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
fi
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y -q update
+
+# We need to ensure at least those two configuration settings, otherwise
+# mariadb 10.1+ won't activate galera replication.
+# wsrep_cluster_address must only be set though, its value does not
+# matter because it's overriden by the galera resource agent.
+cat >> /etc/my.cnf.d/galera.cnf <<EOF
+[mysqld]
+wsrep_on = ON
+wsrep_cluster_address = gcomm://localhost
+EOF
+
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ # Scripts run via heat have no HOME variable set and this confuses
+ # mysqladmin
+ export HOME=/root
+
+ mkdir /var/lib/mysql || /bin/true
+ chown mysql:mysql /var/lib/mysql
+ chmod 0755 /var/lib/mysql
+ restorecon -R /var/lib/mysql/
+ mysql_install_db --datadir=/var/lib/mysql --user=mysql
+ chown -R mysql:mysql /var/lib/mysql/
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ mysqld_safe --wsrep-new-cluster &
+ # We have a populated /root/.my.cnf with root/password here so
+ # we need to temporarily rename it because the newly created
+ # db is empty and no root password is set
+ mv /root/.my.cnf /root/.my.cnf.temporary
+ timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
+ mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
+ mv /root/.my.cnf.temporary /root/.my.cnf
+ mysqladmin -u root shutdown
+ # The import was successful so we may remove the folder
+ rm -r "$MYSQL_BACKUP_DIR"
+ fi
+fi
+
+# If we reached here without error we can safely blow away the origin
+# mysql dir from every controller
+
+# TODO: What if the upgrade fails on the bootstrap node, but not on
+# this controller. Data may be lost.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Let's reset the stonith back to true if it was true, before starting the cluster
+if [[ -n $(is_bootstrap_node) ]]; then
+ if [ -f /var/tmp/stonith-true ]; then
+ pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
+ fi
+ rm -f /var/tmp/stonith-true
+fi
+
+# Pin messages sent to compute nodes to kilo, these will be upgraded later
+crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
+# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
+crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
+# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
+crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
+# LP: 1615035, required only for M/N upgrade.
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+# LP: 1627450, required only for M/N upgrade
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
+
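
The stop-and-wait loop moved into this step deliberately avoids check_resource_systemd, because systemctl is-active can report "unknown" for a stopped unit (LP 1627254). The pattern generalizes to a small helper; a sketch assuming nothing beyond systemd:

    # Wait until a unit is no longer "active"; inactive, failed and
    # unknown all count as stopped.
    wait_until_stopped() {
        local unit=$1 timeout=${2:-600} interval=3
        local tend=$(( $(date +%s) + timeout ))
        while (( $(date +%s) < tend )); do
            [ "$(systemctl is-active "$unit")" != "active" ] && return 0
            echo "$unit still active, sleeping $interval seconds."
            sleep "$interval"
        done
        return 1
    }
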
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
index b653c7c7..37061512 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
@@ -2,25 +2,67 @@
set -eu
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
+cluster_form_timeout=600
+cluster_settle_timeout=1800
+galera_sync_timeout=600
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ pcs cluster start --all
+
+ tstart=$(date +%s)
+ while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_form_timeout )) ; then
+ echo_error "ERROR: timed out forming the cluster"
+ exit 1
+ fi
+ done
+
+ if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+ echo_error "ERROR: timed out waiting for cluster to finish transition"
+ exit 1
+ fi
+
+ for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
+ pcs resource enable $vip
+ check_resource_pacemaker $vip started 60
+ done
+fi
+
+start_or_enable_service galera
+check_resource galera started 600
start_or_enable_service redis
check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
+# We need mongod which is now a systemd service up and running before calling
+# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
+# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
+# we should be good.
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
+systemctl start mongod
+check_resource mongod started 600
-# Swift isn't controled by pacemaker
-systemctl_swift start
+if [[ -n $(is_bootstrap_node) ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo_error "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
+ # Run all the db syncs
+ # TODO: check if this can be triggered in puppet and removed from here
+ ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
+ cinder-manage db sync
+ glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
+ heat-manage --config-file /etc/heat/heat.conf db_sync
+ keystone-manage db_sync
+ neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+ nova-manage db sync
+ nova-manage api_db sync
+ nova-manage db online_data_migrations
+ sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
new file mode 100755
index 00000000..d2cb9553
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -eu
+
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+start_or_enable_service openstack-cinder-volume
+check_resource openstack-cinder-volume started 600
+
+# start httpd so keystone is available for gnocchi
+# upgrade to run.
+systemctl start httpd
+
+# Swift isn't controled by pacemaker
+systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
new file mode 100755
index 00000000..fa95f1f8
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -eu
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ # run gnocchi upgrade
+ gnocchi-upgrade
+fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
new file mode 100755
index 00000000..d569084d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -eu
+
+# We need to start the systemd services we explicitely stopped at step _1.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+services=$(services_to_migrate)
+if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
+ services=${services%%openstack-sahara*}
+fi
+for service in $services; do
+ manage_systemd_service start "${service%%-clone}"
+ check_resource_systemd "${service%%-clone}" started 600
+done
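
The %% expansion in the new step 6 trims the longest suffix matching openstack-sahara*, so every sahara service is dropped from the start list when keep_sahara_services_on_upgrade is false. This relies on services_to_migrate listing the sahara services last; a quick illustration with made-up service names:

    services="openstack-aodh-evaluator openstack-sahara-api openstack-sahara-engine"
    echo "${services%%openstack-sahara*}"
    # prints: openstack-aodh-evaluator
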
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 750ad82c..2667bb16 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -30,8 +30,8 @@ if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "sys
pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
popd
else
echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index 7c78d5ad..b0418a56 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -113,7 +113,20 @@ resources:
config:
list_join:
- ''
- - - get_file: pacemaker_common_functions.sh
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - str_replace:
+ template: |
+ #!/bin/bash
+ mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
+ params:
+ MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
+ - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_2.sh
@@ -132,6 +145,63 @@ resources:
config:
list_join:
- ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_3.sh
+
+ ControllerPacemakerUpgradeDeployment_Step3:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step2
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step4:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_4.sh
+
+ ControllerPacemakerUpgradeDeployment_Step4:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step3
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step5:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_5.sh
+
+ ControllerPacemakerUpgradeDeployment_Step5:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step4
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step6:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
- - str_replace:
template: |
#!/bin/bash
@@ -140,13 +210,12 @@ resources:
KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- get_file: pacemaker_common_functions.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
+ - get_file: major_upgrade_controller_pacemaker_6.sh
- ControllerPacemakerUpgradeDeployment_Step3:
+ ControllerPacemakerUpgradeDeployment_Step6:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
+ depends_on: ControllerPacemakerUpgradeDeployment_Step5
properties:
servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
input_values: {get_param: input_values}
-
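
The list_join/str_replace change means each generated upgrade script now begins with a prologue of shell assignments before the shared function files are concatenated in. Illustrative shape of the assembled script (the parameter values shown are assumptions):

    #!/bin/bash
    upgrade_level_nova_compute='mitaka'
    #!/bin/bash
    mariadb_do_major_upgrade='auto'
    # The repeated shebang is an artifact of concatenating the two
    # str_replace templates; only the first line is honored. The body of
    # pacemaker_common_functions.sh, major_upgrade_check.sh,
    # major_upgrade_pacemaker_migrations.sh and the per-step script follow.
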
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 4f17b69a..2c7dfc35 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -284,7 +284,7 @@ function systemctl_swift {
services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}')
;;
start)
- enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
+ enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml tripleo::profile::base::swift::storage::enable_swift_storage)
if [[ $enable_swift_storage != "true" ]]; then
services=( openstack-swift-proxy )
fi
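
The lookup switches from a loose top-level hiera key to the fully qualified puppet class parameter under which the value is actually published. Runnable on a deployed node (the returned value shown is an assumption):

    hiera -c /etc/puppet/hiera.yaml \
        tripleo::profile::base::swift::storage::enable_swift_storage
    # -> true
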
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index 3da7efec..8500bcef 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -4,11 +4,14 @@ set -eux
# Run if pacemaker is running, we're the bootstrap node,
# and we're updating the deployment (not creating).
-if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
+
+RESTART_FOLDER="/var/lib/tripleo/pacemaker-restarts"
+
+if [[ -d "$RESTART_FOLDER" && -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
TIMEOUT=600
- SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)"
PCS_STATUS_OUTPUT="$(pcs status)"
+ SERVICES_TO_RESTART="$(ls $RESTART_FOLDER)"
for service in $SERVICES_TO_RESTART; do
if ! echo "$PCS_STATUS_OUTPUT" | grep $service; then
@@ -20,6 +23,12 @@ if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
for service in $SERVICES_TO_RESTART; do
echo "Restarting $service..."
pcs resource restart --wait=$TIMEOUT $service
- rm -f /var/lib/tripleo/pacemaker-restarts/$service
+ rm -f "$RESTART_FOLDER"/$service
done
+
+fi
+
+haproxy_status=$(systemctl is-active haproxy)
+if [ "$haproxy_status" = "active" ]; then
+ systemctl reload haproxy
fi
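
The restart script keys off flag files: whatever wants a post-puppet restart is expected to drop a file named after its pacemaker resource into the restarts folder, and the script removes each file it acts on. The trailing hunk additionally reloads haproxy whenever the service is active. A sketch of requesting a restart (the resource name here is an illustrative assumption):

    mkdir -p /var/lib/tripleo/pacemaker-restarts
    touch /var/lib/tripleo/pacemaker-restarts/haproxy-clone
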
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 4ca0b140..4612f197 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -73,14 +73,14 @@ if [[ "$pacemaker_status" == "active" ]] ; then
pcs cluster stop
fi
else
- echo "Upgrading openstack-puppet-modules"
+ echo "Upgrading openstack-puppet-modules and its dependencies"
yum -q -y update openstack-puppet-modules
+ yum deplist openstack-puppet-modules | awk '/dependency/{print $2}' | xargs yum -q -y update
echo "Upgrading other packages is handled by config management tooling"
echo -n "true" > $heat_outputs_path.update_managed_packages
exit 0
fi
-
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
echo "Manual upgrade of openvswitch - restart in postun detected"
@@ -88,8 +88,8 @@ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "syst
pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
popd
else
echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml
index f8891b29..63d5bbf8 100644
--- a/firstboot/userdata_heat_admin.yaml
+++ b/firstboot/userdata_heat_admin.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: 2016-10-14
parameters:
# Can be overridden via parameter_defaults in the environment
@@ -6,6 +6,10 @@ parameters:
type: string
default: heat-admin
+ node_admin_extra_ssh_keys:
+ type: comma_delimited_list
+ default: []
+
description: >
Uses cloud-init to create an additional user with a known name, in addition
to the distro-default user created by the cloud-init default.
@@ -23,6 +27,8 @@ resources:
properties:
cloud_config:
user: {get_param: node_admin_username}
+ ssh_authorized_keys: {get_param: node_admin_extra_ssh_keys}
+
outputs:
OS::stack_id:
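
The new node_admin_extra_ssh_keys parameter feeds straight into cloud-init's ssh_authorized_keys, so additional public keys can be injected for the heat-admin user at first boot. A hedged example of wiring it up (the file name and key value are assumptions):

    cat > extra-admin-keys.yaml <<'EOF'
    parameter_defaults:
      node_admin_extra_ssh_keys:
        - ssh-rsa AAAAB3Nza... deployer@undercloud
    EOF
    openstack overcloud deploy --templates -e extra-admin-keys.yaml
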
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 011dea7d..1df3b665 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -10,6 +10,15 @@ Aodh:
net_param: AodhApi
port: 8042
+Barbican:
+ Internal:
+ net_param: BarbicanApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: BarbicanApi
+ port: 9311
+
Ceilometer:
Internal:
net_param: CeilometerApi
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index ac519a5f..43fb20cc 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -22,6 +22,9 @@ parameters:
AodhAdmin: {protocol: http, port: '8042', host: IP_ADDRESS}
AodhInternal: {protocol: http, port: '8042', host: IP_ADDRESS}
AodhPublic: {protocol: http, port: '8042', host: IP_ADDRESS}
+ BarbicanAdmin: {protocol: http, port: '9311', host: IP_ADDRESS}
+ BarbicanInternal: {protocol: http, port: '9311', host: IP_ADDRESS}
+ BarbicanPublic: {protocol: http, port: '9311', host: IP_ADDRESS}
CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS}
@@ -326,6 +329,249 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, AodhPublic, port]
+ BarbicanAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ port:
+ get_param: [EndpointMap, BarbicanAdmin, port]
+ protocol:
+ get_param: [EndpointMap, BarbicanAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanAdmin, port]
+ BarbicanInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ port:
+ get_param: [EndpointMap, BarbicanInternal, port]
+ protocol:
+ get_param: [EndpointMap, BarbicanInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanInternal, port]
+ BarbicanPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, BarbicanPublic, port]
+ protocol:
+ get_param: [EndpointMap, BarbicanPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanPublic, port]
CeilometerAdmin:
host:
str_replace:
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index ac05fc73..61c97f13 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -25,6 +25,7 @@ parameters:
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
AodhApiNetwork: internal_api
+ BarbicanApiNetwork: internal_api
GnocchiApiNetwork: internal_api
MongodbNetwork: internal_api
CinderApiNetwork: internal_api
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 9b9cd581..3e201175 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -57,6 +57,9 @@ resource_registry:
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
# configuration with knowledge of all nodes in the cluster is required vs single
@@ -181,14 +184,15 @@ resource_registry:
# Services that are disabled by default (use relevant environment files):
OS::TripleO::Services::FluentdClient: OS::Heat::None
OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
- OS::Tripleo::Services::ManilaApi: OS::Heat::None
- OS::Tripleo::Services::ManilaScheduler: OS::Heat::None
- OS::Tripleo::Services::ManilaShare: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None
+ OS::TripleO::Services::ManilaApi: OS::Heat::None
+ OS::TripleO::Services::ManilaScheduler: OS::Heat::None
+ OS::TripleO::Services::ManilaShare: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendGeneric: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None
OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::BarbicanApi: OS::Heat::None
OS::TripleO::Services::AodhApi: puppet/services/aodh-api.yaml
OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
@@ -213,3 +217,8 @@ resource_registry:
parameter_defaults:
EnablePackageInstall: false
SoftwareConfigTransport: POLL_TEMP_URL
+
+{% for role in roles %}
+ # Parameters generated for {{role.name}} Role
+ {{role.name}}Services: {{role.ServicesDefault|default([])}}
+{% endfor %}
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index d8d38c2a..64bed276 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -121,7 +121,6 @@ parameters:
resource_registry) which represent nested stacks
for each service that should get installed on the {{role.name}} role.
type: comma_delimited_list
- default: {{role.ServicesDefault|default([])}}
{{role.name}}Count:
description: Number of {{role.name}} nodes to deploy
@@ -232,8 +231,19 @@ resources:
config: {get_attr: [allNodesConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
input_values:
- bootstrap_nodeid: {get_attr: [{{role.name}}, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [{{role.name}}, resource.0.ip_address]}
+          # Note we have to use yaql to look up the first hostname/ip in the
+          # list because heat path-based attributes operate on the attribute
+          # inside the ResourceGroup, not the exposed list; see the discussion
+          # in https://bugs.launchpad.net/heat/+bug/1640488
+          # The coalesce is needed because $.data is None during heat validation.
+ bootstrap_nodeid:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{role.name}}, hostname]}
+ bootstrap_nodeid_ip:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{role.name}}, ip_address]}
{{role.name}}AllNodesValidationDeployment:
type: OS::Heat::StructuredDeployments
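
A self-contained sketch (hypothetical hostnames) of the yaql guard used above:
coalesce() substitutes an empty list while $.data is still None during
validation, and first(null) returns null instead of raising on an empty list:

    heat_template_version: 2016-10-14

    resources:
      bootstrap_node:
        type: OS::Heat::Value
        properties:
          value:
            yaql:
              expression: coalesce($.data.hostnames, []).first(null)
              data:
                hostnames: [overcloud-controller-0, overcloud-controller-1]

    outputs:
      first_hostname:
        # resolves to overcloud-controller-0
        value: {get_attr: [bootstrap_node, value]}
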
@@ -556,57 +566,6 @@ outputs:
KeystoneAdminVip:
description: Keystone Admin VIP endpoint
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
- PublicVip:
- description: Controller VIP for public API endpoints
- value: {get_attr: [VipMap, net_ip_map, external]}
- AodhInternalVip:
- description: VIP for Aodh API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
- CeilometerInternalVip:
- description: VIP for Ceilometer API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
- CephRgwInternalVip:
- description: VIP for Ceph RGW internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]}
- CinderInternalVip:
- description: VIP for Cinder API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
- GlanceInternalVip:
- description: VIP for Glance API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
- GnocchiInternalVip:
- description: VIP for Gnocchi API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
- MistralInternalVip:
- description: VIP for Mistral API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MistralApiNetwork]}]}
- HeatInternalVip:
- description: VIP for Heat API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
- IronicInternalVip:
- description: VIP for Ironic API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
- KeystoneInternalVip:
- description: VIP for Keystone API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
- ManilaInternalVip:
- description: VIP for Manila API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
- NeutronInternalVip:
- description: VIP for Neutron API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
- NovaInternalVip:
- description: VIP for Nova API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
- OpenDaylightInternalVip:
- description: VIP for OpenDaylight API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
- SaharaInternalVip:
- description: VIP for Sahara API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
- SwiftInternalVip:
- description: VIP for Swift Proxy internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
EndpointMap:
description: |
Mapping of the resources with the needed info for their endpoints.
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 55b26336..f7e29b70 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -253,6 +253,7 @@ resources:
- extraconfig
- service_names
- service_configs
+ - ceph
- bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 22fde9a7..8bcbbf4c 100644
--- a/puppet/deploy-artifacts.sh
+++ b/puppet/deploy-artifacts.sh
@@ -8,7 +8,7 @@ trap cleanup EXIT
if [ -n "$artifact_urls" ]; then
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
-    curl -o $TMP_DATA/file_data "$URL"
+    curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
yum install -y $TMP_DATA/file_data
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
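
Without --globoff, curl expands [ and ] as URL globbing ranges, so artifact
URLs containing literal brackets (an IPv6 host, for instance) fail to
download. A sketch of such a URL fed in through the DeployArtifactURLs
parameter (address and path are placeholders):

    parameter_defaults:
      DeployArtifactURLs:
        - "http://[2001:db8::1]:8080/artifacts/puppet-modules.tar.gz"
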
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 65c96ac2..8218f41a 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -47,6 +47,15 @@ resources:
properties:
StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+ {% if role.name == 'Controller' %}
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
# Step through a series of configuration steps
{{role.name}}Deployment_Step1:
type: OS::Heat::StructuredDeploymentGroup
@@ -136,4 +145,16 @@ resources:
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: [servers, {{role.name}}]}
+
+ {% if role.name == 'Controller' %}
+ ControllerPostPuppet:
+ depends_on:
+ - ControllerExtraConfigPost
+ type: OS::TripleO::Tasks::ControllerPostPuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
{% endfor %}
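
Both task hooks default to OS::Heat::None in the resource registry above; an
environment can map them to deployment templates when pre/post actions are
needed, along these lines (the paths are illustrative, not part of this
change):

    resource_registry:
      OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
      OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
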
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index e4307001..5b419f80 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -259,6 +259,7 @@ resources:
- extraconfig
- service_names
- service_configs
+ - {{role.lower()}}
- bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index da043c80..48cc4af6 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -26,6 +26,9 @@ parameters:
description: Combination alarms are deprecated in Newton, hence disabled
by default. To enable, set this parameter to true.
type: boolean
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
AodhBase:
@@ -41,6 +44,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -52,7 +56,7 @@ outputs:
map_merge:
- get_attr: [AodhBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - aodh::wsgi::apache::ssl: false
+ - aodh::wsgi::apache::ssl: {get_param: EnableInternalTLS}
aodh::wsgi::apache::servername:
str_replace:
template:
@@ -66,13 +70,18 @@ outputs:
dport:
- 8042
- 13042
+ aodh::api::host:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
           # internal_api_subnet -> IP/CIDR
- aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]}
tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms}
service_config_settings:
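
The new EnableInternalTLS switch only flips aodh::wsgi::apache::ssl and is
passed through to the apache base template; certificates themselves have to be
provisioned separately. A minimal sketch:

    parameter_defaults:
      EnableInternalTLS: true
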
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
new file mode 100644
index 00000000..cf57680c
--- /dev/null
+++ b/puppet/services/barbican-api.yaml
@@ -0,0 +1,127 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Barbican API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ BarbicanPassword:
+ description: The password for the barbican service account.
+ type: string
+ hidden: true
+ BarbicanWorkers:
+ description: Set the number of workers for barbican::wsgi::apache
+ default: '"%{::processorcount}"'
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ RabbitClientPort:
+ default: 5672
+    description: Set rabbit subscriber port; change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+
+resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Barbican API role.
+ value:
+ service_name: barbican_api
+ config_settings:
+ map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
+ barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ barbican::keystone::authtoken::project_name: 'service'
+ barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
+ barbican::api::db_auto_create: false
+ barbican::api::enabled_certificate_plugins: ['simple_certificate']
+ barbican::api::logging::debug: {get_param: Debug}
+ barbican::api::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ barbican::api::rabbit_userid: {get_param: RabbitUserName}
+ barbican::api::rabbit_password: {get_param: RabbitPassword}
+ barbican::api::rabbit_port: {get_param: RabbitClientPort}
+ barbican::api::rabbit_heartbeat_timeout_threshold: 60
+ barbican::api::service_name: 'httpd'
+ barbican::wsgi::apache::bind_host: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
+ barbican::wsgi::apache::ssl: false
+ barbican::wsgi::apache::workers: {get_param: BarbicanWorkers}
+ barbican::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
+ barbican::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://barbican:'
+ - {get_param: BarbicanPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/barbican'
+ tripleo.barbican_api.firewall_rules:
+ '117 barbican':
+ dport:
+ - 9311
+ - 13311
+ step_config: |
+ include ::tripleo::profile::base::barbican::api
+ service_config_settings:
+ mysql:
+ barbican::db::mysql::password: {get_param: BarbicanPassword}
+ barbican::db::mysql::user: barbican
+ barbican::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ barbican::db::mysql::dbname: barbican
+ barbican::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ keystone:
+ barbican::keystone::auth::public_url: {get_param: [EndpointMap, BarbicanPublic, uri]}
+ barbican::keystone::auth::internal_url: {get_param: [EndpointMap, BarbicanInternal, uri]}
+ barbican::keystone::auth::admin_url: {get_param: [EndpointMap, BarbicanAdmin, uri]}
+ barbican::keystone::auth::password: {get_param: BarbicanPassword}
+ barbican::keystone::auth::region: {get_param: KeystoneRegion}
+ barbican::keystone::auth::tenant: 'service'
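
Because the resource registry maps OS::TripleO::Services::BarbicanApi to
OS::Heat::None by default, deploying the new template needs an enabling
environment roughly like this (sketch; the relative path assumes an
environment file one level above puppet/):

    resource_registry:
      OS::TripleO::Services::BarbicanApi: ../puppet/services/barbican-api.yaml
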
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index 27c32bfd..97b255a9 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -26,7 +26,9 @@ parameters:
default:
tag: openstack.ceilometer.api
path: /var/log/ceilometer/api.log
-
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
CeilometerServiceBase:
@@ -42,6 +44,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -69,9 +72,14 @@ outputs:
           # internal_api_subnet -> IP/CIDR
- ceilometer::api::service_name: 'httpd'
ceilometer::api::enable_proxy_headers_parsing: true
- ceilometer::api::host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+ ceilometer::api::host:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
- ceilometer::wsgi::apache::ssl: false
+ ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS}
ceilometer::wsgi::apache::servername:
str_replace:
template:
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
index 7d75074c..9120687b 100644
--- a/puppet/services/ceph-external.yaml
+++ b/puppet/services/ceph-external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
Ceph External service.
@@ -27,9 +27,20 @@ parameters:
GlanceRbdPoolName:
default: images
type: string
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
GnocchiRbdPoolName:
default: metrics
type: string
+ NovaEnableRbdBackend:
+ default: false
+    description: Whether or not to enable the Rbd backend for Nova
+ type: boolean
NovaRbdPoolName:
default: vms
type: string
@@ -51,6 +62,16 @@ parameters:
default: 'overcloud-ceph-external'
type: string
+conditions:
+ glance_multiple_locations:
+ and:
+ - equals:
+ - get_param: GlanceBackend
+ - rbd
+ - equals:
+ - get_param: NovaEnableRbdBackend
+ - true
+
outputs:
role_data:
description: Role data for the Ceph External service.
@@ -79,6 +100,7 @@ outputs:
GLANCE_POOL: {get_param: GlanceRbdPoolName}
GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
service_config_settings:
- get_attr: [CephBase, role_data, service_config_settings]
+ glance_api:
+ glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
step_config: |
include ::tripleo::profile::base::ceph::client
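
The heat_template_version bump to 2016-10-14 is what makes the conditions
section and the if function available. A self-contained sketch of the same
and/equals/if pattern:

    heat_template_version: 2016-10-14

    parameters:
      glance_backend:
        type: string
        default: swift
      nova_rbd:
        type: boolean
        default: false

    conditions:
      glance_multiple_locations:
        and:
        - equals: [{get_param: glance_backend}, rbd]
        - equals: [{get_param: nova_rbd}, true]

    outputs:
      show_multiple_locations:
        value: {if: [glance_multiple_locations, true, false]}
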
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index 3c624e3a..fe48667a 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -100,6 +100,7 @@ outputs:
# internal_api_uri -> [IP]
           # internal_api_subnet -> IP/CIDR
cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
+ cinder::api::service_name: 'httpd'
cinder::wsgi::apache::ssl: false
cinder::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
cinder::wsgi::apache::servername:
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
index 3294fc0f..cc979af9 100644
--- a/puppet/services/glance-base.yaml
+++ b/puppet/services/glance-base.yaml
@@ -44,6 +44,21 @@ parameters:
type: string
constraints:
- allowed_values: ['swift', 'file', 'rbd']
+ GlanceNfsEnabled:
+ default: false
+ description: >
+      When using GlanceBackend 'file', mount an NFS share for image storage.
+ type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
GlanceRbdPoolName:
default: images
type: string
@@ -92,6 +107,9 @@ outputs:
glance::notify::rabbitmq::notification_driver: messagingv2
glance::registry::db::database_db_max_retries: -1
glance::registry::db::database_max_retries: -1
+ tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+ tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+ tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
service_config_settings:
keystone:
glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
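
Wiring the three new parameters together, a sketch of an NFS-backed image
store (server address and export path are placeholders):

    parameter_defaults:
      GlanceBackend: file
      GlanceNfsEnabled: true
      GlanceNfsShare: '192.0.2.10:/export/glance'
      # GlanceNfsOptions is left at its default, which sets the SELinux
      # context glance expects
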
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index e3397769..ac15de4f 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -41,6 +41,9 @@ parameters:
default:
tag: openstack.gnocchi.api
path: /var/log/gnocchi/app.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
@@ -57,6 +60,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -83,7 +87,7 @@ outputs:
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
- gnocchi::wsgi::apache::ssl: false
+ gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
gnocchi::wsgi::apache::servername:
str_replace:
template:
@@ -98,7 +102,12 @@ outputs:
# internal_api_uri -> [IP]
           # internal_api_subnet -> IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
- gnocchi::api::host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
+ gnocchi::api::host:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml
index 04339f46..983d6c91 100644
--- a/puppet/services/gnocchi-statsd.yaml
+++ b/puppet/services/gnocchi-statsd.yaml
@@ -39,5 +39,9 @@ outputs:
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
+ - tripleo.gnocchi_statsd.firewall_rules:
+ '140 gnocchi-statsd':
+ dport: 8125
+ proto: 'udp'
step_config: |
include ::tripleo::profile::base::gnocchi::statsd
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 1a86ec71..12d4a6a1 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -76,9 +76,11 @@ outputs:
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
keystone:
- heat::keystone::auth_cfn::tenant: 'service'
- heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
- heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
- heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
- heat::keystone::auth_cfn::password: {get_param: HeatPassword}
- heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
+ map_merge:
+ - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+ - heat::keystone::auth_cfn::tenant: 'service'
+ heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+ heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+ heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+ heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+ heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 2ea96fc0..b0cd16dd 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -76,9 +76,11 @@ outputs:
include ::tripleo::profile::base::heat::api
service_config_settings:
keystone:
- heat::keystone::auth::tenant: 'service'
- heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
- heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
- heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
- heat::keystone::auth::password: {get_param: HeatPassword}
- heat::keystone::auth::region: {get_param: KeystoneRegion}
+ map_merge:
+ - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+ - heat::keystone::auth::tenant: 'service'
+ heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+ heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+ heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+ heat::keystone::auth::password: {get_param: HeatPassword}
+ heat::keystone::auth::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 7eb58f56..a2a65d7d 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -77,3 +77,8 @@ outputs:
heat::cron::purge_deleted::destination: '/dev/null'
heat::db::database_db_max_retries: -1
heat::db::database_max_retries: -1
+ service_config_settings:
+ keystone:
+ tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack'
+ tripleo::profile::base::keystone::heat_admin_user: 'heat_stack_domain_admin'
+ tripleo::profile::base::keystone::heat_admin_email: 'heat_stack_domain_admin@localhost'
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 1fc88bf1..69898718 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -18,6 +18,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ KernelPidMax:
+ default: 1048576
+    description: Configures the kernel.pid_max sysctl key
+ type: number
outputs:
role_data:
@@ -49,5 +53,7 @@ outputs:
value: 0
net.core.netdev_max_backlog:
value: 10000
+ kernel.pid_max:
+ value: {get_param: KernelPidMax}
step_config: |
include ::tripleo::profile::base::kernel
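
kernel.pid_max was previously not managed by the templates; it is now tunable
per deployment, e.g.:

    parameter_defaults:
      KernelPidMax: 2097152
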
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 4d3fd47c..b4b3d480 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -51,6 +51,11 @@ outputs:
manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
+ tripleo.manila_api.firewall_rules:
+ '150 manila':
+ dport:
+ - 8786
+ - 13786
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
@@ -62,21 +67,15 @@ outputs:
step_config: |
include ::tripleo::profile::base::manila::api
service_config_settings:
- keystone:
- manila::keystone::auth::tenant: 'service'
- manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
- manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
- manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
- manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
- manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
- manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
- manila::keystone::auth::password: {get_param: ManilaPassword}
- manila::keystone::auth::region: {get_param: KeystoneRegion}
- mysql:
- manila::db::mysql::password: {get_param: ManilaPassword}
- manila::db::mysql::user: manila
- manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- manila::db::mysql::dbname: manila
- manila::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
+ map_merge:
+ - get_attr: [ManilaBase, role_data, service_config_settings]
+ - keystone:
+ manila::keystone::auth::tenant: 'service'
+ manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
+ manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
+ manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
+ manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
+ manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
+ manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
+ manila::keystone::auth::password: {get_param: ManilaPassword}
+ manila::keystone::auth::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml
index d228577a..844bd3a3 100644
--- a/puppet/services/manila-base.yaml
+++ b/puppet/services/manila-base.yaml
@@ -40,6 +40,10 @@ parameters:
default: 5672
    description: Set rabbit subscriber port; change this if using SSL
type: number
+ ManilaPassword:
+ description: The password for the manila service account.
+ type: string
+ hidden: true
outputs:
role_data:
@@ -54,3 +58,21 @@ outputs:
manila::debug: {get_param: Debug}
manila::db::database_db_max_retries: -1
manila::db::database_max_retries: -1
+ manila::sql_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://manila:'
+ - {get_param: ManilaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/manila'
+ service_config_settings:
+ mysql:
+ manila::db::mysql::password: {get_param: ManilaPassword}
+ manila::db::mysql::user: manila
+ manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ manila::db::mysql::dbname: manila
+ manila::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/manila-scheduler.yaml b/puppet/services/manila-scheduler.yaml
index 474cc24f..d96b677b 100644
--- a/puppet/services/manila-scheduler.yaml
+++ b/puppet/services/manila-scheduler.yaml
@@ -57,14 +57,5 @@ outputs:
manila::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, NeutronAdmin, uri]}
manila::network::neutron::neutron_admin_password: {get_param: NeutronPassword}
- manila::sql_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://manila:'
- - {get_param: ManilaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/manila'
step_config: |
include ::tripleo::profile::base::manila::scheduler
diff --git a/puppet/services/manila-share.yaml b/puppet/services/manila-share.yaml
index e42d2fae..49c69fc1 100644
--- a/puppet/services/manila-share.yaml
+++ b/puppet/services/manila-share.yaml
@@ -21,6 +21,10 @@ parameters:
MonitoringSubscriptionManilaShare:
default: 'overcloud-manila-share'
type: string
+ ManilaPassword:
+ description: The password for the manila service account.
+ type: string
+ hidden: true
resources:
ManilaBase:
@@ -40,5 +44,11 @@ outputs:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::volume::cinder::cinder_admin_tenant_name: 'service'
+ manila::keystone::authtoken::password: {get_param: ManilaPassword}
+ manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ manila::keystone::authtoken::project_name: 'service'
+ service_config_settings:
+ get_attr: [ManilaBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::manila::share
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index ba7fb2e1..49bd84bc 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -51,6 +51,9 @@ parameters:
default:
tag: openstack.nova.api
path: /var/log/nova/nova-api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
@@ -62,6 +65,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
type: ./nova-base.yaml
@@ -88,8 +92,6 @@ outputs:
tripleo.nova_api.firewall_rules:
'113 nova_api':
dport:
- - 6080
- - 13080
- 8773
- 3773
- 8774
@@ -103,22 +105,26 @@ outputs:
nova::api::default_floating_pool: 'public'
nova::api::sync_db_api: true
nova::api::enable_proxy_headers_parsing: true
+ nova::api::api_bind_address:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::api::service_name: 'httpd'
+ nova::wsgi::apache::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
           # internal_api_subnet -> IP/CIDR
- nova::api::api_bind_address: {get_param: [ServiceNetMap, NovaApiNetwork]}
- nova::api::service_name: 'httpd'
- nova::wsgi::apache::ssl: false
nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::wsgi::apache::servername:
str_replace:
template:
'"%{::fqdn_$NETWORK}"'
params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index 31732580..70774bac 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -54,6 +54,9 @@ outputs:
'200 nova_libvirt':
dport:
- 16509
+ - 16514
+ - '49152-49215'
+ - '5900-5999'
step_config: |
include tripleo::profile::base::nova::libvirt
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index d89e3e11..d4e5fff6 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -58,7 +58,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- - nova::scheduler::filter::ram_allocation_ratio: '1.0'
+ - nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
step_config: |
diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml
index 85d59ae6..e6b0703f 100644
--- a/puppet/services/nova-vnc-proxy.yaml
+++ b/puppet/services/nova-vnc-proxy.yaml
@@ -57,5 +57,10 @@ outputs:
# internal_api_uri -> [IP]
           # internal_api_subnet -> IP/CIDR
nova::vncproxy::host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ tripleo.nova_vnc_proxy.firewall_rules:
+ '137 nova_vnc_proxy':
+ dport:
+ - 6080
+ - 13080
step_config: |
include tripleo::profile::base::nova::vncproxy
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 318c898e..253d63ef 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -59,6 +59,6 @@ outputs:
opendaylight::enable_l3: {get_param: OpenDaylightEnableL3}
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
- opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}
+ opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
step_config: |
include tripleo::profile::base::neutron::opendaylight
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 5387529d..44a09a42 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -71,6 +71,7 @@ outputs:
rabbitmq::port: '5672'
rabbitmq::package_source: undef
rabbitmq::repos_ensure: false
+ rabbitmq::tcp_keepalive: true
rabbitmq_environment:
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
@@ -78,7 +79,6 @@ outputs:
inet_dist_listen_min: '25672'
inet_dist_listen_max: '25672'
rabbitmq_config_variables:
- tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
cluster_partition_handling: 'pause_minority'
queue_master_locator: '<<"min-masters">>'
loopback_users: '[]'
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index 5fc8ed61..4072a150 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -44,6 +44,10 @@ parameters:
type: string
default: ''
description: Set to True to enable debugging on all services.
+ SaharaPlugins:
+ default: ["ambari","cdh","mapr","vanilla","spark","storm"]
+    description: List of enabled Sahara plugins
+ type: comma_delimited_list
outputs:
role_data:
@@ -69,13 +73,7 @@ outputs:
sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
sahara::use_neutron: true
- sahara::plugins:
- - ambari
- - cdh
- - mapr
- - vanilla
- - spark
- - storm
+ sahara::plugins: {get_param: SaharaPlugins}
sahara::rpc_backend: rabbit
sahara::admin_tenant_name: 'service'
sahara::db::database_db_max_retries: -1
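
The plugin list is now operator-tunable instead of hardcoded; a sketch
trimming it down:

    parameter_defaults:
      SaharaPlugins:
        - vanilla
        - spark
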
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 176fd235..ffe2d2d4 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -54,8 +54,8 @@ outputs:
data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
monitoring_subscriptions:
yaql:
- expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
logging_sources:
# Transform the individual logging_source configuration from
# each service in the chain into a global list, adding some
@@ -78,8 +78,9 @@ outputs:
sources:
- {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
- yaql:
- expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('logging_source')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
+
- {get_attr: [LoggingConfiguration, LoggingExtraSources]}
default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
@@ -93,17 +94,17 @@ outputs:
groups:
- [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
- yaql:
- expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
- [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
global_config_settings:
map_merge:
yaql:
- expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('global_config_settings')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
service_config_settings:
yaql:
- expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
- data: {get_attr: [ServiceChain, role_data]}
+ expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
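
These hunks all apply one pattern: the data argument is wrapped in a map
(heat's yaql function expects a map there) and the expression addresses it as
$.data.role_data. A standalone sketch with hypothetical input:

    heat_template_version: 2016-10-14

    resources:
      step_configs:
        type: OS::Heat::Value
        properties:
          value:
            yaql:
              expression: list($.data.role_data.where($ != null).select($.get('step_config')).where($ != null))
              data:
                role_data:
                  - {step_config: 'include ::tripleo::profile::base::kernel'}
                  - null
                  - {step_config: 'include ::tripleo::profile::base::ntp'}

    outputs:
      configs:
        # resolves to the two non-null step_config strings
        value: {get_attr: [step_configs, value]}
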
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index de8daea5..ba1d99f1 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -74,9 +74,9 @@ outputs:
swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
- swift::proxy::ceilometer::rabbit_host: {get_param: [ServiceNetMap, RabbitmqNetwork]}
swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+ swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
tripleo.swift_proxy.firewall_rules:
'122 swift proxy':
dport:
@@ -88,7 +88,6 @@ outputs:
- ResellerAdmin
swift::proxy::versioned_writes::allow_versioned_writes: true
swift::proxy::pipeline:
- - 'ceilometer'
- 'catch_errors'
- 'healthcheck'
- 'proxy-logging'
@@ -101,6 +100,7 @@ outputs:
- 'keystone'
- 'staticweb'
- 'versioned_writes'
+ - 'ceilometer'
- 'proxy-logging'
- 'proxy-server'
swift::proxy::account_autocreate: true
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index 8ed4e9f4..5c70b6ab 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -38,7 +38,10 @@ parameters:
default: {}
description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})'
type: json
-
+ SwiftUseLocalDir:
+ default: true
+ description: 'Use a local directory for Swift storage services when building rings'
+ type: boolean
outputs:
role_data:
@@ -56,7 +59,7 @@ outputs:
expression: $.data.raw_disk_lists.flatten()
data:
raw_disk_lists:
- - [':%PORT%/d1']
+ - {if: [{get_param: SwiftUseLocalDir}, [':%PORT%/d1'], []]}
- repeat:
template: ':%PORT%/DEVICE'
for_each:
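
SwiftUseLocalDir makes the implicit :%PORT%/d1 local-directory device
optional, which matters once real devices are supplied; a sketch of a
disks-only ring configuration:

    parameter_defaults:
      SwiftUseLocalDir: false
      SwiftRawDisks:
        sdb: {}
        sdc: {}
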
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index 7fbb8d90..cffe78f5 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -86,7 +86,7 @@ outputs:
swift::storage::all::account_pipeline:
- healthcheck
- account-server
- swift::storage::disks: {get_param: SwiftRawDisks}
+ swift::storage::disks::args: {get_param: SwiftRawDisks}
swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftStorageNetwork]}
step_config: |
include ::tripleo::profile::base::swift::storage
diff --git a/roles_data.yaml b/roles_data.yaml
index 86d0e4f5..dad62f85 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -73,12 +73,12 @@
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
- - OS::Tripleo::Services::ManilaApi
- - OS::Tripleo::Services::ManilaScheduler
- - OS::Tripleo::Services::ManilaBackendGeneric
- - OS::Tripleo::Services::ManilaBackendNetapp
- - OS::Tripleo::Services::ManilaBackendCephFs
- - OS::Tripleo::Services::ManilaShare
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendNetapp
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier
@@ -95,6 +95,7 @@
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::BarbicanApi
- name: Compute
CountDefault: 1
@@ -157,6 +158,7 @@
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall