-rw-r--r--  environments/low-memory-usage.yaml                            3
-rw-r--r--  environments/manila-cephfsnative-config.yaml                  8
-rw-r--r--  environments/manila-generic-config.yaml                       8
-rw-r--r--  environments/manila-netapp-config.yaml                        8
-rw-r--r--  environments/tls-everywhere-endpoints-dns.yaml                3
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh                     19
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh   184
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh   221
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh    69
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh    17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh    13
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh    15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                42
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh       20
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh                3
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml                    12
-rw-r--r--  overcloud.j2.yaml                                             71
-rw-r--r--  puppet/services/barbican-api.yaml                              5
-rw-r--r--  puppet/services/ceph-external.yaml                             8
-rw-r--r--  puppet/services/ceph-rgw.yaml                                 12
-rw-r--r--  puppet/services/cinder-api.yaml                               17
-rw-r--r--  puppet/services/heat-api-cfn.yaml                             14
-rw-r--r--  puppet/services/heat-api.yaml                                 14
-rw-r--r--  puppet/services/heat-base.yaml                                 5
-rw-r--r--  puppet/services/heat-engine.yaml                               2
-rw-r--r--  puppet/services/horizon.yaml                                  68
-rw-r--r--  puppet/services/keystone.yaml                                 15
-rw-r--r--  puppet/services/monitoring/sensu-base.yaml                    17
-rw-r--r--  puppet/services/neutron-base.yaml                              7
-rw-r--r--  puppet/services/neutron-l3.yaml                               22
-rw-r--r--  puppet/services/nova-base.yaml                                94
-rw-r--r--  puppet/services/opendaylight-api.yaml                          2
-rw-r--r--  puppet/services/opendaylight-ovs.yaml                          6
-rw-r--r--  puppet/services/services.yaml                                 21
-rw-r--r--  roles_data.yaml                                               12
35 files changed, 578 insertions(+), 479 deletions(-)
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
index ad428686..47b2003d 100644
--- a/environments/low-memory-usage.yaml
+++ b/environments/low-memory-usage.yaml
@@ -13,3 +13,6 @@ parameter_defaults:
ApacheMaxRequestWorkers: 32
ApacheServerLimit: 32
+
+ ControllerExtraConfig:
+ 'nova::network::neutron::neutron_url_timeout': '60'
diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml
index 825a5066..5632d8d6 100644
--- a/environments/manila-cephfsnative-config.yaml
+++ b/environments/manila-cephfsnative-config.yaml
@@ -1,11 +1,11 @@
# A Heat environment file which can be used to enable a
# Manila CephFS Native driver backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
parameter_defaults:
diff --git a/environments/manila-generic-config.yaml b/environments/manila-generic-config.yaml
index 9344bc6e..65884a94 100644
--- a/environments/manila-generic-config.yaml
+++ b/environments/manila-generic-config.yaml
@@ -1,10 +1,10 @@
# This environment file enables Manila with the Generic backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
parameter_defaults:
ManilaServiceInstanceUser: ''
diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml
index 3dadfe5d..7eb14941 100644
--- a/environments/manila-netapp-config.yaml
+++ b/environments/manila-netapp-config.yaml
@@ -1,10 +1,10 @@
# This environment file enables Manila with the Netapp backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
parameter_defaults:
ManilaNetappBackendName: tripleo_netapp
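
The three Manila environment files above and the resource registry further down all correct the same namespace typo, OS::Tripleo:: to OS::TripleO::. Because resource_registry keys are matched literally, the mis-cased names only worked as long as every file used the same misspelling; this change normalizes them to the canonical OS::TripleO namespace. A quick way to confirm no stragglers remain (a sketch, assuming it is run from a tripleo-heat-templates checkout):

    # List any remaining mis-cased TripleO resource types; prints nothing
    # extra when the tree is clean.
    grep -rn --include='*.yaml' 'OS::Tripleo::' . || echo "no mis-cased entries"
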
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index c3fbaf49..cc1915fe 100644
--- a/environments/tls-everywhere-endpoints-dns.yaml
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
index b65f6915..8bdff5e7 100755
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -18,14 +18,8 @@ check_pcsd()
fi
}
-check_disk_for_mysql_dump()
+mysql_need_update()
{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
# Shall we upgrade mysql data directory during the stack upgrade?
if [ "$mariadb_do_major_upgrade" = "auto" ]; then
ret=$(is_mysql_upgrade_needed)
@@ -40,6 +34,17 @@ check_disk_for_mysql_dump()
else
DO_MYSQL_UPGRADE=1
fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to backup the current database if mysql needs to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ mysql_need_update
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
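
The check script is refactored so the decision logic (mysql_need_update) is separated from the disk-space check: step 2 of the controller upgrade now needs to know whether a dump/restore is coming without re-running the disk check. A condensed sketch of the resulting flow (the real logic, including the is_mysql_upgrade_needed helper, lives in this file and pacemaker_common_functions.sh):

    # Sketch only: decide whether the mysql data directory must be dumped
    # and restored, setting DO_MYSQL_UPGRADE for later use.
    mysql_need_update() {
        if [ "$mariadb_do_major_upgrade" = "auto" ]; then
            ret=$(is_mysql_upgrade_needed)   # helper compares package versions
            if [ "$ret" = "1" ]; then
                DO_MYSQL_UPGRADE=1
            else
                DO_MYSQL_UPGRADE=0
            fi
        elif [ "$mariadb_do_major_upgrade" = "no" ]; then
            DO_MYSQL_UPGRADE=0
        else
            DO_MYSQL_UPGRADE=1
        fi
    }

    check_disk_for_mysql_dump() {
        mysql_need_update                    # reuse the decision
        if [ "$DO_MYSQL_UPGRADE" -eq 1 ]; then
            :   # verify there is enough free disk for the dump here
        fi
    }
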
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index fbdbc30b..080831ab 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -2,8 +2,6 @@
set -eu
-cluster_sync_timeout=1800
-
check_cluster
check_pcsd
if [[ -n $(is_bootstrap_node) ]]; then
@@ -19,6 +17,11 @@ check_disk_for_mysql_dump
# at the end of this script
if [[ -n $(is_bootstrap_node) ]]; then
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
+ # We create this empty file if stonith was set to true so we can re-enable stonith in step2
+ rm -f /var/tmp/stonith-true
+ if [ $STONITH_STATE == "true" ]; then
+ touch /var/tmp/stonith-true
+ fi
pcs property set stonith-enabled=false
fi
@@ -28,181 +31,6 @@ fi
# services will be restarted as there are no other constraints
if [[ -n $(is_bootstrap_node) ]]; then
migrate_full_to_ng_ha
- rabbitmq_mitaka_newton_upgrade
-fi
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+ rabbitmq_newton_ocata_upgrade
fi
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $STONITH_STATE == "true" ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
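
Most of step 1 moves into step 2 (below); step 1 now only checks the cluster, disables stonith, and migrates the cluster to the HA-NG layout. Because the stonith state can no longer be carried across steps in a shell variable, step 1 records it in a marker file that step 2 consumes. Condensed from the two scripts:

    # Step 1: remember whether stonith was enabled, then disable it.
    rm -f /var/tmp/stonith-true
    if [ "$STONITH_STATE" = "true" ]; then
        touch /var/tmp/stonith-true
    fi
    pcs property set stonith-enabled=false

    # Step 2, later and in a separate shell: restore and clean up.
    if [ -f /var/tmp/stonith-true ]; then
        pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
    fi
    rm -f /var/tmp/stonith-true
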
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index 37061512..7cc6735f 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -2,67 +2,186 @@
set -eu
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
+cluster_sync_timeout=1800
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
+# After migrating the cluster to HA-NG the services not under pacemaker's control
+# are still up and running. We need to stop them explicitly otherwise during the yum
+# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
+# is going to take a long time because rabbit is down. By having the service stopped
+# systemctl try-restart is a noop
+for service in $(services_to_migrate); do
+ manage_systemd_service stop "${service%%-clone}"
+ # We do not reuse check_resource_systemd because systemctl is-active
+ # has been observed returning "unknown" for at least one service that
+ # was already stopped (see LP 1627254)
+ timeout=600
tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
+ tend=$(( $tstart + $timeout ))
+ check_interval=3
+ while (( $(date +%s) < $tend )); do
+ if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
+ echo "$service still active, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ # we do not care if it is inactive, unknown or failed as long as it is
+ # not running
+ break
+ fi
+
done
+done
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
+# In case the mysql package is updated, the database on disk must be
+# upgraded as well. This typically needs to happen during major
+# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
+#
+# Because in-place upgrades are not supported across 2+ major versions
+# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
+# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
+#
+# The default is to determine automatically if upgrade is needed based
+# on mysql package versioning, but this can be overridden manually
+# to support specific upgrade scenarios
+
+# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
+# later
+mysql_need_update
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
+ cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
+ pcs resource disable redis
+ check_resource redis stopped 600
+ pcs resource disable rabbitmq
+ check_resource rabbitmq stopped 600
+ pcs resource disable galera
+ check_resource galera stopped 600
+ pcs resource disable openstack-cinder-volume
+ check_resource openstack-cinder-volume stopped 600
+ # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
+ for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
+ pcs resource disable $vip
+ check_resource $vip stopped 60
done
+ pcs cluster stop --all
fi
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
+# Swift isn't controlled by pacemaker
+systemctl_swift stop
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
- cinder-manage db sync
- glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
+tstart=$(date +%s)
+while systemctl is-active pacemaker; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_sync_timeout )) ; then
+ echo_error "ERROR: cluster shutdown timed out"
+ exit 1
+ fi
+done
+
+# The reason we do an sql dump *and* we move the old dir out of
+# the way is because it gives us an extra level of safety in case
+# something goes wrong during the upgrade. Once the restore is
+# successful we go ahead and remove it. If the directory exists
+# we bail out as it means the upgrade process had issues in the last
+# run.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
+ echo_error "ERROR: mysql backup dir already exist"
+ exit 1
+ fi
+ mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun option"
+ rpm -U --replacepkgs --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
fi
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y -q update
+
+# We need to ensure at least those two configuration settings, otherwise
+# mariadb 10.1+ won't activate galera replication.
+# wsrep_cluster_address only needs to be set; its value does not
+# matter because it's overridden by the galera resource agent.
+cat >> /etc/my.cnf.d/galera.cnf <<EOF
+[mysqld]
+wsrep_on = ON
+wsrep_cluster_address = gcomm://localhost
+EOF
+
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ # Scripts run via heat have no HOME variable set and this confuses
+ # mysqladmin
+ export HOME=/root
+
+ mkdir /var/lib/mysql || /bin/true
+ chown mysql:mysql /var/lib/mysql
+ chmod 0755 /var/lib/mysql
+ restorecon -R /var/lib/mysql/
+ mysql_install_db --datadir=/var/lib/mysql --user=mysql
+ chown -R mysql:mysql /var/lib/mysql/
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ mysqld_safe --wsrep-new-cluster &
+ # We have a populated /root/.my.cnf with root/password here so
+ # we need to temporarily rename it because the newly created
+ # db is empty and no root password is set
+ mv /root/.my.cnf /root/.my.cnf.temporary
+ timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
+ mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
+ mv /root/.my.cnf.temporary /root/.my.cnf
+ mysqladmin -u root shutdown
+ # The import was successful so we may remove the folder
+ rm -r "$MYSQL_BACKUP_DIR"
+ fi
+fi
+
+# If we reached here without error we can safely blow away the original
+# mysql dir from every controller
+
+# TODO: What if the upgrade fails on the bootstrap node, but not on
+# this controller. Data may be lost.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Re-enable stonith (if it was enabled before the upgrade) prior to starting the cluster
+if [[ -n $(is_bootstrap_node) ]]; then
+ if [ -f /var/tmp/stonith-true ]; then
+ pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
+ fi
+ rm -f /var/tmp/stonith-true
+fi
+
+# Pin messages sent to compute nodes to the previous release, these will be upgraded later
+crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
+# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
+crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
+# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
+crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
+# LP: 1615035, required only for M/N upgrade.
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+# LP: 1627450, required only for M/N upgrade
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
+
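
The stop loop above leans on bash suffix stripping to map a pacemaker clone resource name onto its systemd unit, then polls until the unit is no longer reported active. A minimal illustration of the two idioms:

    # ${var%%pattern} removes the longest suffix matching the pattern:
    service="openstack-sahara-api-clone"
    echo "${service%%-clone}"    # -> openstack-sahara-api

    # Poll with a deadline; any state other than "active" (inactive,
    # failed, unknown) counts as stopped, per LP 1627254.
    tend=$(( $(date +%s) + 600 ))
    while (( $(date +%s) < tend )); do
        if [ "$(systemctl is-active "${service%%-clone}")" = "active" ]; then
            sleep 3
        else
            break
        fi
    done
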
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
index d2cb9553..6748f891 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
@@ -2,16 +2,67 @@
set -eu
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
+cluster_form_timeout=600
+cluster_settle_timeout=1800
+galera_sync_timeout=600
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ pcs cluster start --all
+
+ tstart=$(date +%s)
+ while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_form_timeout )) ; then
+ echo_error "ERROR: timed out forming the cluster"
+ exit 1
+ fi
+ done
+
+ if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+ echo_error "ERROR: timed out waiting for cluster to finish transition"
+ exit 1
+ fi
+
+ for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
+ pcs resource enable $vip
+ check_resource_pacemaker $vip started 60
+ done
+fi
+
+start_or_enable_service galera
+check_resource galera started 600
start_or_enable_service redis
check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
+# We need mongod, which is now a systemd service, up and running before calling
+# ceilometer-upgrade. There is still a race here: mongod might not be up on all nodes
+# so ceilometer-upgrade will fail a couple of times before that. As it retries indefinitely
+# we should be good.
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 we use systemctl directly for now
+systemctl start mongod
+check_resource mongod started 600
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
+if [[ -n $(is_bootstrap_node) ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo_error "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
-# Swift isn't controled by pacemaker
-systemctl_swift start
+ # Run all the db syncs
+ # TODO: check if this can be triggered in puppet and removed from here
+ ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
+ cinder-manage db sync
+ glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
+ heat-manage --config-file /etc/heat/heat.conf db_sync
+ keystone-manage db_sync
+ neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+ nova-manage db sync
+ nova-manage api_db sync
+ nova-manage db online_data_migrations
+ sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
+fi
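
Step 3 repeats the same poll-until-deadline idiom that also appears in steps 1 and 2 (cluster formation, pacemaker shutdown, galera sync here). Purely as an illustration, not part of the scripts, the pattern distills to a single helper:

    # Hypothetical helper; echo_error comes from pacemaker_common_functions.sh.
    wait_for() {
        local timeout=$1; shift
        local tstart=$(date +%s)
        while ! "$@"; do
            sleep 5
            if (( $(date +%s) - tstart > timeout )); then
                echo_error "ERROR: timed out waiting for: $*"
                exit 1
            fi
        done
    }

    # The galera sync wait above would then read:
    # wait_for "$galera_sync_timeout" clustercheck
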
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
index fa95f1f8..d2cb9553 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
@@ -2,7 +2,16 @@
set -eu
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+start_or_enable_service openstack-cinder-volume
+check_resource openstack-cinder-volume started 600
+
+# start httpd so keystone is available for gnocchi
+# upgrade to run.
+systemctl start httpd
+
+# Swift isn't controlled by pacemaker
+systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
index d569084d..fa95f1f8 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
@@ -2,14 +2,7 @@
set -eu
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
+if [[ -n $(is_bootstrap_node) ]]; then
+ # run gnocchi upgrade
+ gnocchi-upgrade
fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
new file mode 100755
index 00000000..d569084d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -eu
+
+# We need to start the systemd services we explicitly stopped in step _2.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+services=$(services_to_migrate)
+if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
+ services=${services%%openstack-sahara*}
+fi
+for service in $services; do
+ manage_systemd_service start "${service%%-clone}"
+ check_resource_systemd "${service%%-clone}" started 600
+done
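
The new step 6 is the old step 5 shifted down unchanged: it restarts the systemd services stopped in step 2, optionally leaving the sahara ones down. The %%openstack-sahara* expansion truncates the list at the first sahara entry, which relies on services_to_migrate listing the sahara units last (an assumption worth keeping in mind when editing that list):

    services="openstack-nova-api-clone openstack-sahara-api-clone openstack-sahara-engine-clone"
    echo "${services%%openstack-sahara*}"
    # -> "openstack-nova-api-clone "
    # Everything from the first sahara entry to the end of the string is
    # dropped, so any service listed after sahara would be lost as well.
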
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index e13aada3..b0418a56 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -113,7 +113,20 @@ resources:
config:
list_join:
- ''
- - - get_file: pacemaker_common_functions.sh
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - str_replace:
+ template: |
+ #!/bin/bash
+ mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
+ params:
+ MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
+ - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_2.sh
@@ -170,6 +183,25 @@ resources:
config:
list_join:
- ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_5.sh
+
+ ControllerPacemakerUpgradeDeployment_Step5:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step4
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step6:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
- - str_replace:
template: |
#!/bin/bash
@@ -178,12 +210,12 @@ resources:
KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- get_file: pacemaker_common_functions.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
+ - get_file: major_upgrade_controller_pacemaker_6.sh
- ControllerPacemakerUpgradeDeployment_Step5:
+ ControllerPacemakerUpgradeDeployment_Step6:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
+ depends_on: ControllerPacemakerUpgradeDeployment_Step5
properties:
servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
input_values: {get_param: input_values}
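
Each SoftwareConfig here is a list_join of script fragments, so the str_replace headers turn Heat parameters into shell variables that the shared function libraries and the step script can read. The script actually delivered to each controller for step 2 therefore begins roughly like this (parameter values are illustrative):

    #!/bin/bash
    upgrade_level_nova_compute='newton'   # from UpgradeLevelNovaCompute
    #!/bin/bash
    mariadb_do_major_upgrade='auto'       # from MySqlMajorUpgrade
    # ...contents of pacemaker_common_functions.sh
    # ...contents of major_upgrade_check.sh
    # ...contents of major_upgrade_pacemaker_migrations.sh
    # ...contents of major_upgrade_controller_pacemaker_2.sh

The repeated shebang lines are harmless, since only the first line of the assembled file is interpreted as one.
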
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 7c9083a4..6d02acc8 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -179,3 +179,23 @@ function disable_standalone_ceilometer_api {
fi
fi
}
+
+
+# This function makes sure that the rabbitmq ha policies are converted from newton to ocata
+# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
+# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
+# The number "2" should be CEIL(N/2) where N is the number of controllers (i.e. rabbit instances)
+# Note that changing an attribute like this makes the rabbitmq resource restart
+function rabbitmq_newton_ocata_upgrade {
+ if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
+ # Number of controller is obtained by counting how many hostnames we
+ # have in controller_node_names hiera key
+ nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
+ nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
+ if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
+ echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
+ exit 1
+ fi
+ pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
+ fi
+}
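
nr_queues is CEIL(nr_controllers / 2) computed with integer arithmetic, since bash division truncates. For example, with the usual three controllers:

    nr_controllers=3                     # hiera controller_node_names has 2 commas
    nr_queues=$(( nr_controllers / 2 + nr_controllers % 2 ))
    echo "$nr_queues"                    # -> 2, i.e. CEIL(3/2)

so the resulting policy becomes {"ha-mode":"exactly","ha-params":2}, mirroring each queue on two of the three rabbit instances.
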
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index 8500bcef..49d39bc8 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -28,7 +28,6 @@ if [[ -d "$RESTART_FOLDER" && -n $(pcmk_running) && -n $(is_bootstrap_node) ]];
fi
-haproxy_status=$(systemctl is-active haproxy)
-if [ "$haproxy_status" = "active" ]; then
+if [ $(systemctl is-active haproxy) = "active" ]; then
systemctl reload haproxy
fi
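
This is more than a style cleanup, assuming the script runs under set -e like its siblings: a bare assignment propagates the exit status of the command substitution, and systemctl is-active exits non-zero for any state other than "active", so the old form aborted the whole script whenever haproxy was simply not running. Moving the substitution into the if condition makes the non-zero status part of the test instead:

    set -e
    # Old form: the assignment itself fails when haproxy is inactive,
    # killing the script before the comparison ever runs:
    #   haproxy_status=$(systemctl is-active haproxy)
    # New form: the failure is absorbed by the condition.
    if [ "$(systemctl is-active haproxy)" = "active" ]; then
        systemctl reload haproxy
    fi

(Quoting the substitution, as above, would additionally guard against an empty result turning into a test syntax error.)
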
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index d6f1ff7d..3e201175 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -184,12 +184,12 @@ resource_registry:
# Services that are disabled by default (use relevant environment files):
OS::TripleO::Services::FluentdClient: OS::Heat::None
OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
- OS::Tripleo::Services::ManilaApi: OS::Heat::None
- OS::Tripleo::Services::ManilaScheduler: OS::Heat::None
- OS::Tripleo::Services::ManilaShare: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None
+ OS::TripleO::Services::ManilaApi: OS::Heat::None
+ OS::TripleO::Services::ManilaScheduler: OS::Heat::None
+ OS::TripleO::Services::ManilaShare: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendGeneric: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None
OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
OS::TripleO::Services::BarbicanApi: OS::Heat::None
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 47c73f8e..42339ead 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -231,8 +231,19 @@ resources:
config: {get_attr: [allNodesConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
input_values:
- bootstrap_nodeid: {get_attr: [{{role.name}}, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [{{role.name}}, resource.0.ip_address]}
+ # Note we have to use yaql to look up the first hostname/ip in the
+ # list because heat path based attributes operate on the attribute
+ # inside the ResourceGroup, not the exposed list; ref discussion in
+ # https://bugs.launchpad.net/heat/+bug/1640488
+ # The coalesce is needed because $.data is None during heat validation
+ bootstrap_nodeid:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{role.name}}, hostname]}
+ bootstrap_nodeid_ip:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{role.name}}, ip_address]}
{{role.name}}AllNodesValidationDeployment:
type: OS::Heat::StructuredDeployments
@@ -531,8 +542,8 @@ resources:
# Post deployment steps for all roles
AllNodesDeploySteps:
type: OS::TripleO::PostDeploySteps
-{% for role in roles %}
depends_on:
+{% for role in roles %}
- {{role.name}}AllNodesDeployment
{% endfor %}
properties:
@@ -555,60 +566,6 @@ outputs:
KeystoneAdminVip:
description: Keystone Admin VIP endpoint
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
- PublicVip:
- description: Controller VIP for public API endpoints
- value: {get_attr: [VipMap, net_ip_map, external]}
- AodhInternalVip:
- description: VIP for Aodh API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
- BarbicanInternalVip:
- description: VIP for Barbican API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, BarbicanApiNetwork]}]}
- CeilometerInternalVip:
- description: VIP for Ceilometer API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
- CephRgwInternalVip:
- description: VIP for Ceph RGW internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]}
- CinderInternalVip:
- description: VIP for Cinder API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
- GlanceInternalVip:
- description: VIP for Glance API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
- GnocchiInternalVip:
- description: VIP for Gnocchi API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
- MistralInternalVip:
- description: VIP for Mistral API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MistralApiNetwork]}]}
- HeatInternalVip:
- description: VIP for Heat API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
- IronicInternalVip:
- description: VIP for Ironic API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
- KeystoneInternalVip:
- description: VIP for Keystone API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
- ManilaInternalVip:
- description: VIP for Manila API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
- NeutronInternalVip:
- description: VIP for Neutron API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
- NovaInternalVip:
- description: VIP for Nova API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
- OpenDaylightInternalVip:
- description: VIP for OpenDaylight API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
- SaharaInternalVip:
- description: VIP for Sahara API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
- SwiftInternalVip:
- description: VIP for Swift Proxy internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
EndpointMap:
description: |
Mapping of the resources with the needed info for their endpoints.
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
index cf57680c..ab6b0ec7 100644
--- a/puppet/services/barbican-api.yaml
+++ b/puppet/services/barbican-api.yaml
@@ -52,6 +52,9 @@ parameters:
default: guest
description: The username for RabbitMQ
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
@@ -85,7 +88,7 @@ outputs:
barbican::api::rabbit_heartbeat_timeout_threshold: 60
barbican::api::service_name: 'httpd'
barbican::wsgi::apache::bind_host: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
- barbican::wsgi::apache::ssl: false
+ barbican::wsgi::apache::ssl: {get_param: EnableInternalTLS}
barbican::wsgi::apache::workers: {get_param: BarbicanWorkers}
barbican::wsgi::apache::servername:
str_replace:
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
index 9120687b..b708665f 100644
--- a/puppet/services/ceph-external.yaml
+++ b/puppet/services/ceph-external.yaml
@@ -99,6 +99,14 @@ outputs:
CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ ceph::profile::params::manage_repo: false
+ # FIXME(gfidente): we should not have to list the packages explicitly in
+ # the templates, but this should stay until the following is fixed:
+ # https://bugs.launchpad.net/puppet-ceph/+bug/1629933
+ ceph::params::packages:
+ - ceph-base
+ - ceph-mon
+ - ceph-osd
service_config_settings:
glance_api:
glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 18a4b780..89c1a5ee 100644
--- a/puppet/services/ceph-rgw.yaml
+++ b/puppet/services/ceph-rgw.yaml
@@ -55,15 +55,9 @@ outputs:
- tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey}
tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken}
tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- ceph::profile::params::frontend_type: 'civetweb'
- ceph_rgw_civetweb_bind_address: {get_param: [ServiceNetMap, CephRgwNetwork]}
- ceph::profile::params::rgw_frontends:
- list_join:
- - ''
- - - 'civetweb port='
- - '%{hiera("ceph_rgw_civetweb_bind_address")}'
- - ':'
- - {get_param: [EndpointMap, CephRgwInternal, port]}
+ tripleo::profile::base::ceph::rgw::civetweb_bind_ip: {get_param: [ServiceNetMap, CephRgwNetwork]}
+ tripleo::profile::base::ceph::rgw::civetweb_bind_port: {get_param: [EndpointMap, CephRgwInternal, port]}
+ ceph::params::user_radosgw: ceph
tripleo.ceph_rgw.firewall_rules:
'122 ceph rgw':
dport: {get_param: [EndpointMap, CephRgwInternal, port]}
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index fe48667a..803d8b83 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -43,6 +43,9 @@ parameters:
type: string
description: Set the number of workers for cinder::wsgi::apache
default: '"%{::os_workers}"'
+ EnableInternalTLS:
+ type: boolean
+ default: false
conditions:
cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]}
@@ -55,6 +58,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
CinderBase:
type: ./cinder-base.yaml
@@ -94,21 +98,26 @@ outputs:
dport:
- 8776
- 13776
+ cinder::api::bind_host:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
+ cinder::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+ cinder::api::service_name: 'httpd'
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
- cinder::api::service_name: 'httpd'
- cinder::wsgi::apache::ssl: false
cinder::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
cinder::wsgi::apache::servername:
str_replace:
template:
'"%{::fqdn_$NETWORK}"'
params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
-
if:
- cinder_workers_zero
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 1a86ec71..12d4a6a1 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -76,9 +76,11 @@ outputs:
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
keystone:
- heat::keystone::auth_cfn::tenant: 'service'
- heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
- heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
- heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
- heat::keystone::auth_cfn::password: {get_param: HeatPassword}
- heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
+ map_merge:
+ - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+ - heat::keystone::auth_cfn::tenant: 'service'
+ heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+ heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+ heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+ heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+ heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 2ea96fc0..b0cd16dd 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -76,9 +76,11 @@ outputs:
include ::tripleo::profile::base::heat::api
service_config_settings:
keystone:
- heat::keystone::auth::tenant: 'service'
- heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
- heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
- heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
- heat::keystone::auth::password: {get_param: HeatPassword}
- heat::keystone::auth::region: {get_param: KeystoneRegion}
+ map_merge:
+ - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+ - heat::keystone::auth::tenant: 'service'
+ heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+ heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+ heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+ heat::keystone::auth::password: {get_param: HeatPassword}
+ heat::keystone::auth::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 7eb58f56..a2a65d7d 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -77,3 +77,8 @@ outputs:
heat::cron::purge_deleted::destination: '/dev/null'
heat::db::database_db_max_retries: -1
heat::db::database_max_retries: -1
+ service_config_settings:
+ keystone:
+ tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack'
+ tripleo::profile::base::keystone::heat_admin_user: 'heat_stack_domain_admin'
+ tripleo::profile::base::keystone::heat_admin_email: 'heat_stack_domain_admin@localhost'
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index 20415eef..3f0e4105 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -105,4 +105,4 @@ outputs:
- "%{hiera('mysql_bind_host')}"
keystone:
# This is needed because the keystone profile handles creating the domain
- heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
+ tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword}
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 1e08415c..017bb76f 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -24,7 +24,7 @@ parameters:
type: json
HorizonAllowedHosts:
default: '*'
- description: A list of IP/Hostname for the server Horizonis running on.
+ description: A list of IP/Hostname for the server Horizon is running on.
Used for header checks.
type: comma_delimited_list
HorizonSecret:
@@ -32,11 +32,6 @@ parameters:
type: string
hidden: true
default: ''
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network.
- type: comma_delimited_list
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
@@ -45,6 +40,10 @@ parameters:
default: 'overcloud-horizon'
type: string
+conditions:
+
+ debug_empty: {equals : [{get_param: Debug}, '']}
+
outputs:
role_data:
description: Role data for the Horizon role.
@@ -52,33 +51,34 @@ outputs:
service_name: horizon
monitoring_subscription: {get_param: MonitoringSubscriptionHorizon}
config_settings:
- horizon::allowed_hosts: {get_param: HorizonAllowedHosts}
- neutron::plugins::ml2::mechanism_drivers:
- str_replace:
- template: MECHANISMS
- params:
- MECHANISMS: {get_param: NeutronMechanismDrivers}
- tripleo.horizon.firewall_rules:
- '126 horizon':
- dport:
- - 80
- - 443
- horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
- horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
- horizon::vhost_extra_params:
- add_listen: false
- priority: 10
- access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
- horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
- horizon::django_debug: {get_param: Debug}
- horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
- horizon::secret_key:
- yaql:
- expression: $.data.passwords.where($ != '').first()
- data:
- passwords:
- - {get_param: HorizonSecret}
- - {get_param: [DefaultPasswords, horizon_secret]}
- memcached_ipv6: {get_param: MemcachedIPv6}
+ map_merge:
+ - horizon::allowed_hosts: {get_param: HorizonAllowedHosts}
+ tripleo.horizon.firewall_rules:
+ '126 horizon':
+ dport:
+ - 80
+ - 443
+ horizon::disable_password_reveal: true
+ horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
+ horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
+ horizon::vhost_extra_params:
+ add_listen: false
+ priority: 10
+ access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
+ horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::secret_key:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: HorizonSecret}
+ - {get_param: [DefaultPasswords, horizon_secret]}
+ memcached_ipv6: {get_param: MemcachedIPv6}
+ -
+ if:
+ - debug_empty
+ - {}
+ - horizon::django_debug: {get_param: Debug}
step_config: |
include ::tripleo::profile::base::horizon
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 1f83b680..d819e043 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Keystone service configured with Puppet
@@ -32,6 +32,12 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ KeystoneTokenProvider:
+ description: The keystone token format
+ type: string
+ default: 'uuid'
+ constraints:
+ - allowed_values: ['uuid', 'fernet']
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -85,7 +91,7 @@ parameters:
description: Set the number of workers for keystone::wsgi::apache
default: '"%{::os_workers}"'
MonitoringSubscriptionKeystone:
- default: 'overcloud-kestone'
+ default: 'overcloud-keystone'
type: string
KeystoneCredential0:
type: string
@@ -112,6 +118,9 @@ resources:
EndpointMap: {get_param: EndpointMap}
EnableInternalTLS: {get_param: EnableInternalTLS}
+conditions:
+ keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+
outputs:
role_data:
description: Role data for the Keystone role.
@@ -138,6 +147,8 @@ outputs:
keystone::roles::admin::password: {get_param: AdminPassword}
keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
+ keystone::token_provider: {get_param: KeystoneTokenProvider}
+ keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
keystone::enable_proxy_headers_parsing: true
keystone::enable_credential_setup: true
keystone::credential_keys:
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
index d7350d07..e5762328 100644
--- a/puppet/services/monitoring/sensu-base.yaml
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -43,7 +43,19 @@ parameters:
description: The RabbitMQ vhost used for monitoring purposes.
type: string
default: '/sensu'
-
+ SensuRedactVariables:
description: Variables from the Sensu configuration which have to be redacted.
type: json
+ default:
+ - password
+ - passwd
+ - pass
+ - api_key
+ - api_token
+ - access_key
+ - secret_key
+ - private_key
+ - secret
outputs:
role_data:
@@ -61,8 +73,7 @@ outputs:
sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
- #sensu::redis_host: {get_param: MonitoringRedisHost}
- #sensu::redis_password: {get_param: MonitoringRedisPassword}
+ sensu::redact: {get_param: SensuRedactVariables}
sensu::sensu_plugin_provider: 'yum'
sensu::sensu_plugin_name: 'rubygem-sensu-plugin'
sensu::version: 'present'
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 6bb4ba08..0b2cef07 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -50,16 +50,13 @@ parameters:
to false may result in configuration remnants after updates/upgrades.
NeutronGlobalPhysnetMtu:
type: number
- default: 1496
+ default: 1500
description: |
MTU of the underlying physical network. Neutron uses this value to
calculate MTU for all virtual network components. For flat and VLAN
networks, neutron uses this value without modification. For overlay
networks such as VXLAN, neutron automatically subtracts the overlay
- protocol overhead from this value. The default value of 1496 is
- currently in effect to compensate for some additional overhead when
- deploying with some network configurations (e.g. network isolation over
- single network interfaces)
+ protocol overhead from this value.
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
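
With the default back at the standard 1500, neutron derives tenant-network MTUs by subtracting the overlay overhead itself; deployments that need to compensate for unusual underlays can still lower NeutronGlobalPhysnetMtu explicitly. For VXLAN over IPv4 the per-packet overhead is 50 bytes:

    physnet_mtu=1500
    vxlan_overhead=50    # 14 outer Ethernet + 20 IPv4 + 8 UDP + 8 VXLAN
    echo $(( physnet_mtu - vxlan_overhead ))   # -> 1450, the tenant network MTU
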
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index a89e3d75..a2157555 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Neutron L3 agent configured with Puppet
@@ -43,6 +43,10 @@ parameters:
tag: openstack.neutron.agent.l3
path: /var/log/neutron/l3-agent.log
+conditions:
+
+ external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
+
resources:
NeutronBase:
@@ -63,12 +67,16 @@ outputs:
- neutron
config_settings:
map_merge:
-        - get_attr: [NeutronBase, role_data, config_settings]
-        - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
-          neutron::agents::l3::router_delete_namespaces: True
-          neutron::agents::l3::agent_mode : {get_param: NeutronL3AgentMode}
-          tripleo.neutron_l3.firewall_rules:
-            '106 neutron_l3 vrrp':
-              proto: vrrp
+        - get_attr: [NeutronBase, role_data, config_settings]
+        - neutron::agents::l3::router_delete_namespaces: True
+          neutron::agents::l3::agent_mode: {get_param: NeutronL3AgentMode}
+          tripleo.neutron_l3.firewall_rules:
+            '106 neutron_l3 vrrp':
+              proto: vrrp
+        -
+          if:
+          - external_network_bridge_empty
+          - {}
+          - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
step_config: |
include tripleo::profile::base::neutron::l3
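
The map_merge/if combination merges an empty map when NeutronExternalNetworkBridge is the literal "''", so puppet-neutron falls back to its own default instead of receiving an explicitly empty bridge name. For a deployment that does set the bridge, the merged settings reduce to roughly the following (parameter values illustrative):

    neutron::agents::l3::router_delete_namespaces: true
    neutron::agents::l3::agent_mode: 'legacy'
    tripleo.neutron_l3.firewall_rules:
      '106 neutron_l3 vrrp':
        proto: vrrp
    neutron::agents::l3::external_network_bridge: 'br-ex'
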
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index 8db00d8f..74a95d20 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
 
description: >
OpenStack Nova base service. Shared for all Nova services.
@@ -66,6 +66,9 @@ parameters:
type: string
description: Nova Compute upgrade level
default: ''
+conditions:
+
+  compute_upgrade_level_empty: {equals: [{get_param: UpgradeLevelNovaCompute}, '']}
 
outputs:
role_data:
@@ -73,45 +76,50 @@ outputs:
value:
service_name: nova_base
config_settings:
- nova::rabbit_password: {get_param: RabbitPassword}
- nova::rabbit_userid: {get_param: RabbitUserName}
- nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- nova::rabbit_port: {get_param: RabbitClientPort}
- nova::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova'
- nova::api_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova_api:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_api'
- nova::debug: {get_param: Debug}
- nova::purge_config: {get_param: EnableConfigPurge}
- nova::network::neutron::neutron_project_name: 'service'
- nova::network::neutron::neutron_username: 'neutron'
- nova::network::neutron::dhcp_domain: ''
- nova::network::neutron::neutron_password: {get_param: NeutronPassword}
- nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- nova::network::neutron::neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]}
- nova::rabbit_heartbeat_timeout_threshold: 60
- nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
- nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed.
- nova::notify_on_state_change: 'vm_and_task_state'
- nova::notification_driver: messagingv2
- nova::network::neutron::neutron_auth_type: 'v3password'
- nova::db::database_db_max_retries: -1
- nova::db::database_max_retries: -1
- nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
- nova::use_ipv6: {get_param: NovaIPv6}
- nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute}
- nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
+ map_merge:
+ - nova::rabbit_password: {get_param: RabbitPassword}
+ nova::rabbit_userid: {get_param: RabbitUserName}
+ nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ nova::rabbit_port: {get_param: RabbitClientPort}
+ nova::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova'
+ nova::api_database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova_api:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova_api'
+ nova::debug: {get_param: Debug}
+ nova::purge_config: {get_param: EnableConfigPurge}
+ nova::network::neutron::neutron_project_name: 'service'
+ nova::network::neutron::neutron_username: 'neutron'
+ nova::network::neutron::dhcp_domain: ''
+ nova::network::neutron::neutron_password: {get_param: NeutronPassword}
+ nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
+ nova::network::neutron::neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]}
+ nova::rabbit_heartbeat_timeout_threshold: 60
+ nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
+ nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed.
+ nova::notify_on_state_change: 'vm_and_task_state'
+ nova::notification_driver: messagingv2
+ nova::network::neutron::neutron_auth_type: 'v3password'
+ nova::db::database_db_max_retries: -1
+ nova::db::database_max_retries: -1
+ nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
+ nova::use_ipv6: {get_param: NovaIPv6}
+ nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
+ -
+ if:
+ - compute_upgrade_level_empty
+ - {}
+ - nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute}
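
Same pattern as in neutron-l3: when UpgradeLevelNovaCompute keeps its empty default, the hiera key is now omitted entirely rather than written out as an empty string. During a rolling upgrade the parameter is typically set before the controllers are upgraded; a sketch ('auto' is one of nova's accepted upgrade_levels values):

    parameter_defaults:
      UpgradeLevelNovaCompute: 'auto'   # pins compute RPC compatibility
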
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 318c898e..253d63ef 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -59,6 +59,6 @@ outputs:
opendaylight::enable_l3: {get_param: OpenDaylightEnableL3}
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
- opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}
+ opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
step_config: |
include tripleo::profile::base::neutron::opendaylight
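
The lower-case "d" is intentional: ServiceNetMap keys are derived from the service_name (opendaylight_api camelizes to OpendaylightApiNetwork), so the old spelling never matched and the bind IP lookup came back empty. An explicit override has to use the generated key; a sketch (network name illustrative):

    parameter_defaults:
      ServiceNetMap:
        OpendaylightApiNetwork: internal_api
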
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 268ca244..907ecddc 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -54,5 +54,11 @@ outputs:
template: MAPPINGS
params:
MAPPINGS: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
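
These hashes are consumed by the tripleo firewall profile, which turns each entry into an accept rule: 4789/udp is the IANA-assigned VXLAN port, while GRE is matched by protocol alone and therefore needs no dport. Further rules would follow the same numbered-hash convention; a purely illustrative extra entry:

          tripleo.opendaylight_ovs.firewall_rules:
            '119 example custom port':   # hypothetical rule, not in this patch
              proto: 'tcp'
              dport: 12345
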
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 176fd235..ffe2d2d4 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -54,8 +54,8 @@ outputs:
data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
monitoring_subscriptions:
yaql:
- expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
logging_sources:
# Transform the individual logging_source configuration from
# each service in the chain into a global list, adding some
@@ -78,8 +78,9 @@ outputs:
sources:
- {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
- yaql:
- expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('logging_source')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
+
- {get_attr: [LoggingConfiguration, LoggingExtraSources]}
default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
@@ -93,17 +94,17 @@ outputs:
groups:
- [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
- yaql:
- expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
- [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
global_config_settings:
map_merge:
yaql:
- expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('global_config_settings')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
service_config_settings:
yaql:
- expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
- data: {get_attr: [ServiceChain, role_data]}
+ expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
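
All five yaql lookups in this file change the same way: the yaql function expects its data argument to be a map here, so the ServiceChain attribute list is wrapped under a role_data key and the expressions dereference $.data.role_data instead of $.data. A self-contained sketch of the pattern with inline data:

    outputs:
      subscriptions:
        value:
          yaql:
            expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
            data: {role_data: [null, {monitoring_subscription: 'overcloud-keystone'}]}
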
diff --git a/roles_data.yaml b/roles_data.yaml
index cc9ee14d..dad62f85 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -73,12 +73,12 @@
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
- - OS::Tripleo::Services::ManilaApi
- - OS::Tripleo::Services::ManilaScheduler
- - OS::Tripleo::Services::ManilaBackendGeneric
- - OS::Tripleo::Services::ManilaBackendNetapp
- - OS::Tripleo::Services::ManilaBackendCephFs
- - OS::Tripleo::Services::ManilaShare
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendNetapp
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier