75 files changed, 1287 insertions, 614 deletions
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml index ad428686..47b2003d 100644 --- a/environments/low-memory-usage.yaml +++ b/environments/low-memory-usage.yaml @@ -13,3 +13,6 @@ parameter_defaults: ApacheMaxRequestWorkers: 32 ApacheServerLimit: 32 + + ControllerExtraConfig: + 'nova::network::neutron::neutron_url_timeout': '60' diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml index 825a5066..5632d8d6 100644 --- a/environments/manila-cephfsnative-config.yaml +++ b/environments/manila-cephfsnative-config.yaml @@ -1,11 +1,11 @@ # A Heat environment file which can be used to enable a # a Manila CephFS Native driver backend. resource_registry: - OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml - OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml + OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml + OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml # Only manila-share is pacemaker managed: - OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml - OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml + OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml + OS::TripleO::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml parameter_defaults: diff --git a/environments/manila-generic-config.yaml b/environments/manila-generic-config.yaml index 9344bc6e..65884a94 100644 --- a/environments/manila-generic-config.yaml +++ b/environments/manila-generic-config.yaml @@ -1,10 +1,10 @@ # This environment file enables Manila with the Generic backend. resource_registry: - OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml - OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml + OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml + OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml # Only manila-share is pacemaker managed: - OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml - OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml + OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml + OS::TripleO::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml parameter_defaults: ManilaServiceInstanceUser: '' diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml index 3dadfe5d..7eb14941 100644 --- a/environments/manila-netapp-config.yaml +++ b/environments/manila-netapp-config.yaml @@ -1,10 +1,10 @@ # This environment file enables Manila with the Netapp backend. 
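The Manila hunks above fix a case typo in the resource type namespace, OS::Tripleo to OS::TripleO, so these entries match every other TripleO service type; the Netapp backend file whose header closes this chunk receives the same fix in the registry block that follows. A quick tree-wide check that no mis-cased entries survive (a sketch, assuming it runs from the template root):

```bash
# Fail loudly if any resource type still uses the mis-cased "OS::Tripleo" prefix
if grep -rn 'OS::Tripleo::' environments/ puppet/ overcloud-resource-registry-puppet.j2.yaml; then
    echo "mis-cased TripleO resource types remain" >&2
    exit 1
fi
```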
resource_registry: - OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml - OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml + OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml + OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml # Only manila-share is pacemaker managed: - OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml - OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml + OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml + OS::TripleO::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml parameter_defaults: ManilaNetappBackendName: tripleo_netapp diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml index e157ae35..74899246 100644 --- a/environments/neutron-nuage-config.yaml +++ b/environments/neutron-nuage-config.yaml @@ -19,7 +19,7 @@ parameter_defaults: NeutronNuageBaseURIVersion: 'default_uri_version' NeutronNuageCMSId: '' UseForwardedFor: true - NeutronCorePlugin: 'neutron.plugins.nuage.plugin.NuagePlugin' + NeutronCorePlugin: 'nuage_neutron.plugins.nuage.plugin.NuagePlugin' NeutronEnableDHCPAgent: false NeutronServicePlugins: [] NovaOVSBridge: 'alubr0' diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml index 8cfbab6d..b8e93f20 100644 --- a/environments/puppet-pacemaker.yaml +++ b/environments/puppet-pacemaker.yaml @@ -16,3 +16,6 @@ resource_registry: OS::TripleO::Services::Redis: ../puppet/services/pacemaker/database/redis.yaml OS::TripleO::Services::MySQL: ../puppet/services/pacemaker/database/mysql.yaml # Services that are disabled by default (use relevant environment files): + + # Services that are disabled for HA deployments with pacemaker + OS::TripleO::Services::Keepalived: OS::Heat::None diff --git a/environments/services/barbican.yaml b/environments/services/barbican.yaml new file mode 100644 index 00000000..1735646a --- /dev/null +++ b/environments/services/barbican.yaml @@ -0,0 +1,4 @@ +# A Heat environment file which can be used to enable +# Barbican with the default secret store backend. +resource_registry: + OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml index 8cf34622..8e02c300 100644 --- a/environments/storage-environment.yaml +++ b/environments/storage-environment.yaml @@ -34,18 +34,18 @@ parameter_defaults: # CinderNfsServers: '' - #### GLANCE FILE BACKEND PACEMAKER SETTINGS (used for mounting NFS) #### + #### GLANCE NFS SETTINGS #### - ## Whether to make Glance 'file' backend a mount managed by Pacemaker - # GlanceFilePcmkManage: false - ## File system type of the mount - # GlanceFilePcmkFstype: nfs - ## Pacemaker mount point, e.g. '192.168.122.1:/export/glance' for NFS - ## (If using IPv6, use both double- and single-quotes, - ## e.g. "'[fdd0::1]:/export/glance'") - # GlanceFilePcmkDevice: '' - ## Options for the mount managed by Pacemaker - # GlanceFilePcmkOptions: '' + ## Make sure to set `GlanceBackend: file` when enabling NFS + ## + ## Whether to make Glance 'file' backend a NFS mount + # GlanceNfsEnabled: false + ## NFS share for image storage, e.g. '192.168.122.1:/export/glance' + ## (If using IPv6, use both double- and single-quotes, + ## e.g. 
"'[fdd0::1]:/export/glance'") + # GlanceNfsShare: '' + ## Mount options for the NFS image storage mount point + # GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0' #### CEPH SETTINGS #### diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml index f94a7726..79c7599f 100644 --- a/environments/tls-endpoints-public-dns.yaml +++ b/environments/tls-endpoints-public-dns.yaml @@ -5,6 +5,9 @@ parameter_defaults: AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'} AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'} AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'} + BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'} + BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'} + BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'} CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'} diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml index eb2a23b4..a49ca343 100644 --- a/environments/tls-endpoints-public-ip.yaml +++ b/environments/tls-endpoints-public-ip.yaml @@ -5,6 +5,9 @@ parameter_defaults: AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'} AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'} AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'} + BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'} + BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'} + BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'} CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'} diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml index c3fbaf49..cc1915fe 100644 --- a/environments/tls-everywhere-endpoints-dns.yaml +++ b/environments/tls-everywhere-endpoints-dns.yaml @@ -5,6 +5,9 @@ parameter_defaults: AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'} AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'} AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'} + BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'} + BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'} + BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'} CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'} CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'} CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'} diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh index f161c049..39861826 100644 --- a/extraconfig/tasks/major_upgrade_block_storage.sh +++ b/extraconfig/tasks/major_upgrade_block_storage.sh @@ -11,8 +11,8 @@ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "syst pushd OVS_UPGRADE echo "Attempting to downloading latest openvswitch with yumdownloader" yumdownloader --resolve openvswitch - echo "Updating openvswitch with nopostun" - rpm -U --nopostun ./*.rpm + echo "Updating openvswitch with nopostun option" + rpm -U --replacepkgs --nopostun ./*.rpm popd 
else echo "Skipping manual upgrade of openvswitch - no restart in postun detected" diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh index e690a383..d84cad45 100644 --- a/extraconfig/tasks/major_upgrade_ceph_storage.sh +++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh @@ -50,14 +50,14 @@ timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do done" # Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then +if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then echo "Manual upgrade of openvswitch - restart in postun detected" mkdir OVS_UPGRADE || true pushd OVS_UPGRADE echo "Attempting to downloading latest openvswitch with yumdownloader" yumdownloader --resolve openvswitch - echo "Updating openvswitch with nopostun" - rpm -U --nopostun ./*.rpm + echo "Updating openvswitch with nopostun option" + rpm -U --replacepkgs --nopostun ./*.rpm popd else echo "Skipping manual upgrade of openvswitch - no restart in postun detected" diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh index b65f6915..8bdff5e7 100755 --- a/extraconfig/tasks/major_upgrade_check.sh +++ b/extraconfig/tasks/major_upgrade_check.sh @@ -18,14 +18,8 @@ check_pcsd() fi } -check_disk_for_mysql_dump() +mysql_need_update() { - # Where to backup current database if mysql need to be upgraded - MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp - MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup - # Spare disk ratio for extra safety - MYSQL_BACKUP_SIZE_RATIO=1.2 - # Shall we upgrade mysql data directory during the stack upgrade? 
if [ "$mariadb_do_major_upgrade" = "auto" ]; then ret=$(is_mysql_upgrade_needed) @@ -40,6 +34,17 @@ check_disk_for_mysql_dump() else DO_MYSQL_UPGRADE=1 fi +} + +check_disk_for_mysql_dump() +{ + # Where to backup current database if mysql need to be upgraded + MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp + MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup + # Spare disk ratio for extra safety + MYSQL_BACKUP_SIZE_RATIO=1.2 + + mysql_need_update if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then if [ $DO_MYSQL_UPGRADE -eq 1 ]; then diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh index 950fe8d5..b0d42806 100644 --- a/extraconfig/tasks/major_upgrade_compute.sh +++ b/extraconfig/tasks/major_upgrade_compute.sh @@ -18,7 +18,6 @@ set -eu crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute - # Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then echo "Manual upgrade of openvswitch - restart in postun detected" @@ -26,8 +25,8 @@ if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "sys pushd OVS_UPGRADE echo "Attempting to downloading latest openvswitch with yumdownloader" yumdownloader --resolve openvswitch - echo "Updating openvswitch with nopostun" - rpm -U --nopostun ./*.rpm + echo "Updating openvswitch with nopostun option" + rpm -U --replacepkgs --nopostun ./*.rpm popd else echo "Skipping manual upgrade of openvswitch - no restart in postun detected" @@ -36,6 +35,10 @@ fi yum -y install python-zaqarclient # needed for os-collect-config yum -y update +# Due to bug#1640177 we need to restart compute agent +echo "Restarting openstack ceilometer agent compute" +systemctl restart openstack-ceilometer-compute + ENDOFCAT # ensure the permissions are OK diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh index 2690ee64..080831ab 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh @@ -2,8 +2,6 @@ set -eu -cluster_sync_timeout=1800 - check_cluster check_pcsd if [[ -n $(is_bootstrap_node) ]]; then @@ -17,8 +15,15 @@ check_disk_for_mysql_dump # nodes where a service fails to stop, which could be fatal during an upgrade # procedure. So we remember the stonith state. 
If it was enabled we reenable it # at the end of this script -STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') -pcs property set stonith-enabled=false +if [[ -n $(is_bootstrap_node) ]]; then + STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') + # We create this empty file if stonith was set to true so we can reenable stonith in step2 + rm -f /var/tmp/stonith-true + if [ $STONITH_STATE == "true" ]; then + touch /var/tmp/stonith-true + fi + pcs property set stonith-enabled=false +fi # Migrate to HA NG and fix up rabbitmq queues # We fix up the rabbitmq ha queues after the migration because it will @@ -26,180 +31,6 @@ pcs property set stonith-enabled=false # services will be restart as there are no other constraints if [[ -n $(is_bootstrap_node) ]]; then migrate_full_to_ng_ha - rabbitmq_mitaka_newton_upgrade -fi - -# After migrating the cluster to HA-NG the services not under pacemaker's control -# are still up and running. We need to stop them explicitely otherwise during the yum -# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which -# is going to take a long time because rabbit is down. By having the service stopped -# systemctl try-restart is a noop - -for service in $(services_to_migrate); do - manage_systemd_service stop "${service%%-clone}" - # So the reason for not reusing check_resource_systemd is that - # I have observed systemctl is-active returning unknown with at least - # one service that was stopped (See LP 1627254) - timeout=600 - tstart=$(date +%s) - tend=$(( $tstart + $timeout )) - check_interval=3 - while (( $(date +%s) < $tend )); do - if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then - echo "$service still active, sleeping $check_interval seconds." - sleep $check_interval - else - # we do not care if it is inactive, unknown or failed as long as it is - # not running - break - fi - - done -done - -# In case the mysql package is updated, the database on disk must be -# upgraded as well. This typically needs to happen during major -# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) -# -# Because in-place upgrades are not supported across 2+ major versions -# (e.g. 
5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle -# https://bugzilla.redhat.com/show_bug.cgi?id=1341968 -# -# The default is to determine automatically if upgrade is needed based -# on mysql package versionning, but this can be overriden manually -# to support specific upgrade scenario - -if [[ -n $(is_bootstrap_node) ]]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" - cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" - fi - - pcs resource disable redis - check_resource redis stopped 600 - pcs resource disable rabbitmq - check_resource rabbitmq stopped 600 - pcs resource disable galera - check_resource galera stopped 600 - pcs resource disable openstack-cinder-volume - check_resource openstack-cinder-volume stopped 600 - # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address: - # https://bugzilla.redhat.com/show_bug.cgi?id=1330688 - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do - pcs resource disable $vip - check_resource $vip stopped 60 - done - pcs cluster stop --all -fi - - -# Swift isn't controlled by pacemaker -systemctl_swift stop - -tstart=$(date +%s) -while systemctl is-active pacemaker; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_sync_timeout )) ; then - echo_error "ERROR: cluster shutdown timed out" - exit 1 - fi -done - -# The reason we do an sql dump *and* we move the old dir out of -# the way is because it gives us an extra level of safety in case -# something goes wrong during the upgrade. Once the restore is -# successful we go ahead and remove it. If the directory exists -# we bail out as it means the upgrade process had issues in the last -# run. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then - echo_error "ERROR: mysql backup dir already exist" - exit 1 - fi - mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - - -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then - echo "Manual upgrade of openvswitch - restart in postun detected" - mkdir OVS_UPGRADE || true - pushd OVS_UPGRADE - echo "Attempting to downloading latest openvswitch with yumdownloader" - yumdownloader --resolve openvswitch - echo "Updating openvswitch with nopostun" - rpm -U --nopostun ./*.rpm - popd -else - echo "Skipping manual upgrade of openvswitch - no restart in postun detected" -fi - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y -q update - -# We need to ensure at least those two configuration settings, otherwise -# mariadb 10.1+ won't activate galera replication. -# wsrep_cluster_address must only be set though, its value does not -# matter because it's overriden by the galera resource agent. 
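The comment above explains why wsrep_on and wsrep_cluster_address must both be present before MariaDB 10.1+ will activate galera replication; the heredoc that writes them follows, and the whole block reappears in step 2 where this logic now lives. A quick post-upgrade sanity check, assuming a working local mysql client:

```bash
# Confirm the two mandatory settings were written out
grep -E '^wsrep_(on|cluster_address)' /etc/my.cnf.d/galera.cnf
# wsrep_cluster_size is a standard galera status variable; once the cluster
# has re-formed it should equal the number of controllers
mysql -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';"
```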
-cat >> /etc/my.cnf.d/galera.cnf <<EOF -[mysqld] -wsrep_on = ON -wsrep_cluster_address = gcomm://localhost -EOF - -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - # Scripts run via heat have no HOME variable set and this confuses - # mysqladmin - export HOME=/root - - mkdir /var/lib/mysql || /bin/true - chown mysql:mysql /var/lib/mysql - chmod 0755 /var/lib/mysql - restorecon -R /var/lib/mysql/ - mysql_install_db --datadir=/var/lib/mysql --user=mysql - chown -R mysql:mysql /var/lib/mysql/ - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - mysqld_safe --wsrep-new-cluster & - # We have a populated /root/.my.cnf with root/password here so - # we need to temporarily rename it because the newly created - # db is empty and no root password is set - mv /root/.my.cnf /root/.my.cnf.temporary - timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done' - mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql" - mv /root/.my.cnf.temporary /root/.my.cnf - mysqladmin -u root shutdown - # The import was successful so we may remove the folder - rm -r "$MYSQL_BACKUP_DIR" - fi + rabbitmq_newton_ocata_upgrade fi -# If we reached here without error we can safely blow away the origin -# mysql dir from every controller - -# TODO: What if the upgrade fails on the bootstrap node, but not on -# this controller. Data may be lost. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Let's reset the stonith back to true if it was true, before starting the cluster -if [ $STONITH_STATE == "true" ]; then - pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true -fi - -# Pin messages sent to compute nodes to kilo, these will be upgraded later -crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" -# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 -# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 -crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit -# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 -# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists -crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" -# LP: 1615035, required only for M/N upgrade. -crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager -# LP: 1627450, required only for M/N upgrade -crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler - -crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh index b3a0098c..7cc6735f 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh @@ -2,68 +2,186 @@ set -eu -cluster_form_timeout=600 -cluster_settle_timeout=1800 -galera_sync_timeout=600 +cluster_sync_timeout=1800 -if [[ -n $(is_bootstrap_node) ]]; then - pcs cluster start --all +# After migrating the cluster to HA-NG the services not under pacemaker's control +# are still up and running. We need to stop them explicitely otherwise during the yum +# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which +# is going to take a long time because rabbit is down. 
By having the service stopped +# systemctl try-restart is a noop +for service in $(services_to_migrate); do + manage_systemd_service stop "${service%%-clone}" + # So the reason for not reusing check_resource_systemd is that + # I have observed systemctl is-active returning unknown with at least + # one service that was stopped (See LP 1627254) + timeout=600 tstart=$(date +%s) - while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_form_timeout )) ; then - echo_error "ERROR: timed out forming the cluster" - exit 1 - fi + tend=$(( $tstart + $timeout )) + check_interval=3 + while (( $(date +%s) < $tend )); do + if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then + echo "$service still active, sleeping $check_interval seconds." + sleep $check_interval + else + # we do not care if it is inactive, unknown or failed as long as it is + # not running + break + fi + done +done - if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then - echo_error "ERROR: timed out waiting for cluster to finish transition" - exit 1 +# In case the mysql package is updated, the database on disk must be +# upgraded as well. This typically needs to happen during major +# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) +# +# Because in-place upgrades are not supported across 2+ major versions +# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle +# https://bugzilla.redhat.com/show_bug.cgi?id=1341968 +# +# The default is to determine automatically if upgrade is needed based +# on mysql package versionning, but this can be overriden manually +# to support specific upgrade scenario + +# Calling this function will set the DO_MYSQL_UPGRADE variable which is used +# later +mysql_need_update + +if [[ -n $(is_bootstrap_node) ]]; then + if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" + cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" fi - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do - pcs resource enable $vip - check_resource_pacemaker $vip started 60 + pcs resource disable redis + check_resource redis stopped 600 + pcs resource disable rabbitmq + check_resource rabbitmq stopped 600 + pcs resource disable galera + check_resource galera stopped 600 + pcs resource disable openstack-cinder-volume + check_resource openstack-cinder-volume stopped 600 + # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address: + # https://bugzilla.redhat.com/show_bug.cgi?id=1330688 + for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do + pcs resource disable $vip + check_resource $vip stopped 60 done + pcs cluster stop --all fi -start_or_enable_service galera -check_resource galera started 600 -start_or_enable_service redis -check_resource redis started 600 -# We need mongod which is now a systemd service up and running before calling -# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes -# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely -# we should be good. -# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm -systemctl start mongod -check_resource mongod started 600 -if [[ -n $(is_bootstrap_node) ]]; then - tstart=$(date +%s) - while ! 
clustercheck; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > galera_sync_timeout )) ; then - echo_error "ERROR galera sync timed out" - exit 1 - fi - done +# Swift isn't controlled by pacemaker +systemctl_swift stop - # Run all the db syncs - # TODO: check if this can be triggered in puppet and removed from here - ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf - cinder-manage db sync - glance-manage --config-file=/etc/glance/glance-registry.conf db_sync - heat-manage --config-file /etc/heat/heat.conf db_sync - keystone-manage db_sync - neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head - nova-manage db sync - nova-manage api_db sync - nova-manage db online_data_migrations - gnocchi-upgrade - sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head +tstart=$(date +%s) +while systemctl is-active pacemaker; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_sync_timeout )) ; then + echo_error "ERROR: cluster shutdown timed out" + exit 1 + fi +done + +# The reason we do an sql dump *and* we move the old dir out of +# the way is because it gives us an extra level of safety in case +# something goes wrong during the upgrade. Once the restore is +# successful we go ahead and remove it. If the directory exists +# we bail out as it means the upgrade process had issues in the last +# run. +if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then + echo_error "ERROR: mysql backup dir already exist" + exit 1 + fi + mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR +fi + +# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 +if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then + echo "Manual upgrade of openvswitch - restart in postun detected" + mkdir OVS_UPGRADE || true + pushd OVS_UPGRADE + echo "Attempting to downloading latest openvswitch with yumdownloader" + yumdownloader --resolve openvswitch + echo "Updating openvswitch with nopostun option" + rpm -U --replacepkgs --nopostun ./*.rpm + popd +else + echo "Skipping manual upgrade of openvswitch - no restart in postun detected" fi + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y -q update + +# We need to ensure at least those two configuration settings, otherwise +# mariadb 10.1+ won't activate galera replication. +# wsrep_cluster_address must only be set though, its value does not +# matter because it's overriden by the galera resource agent. +cat >> /etc/my.cnf.d/galera.cnf <<EOF +[mysqld] +wsrep_on = ON +wsrep_cluster_address = gcomm://localhost +EOF + +if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + # Scripts run via heat have no HOME variable set and this confuses + # mysqladmin + export HOME=/root + + mkdir /var/lib/mysql || /bin/true + chown mysql:mysql /var/lib/mysql + chmod 0755 /var/lib/mysql + restorecon -R /var/lib/mysql/ + mysql_install_db --datadir=/var/lib/mysql --user=mysql + chown -R mysql:mysql /var/lib/mysql/ + + if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + mysqld_safe --wsrep-new-cluster & + # We have a populated /root/.my.cnf with root/password here so + # we need to temporarily rename it because the newly created + # db is empty and no root password is set + mv /root/.my.cnf /root/.my.cnf.temporary + timeout 60 sh -c 'while ! 
mysql -e "" &> /dev/null; do sleep 1; done' + mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql" + mv /root/.my.cnf.temporary /root/.my.cnf + mysqladmin -u root shutdown + # The import was successful so we may remove the folder + rm -r "$MYSQL_BACKUP_DIR" + fi +fi + +# If we reached here without error we can safely blow away the origin +# mysql dir from every controller + +# TODO: What if the upgrade fails on the bootstrap node, but not on +# this controller. Data may be lost. +if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR +fi + +# Let's reset the stonith back to true if it was true, before starting the cluster +if [[ -n $(is_bootstrap_node) ]]; then + if [ -f /var/tmp/stonith-true ]; then + pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true + fi + rm -f /var/tmp/stonith-true +fi + +# Pin messages sent to compute nodes to kilo, these will be upgraded later +crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" +# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 +# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 +crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit +# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 +# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists +crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" +# LP: 1615035, required only for M/N upgrade. +crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager +# LP: 1627450, required only for M/N upgrade +crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler + +crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm + diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh index b653c7c7..6748f891 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh @@ -2,25 +2,67 @@ set -eu -start_or_enable_service rabbitmq -check_resource rabbitmq started 600 +cluster_form_timeout=600 +cluster_settle_timeout=1800 +galera_sync_timeout=600 + +if [[ -n $(is_bootstrap_node) ]]; then + pcs cluster start --all + + tstart=$(date +%s) + while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_form_timeout )) ; then + echo_error "ERROR: timed out forming the cluster" + exit 1 + fi + done + + if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then + echo_error "ERROR: timed out waiting for cluster to finish transition" + exit 1 + fi + + for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do + pcs resource enable $vip + check_resource_pacemaker $vip started 60 + done +fi + +start_or_enable_service galera +check_resource galera started 600 start_or_enable_service redis check_resource redis started 600 -start_or_enable_service openstack-cinder-volume -check_resource openstack-cinder-volume started 600 - +# We need mongod which is now a systemd service up and running before calling +# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes +# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely +# we should be good. 
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm +systemctl start mongod +check_resource mongod started 600 -# Swift isn't controled by pacemaker -systemctl_swift start +if [[ -n $(is_bootstrap_node) ]]; then + tstart=$(date +%s) + while ! clustercheck; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > galera_sync_timeout )) ; then + echo_error "ERROR galera sync timed out" + exit 1 + fi + done -# We need to start the systemd services we explicitely stopped at step _1.sh -# FIXME: Should we let puppet during the convergence step do the service enabling or -# should we add it here? -services=$(services_to_migrate) -if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then - services=${services%%openstack-sahara*} + # Run all the db syncs + # TODO: check if this can be triggered in puppet and removed from here + ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types + cinder-manage db sync + glance-manage --config-file=/etc/glance/glance-registry.conf db_sync + heat-manage --config-file /etc/heat/heat.conf db_sync + keystone-manage db_sync + neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head + nova-manage db sync + nova-manage api_db sync + nova-manage db online_data_migrations + sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head fi -for service in $services; do - manage_systemd_service start "${service%%-clone}" - check_resource_systemd "${service%%-clone}" started 600 -done diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh new file mode 100755 index 00000000..d2cb9553 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -eu + +start_or_enable_service rabbitmq +check_resource rabbitmq started 600 +start_or_enable_service redis +check_resource redis started 600 +start_or_enable_service openstack-cinder-volume +check_resource openstack-cinder-volume started 600 + +# start httpd so keystone is available for gnocchi +# upgrade to run. +systemctl start httpd + +# Swift isn't controled by pacemaker +systemctl_swift start diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh new file mode 100755 index 00000000..fa95f1f8 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -eu + +if [[ -n $(is_bootstrap_node) ]]; then + # run gnocchi upgrade + gnocchi-upgrade +fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh new file mode 100755 index 00000000..d569084d --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -eu + +# We need to start the systemd services we explicitely stopped at step _1.sh +# FIXME: Should we let puppet during the convergence step do the service enabling or +# should we add it here? 
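The restart logic that answers this question continues below; it drops the sahara services from the list with bash suffix-stripping, the same idiom these scripts use to trim the pacemaker -clone suffix. Spelled out:

```bash
# ${var%%pattern} removes the longest suffix matching pattern, so everything
# from "openstack-sahara" onward is dropped (relying on the sahara entries
# sitting at the end of the migration list)
services="openstack-aodh-evaluator openstack-sahara-api openstack-sahara-engine"
echo "${services%%openstack-sahara*}"   # -> "openstack-aodh-evaluator "

# The same expansion trims the clone suffix from pacemaker resource names
service="openstack-cinder-volume-clone"
echo "${service%%-clone}"               # -> "openstack-cinder-volume"
```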
+services=$(services_to_migrate) +if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then + services=${services%%openstack-sahara*} +fi +for service in $services; do + manage_systemd_service start "${service%%-clone}" + check_resource_systemd "${service%%-clone}" started 600 +done diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh index 750ad82c..2667bb16 100644 --- a/extraconfig/tasks/major_upgrade_object_storage.sh +++ b/extraconfig/tasks/major_upgrade_object_storage.sh @@ -30,8 +30,8 @@ if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "sys pushd OVS_UPGRADE echo "Attempting to downloading latest openvswitch with yumdownloader" yumdownloader --resolve openvswitch - echo "Updating openvswitch with nopostun" - rpm -U --nopostun ./*.rpm + echo "Updating openvswitch with nopostun option" + rpm -U --replacepkgs --nopostun ./*.rpm popd else echo "Skipping manual upgrade of openvswitch - no restart in postun detected" diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml index 7c78d5ad..b0418a56 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -113,7 +113,20 @@ resources: config: list_join: - '' - - - get_file: pacemaker_common_functions.sh + - - str_replace: + template: | + #!/bin/bash + upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' + params: + UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - str_replace: + template: | + #!/bin/bash + mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' + params: + MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} + - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_check.sh - get_file: major_upgrade_pacemaker_migrations.sh - get_file: major_upgrade_controller_pacemaker_2.sh @@ -132,6 +145,63 @@ resources: config: list_join: - '' + - - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh + - get_file: major_upgrade_controller_pacemaker_3.sh + + ControllerPacemakerUpgradeDeployment_Step3: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: ControllerPacemakerUpgradeDeployment_Step2 + properties: + servers: {get_param: [servers, Controller]} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step3} + input_values: {get_param: input_values} + + ControllerPacemakerUpgradeConfig_Step4: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh + - get_file: major_upgrade_controller_pacemaker_4.sh + + ControllerPacemakerUpgradeDeployment_Step4: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: ControllerPacemakerUpgradeDeployment_Step3 + properties: + servers: {get_param: [servers, Controller]} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step4} + input_values: {get_param: input_values} + + ControllerPacemakerUpgradeConfig_Step5: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh + - get_file: major_upgrade_controller_pacemaker_5.sh + + ControllerPacemakerUpgradeDeployment_Step5: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: ControllerPacemakerUpgradeDeployment_Step4 + properties: + servers: {get_param: [servers, Controller]} + config: {get_resource: 
ControllerPacemakerUpgradeConfig_Step5} + input_values: {get_param: input_values} + + ControllerPacemakerUpgradeConfig_Step6: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' - - str_replace: template: | #!/bin/bash @@ -140,13 +210,12 @@ resources: KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade} - get_file: pacemaker_common_functions.sh - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_3.sh + - get_file: major_upgrade_controller_pacemaker_6.sh - ControllerPacemakerUpgradeDeployment_Step3: + ControllerPacemakerUpgradeDeployment_Step6: type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step2 + depends_on: ControllerPacemakerUpgradeDeployment_Step5 properties: servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step3} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step6} input_values: {get_param: input_values} - diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh index 7c9083a4..6d02acc8 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh +++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh @@ -179,3 +179,23 @@ function disable_standalone_ceilometer_api { fi fi } + + +# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton +# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}" +# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}" +# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances) +# Note that changing an attribute like this makes the rabbitmq resource restart +function rabbitmq_newton_ocata_upgrade { + if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then + # Number of controller is obtained by counting how many hostnames we + # have in controller_node_names hiera key + nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1)) + nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2))) + if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then + echo_error "ERROR: The nr. 
of HA queues during the M/N upgrade is out of range $nr_queues" + exit 1 + fi + pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 + fi +} diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh index 4f17b69a..2c7dfc35 100755 --- a/extraconfig/tasks/pacemaker_common_functions.sh +++ b/extraconfig/tasks/pacemaker_common_functions.sh @@ -284,7 +284,7 @@ function systemctl_swift { services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}') ;; start) - enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage') + enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml tripleo::profile::base::swift::storage::enable_swift_storage) if [[ $enable_swift_storage != "true" ]]; then services=( openstack-swift-proxy ) fi diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh index 3da7efec..49d39bc8 100755 --- a/extraconfig/tasks/pacemaker_resource_restart.sh +++ b/extraconfig/tasks/pacemaker_resource_restart.sh @@ -4,11 +4,14 @@ set -eux # Run if pacemaker is running, we're the bootstrap node, # and we're updating the deployment (not creating). -if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then + +RESTART_FOLDER="/var/lib/tripleo/pacemaker-restarts" + +if [[ -d "$RESTART_FOLDER" && -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then TIMEOUT=600 - SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)" PCS_STATUS_OUTPUT="$(pcs status)" + SERVICES_TO_RESTART="$(ls $RESTART_FOLDER)" for service in $SERVICES_TO_RESTART; do if ! echo "$PCS_STATUS_OUTPUT" | grep $service; then @@ -20,6 +23,11 @@ if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then for service in $SERVICES_TO_RESTART; do echo "Restarting $service..." 
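The rabbitmq_newton_ocata_upgrade function above sizes ha-params as CEIL(N/2) with pure integer arithmetic; a worked check of the formula for small clusters (the pacemaker_resource_restart.sh loop around this point resumes below):

```bash
# nr_queues = CEIL(N/2): integer division plus the remainder bit
for nr_controllers in 1 2 3 4 5; do
    nr_queues=$(( nr_controllers / 2 + (nr_controllers % 2) ))
    echo "$nr_controllers controllers -> ha-params=$nr_queues"
done
# prints 1->1, 2->1, 3->2, 4->2, 5->3
```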
pcs resource restart --wait=$TIMEOUT $service - rm -f /var/lib/tripleo/pacemaker-restarts/$service + rm -f "$RESTART_FOLDER"/$service done + +fi + +if [ $(systemctl is-active haproxy) = "active" ]; then + systemctl reload haproxy fi diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 4ca0b140..4612f197 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -73,14 +73,14 @@ if [[ "$pacemaker_status" == "active" ]] ; then pcs cluster stop fi else - echo "Upgrading openstack-puppet-modules" + echo "Upgrading openstack-puppet-modules and its dependencies" yum -q -y update openstack-puppet-modules + yum deplist openstack-puppet-modules | awk '/dependency/{print $2}' | xargs yum -q -y update echo "Upgrading other packages is handled by config management tooling" echo -n "true" > $heat_outputs_path.update_managed_packages exit 0 fi - # Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then echo "Manual upgrade of openvswitch - restart in postun detected" @@ -88,8 +88,8 @@ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "syst pushd OVS_UPGRADE echo "Attempting to downloading latest openvswitch with yumdownloader" yumdownloader --resolve openvswitch - echo "Updating openvswitch with nopostun" - rpm -U --nopostun ./*.rpm + echo "Updating openvswitch with nopostun option" + rpm -U --replacepkgs --nopostun ./*.rpm popd else echo "Skipping manual upgrade of openvswitch - no restart in postun detected" diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml index f8891b29..63d5bbf8 100644 --- a/firstboot/userdata_heat_admin.yaml +++ b/firstboot/userdata_heat_admin.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2014-10-16 +heat_template_version: 2016-10-14 parameters: # Can be overridden via parameter_defaults in the environment @@ -6,6 +6,10 @@ parameters: type: string default: heat-admin + node_admin_extra_ssh_keys: + type: comma_delimited_list + default: [] + description: > Uses cloud-init to create an additional user with a known name, in addition to the distro-default user created by the cloud-init default. 
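The firstboot change above adds a node_admin_extra_ssh_keys list (the hunk wiring it into cloud_config follows) that cloud-init injects into the admin user's authorized keys. A hypothetical environment file showing how a deployment might feed it; the file name and key material are illustrative:

```bash
# Hypothetical: extra authorized keys for the heat-admin user at deploy time
cat > extra-admin-keys.yaml <<'EOF'
parameter_defaults:
  node_admin_extra_ssh_keys:
    - ssh-rsa AAAAB3Nza... key-for-alice
    - ssh-rsa AAAAB3Nza... key-for-bob
EOF
openstack overcloud deploy --templates -e extra-admin-keys.yaml
```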
@@ -23,6 +27,8 @@ resources: properties: cloud_config: user: {get_param: node_admin_username} + ssh_authorized_keys: {get_param: node_admin_extra_ssh_keys} + outputs: OS::stack_id: diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml index 011dea7d..1df3b665 100644 --- a/network/endpoints/endpoint_data.yaml +++ b/network/endpoints/endpoint_data.yaml @@ -10,6 +10,15 @@ Aodh: net_param: AodhApi port: 8042 +Barbican: + Internal: + net_param: BarbicanApi + Public: + net_param: Public + Admin: + net_param: BarbicanApi + port: 9311 + Ceilometer: Internal: net_param: CeilometerApi diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml index ac519a5f..43fb20cc 100644 --- a/network/endpoints/endpoint_map.yaml +++ b/network/endpoints/endpoint_map.yaml @@ -22,6 +22,9 @@ parameters: AodhAdmin: {protocol: http, port: '8042', host: IP_ADDRESS} AodhInternal: {protocol: http, port: '8042', host: IP_ADDRESS} AodhPublic: {protocol: http, port: '8042', host: IP_ADDRESS} + BarbicanAdmin: {protocol: http, port: '9311', host: IP_ADDRESS} + BarbicanInternal: {protocol: http, port: '9311', host: IP_ADDRESS} + BarbicanPublic: {protocol: http, port: '9311', host: IP_ADDRESS} CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS} CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS} CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS} @@ -326,6 +329,249 @@ outputs: template: NETWORK_uri - ':' - get_param: [EndpointMap, AodhPublic, port] + BarbicanAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, BarbicanAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, BarbicanApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, BarbicanAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, BarbicanApiNetwork] + port: + get_param: [EndpointMap, BarbicanAdmin, port] + protocol: + get_param: [EndpointMap, BarbicanAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, BarbicanAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, BarbicanAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, BarbicanApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, BarbicanAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, BarbicanAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, BarbicanAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, BarbicanApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, BarbicanAdmin, port] + BarbicanInternal: + host: + str_replace: + template: + get_param: [EndpointMap, BarbicanInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: 
+ params: + NETWORK: + get_param: [ServiceNetMap, BarbicanApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, BarbicanInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, BarbicanApiNetwork] + port: + get_param: [EndpointMap, BarbicanInternal, port] + protocol: + get_param: [EndpointMap, BarbicanInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, BarbicanInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, BarbicanInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, BarbicanApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, BarbicanInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, BarbicanInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, BarbicanInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, BarbicanApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, BarbicanApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, BarbicanInternal, port] + BarbicanPublic: + host: + str_replace: + template: + get_param: [EndpointMap, BarbicanPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, BarbicanPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, BarbicanPublic, port] + protocol: + get_param: [EndpointMap, BarbicanPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, BarbicanPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, BarbicanPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, BarbicanPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, BarbicanPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, BarbicanPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, BarbicanPublic, port] CeilometerAdmin: host: str_replace: diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml index ac05fc73..61c97f13 100644 --- a/network/service_net_map.j2.yaml +++ b/network/service_net_map.j2.yaml @@ -25,6 +25,7 @@ parameters: NeutronTenantNetwork: tenant CeilometerApiNetwork: internal_api 
AodhApiNetwork: internal_api + BarbicanApiNetwork: internal_api GnocchiApiNetwork: internal_api MongodbNetwork: internal_api CinderApiNetwork: internal_api diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index 9b9cd581..3e201175 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -57,6 +57,9 @@ resource_registry: OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml + OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None + OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None + # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when # configuration with knowledge of all nodes in the cluster is required vs single @@ -181,14 +184,15 @@ resource_registry: # Services that are disabled by default (use relevant environment files): OS::TripleO::Services::FluentdClient: OS::Heat::None OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml - OS::Tripleo::Services::ManilaApi: OS::Heat::None - OS::Tripleo::Services::ManilaScheduler: OS::Heat::None - OS::Tripleo::Services::ManilaShare: OS::Heat::None - OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None - OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None - OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None + OS::TripleO::Services::ManilaApi: OS::Heat::None + OS::TripleO::Services::ManilaScheduler: OS::Heat::None + OS::TripleO::Services::ManilaShare: OS::Heat::None + OS::TripleO::Services::ManilaBackendGeneric: OS::Heat::None + OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None + OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None + OS::TripleO::Services::BarbicanApi: OS::Heat::None OS::TripleO::Services::AodhApi: puppet/services/aodh-api.yaml OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml @@ -213,3 +217,8 @@ resource_registry: parameter_defaults: EnablePackageInstall: false SoftwareConfigTransport: POLL_TEMP_URL + +{% for role in roles %} + # Parameters generated for {{role.name}} Role + {{role.name}}Services: {{role.ServicesDefault|default([])}} +{% endfor %} diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index d8d38c2a..42339ead 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -121,7 +121,6 @@ parameters: resource_registry) which represent nested stacks for each service that should get installed on the {{role.name}} role. 
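With the jinja loop above, each role's Services parameter default is now generated into parameter_defaults from the role's ServicesDefault, so overcloud.j2.yaml can drop its hardcoded default (that parameter's definition continues just below). One way to inspect the generated value on a live stack; the stack name is an assumption:

```bash
# Show the generated ControllerServices entry in the stack's effective environment
openstack stack environment show overcloud | grep -A 3 'ControllerServices'
```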
type: comma_delimited_list - default: {{role.ServicesDefault|default([])}} {{role.name}}Count: description: Number of {{role.name}} nodes to deploy @@ -232,8 +231,19 @@ resources: config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} input_values: - bootstrap_nodeid: {get_attr: [{{role.name}}, resource.0.hostname]} - bootstrap_nodeid_ip: {get_attr: [{{role.name}}, resource.0.ip_address]} + # Note we have to use yaql to look up the first hostname/ip in the + # list because heat path based attributes operate on the attribute + # inside the ResourceGroup, not the exposed list ref discussion in + # https://bugs.launchpad.net/heat/+bug/1640488 + # The coalesce is needed because $.data is None during heat validation + bootstrap_nodeid: + yaql: + expression: coalesce($.data, []).first(null) + data: {get_attr: [{{role.name}}, hostname]} + bootstrap_nodeid_ip: + yaql: + expression: coalesce($.data, []).first(null) + data: {get_attr: [{{role.name}}, ip_address]} {{role.name}}AllNodesValidationDeployment: type: OS::Heat::StructuredDeployments @@ -532,8 +542,8 @@ resources: # Post deployment steps for all roles AllNodesDeploySteps: type: OS::TripleO::PostDeploySteps -{% for role in roles %} depends_on: +{% for role in roles %} - {{role.name}}AllNodesDeployment {% endfor %} properties: @@ -556,57 +566,6 @@ outputs: KeystoneAdminVip: description: Keystone Admin VIP endpoint value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]} - PublicVip: - description: Controller VIP for public API endpoints - value: {get_attr: [VipMap, net_ip_map, external]} - AodhInternalVip: - description: VIP for Aodh API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]} - CeilometerInternalVip: - description: VIP for Ceilometer API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]} - CephRgwInternalVip: - description: VIP for Ceph RGW internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]} - CinderInternalVip: - description: VIP for Cinder API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]} - GlanceInternalVip: - description: VIP for Glance API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]} - GnocchiInternalVip: - description: VIP for Gnocchi API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]} - MistralInternalVip: - description: VIP for Mistral API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MistralApiNetwork]}]} - HeatInternalVip: - description: VIP for Heat API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]} - IronicInternalVip: - description: VIP for Ironic API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]} - KeystoneInternalVip: - description: VIP for Keystone API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]} - ManilaInternalVip: - description: VIP for Manila API internal 
endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]} - NeutronInternalVip: - description: VIP for Neutron API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]} - NovaInternalVip: - description: VIP for Nova API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]} - OpenDaylightInternalVip: - description: VIP for OpenDaylight API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]} - SaharaInternalVip: - description: VIP for Sahara API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]} - SwiftInternalVip: - description: VIP for Swift Proxy internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]} EndpointMap: description: | Mapping of the resources with the needed info for their endpoints. diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index 55b26336..f7e29b70 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -253,6 +253,7 @@ resources: - extraconfig - service_names - service_configs + - ceph - bootstrap_node # provided by allNodesConfig - all_nodes # provided by allNodesConfig - vip_data # provided by allNodesConfig diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh index 22fde9a7..8bcbbf4c 100644 --- a/puppet/deploy-artifacts.sh +++ b/puppet/deploy-artifacts.sh @@ -8,7 +8,7 @@ trap cleanup EXIT if [ -n "$artifact_urls" ]; then for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do - curl -o $TMP_DATA/file_data "$artifact_urls" + curl --globoff -o $TMP_DATA/file_data "$artifact_urls" if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then yum install -y $TMP_DATA/file_data elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml index 65c96ac2..582eb28d 100644 --- a/puppet/post.j2.yaml +++ b/puppet/post.j2.yaml @@ -47,73 +47,39 @@ resources: properties: StepConfig: {get_param: [role_data, {{role.name}}, step_config]} - # Step through a series of configuration steps - {{role.name}}Deployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] - properties: - name: {{role.name}}Deployment_Step1 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Deployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step1 - {% endfor %} + {% if role.name == 'Controller' %} + ControllerPrePuppet: + type: OS::TripleO::Tasks::ControllerPrePuppet properties: - name: {{role.name}}Deployment_Step2 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} + servers: {get_param: [servers, Controller]} input_values: - step: 2 update_identifier: {get_param: DeployIdentifier} + {% endif %} - {{role.name}}Deployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step2 - {% endfor %} - properties: - name: {{role.name}}Deployment_Step3 - servers: {get_param: 
[servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} + # Step through a series of configuration steps +{% for step in range(1, 6) %} + {% for role in roles %} - {{role.name}}Deployment_Step4: + {{role.name}}Deployment_Step{{step}}: type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] + {% else %} depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step3 - {% endfor %} + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step -1}} + {% endfor %} + {% endif %} properties: - name: {{role.name}}Deployment_Step4 + name: {{role.name}}Deployment_Step{{step}} servers: {get_param: [servers, {{role.name}}]} config: {get_resource: {{role.name}}Config} input_values: - step: 4 + step: {{step}} update_identifier: {get_param: DeployIdentifier} - {{role.name}}Deployment_Step5: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step4 {% endfor %} - properties: - name: {{role.name}}Deployment_Step5 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} +{% endfor %} {{role.name}}PostConfig: type: OS::TripleO::Tasks::{{role.name}}PostConfig @@ -136,4 +102,16 @@ resources: type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: [servers, {{role.name}}]} + + {% if role.name == 'Controller' %} + ControllerPostPuppet: + depends_on: + - ControllerExtraConfigPost + type: OS::TripleO::Tasks::ControllerPostPuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + {% endfor %} diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index e4307001..5b419f80 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -259,6 +259,7 @@ resources: - extraconfig - service_names - service_configs + - {{role.lower()}} - bootstrap_node # provided by allNodesConfig - all_nodes # provided by allNodesConfig - vip_data # provided by allNodesConfig diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml index da043c80..48cc4af6 100644 --- a/puppet/services/aodh-api.yaml +++ b/puppet/services/aodh-api.yaml @@ -26,6 +26,9 @@ parameters: description: Combination alarms are deprecated in Newton, hence disabled by default. To enable, set this parameter to true. type: boolean + EnableInternalTLS: + type: boolean + default: false resources: AodhBase: @@ -41,6 +44,7 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} outputs: role_data: @@ -52,7 +56,7 @@ outputs: map_merge: - get_attr: [AodhBase, role_data, config_settings] - get_attr: [ApacheServiceBase, role_data, config_settings] - - aodh::wsgi::apache::ssl: false + - aodh::wsgi::apache::ssl: {get_param: EnableInternalTLS} aodh::wsgi::apache::servername: str_replace: template: @@ -66,13 +70,18 @@ outputs: dport: - 8042 - 13042 + aodh::api::host: + str_replace: + template: + '"%{::fqdn_$NETWORK}"' + params: + $NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]} # NOTE: bind IP is found in Heat replacing the network name with the # local node IP for the given network; replacement examples # (eg. 
for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]} aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]} tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms} service_config_settings: diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml new file mode 100644 index 00000000..ab6b0ec7 --- /dev/null +++ b/puppet/services/barbican-api.yaml @@ -0,0 +1,130 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Barbican API service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + BarbicanPassword: + description: The password for the barbican service account. + type: string + hidden: true + BarbicanWorkers: + description: Set the number of workers for barbican::wsgi::apache + default: '"%{::processorcount}"' + type: string + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + EnableInternalTLS: + type: boolean + default: false + +resources: + + ApacheServiceBase: + type: ./apache.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Barbican API role. 
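  # For context (sketch, not part of this template's output): barbican_api
  # ships disabled via the OS::Heat::None mapping added to the resource
  # registry, so enabling it means re-pointing that entry at this template
  # from an environment file, roughly:
  #
  # resource_registry:
  #   OS::TripleO::Services::BarbicanApi: puppet/services/barbican-api.yaml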
+ value: + service_name: barbican_api + config_settings: + map_merge: + - get_attr: [ApacheServiceBase, role_data, config_settings] + - barbican::keystone::authtoken::password: {get_param: BarbicanPassword} + barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + barbican::keystone::authtoken::project_name: 'service' + barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]} + barbican::api::db_auto_create: false + barbican::api::enabled_certificate_plugins: ['simple_certificate'] + barbican::api::logging::debug: {get_param: Debug} + barbican::api::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + barbican::api::rabbit_userid: {get_param: RabbitUserName} + barbican::api::rabbit_password: {get_param: RabbitPassword} + barbican::api::rabbit_port: {get_param: RabbitClientPort} + barbican::api::rabbit_heartbeat_timeout_threshold: 60 + barbican::api::service_name: 'httpd' + barbican::wsgi::apache::bind_host: {get_param: [ServiceNetMap, BarbicanApiNetwork]} + barbican::wsgi::apache::ssl: {get_param: EnableInternalTLS} + barbican::wsgi::apache::workers: {get_param: BarbicanWorkers} + barbican::wsgi::apache::servername: + str_replace: + template: + '"%{::fqdn_$NETWORK}"' + params: + $NETWORK: {get_param: [ServiceNetMap, BarbicanApiNetwork]} + barbican::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://barbican:' + - {get_param: BarbicanPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/barbican' + tripleo.barbican_api.firewall_rules: + '117 barbican': + dport: + - 9311 + - 13311 + step_config: | + include ::tripleo::profile::base::barbican::api + service_config_settings: + mysql: + barbican::db::mysql::password: {get_param: BarbicanPassword} + barbican::db::mysql::user: barbican + barbican::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + barbican::db::mysql::dbname: barbican + barbican::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + keystone: + barbican::keystone::auth::public_url: {get_param: [EndpointMap, BarbicanPublic, uri]} + barbican::keystone::auth::internal_url: {get_param: [EndpointMap, BarbicanInternal, uri]} + barbican::keystone::auth::admin_url: {get_param: [EndpointMap, BarbicanAdmin, uri]} + barbican::keystone::auth::password: {get_param: BarbicanPassword} + barbican::keystone::auth::region: {get_param: KeystoneRegion} + barbican::keystone::auth::tenant: 'service' diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml index 27c32bfd..97b255a9 100644 --- a/puppet/services/ceilometer-api.yaml +++ b/puppet/services/ceilometer-api.yaml @@ -26,7 +26,9 @@ parameters: default: tag: openstack.ceilometer.api path: /var/log/ceilometer/api.log - + EnableInternalTLS: + type: boolean + default: false resources: CeilometerServiceBase: @@ -42,6 +44,7 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} outputs: role_data: @@ -69,9 +72,14 @@ outputs: # internal_api_subnet - > IP/CIDR - ceilometer::api::service_name: 'httpd' ceilometer::api::enable_proxy_headers_parsing: true - ceilometer::api::host: {get_param: [ServiceNetMap, CeilometerApiNetwork]} + ceilometer::api::host: + str_replace: + template: + '"%{::fqdn_$NETWORK}"' + params: + 
$NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]} ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]} - ceilometer::wsgi::apache::ssl: false + ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS} ceilometer::wsgi::apache::servername: str_replace: template: diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml index 7d75074c..b708665f 100644 --- a/puppet/services/ceph-external.yaml +++ b/puppet/services/ceph-external.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2016-04-08 +heat_template_version: 2016-10-14 description: > Ceph External service. @@ -27,9 +27,20 @@ parameters: GlanceRbdPoolName: default: images type: string + GlanceBackend: + default: swift + description: The short name of the Glance backend to use. Should be one + of swift, rbd, or file + type: string + constraints: + - allowed_values: ['swift', 'file', 'rbd'] GnocchiRbdPoolName: default: metrics type: string + NovaEnableRbdBackend: + default: false + description: Whether to enable or not the Rbd backend for Nova + type: boolean NovaRbdPoolName: default: vms type: string @@ -51,6 +62,16 @@ parameters: default: 'overcloud-ceph-external' type: string +conditions: + glance_multiple_locations: + and: + - equals: + - get_param: GlanceBackend + - rbd + - equals: + - get_param: NovaEnableRbdBackend + - true + outputs: role_data: description: Role data for the Ceph External service. @@ -78,7 +99,16 @@ outputs: CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} GLANCE_POOL: {get_param: GlanceRbdPoolName} GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} + ceph::profile::params::manage_repo: false + # FIXME(gfidente): we should not have to list the packages explicitly in + # the templates, but this should stay until the following is fixed: + # https://bugs.launchpad.net/puppet-ceph/+bug/1629933 + ceph::params::packages: + - ceph-base + - ceph-mon + - ceph-osd service_config_settings: - get_attr: [CephBase, role_data, service_config_settings] + glance_api: + glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]} step_config: | include ::tripleo::profile::base::ceph::client diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml index 18a4b780..89c1a5ee 100644 --- a/puppet/services/ceph-rgw.yaml +++ b/puppet/services/ceph-rgw.yaml @@ -55,15 +55,9 @@ outputs: - tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey} tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken} tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} - ceph::profile::params::frontend_type: 'civetweb' - ceph_rgw_civetweb_bind_address: {get_param: [ServiceNetMap, CephRgwNetwork]} - ceph::profile::params::rgw_frontends: - list_join: - - '' - - - 'civetweb port=' - - '%{hiera("ceph_rgw_civetweb_bind_address")}' - - ':' - - {get_param: [EndpointMap, CephRgwInternal, port]} + tripleo::profile::base::ceph::rgw::civetweb_bind_ip: {get_param: [ServiceNetMap, CephRgwNetwork]} + tripleo::profile::base::ceph::rgw::civetweb_bind_port: {get_param: [EndpointMap, CephRgwInternal, port]} + ceph::params::user_radosgw: ceph tripleo.ceph_rgw.firewall_rules: '122 ceph rgw': dport: {get_param: [EndpointMap, CephRgwInternal, port]} diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml index 3c624e3a..803d8b83 100644 --- a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -43,6 +43,9 @@ parameters: type: string 
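  # The EnableInternalTLS parameter declared just below mirrors the ones
  # added to aodh-api.yaml and ceilometer-api.yaml above: it feeds the
  # service's <service>::wsgi::apache::ssl flag. A minimal sketch of turning
  # them all on (assumes certificate handling is configured separately):
  #
  # parameter_defaults:
  #   EnableInternalTLS: true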
description: Set the number of workers for cinder::wsgi::apache default: '"%{::os_workers}"' + EnableInternalTLS: + type: boolean + default: false conditions: cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]} @@ -55,6 +58,7 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} CinderBase: type: ./cinder-base.yaml @@ -94,20 +98,26 @@ outputs: dport: - 8776 - 13776 + cinder::api::bind_host: + str_replace: + template: + '"%{::fqdn_$NETWORK}"' + params: + $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]} + cinder::wsgi::apache::ssl: {get_param: EnableInternalTLS} + cinder::api::service_name: 'httpd' # NOTE: bind IP is found in Heat replacing the network name with the local node IP # for the given network; replacement examples (eg. for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]} - cinder::wsgi::apache::ssl: false cinder::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]} cinder::wsgi::apache::servername: str_replace: template: '"%{::fqdn_$NETWORK}"' params: - $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]} + $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]} - if: - cinder_workers_zero diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml index 3294fc0f..cc979af9 100644 --- a/puppet/services/glance-base.yaml +++ b/puppet/services/glance-base.yaml @@ -44,6 +44,21 @@ parameters: type: string constraints: - allowed_values: ['swift', 'file', 'rbd'] + GlanceNfsEnabled: + default: false + description: > + When using GlanceBackend 'file', mount NFS share for image storage. 
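  # Together with GlanceNfsShare and GlanceNfsOptions below, a sketch of the
  # intended usage (placeholder share path; only meaningful with the 'file'
  # backend):
  #
  # parameter_defaults:
  #   GlanceBackend: file
  #   GlanceNfsEnabled: true
  #   GlanceNfsShare: nfs.example.com:/export/glance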
+ type: boolean + GlanceNfsShare: + default: '' + description: > + NFS share to mount for image storage (when GlanceNfsEnabled is true) + type: string + GlanceNfsOptions: + default: 'intr,context=system_u:object_r:glance_var_lib_t:s0' + description: > + NFS mount options for image storage (when GlanceNfsEnabled is true) + type: string GlanceRbdPoolName: default: images type: string @@ -92,6 +107,9 @@ outputs: glance::notify::rabbitmq::notification_driver: messagingv2 glance::registry::db::database_db_max_retries: -1 glance::registry::db::database_max_retries: -1 + tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled} + tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare} + tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions} service_config_settings: keystone: glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]} diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml index e3397769..ac15de4f 100644 --- a/puppet/services/gnocchi-api.yaml +++ b/puppet/services/gnocchi-api.yaml @@ -41,6 +41,9 @@ parameters: default: tag: openstack.gnocchi.api path: /var/log/gnocchi/app.log + EnableInternalTLS: + type: boolean + default: false resources: @@ -57,6 +60,7 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} outputs: role_data: @@ -83,7 +87,7 @@ outputs: gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword} gnocchi::keystone::authtoken::project_name: 'service' - gnocchi::wsgi::apache::ssl: false + gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS} gnocchi::wsgi::apache::servername: str_replace: template: @@ -98,7 +102,12 @@ outputs: # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]} - gnocchi::api::host: {get_param: [ServiceNetMap, GnocchiApiNetwork]} + gnocchi::api::host: + str_replace: + template: + '"%{::fqdn_$NETWORK}"' + params: + $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]} gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml index 04339f46..983d6c91 100644 --- a/puppet/services/gnocchi-statsd.yaml +++ b/puppet/services/gnocchi-statsd.yaml @@ -39,5 +39,9 @@ outputs: config_settings: map_merge: - get_attr: [GnocchiServiceBase, role_data, config_settings] + - tripleo.gnocchi_statsd.firewall_rules: + '140 gnocchi-statsd': + dport: 8125 + proto: 'udp' step_config: | include ::tripleo::profile::base::gnocchi::statsd diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml index 1a86ec71..12d4a6a1 100644 --- a/puppet/services/heat-api-cfn.yaml +++ b/puppet/services/heat-api-cfn.yaml @@ -76,9 +76,11 @@ outputs: include ::tripleo::profile::base::heat::api_cfn service_config_settings: keystone: - heat::keystone::auth_cfn::tenant: 'service' - heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]} - heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]} - heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]} - 
heat::keystone::auth_cfn::password: {get_param: HeatPassword} - heat::keystone::auth_cfn::region: {get_param: KeystoneRegion} + map_merge: + - get_attr: [HeatBase, role_data, service_config_settings, keystone] + - heat::keystone::auth_cfn::tenant: 'service' + heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]} + heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]} + heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]} + heat::keystone::auth_cfn::password: {get_param: HeatPassword} + heat::keystone::auth_cfn::region: {get_param: KeystoneRegion} diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml index 2ea96fc0..b0cd16dd 100644 --- a/puppet/services/heat-api.yaml +++ b/puppet/services/heat-api.yaml @@ -76,9 +76,11 @@ outputs: include ::tripleo::profile::base::heat::api service_config_settings: keystone: - heat::keystone::auth::tenant: 'service' - heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]} - heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]} - heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]} - heat::keystone::auth::password: {get_param: HeatPassword} - heat::keystone::auth::region: {get_param: KeystoneRegion} + map_merge: + - get_attr: [HeatBase, role_data, service_config_settings, keystone] + - heat::keystone::auth::tenant: 'service' + heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]} + heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]} + heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]} + heat::keystone::auth::password: {get_param: HeatPassword} + heat::keystone::auth::region: {get_param: KeystoneRegion} diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml index 7eb58f56..a2a65d7d 100644 --- a/puppet/services/heat-base.yaml +++ b/puppet/services/heat-base.yaml @@ -77,3 +77,8 @@ outputs: heat::cron::purge_deleted::destination: '/dev/null' heat::db::database_db_max_retries: -1 heat::db::database_max_retries: -1 + service_config_settings: + keystone: + tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack' + tripleo::profile::base::keystone::heat_admin_user: 'heat_stack_domain_admin' + tripleo::profile::base::keystone::heat_admin_email: 'heat_stack_domain_admin@localhost' diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index 20415eef..3f0e4105 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -105,4 +105,4 @@ outputs: - "%{hiera('mysql_bind_host')}" keystone: # This is needed because the keystone profile handles creating the domain - heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} + tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword} diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index 1e08415c..8eaf4044 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -24,7 +24,7 @@ parameters: type: json HorizonAllowedHosts: default: '*' - description: A list of IP/Hostname for the server Horizonis running on. + description: A list of IP/Hostname for the server Horizon is running on. Used for header checks. 
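  # Illustrative override (placeholder values): these entries end up in
  # Django's ALLOWED_HOSTS check, so list every name and IP the dashboard
  # is reached through:
  #
  # parameter_defaults:
  #   HorizonAllowedHosts:
  #     - horizon.example.com
  #     - 192.0.2.10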
type: comma_delimited_list HorizonSecret: @@ -32,11 +32,6 @@ parameters: type: string hidden: true default: '' - NeutronMechanismDrivers: - default: 'openvswitch' - description: | - The mechanism drivers for the Neutron tenant network. - type: comma_delimited_list MemcachedIPv6: default: false description: Enable IPv6 features in Memcached. @@ -45,6 +40,10 @@ parameters: default: 'overcloud-horizon' type: string +conditions: + + debug_empty: {equals : [{get_param: Debug}, '']} + outputs: role_data: description: Role data for the Horizon role. @@ -52,33 +51,36 @@ outputs: service_name: horizon monitoring_subscription: {get_param: MonitoringSubscriptionHorizon} config_settings: - horizon::allowed_hosts: {get_param: HorizonAllowedHosts} - neutron::plugins::ml2::mechanism_drivers: - str_replace: - template: MECHANISMS - params: - MECHANISMS: {get_param: NeutronMechanismDrivers} - tripleo.horizon.firewall_rules: - '126 horizon': - dport: - - 80 - - 443 - horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache - horizon::django_session_engine: 'django.contrib.sessions.backends.cache' - horizon::vhost_extra_params: - add_listen: false - priority: 10 - access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' - horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]} - horizon::django_debug: {get_param: Debug} - horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]} - horizon::secret_key: - yaql: - expression: $.data.passwords.where($ != '').first() - data: - passwords: - - {get_param: HorizonSecret} - - {get_param: [DefaultPasswords, horizon_secret]} - memcached_ipv6: {get_param: MemcachedIPv6} + map_merge: + - horizon::allowed_hosts: {get_param: HorizonAllowedHosts} + tripleo.horizon.firewall_rules: + '126 horizon': + dport: + - 80 + - 443 + horizon::disable_password_reveal: true + horizon::enforce_password_check: true + horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache + horizon::django_session_engine: 'django.contrib.sessions.backends.cache' + horizon::vhost_extra_params: + add_listen: false + priority: 10 + access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' + options: ['FollowSymLinks','MultiViews'] + horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]} + horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]} + horizon::secret_key: + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: HorizonSecret} + - {get_param: [DefaultPasswords, horizon_secret]} + memcached_ipv6: {get_param: MemcachedIPv6} + - + if: + - debug_empty + - {} + - horizon::django_debug: {get_param: Debug} step_config: | include ::tripleo::profile::base::horizon diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml index 1fc88bf1..69898718 100644 --- a/puppet/services/kernel.yaml +++ b/puppet/services/kernel.yaml @@ -18,6 +18,10 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. 
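  # The KernelPidMax parameter introduced below is a plain sysctl knob; an
  # illustrative override for hosts expected to run very large numbers of
  # processes (value chosen for the example only):
  #
  # parameter_defaults:
  #   KernelPidMax: 2097152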
type: json + KernelPidMax: + default: 1048576 + description: Configures sysctl kernel.pid_max key + type: number outputs: role_data: @@ -49,5 +53,7 @@ outputs: value: 0 net.core.netdev_max_backlog: value: 10000 + kernel.pid_max: + value: {get_param: KernelPidMax} step_config: | include ::tripleo::profile::base::kernel diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 1f83b680..d819e043 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2016-04-08 +heat_template_version: 2016-10-14 description: > OpenStack Keystone service configured with Puppet @@ -32,6 +32,12 @@ parameters: type: string default: 'regionOne' description: Keystone region for endpoint + KeystoneTokenProvider: + description: The keystone token format + type: string + default: 'uuid' + constraints: + - allowed_values: ['uuid', 'fernet'] ServiceNetMap: default: {} description: Mapping of service_name -> network name. Typically set @@ -85,7 +91,7 @@ parameters: description: Set the number of workers for keystone::wsgi::apache default: '"%{::os_workers}"' MonitoringSubscriptionKeystone: - default: 'overcloud-kestone' + default: 'overcloud-keystone' type: string KeystoneCredential0: type: string @@ -112,6 +118,9 @@ resources: EndpointMap: {get_param: EndpointMap} EnableInternalTLS: {get_param: EnableInternalTLS} +conditions: + keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]} + outputs: role_data: description: Role data for the Keystone role. @@ -138,6 +147,8 @@ outputs: keystone::roles::admin::password: {get_param: AdminPassword} keystone_ssl_certificate: {get_param: KeystoneSSLCertificate} keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey} + keystone::token_provider: {get_param: KeystoneTokenProvider} + keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]} keystone::enable_proxy_headers_parsing: true keystone::enable_credential_setup: true keystone::credential_keys: diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml index 4d3fd47c..b4b3d480 100644 --- a/puppet/services/manila-api.yaml +++ b/puppet/services/manila-api.yaml @@ -51,6 +51,11 @@ outputs: manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } manila::keystone::authtoken::project_name: 'service' + tripleo.manila_api.firewall_rules: + '150 manila': + dport: + - 8786 + - 13786 # NOTE: bind IP is found in Heat replacing the network name with the # local node IP for the given network; replacement examples # (eg. 
for internal_api): @@ -62,21 +67,15 @@ outputs: step_config: | include ::tripleo::profile::base::manila::api service_config_settings: - keystone: - manila::keystone::auth::tenant: 'service' - manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]} - manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]} - manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]} - manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]} - manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]} - manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]} - manila::keystone::auth::password: {get_param: ManilaPassword} - manila::keystone::auth::region: {get_param: KeystoneRegion} - mysql: - manila::db::mysql::password: {get_param: ManilaPassword} - manila::db::mysql::user: manila - manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} - manila::db::mysql::dbname: manila - manila::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" + map_merge: + - get_attr: [ManilaBase, role_data, service_config_settings] + - keystone: + manila::keystone::auth::tenant: 'service' + manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]} + manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]} + manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]} + manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]} + manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]} + manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]} + manila::keystone::auth::password: {get_param: ManilaPassword} + manila::keystone::auth::region: {get_param: KeystoneRegion} diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml index d228577a..844bd3a3 100644 --- a/puppet/services/manila-base.yaml +++ b/puppet/services/manila-base.yaml @@ -40,6 +40,10 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + ManilaPassword: + description: The password for the manila service account. 
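  # For reference (sketch; the exact scheme comes from EndpointMap): the
  # manila::sql_connection assembled below expands to roughly
  #   mysql+pymysql://manila:<ManilaPassword>@<mysql vip>/manila
  # and the same ManilaPassword is reused for the keystone service user and
  # the database grants under service_config_settings.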
+ type: string + hidden: true outputs: role_data: @@ -54,3 +58,21 @@ outputs: manila::debug: {get_param: Debug} manila::db::database_db_max_retries: -1 manila::db::database_max_retries: -1 + manila::sql_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://manila:' + - {get_param: ManilaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/manila' + service_config_settings: + mysql: + manila::db::mysql::password: {get_param: ManilaPassword} + manila::db::mysql::user: manila + manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + manila::db::mysql::dbname: manila + manila::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/manila-scheduler.yaml b/puppet/services/manila-scheduler.yaml index 474cc24f..d96b677b 100644 --- a/puppet/services/manila-scheduler.yaml +++ b/puppet/services/manila-scheduler.yaml @@ -57,14 +57,5 @@ outputs: manila::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]} manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, NeutronAdmin, uri]} manila::network::neutron::neutron_admin_password: {get_param: NeutronPassword} - manila::sql_connection: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://manila:' - - {get_param: ManilaPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/manila' step_config: | include ::tripleo::profile::base::manila::scheduler diff --git a/puppet/services/manila-share.yaml b/puppet/services/manila-share.yaml index e42d2fae..49c69fc1 100644 --- a/puppet/services/manila-share.yaml +++ b/puppet/services/manila-share.yaml @@ -21,6 +21,10 @@ parameters: MonitoringSubscriptionManilaShare: default: 'overcloud-manila-share' type: string + ManilaPassword: + description: The password for the manila service account. + type: string + hidden: true resources: ManilaBase: @@ -40,5 +44,11 @@ outputs: map_merge: - get_attr: [ManilaBase, role_data, config_settings] - manila::volume::cinder::cinder_admin_tenant_name: 'service' + manila::keystone::authtoken::password: {get_param: ManilaPassword} + manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + manila::keystone::authtoken::project_name: 'service' + service_config_settings: + get_attr: [ManilaBase, role_data, service_config_settings] step_config: | include ::tripleo::profile::base::manila::share diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml index d7350d07..e5762328 100644 --- a/puppet/services/monitoring/sensu-base.yaml +++ b/puppet/services/monitoring/sensu-base.yaml @@ -43,7 +43,19 @@ parameters: description: The RabbitMQ vhost used for monitoring purposes. type: string default: '/sensu' - + SensuRedactVariables: + description: Variables from Sensu configuration, which have to be redacted. 
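  # Keep in mind that overriding this parameter replaces the whole default
  # list rather than appending to it; an illustrative site-specific override
  # (the last entry is a placeholder):
  #
  # parameter_defaults:
  #   SensuRedactVariables:
  #     - password
  #     - passwd
  #     - my_site_api_token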
+ type: array + default: + - password + - passwd + - pass + - api_key + - api_token + - access_key + - secret_key + - private_key + - secret outputs: role_data: @@ -61,8 +73,7 @@ outputs: sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL} sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName} sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost} - #sensu::redis_host: {get_param: MonitoringRedisHost} - #sensu::redis_password: {get_param: MonitoringRedisPassword} + sensu::redact: {get_param: SensuRedactVariables} sensu::sensu_plugin_provider: 'yum' sensu::sensu_plugin_name: 'rubygem-sensu-plugin' sensu::version: 'present' diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index 6bb4ba08..0b2cef07 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -50,16 +50,13 @@ parameters: to false may result in configuration remnants after updates/upgrades. NeutronGlobalPhysnetMtu: type: number - default: 1496 + default: 1500 description: | MTU of the underlying physical network. Neutron uses this value to calculate MTU for all virtual network components. For flat and VLAN networks, neutron uses this value without modification. For overlay networks such as VXLAN, neutron automatically subtracts the overlay - protocol overhead from this value. The default value of 1496 is - currently in effect to compensate for some additional overhead when - deploying with some network configurations (e.g. network isolation over - single network interfaces) + protocol overhead from this value. ServiceNetMap: default: {} description: Mapping of service_name -> network name. Typically set diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml index a89e3d75..a2157555 100644 --- a/puppet/services/neutron-l3.yaml +++ b/puppet/services/neutron-l3.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2016-04-08 +heat_template_version: 2016-10-14 description: > OpenStack Neutron L3 agent configured with Puppet @@ -43,6 +43,10 @@ parameters: tag: openstack.neutron.agent.l3 path: /var/log/neutron/l3-agent.log +conditions: + + external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]} + resources: NeutronBase: @@ -63,12 +67,16 @@ outputs: - neutron config_settings: map_merge: - - get_attr: [NeutronBase, role_data, config_settings] + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::l3::router_delete_namespaces: True + neutron::agents::l3::agent_mode: {get_param: NeutronL3AgentMode} + tripleo.neutron_l3.firewall_rules: + '106 neutron_l3 vrrp': + proto: vrrp + - + if: + - external_network_bridge_empty + - {} - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} - neutron::agents::l3::router_delete_namespaces: True - neutron::agents::l3::agent_mode : {get_param: NeutronL3AgentMode} - tripleo.neutron_l3.firewall_rules: - '106 neutron_l3 vrrp': - proto: vrrp step_config: | include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index ba7fb2e1..49bd84bc 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -51,6 +51,9 @@ parameters: default: tag: openstack.nova.api path: /var/log/nova/nova-api.log + EnableInternalTLS: + type: boolean + default: false conditions: nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]} @@ -62,6 +65,7 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: 
{get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} NovaBase: type: ./nova-base.yaml @@ -88,8 +92,6 @@ outputs: tripleo.nova_api.firewall_rules: '113 nova_api': dport: - - 6080 - - 13080 - 8773 - 3773 - 8774 @@ -103,22 +105,26 @@ outputs: nova::api::default_floating_pool: 'public' nova::api::sync_db_api: true nova::api::enable_proxy_headers_parsing: true + nova::api::api_bind_address: + str_replace: + template: + '"%{::fqdn_$NETWORK}"' + params: + $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]} + nova::api::service_name: 'httpd' + nova::wsgi::apache::ssl: {get_param: EnableInternalTLS} # NOTE: bind IP is found in Heat replacing the network name with the local node IP # for the given network; replacement examples (eg. for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - nova::api::api_bind_address: {get_param: [ServiceNetMap, NovaApiNetwork]} - nova::api::service_name: 'httpd' - nova::wsgi::apache::ssl: false nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]} nova::wsgi::apache::servername: str_replace: template: '"%{::fqdn_$NETWORK}"' params: - $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]} - nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]} + $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]} nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} nova::api::instance_name_template: {get_param: InstanceNameTemplate} nova_enable_db_purge: {get_param: NovaEnableDBPurge} diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml index 8db00d8f..74a95d20 100644 --- a/puppet/services/nova-base.yaml +++ b/puppet/services/nova-base.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2016-04-08 +heat_template_version: 2016-10-14 description: > OpenStack Nova base service. Shared for all Nova services. @@ -66,6 +66,9 @@ parameters: type: string description: Nova Compute upgrade level default: '' +conditions: + + compute_upgrade_level_empty: {equals : [{get_param: UpgradeLevelNovaCompute}, '']} outputs: role_data: @@ -73,45 +76,50 @@ outputs: value: service_name: nova_base config_settings: - nova::rabbit_password: {get_param: RabbitPassword} - nova::rabbit_userid: {get_param: RabbitUserName} - nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL} - nova::rabbit_port: {get_param: RabbitClientPort} - nova::database_connection: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://nova:' - - {get_param: NovaPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/nova' - nova::api_database_connection: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://nova_api:' - - {get_param: NovaPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/nova_api' - nova::debug: {get_param: Debug} - nova::purge_config: {get_param: EnableConfigPurge} - nova::network::neutron::neutron_project_name: 'service' - nova::network::neutron::neutron_username: 'neutron' - nova::network::neutron::dhcp_domain: '' - nova::network::neutron::neutron_password: {get_param: NeutronPassword} - nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]} - nova::network::neutron::neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} - nova::rabbit_heartbeat_timeout_threshold: 60 - nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' - nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed. 
- nova::notify_on_state_change: 'vm_and_task_state' - nova::notification_driver: messagingv2 - nova::network::neutron::neutron_auth_type: 'v3password' - nova::db::database_db_max_retries: -1 - nova::db::database_max_retries: -1 - nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - nova::use_ipv6: {get_param: NovaIPv6} - nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute} - nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge} + map_merge: + - nova::rabbit_password: {get_param: RabbitPassword} + nova::rabbit_userid: {get_param: RabbitUserName} + nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + nova::rabbit_port: {get_param: RabbitClientPort} + nova::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova:' + - {get_param: NovaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/nova' + nova::api_database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova_api:' + - {get_param: NovaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/nova_api' + nova::debug: {get_param: Debug} + nova::purge_config: {get_param: EnableConfigPurge} + nova::network::neutron::neutron_project_name: 'service' + nova::network::neutron::neutron_username: 'neutron' + nova::network::neutron::dhcp_domain: '' + nova::network::neutron::neutron_password: {get_param: NeutronPassword} + nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]} + nova::network::neutron::neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} + nova::rabbit_heartbeat_timeout_threshold: 60 + nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' + nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed. 
+ nova::notify_on_state_change: 'vm_and_task_state' + nova::notification_driver: messagingv2 + nova::network::neutron::neutron_auth_type: 'v3password' + nova::db::database_db_max_retries: -1 + nova::db::database_max_retries: -1 + nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} + nova::use_ipv6: {get_param: NovaIPv6} + nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge} + - + if: + - compute_upgrade_level_empty + - {} + - nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute} diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml index 31732580..70774bac 100644 --- a/puppet/services/nova-libvirt.yaml +++ b/puppet/services/nova-libvirt.yaml @@ -54,6 +54,9 @@ outputs: '200 nova_libvirt': dport: - 16509 + - 16514 + - '49152-49215' + - '5900-5999' step_config: | include tripleo::profile::base::nova::libvirt diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml index d89e3e11..d4e5fff6 100644 --- a/puppet/services/nova-scheduler.yaml +++ b/puppet/services/nova-scheduler.yaml @@ -58,7 +58,7 @@ outputs: config_settings: map_merge: - get_attr: [NovaBase, role_data, config_settings] - - nova::scheduler::filter::ram_allocation_ratio: '1.0' + - nova::ram_allocation_ratio: '1.0' nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters} nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters} step_config: | diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml index 85d59ae6..e6b0703f 100644 --- a/puppet/services/nova-vnc-proxy.yaml +++ b/puppet/services/nova-vnc-proxy.yaml @@ -57,5 +57,10 @@ outputs: # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR nova::vncproxy::host: {get_param: [ServiceNetMap, NovaApiNetwork]} + tripleo.nova_vnc_proxy.firewall_rules: + '137 nova_vnc_proxy': + dport: + - 6080 + - 13080 step_config: | include tripleo::profile::base::nova::vncproxy diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml index 318c898e..253d63ef 100644 --- a/puppet/services/opendaylight-api.yaml +++ b/puppet/services/opendaylight-api.yaml @@ -59,6 +59,6 @@ outputs: opendaylight::enable_l3: {get_param: OpenDaylightEnableL3} opendaylight::extra_features: {get_param: OpenDaylightFeatures} opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP} - opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpenDaylightApiNetwork]} + opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]} step_config: | include tripleo::profile::base::neutron::opendaylight diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml index 268ca244..907ecddc 100644 --- a/puppet/services/opendaylight-ovs.yaml +++ b/puppet/services/opendaylight-ovs.yaml @@ -54,5 +54,11 @@ outputs: template: MAPPINGS params: MAPPINGS: {get_param: OpenDaylightProviderMappings} + tripleo.opendaylight_ovs.firewall_rules: + '118 neutron vxlan networks': + proto: 'udp' + dport: 4789 + '136 neutron gre networks': + proto: 'gre' step_config: | include tripleo::profile::base::neutron::plugins::ovs::opendaylight diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml index 52104a71..e4115d64 100644 --- a/puppet/services/pacemaker/haproxy.yaml +++ b/puppet/services/pacemaker/haproxy.yaml @@ -38,7 +38,5 @@ outputs: - get_attr: [LoadbalancerServiceBase, role_data, config_settings] - 
tripleo::haproxy::haproxy_service_manage: false tripleo::haproxy::mysql_clustercheck: true - enable_keepalived: false - tripleo::haproxy::keepalived: false step_config: | include ::tripleo::profile::pacemaker::haproxy diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index 5387529d..44a09a42 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -71,6 +71,7 @@ outputs: rabbitmq::port: '5672' rabbitmq::package_source: undef rabbitmq::repos_ensure: false + rabbitmq::tcp_keepalive: true rabbitmq_environment: RABBITMQ_NODENAME: "rabbit@%{::hostname}" RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' @@ -78,7 +79,6 @@ outputs: inet_dist_listen_min: '25672' inet_dist_listen_max: '25672' rabbitmq_config_variables: - tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]' cluster_partition_handling: 'pause_minority' queue_master_locator: '<<"min-masters">>' loopback_users: '[]' diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml index 5fc8ed61..4072a150 100644 --- a/puppet/services/sahara-base.yaml +++ b/puppet/services/sahara-base.yaml @@ -44,6 +44,10 @@ parameters: type: string default: '' description: Set to True to enable debugging on all services. + SaharaPlugins: + default: ["ambari","cdh","mapr","vanilla","spark","storm"] + description: Sahara enabled plugin list + type: comma_delimited_list outputs: role_data: @@ -69,13 +73,7 @@ outputs: sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } sahara::use_neutron: true - sahara::plugins: - - ambari - - cdh - - mapr - - vanilla - - spark - - storm + sahara::plugins: {get_param: SaharaPlugins} sahara::rpc_backend: rabbit sahara::admin_tenant_name: 'service' sahara::db::database_db_max_retries: -1 diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml index 176fd235..ffe2d2d4 100644 --- a/puppet/services/services.yaml +++ b/puppet/services/services.yaml @@ -54,8 +54,8 @@ outputs: data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}} monitoring_subscriptions: yaql: - expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null)) - data: {get_attr: [ServiceChain, role_data]} + expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null)) + data: {role_data: {get_attr: [ServiceChain, role_data]}} logging_sources: # Transform the individual logging_source configuration from # each service in the chain into a global list, adding some @@ -78,8 +78,9 @@ outputs: sources: - {get_attr: [LoggingConfiguration, LoggingDefaultSources]} - yaql: - expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null)) - data: {get_attr: [ServiceChain, role_data]} + expression: list($.data.role_data.where($ != null).select($.get('logging_source')).where($ != null)) + data: {role_data: {get_attr: [ServiceChain, role_data]}} + - {get_attr: [LoggingConfiguration, LoggingExtraSources]} default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]} pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]} @@ -93,17 +94,17 @@ outputs: groups: - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}] - 
yaql: - expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null)) - data: {get_attr: [ServiceChain, role_data]} + expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null)) + data: {role_data: {get_attr: [ServiceChain, role_data]}} - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}] config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}} global_config_settings: map_merge: yaql: - expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null)) - data: {get_attr: [ServiceChain, role_data]} + expression: list($.data.role_data.where($ != null).select($.get('global_config_settings')).where($ != null)) + data: {role_data: {get_attr: [ServiceChain, role_data]}} service_config_settings: yaql: - expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {}) - data: {get_attr: [ServiceChain, role_data]} + expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {}) + data: {role_data: {get_attr: [ServiceChain, role_data]}} step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index de8daea5..ba1d99f1 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -74,9 +74,9 @@ outputs: swift::proxy::authtoken::project_name: 'service' swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout} swift::proxy::workers: {get_param: SwiftWorkers} - swift::proxy::ceilometer::rabbit_host: {get_param: [ServiceNetMap, RabbitmqNetwork]} swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName} swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword} + swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]} tripleo.swift_proxy.firewall_rules: '122 swift proxy': dport: @@ -88,7 +88,6 @@ outputs: - ResellerAdmin swift::proxy::versioned_writes::allow_versioned_writes: true swift::proxy::pipeline: - - 'ceilometer' - 'catch_errors' - 'healthcheck' - 'proxy-logging' @@ -101,6 +100,7 @@ outputs: - 'keystone' - 'staticweb' - 'versioned_writes' + - 'ceilometer' - 'proxy-logging' - 'proxy-server' swift::proxy::account_autocreate: true diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml index 8ed4e9f4..5c70b6ab 100644 --- a/puppet/services/swift-ringbuilder.yaml +++ b/puppet/services/swift-ringbuilder.yaml @@ -38,7 +38,10 @@ parameters: default: {} description: 'A hash of additional raw devices to use as Swift backend (eg. 
{sdb: {}})' type: json - + SwiftUseLocalDir: + default: true + description: 'Use a local directory for Swift storage services when building rings' + type: boolean outputs: role_data: @@ -56,7 +59,7 @@ outputs: expression: $.data.raw_disk_lists.flatten() data: raw_disk_lists: - - [':%PORT%/d1'] + - {if: [{get_param: SwiftUseLocalDir}, [':%PORT%/d1'], []]} - repeat: template: ':%PORT%/DEVICE' for_each: diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml index 7fbb8d90..cffe78f5 100644 --- a/puppet/services/swift-storage.yaml +++ b/puppet/services/swift-storage.yaml @@ -86,7 +86,7 @@ outputs: swift::storage::all::account_pipeline: - healthcheck - account-server - swift::storage::disks: {get_param: SwiftRawDisks} + swift::storage::disks::args: {get_param: SwiftRawDisks} swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftStorageNetwork]} step_config: | include ::tripleo::profile::base::swift::storage diff --git a/roles_data.yaml b/roles_data.yaml index 86d0e4f5..dad62f85 100644 --- a/roles_data.yaml +++ b/roles_data.yaml @@ -73,12 +73,12 @@ - OS::TripleO::Services::GnocchiApi - OS::TripleO::Services::GnocchiMetricd - OS::TripleO::Services::GnocchiStatsd - - OS::Tripleo::Services::ManilaApi - - OS::Tripleo::Services::ManilaScheduler - - OS::Tripleo::Services::ManilaBackendGeneric - - OS::Tripleo::Services::ManilaBackendNetapp - - OS::Tripleo::Services::ManilaBackendCephFs - - OS::Tripleo::Services::ManilaShare + - OS::TripleO::Services::ManilaApi + - OS::TripleO::Services::ManilaScheduler + - OS::TripleO::Services::ManilaBackendGeneric + - OS::TripleO::Services::ManilaBackendNetapp + - OS::TripleO::Services::ManilaBackendCephFs + - OS::TripleO::Services::ManilaShare - OS::TripleO::Services::AodhApi - OS::TripleO::Services::AodhEvaluator - OS::TripleO::Services::AodhNotifier @@ -95,6 +95,7 @@ - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient - OS::TripleO::Services::VipHosts + - OS::TripleO::Services::BarbicanApi - name: Compute CountDefault: 1 @@ -157,6 +158,7 @@ - OS::TripleO::Services::CephOSD - OS::TripleO::Services::Kernel - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Snmp - OS::TripleO::Services::Timezone - OS::TripleO::Services::TripleoPackages - OS::TripleO::Services::TripleoFirewall |
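  # A closing sketch (device names are placeholders): combining the two Swift
  # knobs above so that rings are built only from real block devices rather
  # than the local directory:
  #
  # parameter_defaults:
  #   SwiftUseLocalDir: false
  #   SwiftRawDisks:
  #     sdb: {}
  #     sdc: {}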