-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh   26
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh   14
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh    113
-rw-r--r--  overcloud-resource-registry-puppet.yaml                       1
-rw-r--r--  puppet/services/ceph-mon.yaml                                 9
-rw-r--r--  puppet/services/nova-compute.yaml                             3
-rw-r--r--  puppet/services/nova-metadata.yaml                           34
-rw-r--r--  puppet/services/rabbitmq.yaml                                 2
-rw-r--r--  roles_data.yaml                                               1
9 files changed, 183 insertions, 20 deletions
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 6d073d86..2490ce27 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -18,6 +18,22 @@ check_disk_for_mysql_dump
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
+# Migrate to HA NG
+if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ migrate_full_to_ng_ha
+fi
+
+# After migrating the cluster to HA-NG the services that are no longer under pacemaker's
+# control are still up and running. We need to stop them explicitly, otherwise during the
+# yum upgrade the rpm %post sections will try to do a "systemctl try-restart <service>",
+# which is going to take a long time because rabbit is down. With the services stopped,
+# the systemctl try-restart is a no-op.
+
+for service in $(services_to_migrate); do
+ manage_systemd_service stop "${service%%-clone}"
+ check_resource_systemd "${service%%-clone}" stopped 600
+done
+
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -36,8 +52,6 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
- pcs resource disable httpd
- check_resource httpd stopped 1800
pcs resource disable redis
check_resource redis stopped 600
pcs resource disable rabbitmq
@@ -53,14 +67,6 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
pcs cluster stop --all
fi
-stop_or_disable_service mongod
-check_resource mongod stopped 600
-stop_or_disable_service memcached
-check_resource memcached stopped 600
-
-
-
-
# Swift isn't controled by pacemaker
systemctl_swift stop
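
The stop loop added above strips the pacemaker clone suffix with "${service%%-clone}" so the plain systemd unit name is handed to the helpers (manage_systemd_service and check_resource_systemd are assumed to come from pacemaker_common_functions.sh). As a rough sketch only, with the helpers replaced by plain systemctl calls, the loop boils down to:

# Sketch only: approximate the stop loop with plain systemctl,
# assuming services_to_migrate is available in the environment.
for service in $(services_to_migrate); do
    unit="${service%%-clone}"   # e.g. openstack-nova-api-clone -> openstack-nova-api
    systemctl stop "$unit"
    # roughly what check_resource_systemd "<unit>" stopped 600 waits for
    timeout 600 bash -c "while systemctl is-active --quiet $unit; do sleep 3; done"
done
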
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index cfe5bcfe..6bb2fa73 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -32,8 +32,6 @@ fi
start_or_enable_service galera
check_resource galera started 600
-start_or_enable_service mongod
-check_resource mongod started 600
if [[ -n $(is_bootstrap_node) ]]; then
tstart=$(date +%s)
@@ -59,14 +57,18 @@ if [[ -n $(is_bootstrap_node) ]]; then
# sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
fi
-start_or_enable_service memcached
-check_resource memcached started 600
start_or_enable_service rabbitmq
check_resource rabbitmq started 600
start_or_enable_service redis
check_resource redis started 600
-start_or_enable_service httpd
-check_resource httpd started 1800
# Swift isn't controled by pacemaker
systemctl_swift start
+
+# We need to start the systemd services we explicitly stopped in step _1.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+for service in $(services_to_migrate); do
+ manage_systemd_service start "${service%%-clone}"
+ check_resource_systemd "${service%%-clone}" started 600
+done
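
Since the loop above restarts each migrated service, a quick follow-up sanity check can confirm nothing was left stopped after this step; a minimal sketch, again assuming services_to_migrate is sourced from major_upgrade_pacemaker_migrations.sh:

# Sketch only: report any migrated service that is not active after step 2.
rc=0
for service in $(services_to_migrate); do
    unit="${service%%-clone}"
    if ! systemctl is-active --quiet "$unit"; then
        echo "WARNING: $unit is not active after the upgrade step"
        rc=1
    fi
done
exit $rc
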
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 7ed7012d..b8c5321b 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -56,3 +56,116 @@ function is_mysql_upgrade_needed {
fi
echo "1"
}
+
+# This function returns the list of services to be migrated away from pacemaker
+# and to systemd. The reason to keep these services in a separate function is that
+# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
+# and the function that migrates the cluster from full HA to HA NG.
+function services_to_migrate {
+ # The following PCMK resources are the ones we are going to delete
+ PCMK_RESOURCE_TODELETE="
+ httpd-clone
+ memcached-clone
+ mongod-clone
+ neutron-dhcp-agent-clone
+ neutron-l3-agent-clone
+ neutron-metadata-agent-clone
+ neutron-netns-cleanup-clone
+ neutron-openvswitch-agent-clone
+ neutron-ovs-cleanup-clone
+ neutron-server-clone
+ openstack-aodh-evaluator-clone
+ openstack-aodh-listener-clone
+ openstack-aodh-notifier-clone
+ openstack-ceilometer-api-clone
+ openstack-ceilometer-central-clone
+ openstack-ceilometer-collector-clone
+ openstack-ceilometer-notification-clone
+ openstack-cinder-api-clone
+ openstack-cinder-scheduler-clone
+ openstack-glance-api-clone
+ openstack-glance-registry-clone
+ openstack-gnocchi-metricd-clone
+ openstack-gnocchi-statsd-clone
+ openstack-heat-api-cfn-clone
+ openstack-heat-api-clone
+ openstack-heat-api-cloudwatch-clone
+ openstack-heat-engine-clone
+ openstack-nova-api-clone
+ openstack-nova-conductor-clone
+ openstack-nova-consoleauth-clone
+ openstack-nova-novncproxy-clone
+ openstack-nova-scheduler-clone
+ openstack-sahara-api-clone
+ openstack-sahara-engine-clone
+ "
+ echo $PCMK_RESOURCE_TODELETE
+}
+
+# This function will migrate a Mitaka system, where all the resources are managed
+# via pacemaker, to a Newton setup, where only a few services will be managed by pacemaker.
+# At a high level it will operate as follows:
+# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
+# during the conversion
+# 2. Remove all the colocation constraints and then the ordering constraints, except the
+# ones related to haproxy/VIPs which exist in Newton as well
+# 3. Remove all the resources that won't be managed by pacemaker in Newton. Note that they
+# will show up as ORPHANED but they will keep running normally via systemd. They will be
+# enabled to start at boot by puppet during the converge step
+# 4. Take the cluster out of maintenance-mode and do a resource cleanup
+function migrate_full_to_ng_ha {
+ if [[ -n $(pcmk_running) ]]; then
+ pcs property set maintenance-mode=true
+ # We are making sure here that the property has propagated everywhere
+ if ! timeout -k 10 300 crm_resource --wait; then
+ echo_error "ERROR: cluster remained unstable after setting maintenance-mode for more than 300 seconds, exiting."
+ exit 1
+ fi
+ # First we go through all the colocation constraints (except the ones we want to keep, i.e. the haproxy/ip ones)
+ # and we remove those
+ COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+ for constraint in $COL_CONSTRAINTS; do
+ log_debug "Deleting colocation constraint $constraint from CIB"
+ pcs constraint remove "$constraint"
+ done
+
+ # Now we kill all the ordering constraints (except the haproxy/ip ones)
+ ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+ for constraint in $ORD_CONSTRAINTS; do
+ log_debug "Deleting ordering constraint $constraint from CIB"
+ pcs constraint remove "$constraint"
+ done
+
+ # At this stage there are no constraints whatsoever except the haproxy/ip ones
+ # which we want to keep. We now delete each resource that will move to systemd.
+ # Note that the corresponding systemd service will stay running, which means that
+ # later, when we do the "yum update", each "systemctl try-restart <service>" would
+ # no longer be a no-op: the service would be up and would get restarted while
+ # rabbitmq is down, which is why the _1.sh step stops these services explicitly.
+ PCS_STATUS_OUTPUT="$(pcs status)"
+ for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
+ if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
+ log_debug "Deleting $resource from the CIB"
+
+ # We need to add --force because the cluster is in maintenance mode and the resource
+ # is unmanaged. The if serves to make this idempotent
+ pcs resource delete --force "$resource"
+ else
+ log_debug "Service $service not found as a pacemaker resource, not trying to delete."
+ fi
+ done
+
+ # At this stage all the pacemaker resources are removed from the CIB. Once we remove
+ # maintenance-mode those systemd services will keep on running. They will be enabled
+ # in systemd by puppet during the converge step later on.
+ pcs property set maintenance-mode=false
+ # We need to do a pcs resource cleanup here + crm_resource --wait to make sure the
+ # cluster is in a clean state before we stop everything, upgrade and restart everything
+ pcs resource cleanup
+ # We are making sure here that the cluster is stable before proceeding
+ if ! timeout -k 10 600 crm_resource --wait; then
+ echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
+ exit 1
+ fi
+ fi
+}
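
The constraint-removal pipelines in migrate_full_to_ng_ha depend on the textual layout of `pcs config show`. A small standalone sketch, with made-up constraint ids, shows what the colocation pipeline extracts and why the ip-*/haproxy pair is preserved:

# Sketch only: sample "Colocation Constraints:" block with hypothetical ids.
sample='Colocation Constraints:
  openstack-nova-api-clone with openstack-core-clone (score:INFINITY) (id:colocation-nova-api-with-core)
  ip-192.0.2.10 with haproxy-clone (score:INFINITY) (id:colocation-ip-with-haproxy)'

# Same filters as the script: drop the header, skip ip-*/haproxy pairs,
# keep the last field and strip the "(id:" prefix and the trailing ")".
echo "$sample" | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" \
    | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\)
# -> colocation-nova-api-with-core   (only this id would be removed)
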
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 47038411..505f033d 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -188,6 +188,7 @@ resource_registry:
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml
+ OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml
OS::TripleO::Services::NovaScheduler: puppet/services/nova-scheduler.yaml
OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml
OS::TripleO::Services::NovaVncProxy: puppet/services/nova-vnc-proxy.yaml
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index a2b3f13e..552086ab 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -76,6 +76,9 @@ outputs:
- get_attr: [CephBase, role_data, config_settings]
- ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
ceph::profile::params::mon_key: {get_param: CephMonKey}
+ ceph::profile::params::osd_pool_default_pg_num: 32
+ ceph::profile::params::osd_pool_default_pgp_num: 32
+ ceph::profile::params::osd_pool_default_size: 3
# repeat returns items in a list, so we need to map_merge twice
tripleo::profile::base::ceph::mon::ceph_pools:
map_merge:
@@ -90,9 +93,9 @@ outputs:
- {get_param: GnocchiRbdPoolName}
template:
<%pool%>:
- pg_num: 32
- pgp_num: 32
- size: 3
+ pg_num: "%{hiera('ceph::profile::params::osd_pool_default_pg_num')}"
+ pgp_num: "%{hiera('ceph::profile::params::osd_pool_default_pgp_num')}"
+ size: "%{hiera('ceph::profile::params::osd_pool_default_size')}"
- {get_param: CephPools}
tripleo.ceph_mon.firewall_rules:
'110 ceph_mon':
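
Because the pool sizing is now looked up through hiera rather than hard-coded per pool, the effective defaults can be inspected on a deployed controller (or overridden via ExtraConfig hieradata). An illustrative, hedged check:

# Sketch only: print the hiera-backed Ceph pool defaults on a controller node.
for key in osd_pool_default_pg_num osd_pool_default_pgp_num osd_pool_default_size; do
    printf 'ceph::profile::params::%s = ' "$key"
    hiera -c /etc/puppet/hiera.yaml "ceph::profile::params::${key}"
done
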
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index c9a95346..f7f2510e 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -137,6 +137,9 @@ outputs:
# internal_api_subnet - > IP/CIDR
nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
+ nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
+ nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
+ nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
step_config: |
# TODO(emilien): figure how to deal with libvirt profile.
# We'll probably treat it like we do with Neutron plugins.
diff --git a/puppet/services/nova-metadata.yaml b/puppet/services/nova-metadata.yaml
new file mode 100644
index 00000000..92373c56
--- /dev/null
+++ b/puppet/services/nova-metadata.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Nova Metadata service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova Metadata service.
+ type: number
+
+outputs:
+ role_data:
+ description: Role data for the Nova Metadata service.
+ value:
+ service_name: nova_metadata
+ config_settings:
+ nova::api::metadata_workers: {get_param: NovaWorkers}
+ nova::api::metadata_listen: {get_param: [ServiceNetMap, NovaMetadataNetwork]}
+ step_config: ""
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index e4a16e86..52300a2f 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -66,7 +66,7 @@ outputs:
rabbitmq::repos_ensure: false
rabbitmq_environment:
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
- RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
rabbitmq_kernel_variables:
inet_dist_listen_min: '25672'
inet_dist_listen_max: '25672'
diff --git a/roles_data.yaml b/roles_data.yaml
index cde3aadd..af1eba82 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -35,6 +35,7 @@
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::MongoDb
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::NovaConsoleauth
- OS::TripleO::Services::NovaVncProxy