-rw-r--r--  environments/enable-tls.yaml                              |   1
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh   |  36
-rw-r--r--  network/endpoints/endpoint_data.yaml                      |   5
-rw-r--r--  network/endpoints/endpoint_map.yaml                       |  40
-rw-r--r--  overcloud.yaml                                            |   1
-rw-r--r--  puppet/ceph-cluster-config.yaml                           |   3
-rw-r--r--  puppet/controller.yaml                                    |   1
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml               |   8
-rw-r--r--  puppet/manifests/overcloud_controller.pp                  |   1
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp        | 224
10 files changed, 148 insertions, 172 deletions
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
index e95ce4a2..6986a0c8 100644
--- a/environments/enable-tls.yaml
+++ b/environments/enable-tls.yaml
@@ -17,6 +17,7 @@ parameter_defaults:
     GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
     GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
     GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+    GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
     GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
     GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
     GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 1f420b32..b63198db 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -13,6 +13,42 @@
 # been already applied, it should be possible to call the function
 # again without damaging the deployment or failing the upgrade.
 
+function add_missing_openstack_core_constraints {
+    # The CIBs are saved under /root as they might contain sensitive data
+    CIB="/root/migration.cib"
+    CIB_BACKUP="/root/backup.cib"
+    CIB_PUSH_NEEDED=n
+
+    rm -f "$CIB" "$CIB_BACKUP" || /bin/true
+    pcs cluster cib "$CIB"
+    cp "$CIB" "$CIB_BACKUP"
+
+    if ! pcs -f "$CIB" constraint --full | grep 'start openstack-sahara-api-clone then start openstack-sahara-engine-clone'; then
+        pcs -f "$CIB" constraint order start openstack-sahara-api-clone then start openstack-sahara-engine-clone
+        CIB_PUSH_NEEDED=y
+    fi
+
+    if ! pcs -f "$CIB" constraint --full | grep 'start openstack-core-clone then start openstack-ceilometer-notification-clone'; then
+        pcs -f "$CIB" constraint order start openstack-core-clone then start openstack-ceilometer-notification-clone
+        CIB_PUSH_NEEDED=y
+    fi
+
+    if ! pcs -f "$CIB" constraint --full | grep 'start openstack-aodh-evaluator-clone then start openstack-aodh-listener-clone'; then
+        pcs -f "$CIB" constraint order start openstack-aodh-evaluator-clone then start openstack-aodh-listener-clone
+        CIB_PUSH_NEEDED=y
+    fi
+
+    if pcs -f "$CIB" constraint --full | grep 'start openstack-core-clone then start openstack-heat-api-clone'; then
+        CID=$(pcs -f "$CIB" constraint --full | grep 'start openstack-core-clone then start openstack-heat-api-clone' | sed -e 's/.*id\://g' -e 's/)//g')
+        pcs -f "$CIB" constraint remove $CID
+        CIB_PUSH_NEEDED=y
+    fi
+
+    if [ "$CIB_PUSH_NEEDED" = 'y' ]; then
+        pcs cluster cib-push "$CIB"
+    fi
+}
+
 function remove_ceilometer_alarm {
     if pcs status | grep openstack-ceilometer-alarm; then
         # Disable pacemaker resources for ceilometer-alarms
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index b934ab99..bd4e2281 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -55,6 +55,11 @@ Glance:
         vip_param: GlanceApi
     port: 9292
 
+GlanceRegistry:
+    Internal:
+        vip_param: GlanceRegistry
+    port: 9191
+
 Mysql:
     '':
         vip_param: Mysql
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index a7f0aff6..f9a8b83c 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -12,6 +12,7 @@ parameters:
   CeilometerApiVirtualIP: {type: string, default: ''}
   CinderApiVirtualIP: {type: string, default: ''}
   GlanceApiVirtualIP: {type: string, default: ''}
+  GlanceRegistryVirtualIP: {type: string, default: ''}
   GnocchiApiVirtualIP: {type: string, default: ''}
   HeatApiVirtualIP: {type: string, default: ''}
   KeystoneAdminApiVirtualIP: {type: string, default: ''}
@@ -37,6 +38,7 @@ parameters:
     GlanceAdmin: {protocol: http, port: '9292', host: IP_ADDRESS}
     GlanceInternal: {protocol: http, port: '9292', host: IP_ADDRESS}
     GlancePublic: {protocol: http, port: '9292', host: IP_ADDRESS}
+    GlanceRegistryInternal: {protocol: http, port: '9191', host: IP_ADDRESS}
     GnocchiAdmin: {protocol: http, port: '8041', host: IP_ADDRESS}
     GnocchiInternal: {protocol: http, port: '8041', host: IP_ADDRESS}
     GnocchiPublic: {protocol: http, port: '8041', host: IP_ADDRESS}
@@ -648,6 +650,44 @@ outputs:
                   IP_ADDRESS: {get_param: PublicVirtualIP}
             - ':'
             - get_param: [EndpointMap, GlancePublic, port]
+      GlanceRegistryInternal:
+        host:
+          str_replace:
+            template:
+              get_param: [EndpointMap, GlanceRegistryInternal, host]
+            params:
+              CLOUDNAME: {get_param: CloudName}
+              IP_ADDRESS: {get_param: GlanceRegistryVirtualIP}
+        port:
+          get_param: [EndpointMap, GlanceRegistryInternal, port]
+        protocol:
+          get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+        uri:
+          list_join:
+          - ''
+          - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+            - ://
+            - str_replace:
+                template:
+                  get_param: [EndpointMap, GlanceRegistryInternal, host]
+                params:
+                  CLOUDNAME: {get_param: CloudName}
+                  IP_ADDRESS: {get_param: GlanceRegistryVirtualIP}
+            - ':'
+            - get_param: [EndpointMap, GlanceRegistryInternal, port]
+        uri_no_suffix:
+          list_join:
+          - ''
+          - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+            - ://
+            - str_replace:
+                template:
+                  get_param: [EndpointMap, GlanceRegistryInternal, host]
+                params:
+                  CLOUDNAME: {get_param: CloudName}
+                  IP_ADDRESS: {get_param: GlanceRegistryVirtualIP}
+            - ':'
+            - get_param: [EndpointMap, GlanceRegistryInternal, port]
       GnocchiAdmin:
         host:
           str_replace:
diff --git a/overcloud.yaml b/overcloud.yaml
index 87a5f7b4..20c853cd 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -876,6 +876,7 @@ resources:
       AodhApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
       CinderApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
       GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+      GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
       GnocchiApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
       HeatApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
       KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 6c6be473..fd161886 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -94,7 +94,7 @@ resources:
               cap_mon: 'allow profile bootstrap-osd'
             },
             client.CLIENT_USER: {
-              secret: 'ADMIN_KEY',
+              secret: 'CLIENT_KEY',
               mode: '0644',
               cap_mon: 'allow r',
               cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
@@ -102,6 +102,7 @@ resources:
            }"
          params:
            CLIENT_USER: {get_param: CephClientUserName}
+           CLIENT_KEY: {get_param: ceph_client_key}
            ADMIN_KEY: {get_param: ceph_admin_key}
            NOVA_POOL: {get_param: NovaRbdPoolName}
            CINDER_POOL: {get_param: CinderRbdPoolName}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 56eb8b96..8ad0ef28 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -1402,6 +1402,7 @@ resources:
                 heat::api_cloudwatch::workers: {get_input: heat_workers}
                 heat::api_cfn::bind_host: {get_input: heat_api_network}
                 heat::api_cfn::workers: {get_input: heat_workers}
+                heat::engine::num_engine_workers: {get_input: heat_workers}
                 heat::database_connection: {get_input: heat_dsn}
                 heat::debug: {get_input: debug}
                 heat::db::mysql::password: {get_input: heat_password}
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index 77b11378..e281ef51 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -63,6 +63,14 @@ resources:
         openssl rsa -noout -modulus -in ${cert_path} \
           | openssl md5 | cut -c 10- \
           > ${heat_outputs_path}.key_modulus
+        # We need to reload haproxy in case the certificate changed because
+        # puppet doesn't know the contents of the cert file. The pacemaker
+        # case is handled separately in a pacemaker-specific resource.
+        pacemaker_status=$(systemctl is-active pacemaker)
+        haproxy_status=$(systemctl is-active haproxy)
+        if [ "$pacemaker_status" != "active" -a "$haproxy_status" = "active" ]; then
+          systemctl reload haproxy
+        fi
 
   ControllerTLSDeployment:
     type: OS::Heat::SoftwareDeployment
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 779e7f21..d7bb025a 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -435,6 +435,7 @@ if hiera('step') >= 4 {
     $cinder_rbd_backend = 'tripleo_ceph'
 
     cinder::backend::rbd { $cinder_rbd_backend :
+      backend_host    => hiera('cinder::host'),
       rbd_pool        => hiera('cinder_rbd_pool_name'),
       rbd_user        => hiera('ceph_client_user_name'),
       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index f5d4cf9c..3a6dbc06 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -255,183 +255,46 @@ if hiera('step') >= 2 {
     }
 
     $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
-    if is_ipv6_address($control_vip) {
-      $control_vip_netmask = '64'
-    } else {
-      $control_vip_netmask = '32'
-    }
-    pacemaker::resource::ip { 'control_vip':
-      ip_address   => $control_vip,
-      cidr_netmask => $control_vip_netmask,
-    }
-    pacemaker::constraint::base { 'control_vip-then-haproxy':
-      constraint_type   => 'order',
-      first_resource    => "ip-${control_vip}",
-      second_resource   => 'haproxy-clone',
-      first_action      => 'start',
-      second_action     => 'start',
-      constraint_params => 'kind=Optional',
-      require           => [Pacemaker::Resource::Service['haproxy'],
-                            Pacemaker::Resource::Ip['control_vip']],
-    }
-    pacemaker::constraint::colocation { 'control_vip-with-haproxy':
-      source  => "ip-${control_vip}",
-      target  => 'haproxy-clone',
-      score   => 'INFINITY',
-      require => [Pacemaker::Resource::Service['haproxy'],
-                  Pacemaker::Resource::Ip['control_vip']],
+    tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip':
+      vip_name   => 'control',
+      ip_address => $control_vip,
     }
 
     $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
-    if is_ipv6_address($public_vip) {
-      $public_vip_netmask = '64'
-    } else {
-      $public_vip_netmask = '32'
-    }
-    if $public_vip and $public_vip != $control_vip {
-      pacemaker::resource::ip { 'public_vip':
-        ip_address   => $public_vip,
-        cidr_netmask => $public_vip_netmask,
-      }
-      pacemaker::constraint::base { 'public_vip-then-haproxy':
-        constraint_type   => 'order',
-        first_resource    => "ip-${public_vip}",
-        second_resource   => 'haproxy-clone',
-        first_action      => 'start',
-        second_action     => 'start',
-        constraint_params => 'kind=Optional',
-        require           => [Pacemaker::Resource::Service['haproxy'],
-                              Pacemaker::Resource::Ip['public_vip']],
-      }
-      pacemaker::constraint::colocation { 'public_vip-with-haproxy':
-        source  => "ip-${public_vip}",
-        target  => 'haproxy-clone',
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service['haproxy'],
-                    Pacemaker::Resource::Ip['public_vip']],
-      }
+    tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip':
+      ensure     => $public_vip and $public_vip != $control_vip,
+      vip_name   => 'public',
+      ip_address => $public_vip,
     }
 
     $redis_vip = hiera('redis_vip')
-    if is_ipv6_address($redis_vip) {
-      $redis_vip_netmask = '64'
-    } else {
-      $redis_vip_netmask = '32'
-    }
-    if $redis_vip and $redis_vip != $control_vip {
-      pacemaker::resource::ip { 'redis_vip':
-        ip_address   => $redis_vip,
-        cidr_netmask => $redis_vip_netmask,
-      }
-      pacemaker::constraint::base { 'redis_vip-then-haproxy':
-        constraint_type   => 'order',
-        first_resource    => "ip-${redis_vip}",
-        second_resource   => 'haproxy-clone',
-        first_action      => 'start',
-        second_action     => 'start',
-        constraint_params => 'kind=Optional',
-        require           => [Pacemaker::Resource::Service['haproxy'],
-                              Pacemaker::Resource::Ip['redis_vip']],
-      }
-      pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
-        source  => "ip-${redis_vip}",
-        target  => 'haproxy-clone',
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service['haproxy'],
-                    Pacemaker::Resource::Ip['redis_vip']],
-      }
+    tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip':
+      ensure     => $redis_vip and $redis_vip != $control_vip,
+      vip_name   => 'redis',
+      ip_address => $redis_vip,
     }
+
     $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
-    if is_ipv6_address($internal_api_vip) {
-      $internal_api_vip_netmask = '64'
-    } else {
-      $internal_api_vip_netmask = '32'
-    }
-    if $internal_api_vip and $internal_api_vip != $control_vip {
-      pacemaker::resource::ip { 'internal_api_vip':
-        ip_address   => $internal_api_vip,
-        cidr_netmask => $internal_api_vip_netmask,
-      }
-      pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
-        constraint_type   => 'order',
-        first_resource    => "ip-${internal_api_vip}",
-        second_resource   => 'haproxy-clone',
-        first_action      => 'start',
-        second_action     => 'start',
-        constraint_params => 'kind=Optional',
-        require           => [Pacemaker::Resource::Service['haproxy'],
-                              Pacemaker::Resource::Ip['internal_api_vip']],
-      }
-      pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
-        source  => "ip-${internal_api_vip}",
-        target  => 'haproxy-clone',
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service['haproxy'],
-                    Pacemaker::Resource::Ip['internal_api_vip']],
-      }
+    tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip':
+      ensure     => $internal_api_vip and $internal_api_vip != $control_vip,
+      vip_name   => 'internal_api',
+      ip_address => $internal_api_vip,
     }
 
     $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
-    if is_ipv6_address($storage_vip) {
-      $storage_vip_netmask = '64'
-    } else {
-      $storage_vip_netmask = '32'
-    }
-    if $storage_vip and $storage_vip != $control_vip {
-      pacemaker::resource::ip { 'storage_vip':
-        ip_address   => $storage_vip,
-        cidr_netmask => $storage_vip_netmask,
-      }
-      pacemaker::constraint::base { 'storage_vip-then-haproxy':
-        constraint_type   => 'order',
-        first_resource    => "ip-${storage_vip}",
-        second_resource   => 'haproxy-clone',
-        first_action      => 'start',
-        second_action     => 'start',
-        constraint_params => 'kind=Optional',
-        require           => [Pacemaker::Resource::Service['haproxy'],
-                              Pacemaker::Resource::Ip['storage_vip']],
-      }
-      pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
-        source  => "ip-${storage_vip}",
-        target  => 'haproxy-clone',
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service['haproxy'],
-                    Pacemaker::Resource::Ip['storage_vip']],
-      }
+    tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip':
+      ensure     => $storage_vip and $storage_vip != $control_vip,
+      vip_name   => 'storage',
+      ip_address => $storage_vip,
     }
 
     $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
-    if is_ipv6_address($storage_mgmt_vip) {
-      $storage_mgmt_vip_netmask = '64'
-    } else {
-      $storage_mgmt_vip_netmask = '32'
-    }
-    if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
-      pacemaker::resource::ip { 'storage_mgmt_vip':
-        ip_address   => $storage_mgmt_vip,
-        cidr_netmask => $storage_mgmt_vip_netmask,
-      }
-      pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
-        constraint_type   => 'order',
-        first_resource    => "ip-${storage_mgmt_vip}",
-        second_resource   => 'haproxy-clone',
-        first_action      => 'start',
-        second_action     => 'start',
-        constraint_params => 'kind=Optional',
-        require           => [Pacemaker::Resource::Service['haproxy'],
-                              Pacemaker::Resource::Ip['storage_mgmt_vip']],
-      }
-      pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
-        source  => "ip-${storage_mgmt_vip}",
-        target  => 'haproxy-clone',
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service['haproxy'],
-                    Pacemaker::Resource::Ip['storage_mgmt_vip']],
-      }
+    tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip':
+      ensure     => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip,
+      vip_name   => 'storage_mgmt',
+      ip_address => $storage_mgmt_vip,
     }
-    }
 
     pacemaker::resource::service { $::memcached::params::service_name :
@@ -876,6 +739,7 @@ if hiera('step') >= 4 {
     $cinder_rbd_backend = 'tripleo_ceph'
 
     cinder::backend::rbd { $cinder_rbd_backend :
+      backend_host    => hiera('cinder::host'),
       rbd_pool        => hiera('cinder_rbd_pool_name'),
       rbd_user        => hiera('ceph_client_user_name'),
      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
@@ -1304,6 +1168,15 @@ if hiera('step') >= 5 {
       require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
                   Pacemaker::Resource::Ocf['openstack-core']],
     }
+    pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::sahara::params::api_service_name}-clone",
+      second_resource => "${::sahara::params::engine_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
+                          Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
+    }
 
     # Glance
     if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
@@ -1487,7 +1360,7 @@ if hiera('step') >= 5 {
                     Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
       }
     }
-    if hiera('neutron::enable_dhcp_agent',true) and hiera('l3_agent_service',true) {
+    if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {
       pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
         constraint_type => 'order',
         first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
@@ -1720,6 +1593,15 @@ if hiera('step') >= 5 {
       require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                   Pacemaker::Resource::Ocf['openstack-core']],
     }
+    pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
+      constraint_type => 'order',
+      first_resource  => 'openstack-core-clone',
+      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+                          Pacemaker::Resource::Ocf['openstack-core']],
+    }
     pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
       constraint_type => 'order',
       first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
@@ -1803,6 +1685,15 @@ if hiera('step') >= 5 {
       require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                   Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
     }
+    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
+      second_resource => "${::aodh::params::listener_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+                          Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
+    }
     pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
       source  => "${::aodh::params::listener_service_name}-clone",
       target  => "${::aodh::params::evaluator_service_name}-clone",
@@ -1859,15 +1750,6 @@ if hiera('step') >= 5 {
     pacemaker::resource::service { $::heat::params::engine_service_name :
       clone_params => 'interleave=true',
     }
-    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
-      constraint_type => 'order',
-      first_resource  => 'openstack-core-clone',
-      second_resource => "${::heat::params::api_service_name}-clone",
-      first_action    => 'start',
-      second_action   => 'start',
-      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
-                          Pacemaker::Resource::Ocf['openstack-core']],
-    }
     pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
       constraint_type => 'order',
      first_resource  => "${::heat::params::api_service_name}-clone",
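Note on the VIP refactor above: the new manifest only shows the call sites of tripleo::pacemaker::haproxy_with_vip (vip_name, ip_address and an ensure flag); the defined type itself ships in puppet-tripleo and is not part of this diff. As a hedged sketch, based purely on the blocks that the diff removes, such a defined type would bundle the VIP resource and its haproxy ordering/colocation constraints behind a single guarded call; the real implementation may differ:

define tripleo::pacemaker::haproxy_with_vip (
  $vip_name,
  $ip_address,
  $ensure = true,
) {
  # Sketch only: parameter names come from the call sites in this diff and the
  # body mirrors the removed per-VIP blocks in overcloud_controller_pacemaker.pp.
  if $ensure {
    # Same /32 vs /64 netmask selection the removed code performed inline.
    if is_ipv6_address($ip_address) {
      $netmask = '64'
    } else {
      $netmask = '32'
    }

    pacemaker::resource::ip { "${vip_name}_vip":
      ip_address   => $ip_address,
      cidr_netmask => $netmask,
    }

    # Start the VIP before haproxy and keep the two on the same node,
    # exactly as the removed per-VIP constraint blocks did.
    pacemaker::constraint::base { "${vip_name}_vip-then-haproxy":
      constraint_type   => 'order',
      first_resource    => "ip-${ip_address}",
      second_resource   => 'haproxy-clone',
      first_action      => 'start',
      second_action     => 'start',
      constraint_params => 'kind=Optional',
      require           => [Pacemaker::Resource::Service['haproxy'],
                            Pacemaker::Resource::Ip["${vip_name}_vip"]],
    }
    pacemaker::constraint::colocation { "${vip_name}_vip-with-haproxy":
      source  => "ip-${ip_address}",
      target  => 'haproxy-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service['haproxy'],
                  Pacemaker::Resource::Ip["${vip_name}_vip"]],
    }
  }
}

With a wrapper along these lines, each network VIP in the new manifest needs only the short declaration shown in the diff, and the ensure => $x_vip and $x_vip != $control_vip expression preserves the old behaviour of skipping VIPs that merely duplicate the control VIP.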