Diffstat (limited to 'puppet/manifests')
-rw-r--r--  puppet/manifests/overcloud_compute.pp               |  24
-rw-r--r--  puppet/manifests/overcloud_controller.pp            | 203
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  | 872
-rw-r--r--  puppet/manifests/ringbuilder.pp                     |   8
4 files changed, 389 insertions(+), 718 deletions(-)
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 13ae31c5..cc58cb14 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -152,8 +152,32 @@ elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencont
# require => Class['contrail::vrouter'],
#}
}
+elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+ # Forward all IPv4 traffic;
+ # required so traffic from the VMs can pass through the gateway's public interface.
+ sysctl::value { 'net.ipv4.ip_forward': value => '1' }
+
+ # ifc_ctl_pp needs to be invoked by root from vif.py when a VM is powered on
+ file { '/etc/sudoers.d/ifc_ctl_sudoers':
+ ensure => file,
+ owner => root,
+ group => root,
+ mode => '0440',
+ content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
+ }
+}
else {
+ # NOTE: this code won't live in puppet-neutron until Neutron OVS agent
+ # can be gracefully restarted. See https://review.openstack.org/#/c/297211
+ # In the meantime, it's safe to restart the agent on each change in neutron.conf,
+ # because Puppet changes are supposed to be done during bootstrap and upgrades.
+ # Some resources managed by Neutron_config (like messaging and logging options) require
+ # a restart of the OVS agent. This code does it.
+ # In Newton, the OVS agent will support graceful restart, so we'll drop this code
+ # and fix it in puppet-neutron.
+ Neutron_config<||> ~> Service['neutron-ovs-agent-service']
+
include ::neutron::plugins::ml2
include ::neutron::agents::ml2::ovs
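The Neutron_config<||> ~> Service[...] chain above collects every Neutron_config resource and notifies the OVS agent service, so any neutron.conf change restarts the agent. A minimal standalone sketch of the same collector-notify pattern (resource names illustrative, not part of this change):

service { 'neutron-ovs-agent-service':
  ensure => running,
}

# Any change to a collected resource notifies (and so restarts) the
# service, mirroring the chain added above.
File <| title == '/etc/neutron/neutron.conf' |> ~> Service['neutron-ovs-agent-service']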
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 5556a40c..833a3e01 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -24,15 +24,6 @@ if hiera('step') >= 1 {
create_resources(sysctl::value, hiera('sysctl_settings'), {})
Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
- $controller_node_ips = split(hiera('controller_node_ips'), ',')
-
- if $enable_load_balancer {
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- manage_vip => true,
- }
- }
-
}
if hiera('step') >= 2 {
@@ -113,46 +104,18 @@ if hiera('step') >= 2 {
# FIXME: this should only occur on the bootstrap host (ditto for db syncs)
# Create all the database schemas
- include ::keystone::db::mysql
- include ::glance::db::mysql
include ::nova::db::mysql
include ::nova::db::mysql_api
include ::neutron::db::mysql
include ::cinder::db::mysql
include ::heat::db::mysql
include ::sahara::db::mysql
+ if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
+ include ::gnocchi::db::mysql
+ }
if downcase(hiera('ceilometer_backend')) == 'mysql' {
include ::ceilometer::db::mysql
- }
-
- $rabbit_nodes = hiera('rabbit_node_ips')
- if count($rabbit_nodes) > 1 {
-
- $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
- if $rabbit_ipv6 {
- $rabbit_env = merge(hiera('rabbitmq_environment'), {
- 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
- })
- } else {
- $rabbit_env = hiera('rabbitmq_environment')
- }
-
- class { '::rabbitmq':
- config_cluster => true,
- cluster_nodes => $rabbit_nodes,
- tcp_keepalive => false,
- config_kernel_variables => hiera('rabbitmq_kernel_variables'),
- config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => $rabbit_env,
- }
- rabbitmq_policy { 'ha-all@/':
- pattern => '^(?!amq\.).*',
- definition => {
- 'ha-mode' => 'all',
- },
- }
- } else {
- include ::rabbitmq
+ include ::aodh::db::mysql
}
# pre-install swift here so we can build rings
@@ -209,66 +172,17 @@ if hiera('step') >= 2 {
} #END STEP 2
-if hiera('step') >= 3 {
-
- include ::keystone
- include ::keystone::config
- include ::keystone::roles::admin
- include ::keystone::endpoint
- include ::keystone::wsgi::apache
-
- #TODO: need a cleanup-keystone-tokens.sh solution here
-
- file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
- ensure => 'directory',
- owner => 'keystone',
- group => 'keystone',
- require => Package['keystone'],
- }
- file { '/etc/keystone/ssl/certs/signing_cert.pem':
- content => hiera('keystone_signing_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
- file { '/etc/keystone/ssl/private/signing_key.pem':
- content => hiera('keystone_signing_key'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/private'],
- }
- file { '/etc/keystone/ssl/certs/ca.pem':
- content => hiera('keystone_ca_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
-
- $glance_backend = downcase(hiera('glance_backend', 'swift'))
- case $glance_backend {
- 'swift': { $backend_store = 'glance.store.swift.Store' }
- 'file': { $backend_store = 'glance.store.filesystem.Store' }
- 'rbd': { $backend_store = 'glance.store.rbd.Store' }
- default: { fail('Unrecognized glance_backend parameter.') }
- }
- $http_store = ['glance.store.http.Store']
- $glance_store = concat($http_store, $backend_store)
-
- # TODO: scrubber and other additional optional features
- include ::glance
- include ::glance::config
- class { '::glance::api':
- known_stores => $glance_store,
- }
- include ::glance::registry
- include ::glance::notify::rabbitmq
- include join(['::glance::backend::', $glance_backend])
+if hiera('step') >= 4 {
+
+ $nova_ipv6 = hiera('nova::use_ipv6', false)
+ if $nova_ipv6 {
+ $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
+ } else {
+ $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
+ }
class { '::nova' :
- memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
+ memcached_servers => $memcached_servers
}
include ::nova::config
include ::nova::api
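suffix() from puppetlabs-stdlib appends a string to each array element, which is how the memcached node list gets its port above. For example (addresses illustrative):

$servers = suffix(['192.0.2.10', '192.0.2.11'], ':11211')
# => ['192.0.2.10:11211', '192.0.2.11:11211']
notice($servers)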
@@ -336,25 +250,22 @@ if hiera('step') >= 3 {
include ::neutron::server
include ::neutron::server::notifications
- # If the value of core plugin is set to 'nuage' or 'opencontrail',
- # include nuage or opencontrail core plugins, and it does not
- # need the l3, dhcp and metadata agents
+ # If the value of core plugin is set to 'nuage', 'opencontrail' or 'plumgrid',
+ # include the nuage, opencontrail or plumgrid core plugin;
+ # otherwise use the default of 'ml2'.
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::neutron::plugins::nuage
} elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
include ::neutron::plugins::opencontrail
- } else {
- include ::neutron::agents::l3
- include ::neutron::agents::dhcp
- include ::neutron::agents::metadata
-
- file { '/etc/neutron/dnsmasq-neutron.conf':
- content => hiera('neutron_dnsmasq_options'),
- owner => 'neutron',
- group => 'neutron',
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
+ }
+ elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+ class { '::neutron::plugins::plumgrid' :
+ connection => hiera('neutron::server::database_connection'),
+ controller_priv_host => hiera('keystone_admin_api_vip'),
+ admin_password => hiera('admin_password'),
+ metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
}
+ } else {
# If the value of core plugin is set to 'midonet',
# skip all the ML2 configuration
@@ -397,17 +308,9 @@ if hiera('step') >= 3 {
include ::neutron::plugins::ml2::bigswitch::restproxy
include ::neutron::agents::bigswitch
}
- neutron_l3_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
Service['neutron-server'] -> Service['neutron-ovs-agent-service']
}
- Service['neutron-server'] -> Service['neutron-dhcp-service']
- Service['neutron-server'] -> Service['neutron-l3']
Service['neutron-server'] -> Service['neutron-metadata']
}
@@ -452,6 +355,7 @@ if hiera('step') >= 3 {
$cinder_rbd_backend = 'tripleo_ceph'
cinder::backend::rbd { $cinder_rbd_backend :
+ backend_host => hiera('cinder::host'),
rbd_pool => hiera('cinder_rbd_pool_name'),
rbd_user => hiera('ceph_client_user_name'),
rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
@@ -597,12 +501,28 @@ if hiera('step') >= 3 {
include ::ceilometer::expirer
include ::ceilometer::collector
include ::ceilometer::agent::auth
+ include ::ceilometer::dispatcher::gnocchi
class { '::ceilometer::db' :
database_connection => $ceilometer_database_connection,
}
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
+ # Aodh
+ class { '::aodh' :
+ database_connection => $ceilometer_database_connection,
+ }
+ include ::aodh::db::sync
+ # To manage the upgrade:
+ Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync']
+ include ::aodh::auth
+ include ::aodh::api
+ include ::aodh::wsgi::apache
+ include ::aodh::evaluator
+ include ::aodh::notifier
+ include ::aodh::listener
+ include ::aodh::client
+
# Heat
class { '::heat' :
notification_driver => 'messaging',
@@ -619,6 +539,7 @@ if hiera('step') >= 3 {
include ::sahara::service::engine
# Horizon
+ include ::apache::mod::remoteip
if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
} else {
@@ -626,11 +547,38 @@ if hiera('step') >= 3 {
}
$neutron_options = {'profile_support' => $_profile_support }
+ $memcached_ipv6 = hiera('memcached_ipv6', false)
+ if $memcached_ipv6 {
+ $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+ } else {
+ $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+ }
+
class { '::horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ cache_server_ip => $horizon_memcached_servers,
neutron_options => $neutron_options,
}
+ # Gnocchi
+ $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
+ class { '::gnocchi':
+ database_connection => $gnocchi_database_connection,
+ }
+ include ::gnocchi::api
+ include ::gnocchi::wsgi::apache
+ include ::gnocchi::client
+ include ::gnocchi::db::sync
+ include ::gnocchi::storage
+ include ::gnocchi::metricd
+ include ::gnocchi::statsd
+ $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
+ case $gnocchi_backend {
+ 'swift': { include ::gnocchi::storage::swift }
+ 'file': { include ::gnocchi::storage::file }
+ 'rbd': { include ::gnocchi::storage::ceph }
+ default: { fail('Unrecognized gnocchi_backend parameter.') }
+ }
+
$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
@@ -643,17 +591,13 @@ if hiera('step') >= 3 {
hiera_include('controller_classes')
-} #END STEP 3
+} #END STEP 4
-if hiera('step') >= 4 {
- $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+if hiera('step') >= 5 {
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
$cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
$heat_enable_db_purge = hiera('heat_enable_db_purge', true)
- if $keystone_enable_db_purge {
- include ::keystone::cron::token_flush
- }
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
}
@@ -665,7 +609,6 @@ if hiera('step') >= 4 {
}
if downcase(hiera('bootstrap_nodeid')) == $::hostname {
- include ::keystone::roles::admin
# Class ::heat::keystone::domain has to run on bootstrap node
# because it creates DB entities via API calls.
include ::heat::keystone::domain
@@ -680,7 +623,7 @@ if hiera('step') >= 4 {
}
}
-} #END STEP 4
+} #END STEP 5
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}
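The renumbering above moves the old step 3 body to step 4 and the cron/bootstrap work to step 5; Keystone and Glance are no longer deployed by this manifest. The gating idiom itself is unchanged. A minimal sketch, using classes from this change:

if hiera('step') >= 4 {
  # API services and their configuration run here
  include ::gnocchi::api
}
if hiera('step') >= 5 {
  # post-deployment housekeeping, e.g. DB purge crons
  include ::nova::cron::archive_deleted_rows
}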
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index db3d8652..6b0929b9 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -18,6 +18,23 @@ Pcmk_resource <| |> {
try_sleep => 3,
}
+# TODO(jistr): use pcs resource provider instead of just no-ops
+Service <|
+ tag == 'aodh-service' or
+ tag == 'cinder-service' or
+ tag == 'ceilometer-service' or
+ tag == 'gnocchi-service' or
+ tag == 'heat-service' or
+ tag == 'neutron-service' or
+ tag == 'nova-service' or
+ tag == 'sahara-service'
+|> {
+ hasrestart => true,
+ restart => '/bin/true',
+ start => '/bin/true',
+ stop => '/bin/true',
+}
+
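The collector above overrides the lifecycle commands of every Service carrying one of the listed tags, so Puppet keeps managing configuration while Pacemaker owns the process lifecycle. The same mechanism in isolation (tag and service name illustrative):

service { 'example-api':
  ensure => running,
  tag    => 'example-service',
}

# Matched services get no-op start/stop/restart; config changes still
# apply, but Puppet never touches the running process.
Service <| tag == 'example-service' |> {
  hasrestart => true,
  restart    => '/bin/true',
  start      => '/bin/true',
  stop       => '/bin/true',
}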
include ::tripleo::packages
include ::tripleo::firewall
@@ -35,7 +52,7 @@ $enable_load_balancer = hiera('enable_load_balancer', true)
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
-$non_pcmk_start = hiera('step') >= 4
+$non_pcmk_start = hiera('step') >= 5
if hiera('step') >= 1 {
@@ -49,28 +66,13 @@ if hiera('step') >= 1 {
include ::ntp
}
- $controller_node_ips = split(hiera('controller_node_ips'), ',')
- $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
- if $enable_load_balancer {
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- controller_hosts_names => $controller_node_names,
- manage_vip => false,
- mysql_clustercheck => true,
- haproxy_service_manage => false,
- }
- }
-
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
$corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
if $corosync_ipv6 {
- $cluster_setup_extras = { '--ipv6' => '' }
+ $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
} else {
- $cluster_setup_extras = {}
+ $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
}
- user { 'hacluster':
- ensure => present,
- } ->
class { '::pacemaker':
hacluster_pwd => hiera('hacluster_pwd'),
} ->
@@ -85,6 +87,10 @@ if hiera('step') >= 1 {
if $enable_fencing {
include ::tripleo::fencing
+ # enable stonith after all Pacemaker resources have been created
+ Pcmk_resource<||> -> Class['tripleo::fencing']
+ Pcmk_constraint<||> -> Class['tripleo::fencing']
+ Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
# enable stonith after all fencing devices have been created
Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
@@ -96,35 +102,6 @@ if hiera('step') >= 1 {
op_params => 'start timeout=200s stop timeout=200s',
}
- # Only configure RabbitMQ in this step, don't start it yet to
- # avoid races where non-master nodes attempt to start without
- # config (eg. binding on 0.0.0.0)
- # The module ignores erlang_cookie if cluster_config is false
- $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
- if $rabbit_ipv6 {
- $rabbit_env = merge(hiera('rabbitmq_environment'), {
- 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
- })
- } else {
- $rabbit_env = hiera('rabbitmq_environment')
- }
-
- class { '::rabbitmq':
- service_manage => false,
- tcp_keepalive => false,
- config_kernel_variables => hiera('rabbitmq_kernel_variables'),
- config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => $rabbit_env,
- } ->
- file { '/var/lib/rabbitmq/.erlang.cookie':
- ensure => file,
- owner => 'rabbitmq',
- group => 'rabbitmq',
- mode => '0400',
- content => hiera('rabbitmq::erlang_cookie'),
- replace => true,
- }
-
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
include ::mongodb::client
@@ -170,8 +147,10 @@ if hiera('step') >= 1 {
'bind-address' => $::hostname,
'max_connections' => hiera('mysql_max_connections'),
'open_files_limit' => '-1',
+ 'wsrep_on' => 'ON',
'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
'wsrep_cluster_name' => 'galera_cluster',
+ 'wsrep_cluster_address' => "gcomm://${galera_nodes}",
'wsrep_slave_threads' => '1',
'wsrep_certify_nonPK' => '1',
'wsrep_max_ws_rows' => '131072',
@@ -219,201 +198,12 @@ if hiera('step') >= 2 {
if $pacemaker_master {
- if $enable_load_balancer {
-
- include ::pacemaker::resource_defaults
-
- # Create an openstack-core dummy resource. See RHBZ 1290121
- pacemaker::resource::ocf { 'openstack-core':
- ocf_agent_name => 'heartbeat:Dummy',
- clone_params => true,
- }
- # FIXME: we should not have to access tripleo::loadbalancer class
- # parameters here to configure pacemaker VIPs. The configuration
- # of pacemaker VIPs could move into puppet-tripleo or we should
- # make use of less specific hiera parameters here for the settings.
- pacemaker::resource::service { 'haproxy':
- clone_params => true,
- }
-
- $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- if is_ipv6_address($control_vip) {
- $control_vip_netmask = '64'
- } else {
- $control_vip_netmask = '32'
- }
- pacemaker::resource::ip { 'control_vip':
- ip_address => $control_vip,
- cidr_netmask => $control_vip_netmask,
- }
- pacemaker::constraint::base { 'control_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${control_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
- pacemaker::constraint::colocation { 'control_vip-with-haproxy':
- source => "ip-${control_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
-
- $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- if is_ipv6_address($public_vip) {
- $public_vip_netmask = '64'
- } else {
- $public_vip_netmask = '32'
- }
- if $public_vip and $public_vip != $control_vip {
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
- cidr_netmask => $public_vip_netmask,
- }
- pacemaker::constraint::base { 'public_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${public_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
- }
- pacemaker::constraint::colocation { 'public_vip-with-haproxy':
- source => "ip-${public_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
- }
- }
-
- $redis_vip = hiera('redis_vip')
- if is_ipv6_address($redis_vip) {
- $redis_vip_netmask = '64'
- } else {
- $redis_vip_netmask = '32'
- }
- if $redis_vip and $redis_vip != $control_vip {
- pacemaker::resource::ip { 'redis_vip':
- ip_address => $redis_vip,
- cidr_netmask => $redis_vip_netmask,
- }
- pacemaker::constraint::base { 'redis_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${redis_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
- }
- pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
- source => "ip-${redis_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
- }
- }
-
- $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
- if is_ipv6_address($internal_api_vip) {
- $internal_api_vip_netmask = '64'
- } else {
- $internal_api_vip_netmask = '32'
- }
- if $internal_api_vip and $internal_api_vip != $control_vip {
- pacemaker::resource::ip { 'internal_api_vip':
- ip_address => $internal_api_vip,
- cidr_netmask => $internal_api_vip_netmask,
- }
- pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${internal_api_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
- }
- pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
- source => "ip-${internal_api_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
- }
- }
-
- $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
- if is_ipv6_address($storage_vip) {
- $storage_vip_netmask = '64'
- } else {
- $storage_vip_netmask = '32'
- }
- if $storage_vip and $storage_vip != $control_vip {
- pacemaker::resource::ip { 'storage_vip':
- ip_address => $storage_vip,
- cidr_netmask => $storage_vip_netmask,
- }
- pacemaker::constraint::base { 'storage_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
- }
- pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
- source => "ip-${storage_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
- }
- }
-
- $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
- if is_ipv6_address($storage_mgmt_vip) {
- $storage_mgmt_vip_netmask = '64'
- } else {
- $storage_mgmt_vip_netmask = '32'
- }
- if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
- pacemaker::resource::ip { 'storage_mgmt_vip':
- ip_address => $storage_mgmt_vip,
- cidr_netmask => $storage_mgmt_vip_netmask,
- }
- pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_mgmt_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
- }
- pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
- source => "ip-${storage_mgmt_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
- }
- }
+ include ::pacemaker::resource_defaults
+ # Create an openstack-core dummy resource. See RHBZ 1290121
+ pacemaker::resource::ocf { 'openstack-core':
+ ocf_agent_name => 'heartbeat:Dummy',
+ clone_params => true,
}
pacemaker::resource::service { $::memcached::params::service_name :
@@ -421,14 +211,6 @@ if hiera('step') >= 2 {
require => Class['::memcached'],
}
- pacemaker::resource::ocf { 'rabbitmq':
- ocf_agent_name => 'heartbeat:rabbitmq-cluster',
- resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
- clone_params => 'ordered=true interleave=true',
- meta_params => 'notify=true',
- require => Class['::rabbitmq'],
- }
-
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::resource::service { $::mongodb::params::service_name :
op_params => 'start timeout=370s stop timeout=200s',
@@ -466,6 +248,16 @@ if hiera('step') >= 2 {
}
}
+ $mysql_root_password = hiera('mysql::server::root_password')
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
+ # This step is to create a sysconfig clustercheck file with the root user and empty password
+ # on the first install only (because later on the clustercheck db user will be used)
+ # We use exec rather than file to avoid duplicate resource definition errors in Puppet
+ # when we later set the file to contain the clustercheck data
+ exec { 'create-root-sysconfig-clustercheck':
+ command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+ unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
+ }
exec { 'galera-ready' :
command => '/usr/bin/clustercheck >/dev/null',
@@ -473,14 +265,7 @@ if hiera('step') >= 2 {
tries => 180,
try_sleep => 10,
environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => File['/etc/sysconfig/clustercheck'],
- }
-
- file { '/etc/sysconfig/clustercheck' :
- ensure => file,
- content => "MYSQL_USERNAME=root\n
-MYSQL_PASSWORD=''\n
-MYSQL_HOST=localhost\n",
+ require => Exec['create-root-sysconfig-clustercheck'],
}
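The galera-ready exec polls clustercheck for up to 30 minutes (180 tries, 10 s apart) before anything that needs a writable database may proceed. The polling pattern on its own (command and limits illustrative):

# Retry the probe until it succeeds; the resource (and the deploy
# step) fails if all attempts are exhausted.
exec { 'wait-for-readiness':
  command   => '/usr/bin/clustercheck >/dev/null',
  tries     => 180,
  try_sleep => 10,
}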
xinetd::service { 'galera-monitor' :
@@ -493,17 +278,28 @@ MYSQL_HOST=localhost\n",
service_type => 'UNLISTED',
user => 'root',
group => 'root',
- require => File['/etc/sysconfig/clustercheck'],
+ require => Exec['create-root-sysconfig-clustercheck'],
+ }
+ # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
+ # to it in a later step. We do this only on one node as it will replicate to
+ # the other members. We also make sure that the permissions are the minimum necessary
+ if $pacemaker_master {
+ mysql_user { 'clustercheck@localhost':
+ ensure => 'present',
+ password_hash => mysql_password($mysql_clustercheck_password),
+ require => Exec['galera-ready'],
+ }
+ mysql_grant { 'clustercheck@localhost/*.*':
+ ensure => 'present',
+ options => ['GRANT'],
+ privileges => ['PROCESS'],
+ table => '*.*',
+ user => 'clustercheck@localhost',
+ }
}
# Create all the database schemas
if $sync_db {
- class { '::keystone::db::mysql':
- require => Exec['galera-ready'],
- }
- class { '::glance::db::mysql':
- require => Exec['galera-ready'],
- }
class { '::nova::db::mysql':
require => Exec['galera-ready'],
}
@@ -526,6 +322,11 @@ MYSQL_HOST=localhost\n",
}
}
+ if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
+ class { '::gnocchi::db::mysql':
+ require => Exec['galera-ready'],
+ }
+ }
class { '::sahara::db::mysql':
require => Exec['galera-ready'],
}
@@ -587,82 +388,18 @@ MYSQL_HOST=localhost\n",
} #END STEP 2
-if hiera('step') >= 3 {
-
- class { '::keystone':
- sync_db => $sync_db,
- manage_service => false,
- enabled => false,
- enable_bootstrap => $pacemaker_master,
- }
- include ::keystone::config
-
- #TODO: need a cleanup-keystone-tokens.sh solution here
-
- file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
- ensure => 'directory',
- owner => 'keystone',
- group => 'keystone',
- require => Package['keystone'],
- }
- file { '/etc/keystone/ssl/certs/signing_cert.pem':
- content => hiera('keystone_signing_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
- file { '/etc/keystone/ssl/private/signing_key.pem':
- content => hiera('keystone_signing_key'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/private'],
- }
- file { '/etc/keystone/ssl/certs/ca.pem':
- content => hiera('keystone_ca_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
-
- $glance_backend = downcase(hiera('glance_backend', 'swift'))
- case $glance_backend {
- 'swift': { $backend_store = 'glance.store.swift.Store' }
- 'file': { $backend_store = 'glance.store.filesystem.Store' }
- 'rbd': { $backend_store = 'glance.store.rbd.Store' }
- default: { fail('Unrecognized glance_backend parameter.') }
- }
- $http_store = ['glance.store.http.Store']
- $glance_store = concat($http_store, $backend_store)
-
- if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
- $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
- pacemaker::resource::filesystem { 'glance-fs':
- device => hiera('glance_file_pcmk_device'),
- directory => hiera('glance_file_pcmk_directory'),
- fstype => hiera('glance_file_pcmk_fstype'),
- fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
- clone_params => '',
- }
- }
-
- # TODO: notifications, scrubber, etc.
- include ::glance
- include ::glance::config
- class { '::glance::api':
- known_stores => $glance_store,
- manage_service => false,
- enabled => false,
- }
- class { '::glance::registry' :
- sync_db => $sync_db,
- manage_service => false,
- enabled => false,
+if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
+ # At this stage we are guaranteed that the clustercheck db user exists
+ # so we switch the resource agent to use it.
+ file { '/etc/sysconfig/clustercheck' :
+ ensure => file,
+ mode => '0600',
+ owner => 'root',
+ group => 'root',
+ content => "MYSQL_USERNAME=clustercheck\n
+MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
+MYSQL_HOST=localhost\n",
}
- include ::glance::notify::rabbitmq
- include join(['::glance::backend::', $glance_backend])
$nova_ipv6 = hiera('nova::use_ipv6', false)
if $nova_ipv6 {
@@ -777,29 +514,12 @@ if hiera('step') >= 3 {
keystone_password => hiera('neutron::server::auth_password')
}
}
- if hiera('neutron::enable_dhcp_agent',true) {
- class { '::neutron::agents::dhcp' :
- manage_service => false,
- enabled => false,
- }
- file { '/etc/neutron/dnsmasq-neutron.conf':
- content => hiera('neutron_dnsmasq_options'),
- owner => 'neutron',
- group => 'neutron',
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
- }
- }
- if hiera('neutron::enable_l3_agent',true) {
- class { '::neutron::agents::l3' :
- manage_service => false,
- enabled => false,
- }
- }
- if hiera('neutron::enable_metadata_agent',true) {
- class { '::neutron::agents::metadata':
- manage_service => false,
- enabled => false,
+ if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+ class { '::neutron::plugins::plumgrid' :
+ connection => hiera('neutron::server::database_connection'),
+ controller_priv_host => hiera('keystone_admin_api_vip'),
+ admin_password => hiera('admin_password'),
+ metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
}
}
include ::neutron::plugins::ml2
@@ -833,15 +553,6 @@ if hiera('step') >= 3 {
include ::neutron::plugins::ml2::bigswitch::restproxy
include ::neutron::agents::bigswitch
}
- neutron_l3_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_config {
- 'DEFAULT/notification_driver': value => 'messaging';
- }
include ::cinder
include ::cinder::config
@@ -894,6 +605,7 @@ if hiera('step') >= 3 {
$cinder_rbd_backend = 'tripleo_ceph'
cinder::backend::rbd { $cinder_rbd_backend :
+ backend_host => hiera('cinder::host'),
rbd_pool => hiera('cinder_rbd_pool_name'),
rbd_user => hiera('ceph_client_user_name'),
rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
@@ -1081,6 +793,7 @@ if hiera('step') >= 3 {
sync_db => $sync_db,
}
include ::ceilometer::agent::auth
+ include ::ceilometer::dispatcher::gnocchi
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
@@ -1113,7 +826,7 @@ if hiera('step') >= 3 {
service_enable => false,
# service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
}
- include ::keystone::wsgi::apache
+ include ::apache::mod::remoteip
include ::apache::mod::status
if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
@@ -1121,11 +834,79 @@ if hiera('step') >= 3 {
$_profile_support = 'None'
}
$neutron_options = {'profile_support' => $_profile_support }
+
+ $memcached_ipv6 = hiera('memcached_ipv6', false)
+ if $memcached_ipv6 {
+ $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+ } else {
+ $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+ }
+
class { '::horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ cache_server_ip => $horizon_memcached_servers,
neutron_options => $neutron_options,
}
+ # Aodh
+ class { '::aodh' :
+ database_connection => $ceilometer_database_connection,
+ }
+ include ::aodh::config
+ include ::aodh::auth
+ include ::aodh::client
+ include ::aodh::wsgi::apache
+ class { '::aodh::api':
+ manage_service => false,
+ enabled => false,
+ service_name => 'httpd',
+ }
+ class { '::aodh::evaluator':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::aodh::notifier':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::aodh::listener':
+ manage_service => false,
+ enabled => false,
+ }
+
+ # Gnocchi
+ $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
+ include ::gnocchi::client
+ if $sync_db {
+ include ::gnocchi::db::sync
+ }
+ include ::gnocchi::storage
+ $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
+ case $gnocchi_backend {
+ 'swift': { include ::gnocchi::storage::swift }
+ 'file': { include ::gnocchi::storage::file }
+ 'rbd': { include ::gnocchi::storage::ceph }
+ default: { fail('Unrecognized gnocchi_backend parameter.') }
+ }
+ class { '::gnocchi':
+ database_connection => $gnocchi_database_connection,
+ }
+ class { '::gnocchi::api' :
+ manage_service => false,
+ enabled => false,
+ service_name => 'httpd',
+ }
+ class { '::gnocchi::wsgi::apache' :
+ ssl => false,
+ }
+ class { '::gnocchi::metricd' :
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::gnocchi::statsd' :
+ manage_service => false,
+ enabled => false,
+ }
+
$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
@@ -1138,17 +919,36 @@ if hiera('step') >= 3 {
hiera_include('controller_classes')
-} #END STEP 3
+} #END STEP 4
+
+if hiera('step') >= 5 {
+ # We now make sure that the root db password is set to a random one
+ # At first installation /root/.my.cnf will be empty and we connect without a root
+ # password. On second runs or updates /root/.my.cnf will already be populated
+ # with proper credentials. This step happens on every node because this SQL
+ # statement does not automatically replicate across nodes.
+ exec { 'galera-set-root-password':
+ command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
+ }
+ file { '/root/.my.cnf' :
+ ensure => file,
+ mode => '0600',
+ owner => 'root',
+ group => 'root',
+ content => "[client]
+user=root
+password=\"${mysql_root_password}\"
+
+[mysql]
+user=root
+password=\"${mysql_root_password}\"",
+ require => Exec['galera-set-root-password'],
+ }
-if hiera('step') >= 4 {
- $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
$cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
$heat_enable_db_purge = hiera('heat_enable_db_purge', true)
- if $keystone_enable_db_purge {
- include ::keystone::cron::token_flush
- }
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
}
@@ -1161,18 +961,6 @@ if hiera('step') >= 4 {
if $pacemaker_master {
- if $enable_load_balancer {
- pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
- constraint_type => 'order',
- first_resource => 'haproxy-clone',
- second_resource => 'openstack-core-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- }
-
pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
constraint_type => 'order',
first_resource => 'openstack-core-clone',
@@ -1182,15 +970,6 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::apache::params::service_name],
Pacemaker::Resource::Ocf['openstack-core']],
}
- pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
- constraint_type => 'order',
- first_resource => 'rabbitmq-clone',
- second_resource => 'openstack-core-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Ocf['rabbitmq'],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
constraint_type => 'order',
first_resource => 'memcached-clone',
@@ -1279,79 +1058,16 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
Pacemaker::Resource::Ocf['openstack-core']],
}
-
- # Glance
- pacemaker::resource::service { $::glance::params::registry_service_name :
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core'],
- }
- pacemaker::resource::service { $::glance::params::api_service_name :
- clone_params => 'interleave=true',
- }
-
- pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
+ pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint':
constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::glance::params::registry_service_name}-clone",
+ first_resource => "${::sahara::params::api_service_name}-clone",
+ second_resource => "${::sahara::params::engine_service_name}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
- constraint_type => 'order',
- first_resource => "${::glance::params::registry_service_name}-clone",
- second_resource => "${::glance::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
- Pacemaker::Resource::Service[$::glance::params::api_service_name]],
- }
- pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
- source => "${::glance::params::api_service_name}-clone",
- target => "${::glance::params::registry_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
- Pacemaker::Resource::Service[$::glance::params::api_service_name]],
- }
-
- if hiera('step') == 4 {
- # Neutron
- # NOTE(gfidente): Neutron will try to populate the database with some data
- # as soon as neutron-server is started; to avoid races we want to make this
- # happen only on one node, before normal Pacemaker initialization
- # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
- # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
- # will try to start the service while it's already started by Pacemaker
- # It would result to a deployment failure since systemd would return 1 to Puppet
- # and the overcloud would fail to deploy (6 would be returned).
- # This conditional prevents from a race condition during the deployment.
- # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
- exec { 'neutron-server-systemd-start-sleep' :
- command => 'systemctl start neutron-server && /usr/bin/sleep 5',
- path => '/usr/bin',
- unless => '/sbin/pcs resource show neutron-server',
- } ->
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core']
- }
- } else {
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core']
- }
- }
- if hiera('neutron::enable_l3_agent', true) {
- pacemaker::resource::service { $::neutron::params::l3_agent_service:
- clone_params => 'interleave=true',
- }
- }
- if hiera('neutron::enable_dhcp_agent', true) {
- pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
- clone_params => 'interleave=true',
- }
+ require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
+ Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
}
+
if hiera('neutron::enable_ovs_agent', true) {
pacemaker::resource::service { $::neutron::params::ovs_agent_service:
clone_params => 'interleave=true',
@@ -1362,11 +1078,6 @@ if hiera('step') >= 4 {
clone_params => 'interleave=true',
}
}
- if hiera('neutron::enable_metadata_agent', true) {
- pacemaker::resource::service { $::neutron::params::metadata_agent_service:
- clone_params => 'interleave=true',
- }
- }
if hiera('neutron::enable_ovs_agent', true) {
pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
ocf_agent_name => 'neutron:OVSCleanup',
@@ -1411,81 +1122,6 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
}
}
- pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::neutron::params::server_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Ocf['openstack-core'],
- Pacemaker::Resource::Service[$::neutron::params::server_service]],
- }
- if hiera('neutron::enable_ovs_agent',true) {
- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::ovs_agent_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- }
- if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::ovs_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
- }
-
- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
- source => "${::neutron::params::dhcp_agent_service}-clone",
- target => "${::neutron::params::ovs_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- }
- if hiera('neutron::enable_dhcp_agent',true) and hiera('l3_agent_service',true) {
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::l3_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
- source => "${::neutron::params::l3_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
- }
- }
- if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
- pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::l3_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
- }
- pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::l3_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
- }
- }
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
#midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
@@ -1616,7 +1252,7 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
- # Ceilometer
+ # Ceilometer and Aodh
case downcase(hiera('ceilometer_backend')) {
/mysql/: {
pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
@@ -1649,8 +1285,10 @@ if hiera('step') >= 4 {
# Fedora doesn't know `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
$redis_ceilometer_constraint_params = undef
+ $redis_aodh_constraint_params = undef
} else {
$redis_ceilometer_constraint_params = 'require-all=false'
+ $redis_aodh_constraint_params = 'require-all=false'
}
pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
constraint_type => 'order',
@@ -1662,6 +1300,16 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Ocf['redis'],
Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
}
+ pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
+ constraint_type => 'order',
+ first_resource => 'redis-master',
+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
+ first_action => 'promote',
+ second_action => 'start',
+ constraint_params => $redis_aodh_constraint_params,
+ require => [Pacemaker::Resource::Ocf['redis'],
+ Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
+ }
pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
constraint_type => 'order',
first_resource => 'openstack-core-clone',
@@ -1671,6 +1319,15 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
Pacemaker::Resource::Ocf['openstack-core']],
}
+ pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
+ constraint_type => 'order',
+ first_resource => 'openstack-core-clone',
+ second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
+ Pacemaker::Resource::Ocf['openstack-core']],
+ }
pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
constraint_type => 'order',
first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
@@ -1712,6 +1369,64 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
Pacemaker::Resource::Ocf['delay']],
}
+ # Aodh
+ pacemaker::resource::service { $::aodh::params::evaluator_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::aodh::params::notifier_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::aodh::params::listener_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
+ constraint_type => 'order',
+ first_resource => 'delay-clone',
+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Ocf['delay']],
+ }
+ pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
+ source => "${::aodh::params::evaluator_service_name}-clone",
+ target => 'delay-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Ocf['delay']],
+ }
+ pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
+ constraint_type => 'order',
+ first_resource => "${::aodh::params::evaluator_service_name}-clone",
+ second_resource => "${::aodh::params::notifier_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
+ }
+ pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
+ source => "${::aodh::params::notifier_service_name}-clone",
+ target => "${::aodh::params::evaluator_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
+ }
+ pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
+ constraint_type => 'order',
+ first_resource => "${::aodh::params::evaluator_service_name}-clone",
+ second_resource => "${::aodh::params::listener_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
+ }
+ pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
+ source => "${::aodh::params::listener_service_name}-clone",
+ target => "${::aodh::params::evaluator_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
+ }
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
constraint_type => 'order',
@@ -1724,6 +1439,30 @@ if hiera('step') >= 4 {
}
}
+ # gnocchi
+ pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
+ constraint_type => 'order',
+ first_resource => "${::gnocchi::params::metricd_service_name}-clone",
+ second_resource => "${::gnocchi::params::statsd_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
+ Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
+ }
+ pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
+ source => "${::gnocchi::params::statsd_service_name}-clone",
+ target => "${::gnocchi::params::metricd_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
+ Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
+ }
+
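The Aodh and Gnocchi resources above follow the constraint idiom used throughout this file: an order constraint (start A before B) paired with a colocation constraint (keep B where A runs). Reduced to its skeleton (names illustrative):

pacemaker::resource::service { 'svc-a': clone_params => 'interleave=true' }
pacemaker::resource::service { 'svc-b': clone_params => 'interleave=true' }

# Start svc-a's clone before svc-b's clone...
pacemaker::constraint::base { 'svc-a-then-svc-b':
  constraint_type => 'order',
  first_resource  => 'svc-a-clone',
  second_resource => 'svc-b-clone',
  first_action    => 'start',
  second_action   => 'start',
}
# ...and pin svc-b to nodes where svc-a is running.
pacemaker::constraint::colocation { 'svc-b-with-svc-a':
  source => 'svc-b-clone',
  target => 'svc-a-clone',
  score  => 'INFINITY',
}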
# Heat
pacemaker::resource::service { $::heat::params::api_service_name :
clone_params => 'interleave=true',
@@ -1737,15 +1476,6 @@ if hiera('step') >= 4 {
pacemaker::resource::service { $::heat::params::engine_service_name :
clone_params => 'interleave=true',
}
- pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::heat::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
constraint_type => 'order',
first_resource => "${::heat::params::api_service_name}-clone",
@@ -1840,30 +1570,6 @@ if hiera('step') >= 4 {
}
-} #END STEP 4
-
-if hiera('step') >= 5 {
-
- if $pacemaker_master {
-
- class {'::keystone::roles::admin' :
- require => Pacemaker::Resource::Service[$::apache::params::service_name],
- } ->
- class {'::keystone::endpoint' :
- require => Pacemaker::Resource::Service[$::apache::params::service_name],
- }
- include ::heat::keystone::domain
- Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
-
- } else {
- # On non-master controller we don't need to create Keystone resources again
- class { '::heat::keystone::domain':
- manage_domain => false,
- manage_user => false,
- manage_role => false,
- }
- }
-
} #END STEP 5
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp
index 2d880d33..a623da29 100644
--- a/puppet/manifests/ringbuilder.pp
+++ b/puppet/manifests/ringbuilder.pp
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-include ::tripleo::packages
-
define add_devices(
$swift_zones = '1'
){
@@ -91,6 +89,6 @@ class tripleo::ringbuilder (
}
}
-include ::tripleo::ringbuilder
-
-package_manifest{'/var/lib/tripleo/installed-packages/ringbuilder': ensure => present}
+if hiera('step') >= 3 {
+ include ::tripleo::ringbuilder
+}