Diffstat (limited to 'puppet/manifests/overcloud_controller_pacemaker.pp')
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  352
1 file changed, 38 insertions(+), 314 deletions(-)
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 7205002a..cfa693be 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -21,10 +21,7 @@ Pcmk_resource <| |> {
# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
tag == 'aodh-service' or
- tag == 'ceilometer-service' or
- tag == 'gnocchi-service' or
- tag == 'neutron-service' or
- tag == 'nova-service'
+ tag == 'gnocchi-service'
|> {
hasrestart => true,
restart => '/bin/true',
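
The collector override above is the usual trick for handing restarts over to Pacemaker: Puppet still manages the tagged service resources, but any refresh triggered by a config change becomes a no-op because restart points at /bin/true. A minimal sketch of the same pattern with a single, hypothetical standalone service:

    service { 'aodh-evaluator':
      ensure => running,
      tag    => 'aodh-service',
    }
    # Override every service carrying the tag: Puppet reports a successful
    # "restart" without touching the process, so only the Pacemaker
    # resource agent actually stops and starts it.
    Service <| tag == 'aodh-service' |> {
      hasrestart => true,
      restart    => '/bin/true',
    }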
@@ -53,10 +50,6 @@ $non_pcmk_start = hiera('step') >= 5
if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
$corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
if $corosync_ipv6 {
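
The regsubst/downcase line above turns the comma-separated hiera string into the lowercase, space-separated member list the Pacemaker cluster setup expects; the 'G' flag makes regsubst replace every comma, not just the first. A worked example with a made-up node list:

    $names   = 'Ctrl-0,Ctrl-1,Ctrl-2'                     # hypothetical hiera value
    $members = downcase(regsubst($names, ',', ' ', 'G'))  # 'G' = replace globally
    notice($members)                                      # => 'ctrl-0 ctrl-1 ctrl-2'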
@@ -93,10 +86,6 @@ if hiera('step') >= 1 {
op_params => 'start timeout=200s stop timeout=200s',
}
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- include ::mongodb::params
- }
-
# Galera
if str2bool(hiera('enable_galera', true)) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
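
str2bool normalizes values that may come out of hiera as strings ('true', 'false', '1', ...) into real booleans, and the second argument to hiera() is the default used when the key is unset, so Galera is enabled by default here. A minimal sketch of the branch; the non-Galera path shown is a hypothetical placeholder, not taken from this diff:

    if str2bool(hiera('enable_galera', true)) {
      $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
    } else {
      $mysql_config_file = '/etc/my.cnf.d/server.cnf'  # hypothetical fallback
    }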
@@ -192,48 +181,25 @@ if hiera('step') >= 2 {
require => Class['::mysql::server'],
before => Exec['galera-ready'],
}
- }
- $mysql_root_password = hiera('mysql::server::root_password')
- $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
- # This step is to create a sysconfig clustercheck file with the root user and empty password
- # on the first install only (because later on the clustercheck db user will be used)
- # We are using exec and not file in order to not have duplicate definition errors in puppet
- # when we later set the file to contain the clustercheck data
- exec { 'create-root-sysconfig-clustercheck':
- command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
- unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
- }
- exec { 'galera-ready' :
- command => '/usr/bin/clustercheck >/dev/null',
- timeout => 30,
- tries => 180,
- try_sleep => 10,
- environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => Exec['create-root-sysconfig-clustercheck'],
- }
+ exec { 'galera-ready' :
+ command => '/usr/bin/clustercheck >/dev/null',
+ timeout => 30,
+ tries => 180,
+ try_sleep => 10,
+ environment => ['AVAILABLE_WHEN_READONLY=0'],
+ require => Exec['create-root-sysconfig-clustercheck'],
+ }
- xinetd::service { 'galera-monitor' :
- port => '9200',
- server => '/usr/bin/clustercheck',
- per_source => 'UNLIMITED',
- log_on_success => '',
- log_on_failure => 'HOST',
- flags => 'REUSE',
- service_type => 'UNLISTED',
- user => 'root',
- group => 'root',
- require => Exec['create-root-sysconfig-clustercheck'],
- }
- # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
- # to it in a later step. We do this only on one node as it will replicate on
- # the other members. We also make sure that the permissions are the minimum necessary
- if $pacemaker_master {
+ # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
+ # to it in a later step. We do this only on one node as it will replicate on
+ # the other members. We also make sure that the permissions are the minimum necessary
mysql_user { 'clustercheck@localhost':
ensure => 'present',
- password_hash => mysql_password($mysql_clustercheck_password),
+ password_hash => mysql_password(hiera('mysql_clustercheck_password')),
require => Exec['galera-ready'],
}
+
mysql_grant { 'clustercheck@localhost/*.*':
ensure => 'present',
options => ['GRANT'],
@@ -241,15 +207,6 @@ if hiera('step') >= 2 {
table => '*.*',
user => 'clustercheck@localhost',
}
- }
-
- # Create all the database schemas
- if $sync_db {
- if downcase(hiera('ceilometer_backend')) == 'mysql' {
- class { '::ceilometer::db::mysql':
- require => Exec['galera-ready'],
- }
- }
if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
class { '::gnocchi::db::mysql':
@@ -259,7 +216,28 @@ if hiera('step') >= 2 {
class { '::aodh::db::mysql':
require => Exec['galera-ready'],
- }
+ }
+ }
+ # This step is to create a sysconfig clustercheck file with the root user and empty password
+ # on the first install only (because later on the clustercheck db user will be used)
+ # We are using exec and not file in order to not have duplicate definition errors in puppet
+ # when we later set the file to contain the clustercheck data
+ exec { 'create-root-sysconfig-clustercheck':
+ command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+ unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
+ }
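
As the comment above notes, a plain file resource here would later collide with the file { '/etc/sysconfig/clustercheck' } declared for step 3/4 and raise a duplicate-declaration error, so an exec with an unless guard seeds the bootstrap credentials once and then stays quiet. Puppet expands the \n escapes in the double-quoted command string at compile time, so echo receives real newlines and the file ends up with one variable per line. Roughly the file resource this deliberately avoids (a hypothetical sketch, not part of the change):

    file { '/etc/sysconfig/clustercheck':
      ensure  => file,
      mode    => '0600',
      content => "MYSQL_USERNAME=root\nMYSQL_PASSWORD=''\nMYSQL_HOST=localhost\n",
      replace => false,  # first install only -- still duplicates the step 3/4 resource
    }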
+
+ xinetd::service { 'galera-monitor' :
+ port => '9200',
+ server => '/usr/bin/clustercheck',
+ per_source => 'UNLIMITED',
+ log_on_success => '',
+ log_on_failure => 'HOST',
+ flags => 'REUSE',
+ service_type => 'UNLISTED',
+ user => 'root',
+ group => 'root',
+ require => Exec['create-root-sysconfig-clustercheck'],
}
} #END STEP 2
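
Everything in this manifest is gated on the hiera 'step' value: the deployment applies the same manifest several times with an increasing step, and each block states the earliest step at which its resources may be realized. The recurring shape, including the step 3/4 guard used in the next hunk:

    if hiera('step') >= 2 {
      # cluster-level resources: galera, clustercheck, the xinetd monitor, ...
    }
    if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
      # post-schema resources; $sync_db lets the node performing the
      # database syncs run this block one step earlier than its peers
    }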
@@ -267,6 +245,7 @@ if hiera('step') >= 2 {
if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
# At this stage we are guaranteed that the clustercheck db user exists
# so we switch the resource agent to use it.
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
file { '/etc/sysconfig/clustercheck' :
ensure => file,
mode => '0600',
@@ -290,128 +269,6 @@ MYSQL_HOST=localhost\n",
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => split(hiera('controller_node_names'), ',')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # Configure Neutron
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
- # Ceilometer
- case downcase(hiera('ceilometer_backend')) {
- /mysql/: {
- $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
- }
- default: {
- $mongo_node_string = join($mongo_node_ips_with_port, ',')
- $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
- }
- }
- include ::ceilometer
- include ::ceilometer::config
- class { '::ceilometer::api' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::agent::notification' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::agent::central' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::collector' :
- manage_service => false,
- enabled => false,
- }
- include ::ceilometer::expirer
- class { '::ceilometer::db' :
- database_connection => $ceilometer_database_connection,
- sync_db => $sync_db,
- }
- include ::ceilometer::agent::auth
- include ::ceilometer::dispatcher::gnocchi
-
- Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
-
- # httpd/apache and horizon
- # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
- class { '::apache' :
- service_enable => false,
- # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
- }
- include ::apache::mod::remoteip
- include ::apache::mod::status
- if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- $_profile_support = 'cisco'
- } else {
- $_profile_support = 'None'
- }
- $neutron_options = {'profile_support' => $_profile_support }
-
- $memcached_ipv6 = hiera('memcached_ipv6', false)
- if $memcached_ipv6 {
- $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
- } else {
- $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
- }
-
- class { '::horizon':
- cache_server_ip => $horizon_memcached_servers,
- neutron_options => $neutron_options,
- }
-
# Aodh
class { '::aodh' :
database_connection => hiera('aodh_mysql_conn_string'),
@@ -482,6 +339,7 @@ if hiera('step') >= 5 {
# password. On second runs or updates /root/.my.cnf will already be populated
# with proper credentials. This step happens on every node because this sql
# statement does not automatically replicate across nodes.
+ $mysql_root_password = hiera('mysql::server::root_password')
exec { 'galera-set-root-password':
command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
}
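
An exec is used here rather than a file resource because on a fresh cluster the Galera root account has no password yet: the manifest touches an empty defaults file, connects without credentials, and sets the password through SQL, and (as the comment says) must do so on every node since the statement does not replicate. The credentials file that ends up on disk, written by a file resource further down (its tail is visible in the next hunk's context), looks roughly like this hypothetical sketch:

    file { '/root/.my.cnf':
      ensure  => file,
      mode    => '0600',
      content => "[client]\nuser=root\npassword=\"${mysql_root_password}\"\n",
      # declaring this before the password exists in mysql would lock
      # the bootstrap out, hence the exec-based two-phase approach above
    }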
@@ -527,49 +385,6 @@ password=\"${mysql_root_password}\"",
Pacemaker::Resource::Ocf['openstack-core']],
}
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- pacemaker::resource::service {'tomcat':
- clone_params => 'interleave=true',
- }
- }
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
- pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::metadata_agent_service}-clone",
- second_resource => 'tomcat-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
- Pacemaker::Resource::Service['tomcat']],
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- }
-
# Nova
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
constraint_type => 'order',
@@ -652,49 +467,12 @@ password=\"${mysql_root_password}\"",
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
- # Ceilometer and Aodh
- case downcase(hiera('ceilometer_backend')) {
- /mysql/: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core'],
- }
- }
- default: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
- clone_params => 'interleave=true',
- require => [Pacemaker::Resource::Ocf['openstack-core'],
- Pacemaker::Resource::Service[$::mongodb::params::service_name]],
- }
- }
- }
- pacemaker::resource::service { $::ceilometer::params::collector_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::ceilometer::params::api_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
- clone_params => 'interleave=true',
- }
# Fedora doesn't know the `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
- $redis_ceilometer_constraint_params = undef
$redis_aodh_constraint_params = undef
} else {
- $redis_ceilometer_constraint_params = 'require-all=false'
$redis_aodh_constraint_params = 'require-all=false'
}
- pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => 'redis-master',
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'promote',
- second_action => 'start',
- constraint_params => $redis_ceilometer_constraint_params,
- require => [Pacemaker::Resource::Ocf['redis'],
- Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
- }
pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
constraint_type => 'order',
first_resource => 'redis-master',
@@ -705,49 +483,6 @@ password=\"${mysql_root_password}\"",
require => [Pacemaker::Resource::Ocf['redis'],
Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
}
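
In the constraint above, first_action => 'promote' orders the aodh evaluator after the redis master is promoted (not merely started), and require-all=false relaxes the clone semantics so a single promoted instance is enough. An order constraint only sequences start-up; when two clones must also share a node, the manifest pairs it with a colocation, as the removed ceilometer wiring below did. A generic sketch with hypothetical resources foo and bar:

    pacemaker::constraint::base { 'foo-then-bar-constraint':
      constraint_type => 'order',
      first_resource  => 'foo-clone',
      second_resource => 'bar-clone',
      first_action    => 'start',
      second_action   => 'start',
    }
    pacemaker::constraint::colocation { 'bar-with-foo-colocation':
      source => 'bar-clone',  # bar runs where...
      target => 'foo-clone',  # ...foo runs
      score  => 'INFINITY',
    }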
- pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- second_resource => "${::ceilometer::params::collector_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
- }
- pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::collector_service_name}-clone",
- second_resource => "${::ceilometer::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
- }
- pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
- source => "${::ceilometer::params::api_service_name}-clone",
- target => "${::ceilometer::params::collector_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
- }
# Aodh
pacemaker::resource::service { $::aodh::params::evaluator_service_name :
clone_params => 'interleave=true',
@@ -790,17 +525,6 @@ password=\"${mysql_root_password}\"",
require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
}
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => "${::mongodb::params::service_name}-clone",
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::mongodb::params::service_name]],
- }
- }
# gnocchi
pacemaker::resource::service { $::gnocchi::params::metricd_service_name :