Diffstat (limited to 'puppet/manifests/overcloud_controller_pacemaker.pp')
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 243
1 file changed, 125 insertions(+), 118 deletions(-)
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 996701d0..47f460ac 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -13,6 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+Pcmk_resource <| |> {
+ tries => 10,
+ try_sleep => 3,
+}
+
if !str2bool(hiera('enable_package_install', 'false')) {
case $::osfamily {
'RedHat': {
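For context on the hunk above: Pcmk_resource <| |> is a Puppet resource collector with an attribute block, so every pcmk_resource declared anywhere in the catalog gets tries => 10 and try_sleep => 3 applied to it, making Pacemaker resource creation retry instead of failing on the first attempt. A minimal sketch of the same collector-override idiom using a core type (illustrative only, not part of the patch):

# Collect every File resource in the catalog and force an attribute onto
# all of them; collector overrides win over values declared on the
# resources themselves.
File <| |> {
  backup => false,
}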
@@ -26,12 +31,18 @@ if !str2bool(hiera('enable_package_install', 'false')) {
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$pacemaker_master = true
+ $sync_db = true
} else {
$pacemaker_master = false
+ $sync_db = false
}
if hiera('step') >= 1 {
+ if count(hiera('ntp::servers')) > 0 {
+ include ::ntp
+ }
+
$controller_node_ips = split(hiera('controller_node_ips'), ',')
$controller_node_names = split(downcase(hiera('controller_node_names')), ',')
class { '::tripleo::loadbalancer' :
@@ -55,88 +66,38 @@ if hiera('step') >= 1 {
class { '::pacemaker::stonith':
disable => true,
}
- if $pacemaker_master {
- $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- pacemaker::resource::ip { 'control_vip':
- ip_address => $control_vip,
- }
- $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
- }
- pacemaker::resource::service { 'haproxy':
- clone => true,
- }
- }
- Class['::pacemaker::corosync'] -> Pacemaker::Resource::Service <| |>
-
-}
-
-if hiera('step') >= 2 {
-
- if count(hiera('ntp::servers')) > 0 {
- include ::ntp
+ # Only configure RabbitMQ in this step, don't start it yet to
+ # avoid races where non-master nodes attempt to start without
+ # config (eg. binding on 0.0.0.0)
+ # The module ignores erlang_cookie if cluster_config is false
+ class { '::rabbitmq':
+ service_manage => false,
+ tcp_keepalive => false,
+ config_kernel_variables => hiera('rabbitmq_kernel_variables'),
+ config_variables => hiera('rabbitmq_config_variables'),
+ environment_variables => hiera('rabbitmq_environment'),
+ } ->
+ file { '/var/lib/rabbitmq/.erlang.cookie':
+ ensure => 'present',
+ owner => 'rabbitmq',
+ group => 'rabbitmq',
+ mode => '0400',
+ content => hiera('rabbitmq::erlang_cookie'),
+ replace => true,
}
# MongoDB
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- include ::mongodb::globals
+ include ::mongodb::globals
- class {'::mongodb::server' :
- service_ensure => undef
- }
- $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
- $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
- $mongo_node_string = join($mongo_node_ips_with_port, ',')
-
- $mongodb_replset = hiera('mongodb::server::replset')
- $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
- if downcase(hiera('bootstrap_nodeid')) == $::hostname {
-
- pacemaker::resource::service { 'mongod' :
- options => "op start timeout=120s",
- clone => true,
- before => Exec['mongodb-ready'],
- }
- # NOTE (spredzy) : The replset can only be run
- # once all the nodes have joined the cluster.
- $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
- exec { 'mongodb-ready' :
- command => $mongodb_cluster_ready_command,
- timeout => 600,
- tries => 60,
- try_sleep => 10,
- before => Mongodb_replset[$mongodb_replset],
- }
-
- mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
- }
- }
- }
-
- # Redis
- $redis_node_ips = split(hiera('redis_node_ips'), ',')
- $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))
-
- if $redis_master_hostname == $::hostname {
- $slaveof = undef
- } else {
- $slaveof = "${redis_master_hostname} 6379"
- }
- class {'::redis' :
- slaveof => $slaveof,
- }
-
- if count($redis_node_ips) > 1 {
- Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
- include ::redis::sentinel
- class {'::tripleo::redis_notification' :
- haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'),
- }
+ # FIXME: replace with service_manage => false on ::mongodb::server
+  # when this is merged: https://github.com/puppetlabs/puppetlabs-mongodb/pull/198
+ class { '::mongodb::server' :
+ service_ensure => undef,
+ service_enable => false,
}
+ # Galera
if str2bool(hiera('enable_galera', 'true')) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
} else {
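Two Puppet idioms carry the weight in the hunk above: service_manage => false (and service_ensure => undef / service_enable => false for MongoDB), which lets a module lay down packages and configuration without starting the daemon so Pacemaker can own it in a later step, and the -> chaining arrow after the ::rabbitmq class, which orders the Erlang cookie file after that class. A standalone sketch of the same pattern, with illustrative names that are not taken from the patch:

# Illustrative only: a module-style class that manages configuration but,
# when service_manage => false, leaves starting the daemon to something
# else (here, Pacemaker in a later step).
class example_service ($service_manage = true) {
  if $service_manage {
    service { 'example_service':
      ensure => running,
      enable => true,
    }
  }
}

class { '::example_service':
  service_manage => false,
} ->
file { '/var/lib/example_service/.cluster.cookie':
  ensure  => 'present',
  owner   => 'root',
  group   => 'root',
  mode    => '0400',
  content => 'shared-secret',  # the real manifest reads this from hiera()
}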
@@ -184,31 +145,97 @@ if hiera('step') >= 2 {
service_manage => false,
}
+}
+
+if hiera('step') >= 2 {
+
if $pacemaker_master {
- $sync_db = true
+ $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
+ pacemaker::resource::ip { 'control_vip':
+ ip_address => $control_vip,
+ }
+ $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
+ pacemaker::resource::ip { 'public_vip':
+ ip_address => $public_vip,
+ }
+ pacemaker::resource::service { 'haproxy':
+ clone_params => true,
+ }
- pacemaker::resource::ocf { 'galera' :
- resource_name => 'heartbeat:galera',
- options => "enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}' meta master-max=${galera_nodes_count} ordered=true op promote timeout=300s on-fail=block --master",
- require => Class['::mysql::server'],
- before => Exec['galera-ready'],
+ pacemaker::resource::ocf { 'rabbitmq':
+ ocf_agent_name => 'heartbeat:rabbitmq-cluster',
+ resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
+ clone_params => 'ordered=true interleave=true',
+ require => Class['::rabbitmq'],
}
+ if downcase(hiera('ceilometer_backend')) == 'mongodb' {
+ pacemaker::resource::service { $::mongodb::params::service_name :
+ op_params => 'start timeout=120s',
+ clone_params => true,
+ require => Class['::mongodb::server'],
+ before => Exec['mongodb-ready'],
+ }
+ # NOTE (spredzy) : The replset can only be run
+ # once all the nodes have joined the cluster.
+ $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
+ $mongo_node_string = join($mongo_node_ips_with_port, ',')
+ $mongodb_replset = hiera('mongodb::server::replset')
+ $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
+ exec { 'mongodb-ready' :
+ command => $mongodb_cluster_ready_command,
+ timeout => 600,
+ tries => 60,
+ try_sleep => 10,
+ }
+ mongodb_replset { $mongodb_replset :
+ members => $mongo_node_ips_with_port,
+ require => Exec['mongodb-ready'],
+ }
+ }
+
+ pacemaker::resource::ocf { 'galera' :
+ ocf_agent_name => 'heartbeat:galera',
+ op_params => 'promote timeout=300s on-fail=block --master',
+ meta_params => "master-max=${galera_nodes_count} ordered=true",
+ resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
+ require => Class['::mysql::server'],
+ before => Exec['galera-ready'],
+ }
mysql_user { 'clustercheckuser@localhost' :
password_hash => mysql_password($clustercheck_password),
require => Exec['galera-ready'],
}
+ }
+
+ # Redis
+ $redis_node_ips = split(hiera('redis_node_ips'), ',')
+ $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))
+
+ if $redis_master_hostname == $::hostname {
+ $slaveof = undef
} else {
- $sync_db = false
+ $slaveof = "${redis_master_hostname} 6379"
+ }
+ class {'::redis' :
+ slaveof => $slaveof,
+ }
+
+ if count($redis_node_ips) > 1 {
+ Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
+ include ::redis::sentinel
+ class {'::tripleo::redis_notification' :
+ haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'),
+ }
}
exec { 'galera-ready' :
command => '/bin/mysql -e "SHOW GLOBAL VARIABLES LIKE \'read_only\'" | /bin/grep -i off',
- timeout => 3600,
+ timeout => 600,
tries => 60,
- try_sleep => 60,
+ try_sleep => 10,
environment => 'HOME=/root',
- require => Class['::mysql::server'],
}
file { '/etc/sysconfig/clustercheck' :
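A pattern that recurs in the hunk above is an exec used purely as a readiness gate: 'mongodb-ready' and 'galera-ready' retry a cheap check command until the clustered service actually answers, and later resources (mongodb_replset, mysql_user, the schema creation) require them. A generic sketch of the pattern, with an illustrative check command:

# Illustrative readiness gate: retry a check until the backing service
# responds, then let dependent resources proceed.
exec { 'db-ready':
  command     => '/bin/mysql -e "SELECT 1"',
  environment => 'HOME=/root',
  timeout     => 600,   # seconds before one attempt is considered failed
  tries       => 60,    # retry up to 60 times...
  try_sleep   => 10,    # ...waiting 10 seconds between attempts
}

exec { 'create-example-schema':
  command => '/bin/mysql -e "CREATE DATABASE IF NOT EXISTS example"',
  require => Exec['db-ready'],
}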
@@ -232,7 +259,6 @@ MYSQL_HOST=localhost\n",
require => File['/etc/sysconfig/clustercheck'],
}
- # FIXME: this should only occur on the bootstrap host (ditto for db syncs)
# Create all the database schemas
# Example DSN format: mysql://user:password@host/dbname
if $sync_db {
@@ -304,34 +330,10 @@ MYSQL_HOST=localhost\n",
}
}
- # the module ignores erlang_cookie if cluster_config is false
- file { '/var/lib/rabbitmq/.erlang.cookie':
- ensure => 'present',
- owner => 'rabbitmq',
- group => 'rabbitmq',
- mode => '0400',
- content => hiera('rabbitmq::erlang_cookie'),
- replace => true,
- } ->
- class { '::rabbitmq':
- service_manage => false,
- tcp_keepalive => false,
- config_kernel_variables => hiera('rabbitmq_kernel_variables'),
- config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => hiera('rabbitmq_environment'),
- }
- if $pacemaker_master {
- pacemaker::resource::ocf { 'rabbitmq':
- resource_name => 'heartbeat:rabbitmq-cluster',
- options => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
- clone => true,
- require => Class['::rabbitmq'],
- }
- }
-
# pre-install swift here so we can build rings
include ::swift
+ # Ceph
$cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false)
$enable_ceph = $cinder_enable_rbd_backend
@@ -347,12 +349,16 @@ MYSQL_HOST=localhost\n",
include ::ceph::profile::osd
}
+ # Memcached
+ include ::memcached
+
} #END STEP 2
-if (hiera('step') >= 3 and $::hostname == downcase(hiera('bootstrap_nodeid')))
- or hiera('step') >= 4 {
+if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
- include ::keystone
+ class { '::keystone':
+ sync_db => $sync_db,
+ }
#TODO: need a cleanup-keystone-tokens.sh solution here
keystone_config {
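The keystone change above swaps include ::keystone for a resource-like class declaration so that sync_db can be set per node ($sync_db is true only on the bootstrap node, so only that node runs the database migration). An illustrative comparison of the two declaration styles, using hypothetical classes rather than the real keystone module:

# include-style: use a class with all parameters at their defaults;
# include may appear many times for the same class.
class example_api { }
include ::example_api

# resource-like style: the same mechanism, but pinning a parameter
# explicitly; a class may only be declared this way once per catalog.
class example_db ($sync_db = false) {
  if $sync_db {
    exec { 'example-db-sync':
      command => '/usr/bin/true',  # stand-in for a real db_sync command
    }
  }
}
class { '::example_db':
  sync_db => true,
}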
@@ -454,7 +460,9 @@ if (hiera('step') >= 3 and $::hostname == downcase(hiera('bootstrap_nodeid')))
Service['neutron-server'] -> Service['neutron-metadata']
include ::cinder
- include ::cinder::api
+ class { '::cinder::api':
+ sync_db => $sync_db,
+ }
include ::cinder::glance
include ::cinder::scheduler
include ::cinder::volume
@@ -505,7 +513,6 @@ if (hiera('step') >= 3 and $::hostname == downcase(hiera('bootstrap_nodeid')))
}
# swift proxy
- include ::memcached
include ::swift::proxy
include ::swift::proxy::proxy_logging
include ::swift::proxy::healthcheck
@@ -544,7 +551,7 @@ if (hiera('step') >= 3 and $::hostname == downcase(hiera('bootstrap_nodeid')))
$ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
}
default : {
- $ceilometer_database_connection = $ceilometer_mongodb_conn_string
+ $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
}
}
include ::ceilometer
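The ceilometer hunk above rebuilds the MongoDB connection string inline from $mongo_node_string and $mongodb_replset, which step 2 derives from hiera data. A small worked example of that string construction with illustrative addresses (the real values come from hiera('mongo_node_ips') and hiera('mongodb::server::replset')):

# Illustrative: turn a comma-separated host list into a replica-set URI.
$mongo_node_ips           = split('192.0.2.11,192.0.2.12,192.0.2.13', ',')
$mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')     # stdlib
$mongo_node_string        = join($mongo_node_ips_with_port, ',')  # stdlib
$mongodb_replset          = 'tripleo'
$conn = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
# => mongodb://192.0.2.11:27017,192.0.2.12:27017,192.0.2.13:27017/ceilometer?replicaSet=tripleo
notify { $conn: }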
@@ -591,4 +598,4 @@ if (hiera('step') >= 3 and $::hostname == downcase(hiera('bootstrap_nodeid')))
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
-} #END STEP 3
+} #END STEP 3/4
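Taken as a whole, the patch reorganizes work across the deployment steps: the manifest is applied on every controller several times with an increasing hiera('step') value, step 1 now only configures the clustered services, step 2 creates the Pacemaker resources (from the bootstrap node only), and the OpenStack services move to step 3/4, with the bootstrap node starting one step early. A minimal skeleton of that gating, with a literal step value standing in for hiera('step') and $pacemaker_master hard-coded for illustration:

$current_step     = 2      # the real manifest uses hiera('step')
$pacemaker_master = true   # the real manifest compares $::hostname to hiera('bootstrap_nodeid')

if $current_step >= 2 {
  if $pacemaker_master {
    notify { 'step 2: create cluster-wide Pacemaker resources once, from the bootstrap node': }
  }
}

if ($current_step >= 3 and $pacemaker_master) or $current_step >= 4 {
  notify { 'step 3/4: configure OpenStack services; the bootstrap node starts one step early': }
}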