Diffstat (limited to 'puppet/manifests/overcloud_controller_pacemaker.pp')
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 252
1 file changed, 226 insertions(+), 26 deletions(-)
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index cf607e53..f8d3fd76 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -41,6 +41,8 @@ if hiera('step') >= 1 {
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+ include ::timezone
+
if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
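Note: the new ::timezone include is, like the existing NTP handling, driven purely by hieradata. A minimal illustrative sketch (timezone::timezone is an assumption about the timezone module's parameter name; ntp::servers is the key looked up above):

    timezone::timezone: 'UTC'
    ntp::servers:
      - '0.pool.ntp.org'
      - '1.pool.ntp.org'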
@@ -78,11 +80,11 @@ if hiera('step') >= 1 {
Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
- # FIXME(gfidente): sets 100secs as default start timeout op
+ # FIXME(gfidente): sets 200secs as default start timeout op
# param; until we can use pcmk global defaults we'll still
# need to add it to every resource which redefines op params
Pacemaker::Resource::Service {
- op_params => 'start timeout=100s stop timeout=100s',
+ op_params => 'start timeout=200s stop timeout=200s',
}
# Only configure RabbitMQ in this step, don't start it yet to
@@ -352,7 +354,7 @@ if hiera('step') >= 2 {
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::resource::service { $::mongodb::params::service_name :
- op_params => 'start timeout=120s stop timeout=100s',
+ op_params => 'start timeout=370s stop timeout=200s',
clone_params => true,
require => Class['::mongodb::server'],
}
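Note: the longer MongoDB timeouts only take effect when MongoDB is selected as the Ceilometer backend, which is again a hiera decision; illustratively:

    ceilometer_backend: 'mongodb'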
@@ -443,13 +445,17 @@ MYSQL_HOST=localhost\n",
require => Exec['galera-ready'],
}
}
+
+ class { '::sahara::db::mysql':
+ require => Exec['galera-ready'],
+ }
}
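Note: the new sahara::db::mysql class creates Sahara's database and user once Galera is up (hence the require on the galera-ready exec). A hypothetical hieradata sketch, assuming the usual *::db::mysql parameter names from the puppet-sahara module:

    sahara::db::mysql::user: 'sahara'        # assumed parameter name
    sahara::db::mysql::password: 'CHANGEME'  # assumed parameter name
    sahara::db::mysql::dbname: 'sahara'      # assumed parameter name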
# pre-install swift here so we can build rings
include ::swift
# Ceph
- $enable_ceph = hiera('ceph_storage_count', 0) > 0
+ $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
class { '::ceph::profile::params':
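Note: Ceph is now enabled either when dedicated Ceph storage nodes are deployed (ceph_storage_count > 0) or when the enable_ceph_storage flag is set, e.g. when OSDs run on existing nodes rather than on a dedicated role. Illustrative hieradata:

    ceph_storage_count: 0
    enable_ceph_storage: true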
@@ -490,11 +496,10 @@ if hiera('step') >= 3 {
manage_service => false,
enabled => false,
}
+ include ::keystone::config
#TODO: need a cleanup-keystone-tokens.sh solution here
- keystone_config {
- 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
- }
+
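Note: replacing the hard-coded keystone_config resource with include ::keystone::config moves arbitrary keystone.conf overrides into hieradata. A sketch reproducing the removed ec2 driver setting, assuming the keystone::config class accepts a keystone_config hash:

    keystone::config::keystone_config:
      ec2/driver:
        value: 'keystone.contrib.ec2.backends.sql.Ec2'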
file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
ensure => 'directory',
owner => 'keystone',
@@ -592,8 +597,54 @@ if hiera('step') >= 3 {
}
include ::nova::network::neutron
- # Neutron class definitions
- include ::neutron
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+ # Run zookeeper in the controller if configured
+ if hiera('enable_zookeeper_on_controller') {
+ class {'::tripleo::cluster::zookeeper':
+ zookeeper_server_ips => $zookeeper_node_ips,
+ zookeeper_client_ip => $ipaddress,
+ zookeeper_hostnames => hiera('controller_node_names')
+ }
+ }
+
+ # Run cassandra in the controller if configured
+ if hiera('enable_cassandra_on_controller') {
+ class {'::tripleo::cluster::cassandra':
+ cassandra_servers => $cassandra_node_ips,
+ cassandra_ip => $ipaddress
+ }
+ }
+
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
+ }
+
+ class {'::tripleo::network::midonet::api':
+ zookeeper_servers => hiera('neutron_api_node_ips'),
+ vip => $public_vip,
+ keystone_ip => $public_vip,
+ keystone_admin_token => hiera('keystone::admin_token'),
+ bind_address => $ipaddress,
+ admin_password => hiera('admin_password')
+ }
+
+ # Configure Neutron
+ class {'::neutron':
+ service_plugins => []
+ }
+
+ }
+ else {
+ # Neutron class definitions
+ include ::neutron
+ }
+
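Note: whether the MidoNet branch above is taken, and whether ZooKeeper and Cassandra also run on the controllers, is controlled by hieradata; the keys are exactly the ones looked up above (values shown for illustration):

    neutron::core_plugin: 'midonet.neutron.plugin_v1.MidonetPluginV2'
    enable_zookeeper_on_controller: true
    enable_cassandra_on_controller: true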
class { '::neutron::server' :
sync_db => $sync_db,
manage_service => false,
@@ -603,6 +654,13 @@ if hiera('step') >= 3 {
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::neutron::plugins::nuage
}
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ class {'::neutron::plugins::midonet':
+ midonet_api_ip => $public_vip,
+ keystone_tenant => hiera('neutron::server::auth_tenant'),
+ keystone_password => hiera('neutron::server::auth_password')
+ }
+ }
if hiera('neutron::enable_dhcp_agent',true) {
class { '::neutron::agents::dhcp' :
manage_service => false,
@@ -720,6 +778,48 @@ if hiera('step') >= 3 {
}
}
+ if hiera('cinder_enable_eqlx_backend', false) {
+ $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
+
+ cinder_config {
+ "${cinder_eqlx_backend}/host": value => 'hostgroup';
+ }
+
+ cinder::backend::eqlx { $cinder_eqlx_backend :
+ volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
+ san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
+ san_login => hiera('cinder::backend::eqlx::san_login', undef),
+ san_password => hiera('cinder::backend::eqlx::san_password', undef),
+ san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
+ eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
+ eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef),
+ eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
+ eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
+ eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
+ }
+ }
+
+ if hiera('cinder_enable_dellsc_backend', false) {
+ $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
+
+ cinder_config {
+ "${cinder_dellsc_backend}/host": value => 'hostgroup';
+ }
+
+ cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
+ volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
+ san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
+ san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
+ san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
+ dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
+ iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
+ iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
+ dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
+ dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
+ dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
+ }
+ }
+
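Note: both new Cinder backends follow the same pattern: an opt-in flag plus per-backend keys passed straight through from hiera. An illustrative excerpt for the EqualLogic case (example values only; the Dell Storage Center backend uses the analogous cinder::backend::dellsc_iscsi::* keys):

    cinder_enable_eqlx_backend: true
    cinder::backend::eqlx::volume_backend_name: 'tripleo_eqlx'
    cinder::backend::eqlx::san_ip: '192.0.2.80'
    cinder::backend::eqlx::san_login: 'grpadmin'
    cinder::backend::eqlx::san_password: 'CHANGEME'
    cinder::backend::eqlx::eqlx_group_name: 'group-0'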
if hiera('cinder_enable_netapp_backend', false) {
$cinder_netapp_backend = hiera('cinder::backend::netapp::title')
@@ -773,11 +873,23 @@ if hiera('step') >= 3 {
}
}
- $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
+ $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
class { '::cinder::backends' :
enabled_backends => $cinder_enabled_backends,
}
+ class { '::sahara':
+ sync_db => $sync_db,
+ }
+ class { '::sahara::service::api':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::sahara::service::engine':
+ manage_service => false,
+ enabled => false,
+ }
+
# swift proxy
class { '::swift::proxy' :
manage_service => $non_pcmk_start,
@@ -915,7 +1027,19 @@ if hiera('step') >= 3 {
} #END STEP 3
if hiera('step') >= 4 {
- include ::keystone::cron::token_flush
+ $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+ $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+ $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+
+ if $keystone_enable_db_purge {
+ include ::keystone::cron::token_flush
+ }
+ if $nova_enable_db_purge {
+ include ::nova::cron::archive_deleted_rows
+ }
+ if $cinder_enable_db_purge {
+ include ::cinder::cron::db_purge
+ }
if $pacemaker_master {
@@ -1018,6 +1142,24 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::cinder::params::volume_service]],
}
+ # Sahara
+ pacemaker::resource::service { $::sahara::params::api_service_name :
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ }
+ pacemaker::resource::service { $::sahara::params::engine_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
+ second_resource => "${::sahara::params::api_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
+
# Glance
pacemaker::resource::service { $::glance::params::registry_service_name :
clone_params => 'interleave=true',
@@ -1053,15 +1195,32 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::glance::params::api_service_name]],
}
- # Neutron
- # NOTE(gfidente): Neutron will try to populate the database with some data
- # as soon as neutron-server is started; to avoid races we want to make this
- # happen only on one node, before normal Pacemaker initialization
- # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
- exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ if hiera('step') == 4 {
+ # Neutron
+ # NOTE(gfidente): Neutron will try to populate the database with some data
+ # as soon as neutron-server is started; to avoid races we want to make this
+ # happen only on one node, before normal Pacemaker initialization
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
+ # NOTE(emilien): we need to run this Exec only at Step 4, otherwise it
+ # will try to start the service while it is already started by Pacemaker.
+ # That would result in a deployment failure, since systemd would return 1
+ # to Puppet and the overcloud would fail to deploy (6 would be returned).
+ # This conditional prevents that race condition during the deployment.
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
+ exec { 'neutron-server-systemd-start-sleep' :
+ command => 'systemctl start neutron-server && /usr/bin/sleep 5',
+ path => '/usr/bin',
+ unless => '/sbin/pcs resource show neutron-server',
+ } ->
+ pacemaker::resource::service { $::neutron::params::server_service:
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ }
+ } else {
+ pacemaker::resource::service { $::neutron::params::server_service:
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ }
}
if hiera('neutron::enable_l3_agent', true) {
pacemaker::resource::service { $::neutron::params::l3_agent_service:
@@ -1078,6 +1237,11 @@ if hiera('step') >= 4 {
clone_params => 'interleave=true',
}
}
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ pacemaker::resource::service {'tomcat':
+ clone_params => 'interleave=true',
+ }
+ }
if hiera('neutron::enable_metadata_agent', true) {
pacemaker::resource::service { $::neutron::params::metadata_agent_service:
clone_params => 'interleave=true',
@@ -1128,7 +1292,6 @@ if hiera('step') >= 4 {
}
}
- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
constraint_type => 'order',
first_resource => "${::keystone::params::service_name}-clone",
@@ -1204,28 +1367,65 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
}
}
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ # MidoNet chain: keystone-->neutron-server-->dhcp-->metadata-->tomcat
+ pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::server_service}-clone",
+ second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+ Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+ }
+ pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ second_resource => "${::neutron::params::metadata_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+ }
+ pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::metadata_agent_service}-clone",
+ second_resource => 'tomcat-clone',
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
+ Pacemaker::Resource::Service['tomcat']],
+ }
+ pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
+ source => "${::neutron::params::metadata_agent_service}-clone",
+ target => "${::neutron::params::dhcp_agent_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+ }
+ }
# Nova
pacemaker::resource::service { $::nova::params::api_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::conductor_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::consoleauth_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
}
pacemaker::resource::service { $::nova::params::vncproxy_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::scheduler_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':