Diffstat (limited to 'puppet/manifests')
-rw-r--r-- | puppet/manifests/overcloud_cephstorage.pp          |  41
-rw-r--r-- | puppet/manifests/overcloud_compute.pp              | 144
-rw-r--r-- | puppet/manifests/overcloud_controller.pp           | 291
-rw-r--r-- | puppet/manifests/overcloud_controller_pacemaker.pp | 911
-rw-r--r-- | puppet/manifests/overcloud_object.pp               |  11
-rw-r--r-- | puppet/manifests/overcloud_volume.pp               |  45
-rw-r--r-- | puppet/manifests/ringbuilder.pp                    |  99
7 files changed, 9 insertions, 1533 deletions
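
Every hunk below makes the same move: the hard-coded per-role logic (kernel modules and sysctl settings, firewall rules, Ceph, Cinder, SNMP, the Neutron plugin matrix, the Pacemaker resources and constraints) is deleted from the static role manifests, and each role keeps only a step-gated hiera_include of whatever classes the composable-service layer publishes for it. As a reading aid, this is the shape every surviving manifest is reduced to, condensed here from the overcloud_compute.pp hunks in this diff (the package_manifest marker at the end is pre-existing context, not part of the change):

    # All role configuration now arrives via Hiera; at step 4 the manifest
    # only pulls in the classes listed under the role's *_classes key.
    # The second argument ([]) is a default, so a role with no classes
    # defined in Hiera no-ops instead of failing the lookup.
    if hiera('step') >= 4 {
      hiera_include('compute_classes', [])
    }

    # Per-step marker file, kept so tooling can tell which deployment
    # step last completed on this node.
    $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')])
    package_manifest { $package_manifest_name:
      ensure => present,
    }
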
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 2169002e..2653badf 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -13,45 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-include ::tripleo::packages
-include ::tripleo::firewall
-
-if hiera('step') >= 1 {
-
-  create_resources(kmod::load, hiera('kernel_modules'), {})
-  create_resources(sysctl::value, hiera('sysctl_settings'), {})
-  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
-if hiera('step') >= 3 {
-  if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
-    exec { 'set selinux to permissive on boot':
-      command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
-      onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
-      path    => ['/usr/bin', '/usr/sbin'],
-    }
-
-    exec { 'set selinux to permissive':
-      command => 'setenforce 0',
-      onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
-      path    => ['/usr/bin', '/usr/sbin'],
-    } -> Class['ceph::profile::osd']
-  }
-
-  if str2bool(hiera('ceph_ipv6', false)) {
-    $mon_host = hiera('ceph_mon_host_v6')
-  } else {
-    $mon_host = hiera('ceph_mon_host')
-  }
-  class { '::ceph::profile::params':
-    mon_host => $mon_host,
-  }
-  include ::ceph::conf
-  include ::ceph::profile::client
-  include ::ceph::profile::osd
-
-  hiera_include('ceph_classes')
+if hiera('step') >= 4 {
+  hiera_include('ceph_classes', [])
 }
 
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', hiera('step')])
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 1ec376b1..f96c193c 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -13,150 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-include ::tripleo::packages
-include ::tripleo::firewall
-
-create_resources(kmod::load, hiera('kernel_modules'), { })
-create_resources(sysctl::value, hiera('sysctl_settings'), { })
-Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
 if hiera('step') >= 4 {
-
-  # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
-  exec { 'reset-iscsi-initiator-name':
-    command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
-    onlyif  => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset',
-  }->
-
-  file { '/etc/iscsi/.initiator_reset':
-    ensure => present,
-  }
-
-  $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
-  $rbd_persistent_storage = hiera('rbd_persistent_storage', false)
-  if $rbd_ephemeral_storage or $rbd_persistent_storage {
-    if str2bool(hiera('ceph_ipv6', false)) {
-      $mon_host = hiera('ceph_mon_host_v6')
-    } else {
-      $mon_host = hiera('ceph_mon_host')
-    }
-    class { '::ceph::profile::params':
-      mon_host => $mon_host,
-    }
-    include ::ceph::conf
-    include ::ceph::profile::client
-  }
-
-  if hiera('cinder_enable_nfs_backend', false) {
-    if str2bool($::selinux) {
-      selboolean { 'virt_use_nfs':
-        value      => on,
-        persistent => true,
-      } -> Package['nfs-utils']
-    }
-
-    package { 'nfs-utils': } -> Service['nova-compute']
-  }
-
-  # TODO(emilien): figure if we *really* need those 2 parameters:
-  nova_config {
-    'DEFAULT/my_ip':                     value => $ipaddress;
-    'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
-  }
-
-  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-    file { '/etc/libvirt/qemu.conf':
-      ensure  => present,
-      content => hiera('midonet_libvirt_qemu_data')
-    }
-  }
-
-  include ::neutron
-  include ::neutron::config
-
-  # If the value of core plugin is set to 'nuage',
-  # include nuage agent,
-  # If the value of core plugin is set to 'midonet',
-  # include midonet agent,
-  # else use the default value of 'ml2'
-  if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
-    include ::nuage::vrs
-    include ::nova::compute::neutron
-
-    class { '::nuage::metadataagent':
-      nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
-      nova_os_password    => hiera('nova_password'),
-      nova_metadata_ip    => hiera('nova_metadata_node_ips'),
-      nova_auth_ip        => hiera('keystone_public_api_virtual_ip'),
-    }
-  }
-  elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
-    # TODO(devvesa) provide non-controller ips for these services
-    $zookeeper_node_ips = hiera('neutron_api_node_ips')
-    $cassandra_node_ips = hiera('neutron_api_node_ips')
-
-    class { '::tripleo::network::midonet::agent':
-      zookeeper_servers => $zookeeper_node_ips,
-      cassandra_seeds   => $cassandra_node_ips
-    }
-  }
-  elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
-
-    include ::contrail::vrouter
-    # NOTE: it's not possible to use this class without a functional
-    # contrail controller up and running
-    #class {'::contrail::vrouter::provision_vrouter':
-    #  require => Class['contrail::vrouter'],
-    #}
-  }
-  elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
-    # forward all ipv4 traffic
-    # this is required for the vms to pass through the gateways public interface
-    sysctl::value { 'net.ipv4.ip_forward': value => '1' }
-
-    # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on
-    file { '/etc/sudoers.d/ifc_ctl_sudoers':
-      ensure  => file,
-      owner   => root,
-      group   => root,
-      mode    => '0440',
-      content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
-    }
-  }
-  else {
-
-    # NOTE: this code won't live in puppet-neutron until Neutron OVS agent
-    # can be gracefully restarted. See https://review.openstack.org/#/c/297211
-    # In the meantime, it's safe to restart the agent on each change in neutron.conf,
-    # because Puppet changes are supposed to be done during bootstrap and upgrades.
-    # Some resource managed by Neutron_config (like messaging and logging options) require
-    # a restart of OVS agent. This code does it.
-    # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code
-    # from here and fix it in puppet-neutron.
-    Neutron_config<||> ~> Service['neutron-ovs-agent-service']
-
-    include ::neutron::plugins::ml2
-    include ::neutron::agents::ml2::ovs
-
-    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
-      class { '::neutron::agents::n1kv_vem':
-        n1kv_source  => hiera('n1kv_vem_source', undef),
-        n1kv_version => hiera('n1kv_vem_version', undef),
-      }
-    }
-
-    if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
-      include ::neutron::agents::bigswitch
-    }
-  }
-
-  include ::ceilometer
-  include ::ceilometer::config
-  include ::ceilometer::agent::compute
-  include ::ceilometer::agent::auth
-
-  hiera_include('compute_classes')
+  hiera_include('compute_classes', [])
 }
 
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')])
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index d8166f20..25bdbfb2 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -13,296 +13,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-include ::tripleo::packages
-include ::tripleo::firewall
-
-$enable_load_balancer = hiera('enable_load_balancer', true)
-
-if hiera('step') >= 1 {
-
-  create_resources(kmod::load, hiera('kernel_modules'), {})
-  create_resources(sysctl::value, hiera('sysctl_settings'), {})
-  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
-if hiera('step') >= 2 {
-
-  # MongoDB
-  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
-    # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and
-    # without the brackets as 'members' argument for the 'mongodb_replset'
-    # resource.
- if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } else { - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } - $mongo_node_string = join($mongo_node_ips_with_port, ',') - - $mongodb_replset = hiera('mongodb::server::replset') - $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - - if str2bool(hiera('enable_galera', true)) { - $mysql_config_file = '/etc/my.cnf.d/galera.cnf' - } else { - $mysql_config_file = '/etc/my.cnf.d/server.cnf' - } - # TODO Galara - # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we - # set bind-address to a hostname instead of an ip address; to move Mysql - # from internal_api on another network we'll have to customize both - # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap - class { '::mysql::server': - config_file => $mysql_config_file, - override_options => { - 'mysqld' => { - 'bind-address' => $::hostname, - 'max_connections' => hiera('mysql_max_connections'), - 'open_files_limit' => '-1', - }, - }, - remove_default_accounts => true, - } - - # FIXME: this should only occur on the bootstrap host (ditto for db syncs) - # Create all the database schemas - if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { - include ::gnocchi::db::mysql - } - if downcase(hiera('ceilometer_backend')) == 'mysql' { - include ::ceilometer::db::mysql - } - include ::aodh::db::mysql - - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) - - if $enable_ceph { - $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_initial_members => $mon_initial_members, - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::mon - } - - if str2bool(hiera('enable_ceph_storage', false)) { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] - } - - include ::ceph::conf - include ::ceph::profile::osd - } - - if str2bool(hiera('enable_external_ceph', false)) { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - } - -} #END STEP 2 - if hiera('step') >= 4 { - - $nova_ipv6 = hiera('nova::use_ipv6', false) - if $nova_ipv6 { - $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211') - } else { - $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211') - } - - class { '::nova' : - memcached_servers => $memcached_servers - } - include ::nova::config - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => hiera('controller_node_names') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('public_virtual_ip'), - keystone_ip => hiera('public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # TODO: find a way to get an empty list from hiera - # TODO: when doing the composable midonet plugin, don't forget to - # set service_plugins to an empty array in Hiera. 
- class {'::neutron': - service_plugins => [] - } - - } - - # If the value of core plugin is set to 'midonet', - # skip all the ML2 configuration - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::password') - } - } - - if $enable_ceph { - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : - pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), - pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), - size => hiera('ceph::profile::params::osd_pool_default_size'), - } - } - - # Ceilometer - $ceilometer_backend = downcase(hiera('ceilometer_backend')) - case $ceilometer_backend { - /mysql/ : { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default : { - $ceilometer_database_connection = $ceilometer_mongodb_conn_string - } - } - include ::ceilometer - include ::ceilometer::config - include ::ceilometer::api - include ::ceilometer::agent::notification - include ::ceilometer::agent::central - include ::ceilometer::expirer - include ::ceilometer::collector - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - } - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - - # Aodh - class { '::aodh' : - database_connection => hiera('aodh_mysql_conn_string'), - } - include ::aodh::db::sync - include ::aodh::auth - include ::aodh::api - include ::aodh::wsgi::apache - include ::aodh::evaluator - include ::aodh::notifier - include ::aodh::listener - include ::aodh::client - - # Horizon - include ::apache::mod::remoteip - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' - } - $neutron_options = {'profile_support' => $_profile_support } - - $memcached_ipv6 = hiera('memcached_ipv6', false) - if $memcached_ipv6 { - $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') - } else { - $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') - } - - class { '::horizon': - cache_server_ip => $horizon_memcached_servers, - neutron_options => $neutron_options, - } - - # Gnocchi - $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string') - class { '::gnocchi': - database_connection => $gnocchi_database_connection, - } - include ::gnocchi::api - include ::gnocchi::wsgi::apache - include ::gnocchi::client - include ::gnocchi::db::sync - include ::gnocchi::storage - include ::gnocchi::metricd - include ::gnocchi::statsd - $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift')) - case $gnocchi_backend { - 'swift': { include ::gnocchi::storage::swift } - 'file': { include ::gnocchi::storage::file } - 'rbd': { include ::gnocchi::storage::ceph } - default: { fail('Unrecognized gnocchi_backend parameter.') } - } - - hiera_include('controller_classes') - -} #END STEP 4 - -if hiera('step') >= 5 { - $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - - if $nova_enable_db_purge { - include ::nova::cron::archive_deleted_rows - } -} #END STEP 5 + hiera_include('controller_classes', []) +} $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', 
hiera('step')]) package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 9f636ddd..d329d5fc 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -13,916 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. -Pcmk_resource <| |> { - tries => 10, - try_sleep => 3, +if hiera('step') >= 4 { + hiera_include('controller_classes', []) } -# TODO(jistr): use pcs resource provider instead of just no-ops -Service <| - tag == 'aodh-service' or - tag == 'ceilometer-service' or - tag == 'gnocchi-service' or - tag == 'neutron-service' or - tag == 'nova-service' -|> { - hasrestart => true, - restart => '/bin/true', - start => '/bin/true', - stop => '/bin/true', -} - -include ::tripleo::packages -include ::tripleo::firewall - -if $::hostname == downcase(hiera('bootstrap_nodeid')) { - $pacemaker_master = true - $sync_db = true -} else { - $pacemaker_master = false - $sync_db = false -} - -$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 -$enable_load_balancer = hiera('enable_load_balancer', true) - -# When to start and enable services which haven't been Pacemakerized -# FIXME: remove when we start all OpenStack services using Pacemaker -# (occurrences of this variable will be gradually replaced with false) -$non_pcmk_start = hiera('step') >= 5 - -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - - $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) - $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) - if $corosync_ipv6 { - $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' } - } else { - $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) } - } - class { '::pacemaker': - hacluster_pwd => hiera('hacluster_pwd'), - } -> - class { '::pacemaker::corosync': - cluster_members => $pacemaker_cluster_members, - setup_cluster => $pacemaker_master, - cluster_setup_extras => $cluster_setup_extras, - } - class { '::pacemaker::stonith': - disable => !$enable_fencing, - } - if $enable_fencing { - include ::tripleo::fencing - - # enable stonith after all Pacemaker resources have been created - Pcmk_resource<||> -> Class['tripleo::fencing'] - Pcmk_constraint<||> -> Class['tripleo::fencing'] - Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing'] - # enable stonith after all fencing devices have been created - Class['tripleo::fencing'] -> Class['pacemaker::stonith'] - } - - # FIXME(gfidente): sets 200secs as default start timeout op - # param; until we can use pcmk global defaults we'll still - # need to add it to every resource which redefines op params - Pacemaker::Resource::Service { - op_params => 'start timeout=200s stop timeout=200s', - } - - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::params - } - - # Galera - if str2bool(hiera('enable_galera', true)) { - $mysql_config_file = '/etc/my.cnf.d/galera.cnf' - } else { - $mysql_config_file = '/etc/my.cnf.d/server.cnf' - } - $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) - $galera_nodes_count = count(split($galera_nodes, ',')) - - # FIXME: due to 
https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we - # set bind-address to a hostname instead of an ip address; to move Mysql - # from internal_api on another network we'll have to customize both - # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap - $mysql_bind_host = hiera('mysql_bind_host') - $mysqld_options = { - 'mysqld' => { - 'skip-name-resolve' => '1', - 'binlog_format' => 'ROW', - 'default-storage-engine' => 'innodb', - 'innodb_autoinc_lock_mode' => '2', - 'innodb_locks_unsafe_for_binlog'=> '1', - 'query_cache_size' => '0', - 'query_cache_type' => '0', - 'bind-address' => $::hostname, - 'max_connections' => hiera('mysql_max_connections'), - 'open_files_limit' => '-1', - 'wsrep_on' => 'ON', - 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', - 'wsrep_cluster_name' => 'galera_cluster', - 'wsrep_cluster_address' => "gcomm://${galera_nodes}", - 'wsrep_slave_threads' => '1', - 'wsrep_certify_nonPK' => '1', - 'wsrep_max_ws_rows' => '131072', - 'wsrep_max_ws_size' => '1073741824', - 'wsrep_debug' => '0', - 'wsrep_convert_LOCK_to_trx' => '0', - 'wsrep_retry_autocommit' => '1', - 'wsrep_auto_increment_control' => '1', - 'wsrep_drupal_282555_workaround'=> '0', - 'wsrep_causal_reads' => '0', - 'wsrep_sst_method' => 'rsync', - 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;", - }, - } - - class { '::mysql::server': - create_root_user => false, - create_root_my_cnf => false, - config_file => $mysql_config_file, - override_options => $mysqld_options, - remove_default_accounts => $pacemaker_master, - service_manage => false, - service_enabled => false, - } - -} - -if hiera('step') >= 2 { - - - # NOTE(gfidente): the following vars are needed on all nodes so they - # need to stay out of pacemaker_master conditional. - # The addresses mangling will hopefully go away when we'll be able to - # configure the connection string via hostnames, until then, we need to pass - # the list of IPv6 addresses *with* port and without the brackets as 'members' - # argument for the 'mongodb_replset' resource. - if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } else { - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } - $mongodb_replset = hiera('mongodb::server::replset') - - if $pacemaker_master { - - include ::pacemaker::resource_defaults - - # Create an openstack-core dummy resource. 
See RHBZ 1290121 - pacemaker::resource::ocf { 'openstack-core': - ocf_agent_name => 'heartbeat:Dummy', - clone_params => true, - } - - pacemaker::resource::ocf { 'galera' : - ocf_agent_name => 'heartbeat:galera', - op_params => 'promote timeout=300s on-fail=block', - master_params => '', - meta_params => "master-max=${galera_nodes_count} ordered=true", - resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'", - require => Class['::mysql::server'], - before => Exec['galera-ready'], - } - } - $mysql_root_password = hiera('mysql::server::root_password') - $mysql_clustercheck_password = hiera('mysql_clustercheck_password') - # This step is to create a sysconfig clustercheck file with the root user and empty password - # on the first install only (because later on the clustercheck db user will be used) - # We are using exec and not file in order to not have duplicate definition errors in puppet - # when we later set the the file to contain the clustercheck data - exec { 'create-root-sysconfig-clustercheck': - command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", - unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', - } - - exec { 'galera-ready' : - command => '/usr/bin/clustercheck >/dev/null', - timeout => 30, - tries => 180, - try_sleep => 10, - environment => ['AVAILABLE_WHEN_READONLY=0'], - require => Exec['create-root-sysconfig-clustercheck'], - } - - xinetd::service { 'galera-monitor' : - port => '9200', - server => '/usr/bin/clustercheck', - per_source => 'UNLIMITED', - log_on_success => '', - log_on_failure => 'HOST', - flags => 'REUSE', - service_type => 'UNLISTED', - user => 'root', - group => 'root', - require => Exec['create-root-sysconfig-clustercheck'], - } - # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck - # to it in a later step. We do this only on one node as it will replicate on - # the other members. 
We also make sure that the permissions are the minimum necessary - if $pacemaker_master { - mysql_user { 'clustercheck@localhost': - ensure => 'present', - password_hash => mysql_password($mysql_clustercheck_password), - require => Exec['galera-ready'], - } - mysql_grant { 'clustercheck@localhost/*.*': - ensure => 'present', - options => ['GRANT'], - privileges => ['PROCESS'], - table => '*.*', - user => 'clustercheck@localhost', - } - } - - # Create all the database schemas - if $sync_db { - if downcase(hiera('ceilometer_backend')) == 'mysql' { - class { '::ceilometer::db::mysql': - require => Exec['galera-ready'], - } - } - - if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { - class { '::gnocchi::db::mysql': - require => Exec['galera-ready'], - } - } - - class { '::aodh::db::mysql': - require => Exec['galera-ready'], - } - } - - # Ceph - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) - - if $enable_ceph { - $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_initial_members => $mon_initial_members, - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::mon - } - - if str2bool(hiera('enable_ceph_storage', false)) { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] - } - - include ::ceph::conf - include ::ceph::profile::osd - } - - if str2bool(hiera('enable_external_ceph', false)) { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - } - - -} #END STEP 2 - -if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { - # At this stage we are guaranteed that the clustercheck db user exists - # so we switch the resource agent to use it. 
- file { '/etc/sysconfig/clustercheck' : - ensure => file, - mode => '0600', - owner => 'root', - group => 'root', - content => "MYSQL_USERNAME=clustercheck\n -MYSQL_PASSWORD='${mysql_clustercheck_password}'\n -MYSQL_HOST=localhost\n", - } - - $nova_ipv6 = hiera('nova::use_ipv6', false) - if $nova_ipv6 { - $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211') - } else { - $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211') - } - - class { '::nova' : - memcached_servers => $memcached_servers - } - - include ::nova::config - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => split(hiera('controller_node_names'), ',') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('public_virtual_ip'), - keystone_ip => hiera('public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # Configure Neutron - # TODO: when doing the composable midonet plugin, don't forget to - # set service_plugins to an empty array in Hiera. 
- class {'::neutron': - service_plugins => [] - } - - } - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::password') - } - } - - if $enable_ceph { - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : - pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), - pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), - size => hiera('ceph::profile::params::osd_pool_default_size'), - } - } - - # Ceilometer - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default: { - $mongo_node_string = join($mongo_node_ips_with_port, ',') - $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - } - include ::ceilometer - include ::ceilometer::config - class { '::ceilometer::api' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::notification' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::central' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::collector' : - manage_service => false, - enabled => false, - } - include ::ceilometer::expirer - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - sync_db => $sync_db, - } - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - - # httpd/apache and horizon - # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent - class { '::apache' : - service_enable => false, - # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
- } - include ::apache::mod::remoteip - include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' - } - $neutron_options = {'profile_support' => $_profile_support } - - $memcached_ipv6 = hiera('memcached_ipv6', false) - if $memcached_ipv6 { - $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') - } else { - $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') - } - - class { '::horizon': - cache_server_ip => $horizon_memcached_servers, - neutron_options => $neutron_options, - } - - # Aodh - class { '::aodh' : - database_connection => hiera('aodh_mysql_conn_string'), - } - include ::aodh::config - include ::aodh::auth - include ::aodh::client - include ::aodh::wsgi::apache - class { '::aodh::api': - manage_service => false, - enabled => false, - service_name => 'httpd', - } - class { '::aodh::evaluator': - manage_service => false, - enabled => false, - } - class { '::aodh::notifier': - manage_service => false, - enabled => false, - } - class { '::aodh::listener': - manage_service => false, - enabled => false, - } - - # Gnocchi - $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string') - include ::gnocchi::client - if $sync_db { - include ::gnocchi::db::sync - } - include ::gnocchi::storage - $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift')) - case $gnocchi_backend { - 'swift': { include ::gnocchi::storage::swift } - 'file': { include ::gnocchi::storage::file } - 'rbd': { include ::gnocchi::storage::ceph } - default: { fail('Unrecognized gnocchi_backend parameter.') } - } - class { '::gnocchi': - database_connection => $gnocchi_database_connection, - } - class { '::gnocchi::api' : - manage_service => false, - enabled => false, - service_name => 'httpd', - } - class { '::gnocchi::wsgi::apache' : - ssl => false, - } - class { '::gnocchi::metricd' : - manage_service => false, - enabled => false, - } - class { '::gnocchi::statsd' : - manage_service => false, - enabled => false, - } - - hiera_include('controller_classes') - -} #END STEP 4 - -if hiera('step') >= 5 { - # We now make sure that the root db password is set to a random one - # At first installation /root/.my.cnf will be empty and we connect without a root - # password. On second runs or updates /root/.my.cnf will already be populated - # with proper credentials. This step happens on every node because this sql - # statement does not automatically replicate across nodes. 
- exec { 'galera-set-root-password': - command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root", - } - file { '/root/.my.cnf' : - ensure => file, - mode => '0600', - owner => 'root', - group => 'root', - content => "[client] -user=root -password=\"${mysql_root_password}\" - -[mysql] -user=root -password=\"${mysql_root_password}\"", - require => Exec['galera-set-root-password'], - } - - $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - - if $nova_enable_db_purge { - include ::nova::cron::archive_deleted_rows - } - - if $pacemaker_master { - - pacemaker::constraint::base { 'openstack-core-then-httpd-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::apache::params::service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::apache::params::service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'galera-then-openstack-core-constraint': - constraint_type => 'order', - first_resource => 'galera-master', - second_resource => 'openstack-core-clone', - first_action => 'promote', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['galera'], - Pacemaker::Resource::Ocf['openstack-core']], - } - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - pacemaker::resource::service {'tomcat': - clone_params => 'interleave=true', - } - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat - pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::metadata_agent_service}-clone", - second_resource => 'tomcat-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], - Pacemaker::Resource::Service['tomcat']], - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - } - - # Nova - 
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::nova::params::consoleauth_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core': - source => "${::nova::params::consoleauth_service_name}-clone", - target => 'openstack-core-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::consoleauth_service_name}-clone", - second_resource => "${::nova::params::vncproxy_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], - } - pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation': - source => "${::nova::params::vncproxy_service_name}-clone", - target => "${::nova::params::consoleauth_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], - } - pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::vncproxy_service_name}-clone", - second_resource => "${::nova::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - Pacemaker::Resource::Service[$::nova::params::api_service_name]], - } - pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': - source => "${::nova::params::api_service_name}-clone", - target => "${::nova::params::vncproxy_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - Pacemaker::Resource::Service[$::nova::params::api_service_name]], - } - pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::api_service_name}-clone", - second_resource => "${::nova::params::scheduler_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], - } - pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation': - source => "${::nova::params::scheduler_service_name}-clone", - target => "${::nova::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], - } - pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::scheduler_service_name}-clone", - second_resource => "${::nova::params::conductor_service_name}-clone", - 
first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], - } - pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation': - source => "${::nova::params::conductor_service_name}-clone", - target => "${::nova::params::scheduler_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], - } - - # Ceilometer and Aodh - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - } - default: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } - } - pacemaker::resource::service { $::ceilometer::params::collector_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : - clone_params => 'interleave=true', - } - # Fedora doesn't know `require-all` parameter for constraints yet - if $::operatingsystem == 'Fedora' { - $redis_ceilometer_constraint_params = undef - $redis_aodh_constraint_params = undef - } else { - $redis_ceilometer_constraint_params = 'require-all=false' - $redis_aodh_constraint_params = 'require-all=false' - } - pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'promote', - second_action => 'start', - constraint_params => $redis_ceilometer_constraint_params, - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]], - } - pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "${::aodh::params::evaluator_service_name}-clone", - first_action => 'promote', - second_action => 'start', - constraint_params => $redis_aodh_constraint_params, - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]], - } - pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => 
[Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::agent_central_service_name}-clone", - second_resource => "${::ceilometer::params::collector_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } - pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::collector_service_name}-clone", - second_resource => "${::ceilometer::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation': - source => "${::ceilometer::params::api_service_name}-clone", - target => "${::ceilometer::params::collector_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } - # Aodh - pacemaker::resource::service { $::aodh::params::evaluator_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::aodh::params::notifier_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::aodh::params::listener_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint': - constraint_type => 'order', - first_resource => "${::aodh::params::evaluator_service_name}-clone", - second_resource => "${::aodh::params::notifier_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]], - } - pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation': - source => "${::aodh::params::notifier_service_name}-clone", - target => "${::aodh::params::evaluator_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]], - } - pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint': - constraint_type => 'order', - first_resource => "${::aodh::params::evaluator_service_name}-clone", - second_resource => "${::aodh::params::listener_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], - } - pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation': - source => "${::aodh::params::listener_service_name}-clone", - target => "${::aodh::params::evaluator_service_name}-clone", - score => 'INFINITY', - require => 
[Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], - } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => "${::mongodb::params::service_name}-clone", - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } - - # gnocchi - pacemaker::resource::service { $::gnocchi::params::metricd_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::gnocchi::params::statsd_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint': - constraint_type => 'order', - first_resource => "${::gnocchi::params::metricd_service_name}-clone", - second_resource => "${::gnocchi::params::statsd_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name], - Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], - } - pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation': - source => "${::gnocchi::params::statsd_service_name}-clone", - target => "${::gnocchi::params::metricd_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name], - Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], - } - - # Horizon and Keystone - pacemaker::resource::service { $::apache::params::service_name: - clone_params => 'interleave=true', - verify_on_create => true, - require => [File['/etc/keystone/ssl/certs/ca.pem'], - File['/etc/keystone/ssl/private/signing_key.pem'], - File['/etc/keystone/ssl/certs/signing_cert.pem']], - } - - #VSM - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - pacemaker::resource::ocf { 'vsm-p' : - ocf_agent_name => 'heartbeat:VirtualDomain', - resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml', - require => Class['n1k_vsm'], - meta_params => 'resource-stickiness=INFINITY', - } - if str2bool(hiera('n1k_vsm::pacemaker_control', true)) { - pacemaker::resource::ocf { 'vsm-s' : - ocf_agent_name => 'heartbeat:VirtualDomain', - resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml', - require => Class['n1k_vsm'], - meta_params => 'resource-stickiness=INFINITY', - } - pacemaker::constraint::colocation { 'vsm-colocation-contraint': - source => 'vsm-p', - target => 'vsm-s', - score => '-INFINITY', - require => [Pacemaker::Resource::Ocf['vsm-p'], - Pacemaker::Resource::Ocf['vsm-s']], - } - } - } - - } - -} #END STEP 5 - $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')]) package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 418c56e6..414a06ba 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -13,17 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-include ::tripleo::packages
-include ::tripleo::firewall
-
-if hiera('step') >= 1 {
-  create_resources(kmod::load, hiera('kernel_modules'), {})
-  create_resources(sysctl::value, hiera('sysctl_settings'), {})
-  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-}
-
 if hiera('step') >= 4 {
-  hiera_include('object_classes')
+  hiera_include('object_classes', [])
 }
 
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_object', hiera('step')])
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 7fc27d60..e1cdadd5 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -13,51 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-include ::tripleo::packages
-include ::tripleo::firewall
-
-if hiera('step') >= 1 {
-
-  create_resources(kmod::load, hiera('kernel_modules'), {})
-  create_resources(sysctl::value, hiera('sysctl_settings'), {})
-  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
 if hiera('step') >= 4 {
-
-  include ::cinder
-  include ::cinder::config
-  include ::cinder::glance
-  include ::cinder::volume
-  include ::cinder::setup_test_volume
-
-  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
-  if $cinder_enable_iscsi {
-    $cinder_iscsi_backend = 'tripleo_iscsi'
-
-    cinder::backend::iscsi { $cinder_iscsi_backend :
-      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
-      iscsi_helper     => hiera('cinder_iscsi_helper'),
-    }
-  }
-
-  $cinder_enabled_backends = any2array($cinder_iscsi_backend)
-  class { '::cinder::backends' :
-    enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
-  }
-
-  $snmpd_user = hiera('snmpd_readonly_user_name')
-  snmp::snmpv3_user { $snmpd_user:
-    authtype => 'MD5',
-    authpass => hiera('snmpd_readonly_user_password'),
-  }
-  class { '::snmp':
-    agentaddress => ['udp:161','udp6:[::1]:161'],
-    snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
-  }
-
-  hiera_include('volume_classes')
+  hiera_include('volume_classes', [])
 }
 
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_volume', hiera('step')])
diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp
deleted file mode 100644
index 2411ff84..00000000
--- a/puppet/manifests/ringbuilder.pp
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-define add_devices(
-  $swift_zones = '1'
-){
-
-  # NOTE(dprince): Swift zones is not yet properly wired into the Heat
-  # templates. See: https://review.openstack.org/#/c/97758/3
-  # For now our regex supports the r1z1-192.0.2.6:%PORT%/d1 syntax or the
-  # newer r1z%<controller or SwiftStorage><N>%-192.0.2.6:%PORT%/d1 syntax.
-  $server_num_or_device = regsubst($name,'^r1z%+[A-Za-z]*([0-9]+)%+-(.*)$','\1')
-  if (is_integer($server_num_or_device)) {
-    $server_num = $server_num_or_device
-  } else {
-    $server_num = '1'
-  }
-  # Function to place server in its zone. Zone is calculated by
-  # server number in heat template modulo the number of zones + 1.
-  $zone = (($server_num%$swift_zones) + 1)
-
-  # add the rings
-  $base = regsubst($name,'^r1.*-(.*)$','\1')
-  $object = regsubst($base, '%PORT%', '6000')
-  ring_object_device { $object:
-    zone   => '1',
-    weight => 100,
-  }
-  $container = regsubst($base, '%PORT%', '6001')
-  ring_container_device { $container:
-    zone   => '1',
-    weight => 100,
-  }
-  $account = regsubst($base, '%PORT%', '6002')
-  ring_account_device { $account:
-    zone   => '1',
-    weight => 100,
-  }
-}
-
-class tripleo::ringbuilder (
-  $swift_zones    = '1',
-  $devices        = '',
-  $build_ring     = true,
-  $part_power,
-  $replicas,
-  $min_part_hours,
-) {
-
-  validate_bool($build_ring)
-
-  if $build_ring {
-
-    $device_array = strip(split(rstrip($devices), ','))
-
-    # create local rings
-    swift::ringbuilder::create{ ['object', 'account', 'container']:
-      part_power     => $part_power,
-      replicas       => min(count($device_array), $replicas),
-      min_part_hours => $min_part_hours,
-    } ->
-
-    # add all other devices
-    add_devices {$device_array:
-      swift_zones => $swift_zones,
-    } ->
-
-    # rebalance
-    swift::ringbuilder::rebalance{ ['object', 'account', 'container']:
-      seed => 999,
-    }
-
-    Ring_object_device<| |> ~> Exec['rebalance_object']
-    Ring_object_device<| |> ~> Exec['rebalance_account']
-    Ring_object_device<| |> ~> Exec['rebalance_container']
-
-  }
-}
-
-if hiera('step') >= 2 {
-  # pre-install swift here so we can build rings
-  include ::swift
-}
-
-if hiera('step') >= 3 {
-  include ::tripleo::ringbuilder
-}
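
The deleted ringbuilder.pp held the one non-obvious computation in this diff: parsing Swift device strings and deriving a zone number. A trimmed sketch of that logic, lifted from the removed add_devices define, with an illustrative input worked through in comments:

    # Device names arrive either as 'r1z1-192.0.2.6:%PORT%/d1' or as the
    # newer 'r1z%controller2%-192.0.2.6:%PORT%/d1' form.
    $name        = 'r1z%controller2%-192.0.2.6:%PORT%/d1'  # illustrative input
    $swift_zones = '3'

    # Extract the server number from the newer syntax ('2' here). For the
    # older 'r1z1-...' form the pattern does not match, regsubst returns the
    # input unchanged, and the is_integer() test (stdlib) falls back to '1'.
    $server_num_or_device = regsubst($name,'^r1z%+[A-Za-z]*([0-9]+)%+-(.*)$','\1')
    if (is_integer($server_num_or_device)) {
      $server_num = $server_num_or_device
    } else {
      $server_num = '1'
    }
    # Zone is the server number modulo the zone count, plus one: 2 % 3 + 1 = 3.
    $zone = (($server_num%$swift_zones) + 1)

    # Strip the r1z... prefix, then substitute the per-ring port:
    # 6000 for object, 6001 for container, 6002 for account.
    $base   = regsubst($name,'^r1.*-(.*)$','\1')   # => '192.0.2.6:%PORT%/d1'
    $object = regsubst($base, '%PORT%', '6000')    # => '192.0.2.6:6000/d1'

Note that the removed code computed $zone but still passed zone => '1' to every ring_*_device resource, consistent with the NOTE(dprince) comment that zones were never fully wired into the Heat templates.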