Diffstat (limited to 'puppet/manifests')
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp          |   53
-rw-r--r--  puppet/manifests/overcloud_compute.pp              |  265
-rw-r--r--  puppet/manifests/overcloud_controller.pp           |  495
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 1434
-rw-r--r--  puppet/manifests/overcloud_object.pp               |   59
-rw-r--r--  puppet/manifests/overcloud_volume.pp               |    6
-rw-r--r--  puppet/manifests/ringbuilder.pp                    |   11
7 files changed, 701 insertions, 1622 deletions
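Editorial note: the common thread in the diffs below is that host-level setup and service configuration move under explicit hiera('step') guards, so each node type converges in lock-step with the Heat-driven deployment steps, and kernel modules are loaded before sysctl values are applied. A minimal sketch of that pattern, using only the hiera keys that appear in the diffs themselves:

# Minimal sketch of the step-gated layout these manifests converge on.
# 'step' is fed in via hieradata by the deployment; kernel modules are
# loaded before sysctl so module-provided keys exist when sysctl runs.
if hiera('step') >= 1 {
  create_resources(kmod::load, hiera('kernel_modules'), {})
  create_resources(sysctl::value, hiera('sysctl_settings'), {})
  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
}

if hiera('step') >= 3 {
  # later steps layer service configuration on top of the prepared host
  include ::ceph::profile::client
}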
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 7444155c..e69353b0 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -16,30 +16,43 @@
 include ::tripleo::packages
 include ::tripleo::firewall
 
-create_resources(sysctl::value, hiera('sysctl_settings'), {})
+if hiera('step') >= 1 {
 
-if count(hiera('ntp::servers')) > 0 {
-  include ::ntp
+  create_resources(kmod::load, hiera('kernel_modules'), {})
+  create_resources(sysctl::value, hiera('sysctl_settings'), {})
+  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
+
+  include ::timezone
+
 }
 
-include ::timezone
+if hiera('step') >= 3 {
+  if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
+    exec { 'set selinux to permissive on boot':
+      command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
+      onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
+      path => ['/usr/bin', '/usr/sbin'],
+    }
+
+    exec { 'set selinux to permissive':
+      command => 'setenforce 0',
+      onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
+      path => ['/usr/bin', '/usr/sbin'],
+    } -> Class['ceph::profile::osd']
+  }
 
-if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
-  exec { 'set selinux to permissive on boot':
-    command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
-    onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
-    path => ['/usr/bin', '/usr/sbin'],
+  if str2bool(hiera('ceph_ipv6', false)) {
+    $mon_host = hiera('ceph_mon_host_v6')
+  } else {
+    $mon_host = hiera('ceph_mon_host')
+  }
+  class { '::ceph::profile::params':
+    mon_host => $mon_host,
   }
+  include ::ceph::conf
+  include ::ceph::profile::client
+  include ::ceph::profile::osd
 
-  exec { 'set selinux to permissive':
-    command => 'setenforce 0',
-    onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
-    path => ['/usr/bin', '/usr/sbin'],
-  } -> Class['ceph::profile::osd']
+  hiera_include('ceph_classes')
+  package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present}
 }
-
-include ::ceph::profile::client
-include ::ceph::profile::osd
-
-hiera_include('ceph_classes')
-package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present}
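Editorial note: the if/else that picks between ceph_mon_host and ceph_mon_host_v6 is repeated verbatim in the compute and controller manifests below. A Puppet selector expresses the same choice more compactly; a sketch using the same hiera keys as the diff (whether to factor it out this way is a style call, not what the patch does):

# Equivalent of the repeated if/else, as a selector (hiera keys as in the diff)
$mon_host = str2bool(hiera('ceph_ipv6', false)) ? {
  true    => hiera('ceph_mon_host_v6'),
  default => hiera('ceph_mon_host'),
}
class { '::ceph::profile::params':
  mon_host => $mon_host,
}
include ::ceph::conf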
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index bb3575cf..da84927e 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -16,124 +16,195 @@
 include ::tripleo::packages
 include ::tripleo::firewall
 
-create_resources(sysctl::value, hiera('sysctl_settings'), {})
-
-if count(hiera('ntp::servers')) > 0 {
-  include ::ntp
-}
+create_resources(kmod::load, hiera('kernel_modules'), { })
+create_resources(sysctl::value, hiera('sysctl_settings'), { })
+Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
 
 include ::timezone
 
-file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
-        '/etc/libvirt/qemu/networks/default.xml']:
-  ensure => absent,
-  before => Service['libvirt'],
-}
-# in case libvirt has been already running before the Puppet run, make
-# sure the default network is destroyed
-exec { 'libvirt-default-net-destroy':
-  command => '/usr/bin/virsh net-destroy default',
-  onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
-  before => Service['libvirt'],
-}
-
-include ::nova
-include ::nova::config
-include ::nova::compute
+if hiera('step') >= 4 {
 
-nova_config {
-  'DEFAULT/my_ip': value => $ipaddress;
-  'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
-}
+  file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
+          '/etc/libvirt/qemu/networks/default.xml']:
+    ensure => absent,
+    before => Service['libvirt'],
+  }
+  # in case libvirt has been already running before the Puppet run, make
+  # sure the default network is destroyed
+  exec { 'libvirt-default-net-destroy':
+    command => '/usr/bin/virsh net-destroy default',
+    onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
+    before => Service['libvirt'],
+  }
 
-$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
-$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
-if $rbd_ephemeral_storage or $rbd_persistent_storage {
-  include ::ceph::profile::client
+  # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
+  exec { 'reset-iscsi-initiator-name':
+    command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
+    onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset',
+  }->
 
-  $client_keys = hiera('ceph::profile::params::client_keys')
-  $client_user = join(['client.', hiera('ceph_client_user_name')])
-  class { '::nova::compute::rbd':
-    libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
+  file { '/etc/iscsi/.initiator_reset':
+    ensure => present,
   }
-}
 
-if hiera('cinder_enable_nfs_backend', false) {
-  if str2bool($::selinux) {
-    selboolean { 'virt_use_nfs':
-      value => on,
-      persistent => true,
-    } -> Package['nfs-utils']
+  $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
+  $rbd_persistent_storage = hiera('rbd_persistent_storage', false)
+  if $rbd_ephemeral_storage or $rbd_persistent_storage {
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
+    class { '::ceph::profile::params':
+      mon_host => $mon_host,
+    }
+    include ::ceph::conf
+    include ::ceph::profile::client
+
+    $client_keys = hiera('ceph::profile::params::client_keys')
+    $client_user = join(['client.', hiera('tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name')])
+    class { '::nova::compute::rbd':
+      libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
+    }
   }
 
-  package {'nfs-utils': } -> Service['nova-compute']
-}
+  if hiera('cinder_enable_nfs_backend', false) {
+    if str2bool($::selinux) {
+      selboolean { 'virt_use_nfs':
+        value => on,
+        persistent => true,
+      } -> Package['nfs-utils']
+    }
 
-include ::nova::compute::libvirt
-if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-  file {'/etc/libvirt/qemu.conf':
-    ensure => present,
-    content => hiera('midonet_libvirt_qemu_data')
+    package { 'nfs-utils': } -> Service['nova-compute']
   }
-}
-include ::nova::network::neutron
-include ::neutron
-
-# If the value of core plugin is set to 'nuage',
-# include nuage agent,
-# If the value of core plugin is set to 'midonet',
-# include midonet agent,
-# else use the default value of 'ml2'
-if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
-  include ::nuage::vrs
-  include ::nova::compute::neutron
-
-  class { '::nuage::metadataagent':
-    nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
-    nova_os_password => hiera('nova_password'),
-    nova_metadata_ip => hiera('nova_metadata_node_ips'),
-    nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
+
+  if str2bool(hiera('nova::use_ipv6', false)) {
+    $vncserver_listen = '::0'
+  } else {
+    $vncserver_listen = '0.0.0.0'
+  }
+
+  if $rbd_ephemeral_storage {
+    class { '::nova::compute::libvirt':
+      libvirt_disk_cachemodes => ['network=writeback'],
+      libvirt_hw_disk_discard => 'unmap',
+      vncserver_listen => $vncserver_listen,
+    }
+  } else {
+    class { '::nova::compute::libvirt' :
+      vncserver_listen => $vncserver_listen,
+    }
   }
-}
-elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
 
-  # TODO(devvesa) provide non-controller ips for these services
-  $zookeeper_node_ips = hiera('neutron_api_node_ips')
-  $cassandra_node_ips = hiera('neutron_api_node_ips')
+  nova_config {
+    'DEFAULT/my_ip': value => $ipaddress;
+    'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
+  }
 
-  class {'::tripleo::network::midonet::agent':
-    zookeeper_servers => $zookeeper_node_ips,
-    cassandra_seeds => $cassandra_node_ips
+  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+    file { '/etc/libvirt/qemu.conf':
+      ensure => present,
+      content => hiera('midonet_libvirt_qemu_data')
+    }
   }
-}
-else {
+  include ::nova::network::neutron
+  include ::neutron
+  include ::neutron::config
+
+  # If the value of core plugin is set to 'nuage',
+  # include nuage agent,
+  # If the value of core plugin is set to 'midonet',
+  # include midonet agent,
+  # else use the default value of 'ml2'
+  if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
+    include ::nuage::vrs
+    include ::nova::compute::neutron
+
+    class { '::nuage::metadataagent':
+      nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
+      nova_os_password => hiera('nova_password'),
+      nova_metadata_ip => hiera('nova_metadata_node_ips'),
+      nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
+    }
+  }
+  elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
 
-  include ::neutron::plugins::ml2
-  include ::neutron::agents::ml2::ovs
+    # TODO(devvesa) provide non-controller ips for these services
+    $zookeeper_node_ips = hiera('neutron_api_node_ips')
+    $cassandra_node_ips = hiera('neutron_api_node_ips')
 
-  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
-    class { '::neutron::agents::n1kv_vem':
-      n1kv_source => hiera('n1kv_vem_source', undef),
-      n1kv_version => hiera('n1kv_vem_version', undef),
+    class { '::tripleo::network::midonet::agent':
+      zookeeper_servers => $zookeeper_node_ips,
+      cassandra_seeds => $cassandra_node_ips
     }
   }
-}
+  elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
+
+    include ::contrail::vrouter
+    # NOTE: it's not possible to use this class without a functional
+    # contrail controller up and running
+    #class {'::contrail::vrouter::provision_vrouter':
+    #  require => Class['contrail::vrouter'],
+    #}
+  }
+  elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+    # forward all ipv4 traffic
+    # this is required for the vms to pass through the gateways public interface
+    sysctl::value { 'net.ipv4.ip_forward': value => '1' }
+
+    # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on
+    file { '/etc/sudoers.d/ifc_ctl_sudoers':
+      ensure => file,
+      owner => root,
+      group => root,
+      mode => '0440',
+      content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
+    }
+  }
+  else {
+
+    # NOTE: this code won't live in puppet-neutron until Neutron OVS agent
+    # can be gracefully restarted. See https://review.openstack.org/#/c/297211
+    # In the meantime, it's safe to restart the agent on each change in neutron.conf,
+    # because Puppet changes are supposed to be done during bootstrap and upgrades.
+    # Some resource managed by Neutron_config (like messaging and logging options) require
+    # a restart of OVS agent. This code does it.
+    # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code
+    # from here and fix it in puppet-neutron.
+    Neutron_config<||> ~> Service['neutron-ovs-agent-service']
+
+    include ::neutron::plugins::ml2
+    include ::neutron::agents::ml2::ovs
+
+    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+      class { '::neutron::agents::n1kv_vem':
+        n1kv_source => hiera('n1kv_vem_source', undef),
+        n1kv_version => hiera('n1kv_vem_version', undef),
+      }
+    }
+    if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+      include ::neutron::agents::bigswitch
+    }
+  }
 
-include ::ceilometer
-include ::ceilometer::config
-include ::ceilometer::agent::compute
-include ::ceilometer::agent::auth
+  include ::ceilometer
+  include ::ceilometer::config
+  include ::ceilometer::agent::compute
+  include ::ceilometer::agent::auth
 
-$snmpd_user = hiera('snmpd_readonly_user_name')
-snmp::snmpv3_user { $snmpd_user:
-  authtype => 'MD5',
-  authpass => hiera('snmpd_readonly_user_password'),
-}
-class { '::snmp':
-  agentaddress => ['udp:161','udp6:[::1]:161'],
-  snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
-}
+  $snmpd_user = hiera('snmpd_readonly_user_name')
+  snmp::snmpv3_user { $snmpd_user:
+    authtype => 'MD5',
+    authpass => hiera('snmpd_readonly_user_password'),
+  }
+  class { '::snmp':
+    agentaddress => ['udp:161','udp6:[::1]:161'],
+    snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
  }
 
-hiera_include('compute_classes')
-package_manifest{'/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present}
+  hiera_include('compute_classes')
+  package_manifest{ '/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present }
+
+}
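Editorial note: the initiator-name reset added above guards its exec with an onlyif test and then chains a marker file resource. The same run-once behaviour can be sketched with the exec's built-in creates guard instead; this is an illustration using the paths from the diff, not what the manifest actually does:

# Run-once sketch: the exec is skipped as soon as the marker file exists,
# and the chained file resource creates the marker on the first run.
exec { 'reset-iscsi-initiator-name':
  command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
  creates => '/etc/iscsi/.initiator_reset',
} ->
file { '/etc/iscsi/.initiator_reset':
  ensure => present,
}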
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 2ea9c60d..628856e6 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -20,61 +20,33 @@ $enable_load_balancer = hiera('enable_load_balancer', true)
 
 if hiera('step') >= 1 {
 
+  create_resources(kmod::load, hiera('kernel_modules'), {})
   create_resources(sysctl::value, hiera('sysctl_settings'), {})
-
-  $controller_node_ips = split(hiera('controller_node_ips'), ',')
-
-  if $enable_load_balancer {
-    class { '::tripleo::loadbalancer' :
-      controller_hosts => $controller_node_ips,
-      manage_vip => true,
-    }
-  }
+  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
 
 }
 
 if hiera('step') >= 2 {
 
-  if count(hiera('ntp::servers')) > 0 {
-    include ::ntp
-  }
-
   include ::timezone
 
   # MongoDB
   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
-    include ::mongodb::globals
-
-    include ::mongodb::server
-    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+    # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and
+    # without the brackets as 'members' argument for the 'mongodb_replset'
+    # resource.
+    if str2bool(hiera('mongodb::server::ipv6', false)) {
+      $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+      $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+      $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+    } else {
+      $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+      $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+    }
     $mongo_node_string = join($mongo_node_ips_with_port, ',')
 
     $mongodb_replset = hiera('mongodb::server::replset')
     $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-    if downcase(hiera('bootstrap_nodeid')) == $::hostname {
-      mongodb_replset { $mongodb_replset :
-        members => $mongo_node_ips_with_port,
-      }
-    }
-  }
-
-  # Redis
-  $redis_node_ips = hiera('redis_node_ips')
-  $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))
-
-  if $redis_master_hostname == $::hostname {
-    $slaveof = undef
-  } else {
-    $slaveof = "${redis_master_hostname} 6379"
-  }
-  class {'::redis' :
-    slaveof => $slaveof,
-  }
-
-  if count($redis_node_ips) > 1 {
-    Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
-    include ::redis::sentinel
-    include ::tripleo::redis_notification
   }
 
   if str2bool(hiera('enable_galera', true)) {
@@ -83,11 +55,15 @@ if hiera('step') >= 2 {
     $mysql_config_file = '/etc/my.cnf.d/server.cnf'
   }
   # TODO Galara
+  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
+  # set bind-address to a hostname instead of an ip address; to move Mysql
+  # from internal_api on another network we'll have to customize both
+  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
   class { '::mysql::server':
     config_file => $mysql_config_file,
     override_options => {
       'mysqld' => {
-        'bind-address' => hiera('mysql_bind_host'),
+        'bind-address' => $::hostname,
         'max_connections' => hiera('mysql_max_connections'),
         'open_files_limit' => '-1',
       },
@@ -97,45 +73,28 @@ if hiera('step') >= 2 {
 
   # FIXME: this should only occur on the bootstrap host (ditto for db syncs)
   # Create all the database schemas
-  include ::keystone::db::mysql
-  include ::glance::db::mysql
-  include ::nova::db::mysql
-  include ::neutron::db::mysql
-  include ::cinder::db::mysql
-  include ::heat::db::mysql
+  if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
+    include ::gnocchi::db::mysql
+  }
   if downcase(hiera('ceilometer_backend')) == 'mysql' {
     include ::ceilometer::db::mysql
   }
-
-  $rabbit_nodes = hiera('rabbit_node_ips')
-  if count($rabbit_nodes) > 1 {
-    class { '::rabbitmq':
-      config_cluster => true,
-      cluster_nodes => $rabbit_nodes,
-      tcp_keepalive => false,
-      config_kernel_variables => hiera('rabbitmq_kernel_variables'),
-      config_variables => hiera('rabbitmq_config_variables'),
-      environment_variables => hiera('rabbitmq_environment'),
-    }
-    rabbitmq_policy { 'ha-all@/':
-      pattern => '^(?!amq\.).*',
-      definition => {
-        'ha-mode' => 'all',
-      },
-    }
-  } else {
-    include ::rabbitmq
-  }
-
-  # pre-install swift here so we can build rings
-  include ::swift
+  include ::aodh::db::mysql
 
   $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
 
   if $enable_ceph {
+    $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
     class { '::ceph::profile::params':
-      mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+      mon_initial_members => $mon_initial_members,
+      mon_host => $mon_host,
     }
+    include ::ceph::conf
     include ::ceph::profile::mon
   }
 
@@ -154,83 +113,38 @@ if hiera('step') >= 2 {
     } -> Class['ceph::profile::osd']
   }
 
+    include ::ceph::conf
     include ::ceph::profile::osd
   }
 
   if str2bool(hiera('enable_external_ceph', false)) {
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
+    class { '::ceph::profile::params':
+      mon_host => $mon_host,
+    }
+    include ::ceph::conf
     include ::ceph::profile::client
   }
 
 } #END STEP 2
 
-if hiera('step') >= 3 {
-
-  include ::keystone
-  include ::keystone::roles::admin
-  include ::keystone::endpoint
-
-  #TODO: need a cleanup-keystone-tokens.sh solution here
-  keystone_config {
-    'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
-  }
-  file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
-    ensure => 'directory',
-    owner => 'keystone',
-    group => 'keystone',
-    require => Package['keystone'],
-  }
-  file { '/etc/keystone/ssl/certs/signing_cert.pem':
-    content => hiera('keystone_signing_certificate'),
-    owner => 'keystone',
-    group => 'keystone',
-    notify => Service['keystone'],
-    require => File['/etc/keystone/ssl/certs'],
-  }
-  file { '/etc/keystone/ssl/private/signing_key.pem':
-    content => hiera('keystone_signing_key'),
-    owner => 'keystone',
-    group => 'keystone',
-    notify => Service['keystone'],
-    require => File['/etc/keystone/ssl/private'],
-  }
-  file { '/etc/keystone/ssl/certs/ca.pem':
-    content => hiera('keystone_ca_certificate'),
-    owner => 'keystone',
-    group => 'keystone',
-    notify => Service['keystone'],
-    require => File['/etc/keystone/ssl/certs'],
-  }
-
-  $glance_backend = downcase(hiera('glance_backend', 'swift'))
-  case $glance_backend {
-      'swift': { $backend_store = 'glance.store.swift.Store' }
-      'file': { $backend_store = 'glance.store.filesystem.Store' }
-      'rbd': { $backend_store = 'glance.store.rbd.Store' }
-      default: { fail('Unrecognized glance_backend parameter.') }
-  }
-  $http_store = ['glance.store.http.Store']
-  $glance_store = concat($http_store, $backend_store)
+if hiera('step') >= 4 {
 
-  # TODO: notifications, scrubber, etc.
-  include ::glance
-  class { '::glance::api':
-    known_stores => $glance_store,
+  $nova_ipv6 = hiera('nova::use_ipv6', false)
+  if $nova_ipv6 {
+    $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
+  } else {
+    $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
   }
-  include ::glance::registry
-  include join(['::glance::backend::', $glance_backend])
 
   class { '::nova' :
-    memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
+    memcached_servers => $memcached_servers
   }
 
   include ::nova::config
-  include ::nova::api
-  include ::nova::cert
-  include ::nova::conductor
-  include ::nova::consoleauth
-  include ::nova::network::neutron
-  include ::nova::vncproxy
-  include ::nova::scheduler
-  include ::nova::scheduler::filter
 
   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
 
@@ -242,7 +156,8 @@ if hiera('step') >= 3 {
     if hiera('enable_zookeeper_on_controller') {
       class {'::tripleo::cluster::zookeeper':
         zookeeper_server_ips => $zookeeper_node_ips,
-        zookeeper_client_ip => $ipaddress,
+        # TODO: create a 'bind' hiera key for zookeeper
+        zookeeper_client_ip => hiera('neutron::bind_host'),
         zookeeper_hostnames => hiera('controller_node_names')
       }
     }
@@ -251,7 +166,8 @@ if hiera('step') >= 3 {
     if hiera('enable_cassandra_on_controller') {
      class {'::tripleo::cluster::cassandra':
         cassandra_servers => $cassandra_node_ips,
-        cassandra_ip => $ipaddress
+        # TODO: create a 'bind' hiera key for cassandra
+        cassandra_ip => hiera('neutron::bind_host'),
       }
     }
 
@@ -262,259 +178,43 @@ if hiera('step') >= 3 {
 
     class {'::tripleo::network::midonet::api':
       zookeeper_servers => $zookeeper_node_ips,
-      vip => $ipaddress,
-      keystone_ip => $ipaddress,
+      vip => hiera('public_virtual_ip'),
+      keystone_ip => hiera('public_virtual_ip'),
       keystone_admin_token => hiera('keystone::admin_token'),
-      bind_address => $ipaddress,
+      # TODO: create a 'bind' hiera key for api
+      bind_address => hiera('neutron::bind_host'),
       admin_password => hiera('admin_password')
     }
 
     # TODO: find a way to get an empty list from hiera
+    # TODO: when doing the composable midonet plugin, don't forget to
+    # set service_plugins to an empty array in Hiera.
     class {'::neutron':
       service_plugins => []
     }
 
   }
-  else {
-
-    # ML2 plugin
-    include ::neutron
-  }
-
-  include ::neutron::server
-  include ::neutron::server::notifications
-
-  # If the value of core plugin is set to 'nuage',
-  # include nuage core plugin, and it does not
-  # need the l3, dhcp and metadata agents
-  if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
-    include ::neutron::plugins::nuage
-  } else {
-    include ::neutron::agents::l3
-    include ::neutron::agents::dhcp
-    include ::neutron::agents::metadata
-
-    file { '/etc/neutron/dnsmasq-neutron.conf':
-      content => hiera('neutron_dnsmasq_options'),
-      owner => 'neutron',
-      group => 'neutron',
-      notify => Service['neutron-dhcp-service'],
-      require => Package['neutron'],
-    }
-
-    # If the value of core plugin is set to 'midonet',
-    # skip all the ML2 configuration
-    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
-      class {'::neutron::plugins::midonet':
-        midonet_api_ip => $ipaddress,
-        keystone_tenant => hiera('neutron::server::auth_tenant'),
-        keystone_password => hiera('neutron::server::auth_password')
-      }
-    } else {
-
-      include ::neutron::plugins::ml2
-      include ::neutron::agents::ml2::ovs
-
-      if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
-        include ::neutron::plugins::ml2::cisco::nexus1000v
-
-        class { '::neutron::agents::n1kv_vem':
-          n1kv_source => hiera('n1kv_vem_source', undef),
-          n1kv_version => hiera('n1kv_vem_version', undef),
-        }
-
-        class { '::n1k_vsm':
-          n1kv_source => hiera('n1kv_vsm_source', undef),
-          n1kv_version => hiera('n1kv_vsm_version', undef),
-          pacemaker_control => false,
-        }
-      }
-
-      if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
-        include ::neutron::plugins::ml2::cisco::ucsm
-      }
-      if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
-        include ::neutron::plugins::ml2::cisco::nexus
-        include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
-      }
-
-      if hiera('neutron_enable_bigswitch_ml2', false) {
-        include ::neutron::plugins::ml2::bigswitch::restproxy
-      }
-      neutron_l3_agent_config {
-        'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-      }
-      neutron_dhcp_agent_config {
-        'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-      }
-
-      Service['neutron-server'] -> Service['neutron-ovs-agent-service']
-    }
-
-    Service['neutron-server'] -> Service['neutron-dhcp-service']
-    Service['neutron-server'] -> Service['neutron-l3']
-    Service['neutron-server'] -> Service['neutron-metadata']
-  }
-
-  include ::cinder
-  include ::cinder::api
-  include ::cinder::glance
-  include ::cinder::scheduler
-  include ::cinder::volume
-  class { '::cinder::setup_test_volume':
-    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
-  }
-
-  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
-  if $cinder_enable_iscsi {
-    $cinder_iscsi_backend = 'tripleo_iscsi'
+  # If the value of core plugin is set to 'midonet',
+  # skip all the ML2 configuration
+  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
 
-    cinder::backend::iscsi { $cinder_iscsi_backend :
-      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
-      iscsi_helper => hiera('cinder_iscsi_helper'),
+    class {'::neutron::plugins::midonet':
+      midonet_api_ip => hiera('public_virtual_ip'),
+      keystone_tenant => hiera('neutron::server::auth_tenant'),
+      keystone_password => hiera('neutron::server::password')
     }
   }
 
   if $enable_ceph {
-
-    $ceph_pools = hiera('ceph_pools')
     ceph::pool { $ceph_pools :
       pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
      size => hiera('ceph::profile::params::osd_pool_default_size'),
     }
-
-    $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
-
-  } else {
-    $cinder_pool_requires = []
-  }
-
-  if hiera('cinder_enable_rbd_backend', false) {
-    $cinder_rbd_backend = 'tripleo_ceph'
-
-    cinder::backend::rbd { $cinder_rbd_backend :
-      rbd_pool => hiera('cinder_rbd_pool_name'),
-      rbd_user => hiera('ceph_client_user_name'),
-      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
-      require => $cinder_pool_requires,
-    }
-  }
-
-  if hiera('cinder_enable_eqlx_backend', false) {
-    $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
-
-    cinder_config {
-      "${cinder_eqlx_backend}/host": value => 'hostgroup';
-    }
-
-    cinder::backend::eqlx { $cinder_eqlx_backend :
-      volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
-      san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
-      san_login => hiera('cinder::backend::eqlx::san_login', undef),
-      san_password => hiera('cinder::backend::eqlx::san_password', undef),
-      san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
-      eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
-      eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef),
-      eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
-      eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
-      eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef),
-    }
-  }
-
-  if hiera('cinder_enable_dellsc_backend', false) {
-    $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
-
-    cinder_config {
-      "${cinder_dellsc_backend}/host": value => 'hostgroup';
-    }
-
-    cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
-      volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
-      san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
-      san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
-      san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
-      dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
-      iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
-      iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
-      dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
-      dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
-      dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
-    }
-  }
-
-  if hiera('cinder_enable_netapp_backend', false) {
-    $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
-
-    cinder_config {
-      "${cinder_netapp_backend}/host": value => 'hostgroup';
-    }
-
-    if hiera('cinder::backend::netapp::nfs_shares', undef) {
-      $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
-    }
-
-    cinder::backend::netapp { $cinder_netapp_backend :
-      netapp_login => hiera('cinder::backend::netapp::netapp_login', undef),
-      netapp_password => hiera('cinder::backend::netapp::netapp_password', undef),
-      netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
-      netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef),
-      netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
-      netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef),
-      netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
-      netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef),
-      netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef),
-      netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef),
-      netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef),
-      netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
-      nfs_shares => $cinder_netapp_nfs_shares,
-      nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef),
-      netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
-      netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
-      netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef),
-      netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
-      netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
-      netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
-    }
-  }
-
-  if hiera('cinder_enable_nfs_backend', false) {
-    $cinder_nfs_backend = 'tripleo_nfs'
-
-    if str2bool($::selinux) {
-      selboolean { 'virt_use_nfs':
-        value => on,
-        persistent => true,
-      } -> Package['nfs-utils']
-    }
-
-    package {'nfs-utils': } ->
-    cinder::backend::nfs { $cinder_nfs_backend :
-      nfs_servers => hiera('cinder_nfs_servers'),
-      nfs_mount_options => hiera('cinder_nfs_mount_options',''),
-      nfs_shares_config => '/etc/cinder/shares-nfs.conf',
-    }
-  }
-
-  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
-  class { '::cinder::backends' :
-    enabled_backends => $cinder_enabled_backends,
-  }
-
-  # swift proxy
-  include ::memcached
-  include ::swift::proxy
-  include ::swift::proxy::proxy_logging
-  include ::swift::proxy::healthcheck
-  include ::swift::proxy::cache
-  include ::swift::proxy::keystone
-  include ::swift::proxy::authtoken
-  include ::swift::proxy::staticweb
-  include ::swift::proxy::ratelimit
-  include ::swift::proxy::catch_errors
-  include ::swift::proxy::tempurl
-  include ::swift::proxy::formpost
-
   # swift storage
   if str2bool(hiera('enable_swift_storage', true)) {
     class { '::swift::storage::all':
@@ -551,20 +251,28 @@ if hiera('step') >= 3 {
   include ::ceilometer::expirer
   include ::ceilometer::collector
   include ::ceilometer::agent::auth
+  include ::ceilometer::dispatcher::gnocchi
   class { '::ceilometer::db' :
     database_connection => $ceilometer_database_connection,
   }
 
   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
 
-  # Heat
-  include ::heat
-  include ::heat::api
-  include ::heat::api_cfn
-  include ::heat::api_cloudwatch
-  include ::heat::engine
+  # Aodh
+  class { '::aodh' :
+    database_connection => hiera('aodh_mysql_conn_string'),
+  }
+  include ::aodh::db::sync
+  include ::aodh::auth
+  include ::aodh::api
+  include ::aodh::wsgi::apache
+  include ::aodh::evaluator
+  include ::aodh::notifier
+  include ::aodh::listener
+  include ::aodh::client
 
   # Horizon
+  include ::apache::mod::remoteip
   if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     $_profile_support = 'cisco'
   } else {
@@ -572,11 +280,38 @@ }
   }
   $neutron_options = {'profile_support' => $_profile_support }
 
+  $memcached_ipv6 = hiera('memcached_ipv6', false)
+  if $memcached_ipv6 {
+    $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+  } else {
+    $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+  }
+
   class { '::horizon':
-    cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+    cache_server_ip => $horizon_memcached_servers,
     neutron_options => $neutron_options,
   }
 
+  # Gnocchi
+  $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
+  class { '::gnocchi':
+    database_connection => $gnocchi_database_connection,
+  }
+  include ::gnocchi::api
+  include ::gnocchi::wsgi::apache
+  include ::gnocchi::client
+  include ::gnocchi::db::sync
+  include ::gnocchi::storage
+  include ::gnocchi::metricd
+  include ::gnocchi::statsd
+  $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
+  case $gnocchi_backend {
+    'swift': { include ::gnocchi::storage::swift }
+    'file': { include ::gnocchi::storage::file }
+    'rbd': { include ::gnocchi::storage::ceph }
+    default: { fail('Unrecognized gnocchi_backend parameter.') }
+  }
+
   $snmpd_user = hiera('snmpd_readonly_user_name')
   snmp::snmpv3_user { $snmpd_user:
     authtype => 'MD5',
@@ -584,28 +319,20 @@ if hiera('step') >= 3 {
   }
   class { '::snmp':
     agentaddress => ['udp:161','udp6:[::1]:161'],
-    snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+    snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
   }
 
   hiera_include('controller_classes')
-} #END STEP 3
+} #END STEP 4
 
-if hiera('step') >= 4 {
-  $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+if hiera('step') >= 5 {
   $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
-  $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
 
-  if $keystone_enable_db_purge {
-    include ::keystone::cron::token_flush
-  }
   if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
   }
-  if $cinder_enable_db_purge {
-    include ::cinder::cron::db_purge
-  }
-} #END STEP 4
+} #END STEP 5
 
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
 package_manifest{$package_manifest_name: ensure => present}
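Editorial note: for reference, the Ceilometer connection string assembled in step 2 of the controller manifest resolves to something like the following for a three-node replset. The addresses (RFC 5737 documentation range) and the replset name 'tripleo' are hypothetical placeholders:

# Worked example of the string built from $mongo_node_ips_with_port
$mongo_node_ips_with_port = suffix(['192.0.2.10', '192.0.2.11', '192.0.2.12'], ':27017')
$mongo_node_string = join($mongo_node_ips_with_port, ',')
# => mongodb://192.0.2.10:27017,192.0.2.11:27017,192.0.2.12:27017/ceilometer?replicaSet=tripleo
$ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=tripleo"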
::tripleo::firewall @@ -34,41 +48,31 @@ $enable_load_balancer = hiera('enable_load_balancer', true) # When to start and enable services which haven't been Pacemakerized # FIXME: remove when we start all OpenStack services using Pacemaker -# (occurences of this variable will be gradually replaced with false) -$non_pcmk_start = hiera('step') >= 4 +# (occurrences of this variable will be gradually replaced with false) +$non_pcmk_start = hiera('step') >= 5 if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> include ::timezone - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - - $controller_node_ips = split(hiera('controller_node_ips'), ',') - $controller_node_names = split(downcase(hiera('controller_node_names')), ',') - if $enable_load_balancer { - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - controller_hosts_names => $controller_node_names, - manage_vip => false, - mysql_clustercheck => true, - haproxy_service_manage => false, - } - } - $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) - user { 'hacluster': - ensure => present, - } -> + $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) + if $corosync_ipv6 { + $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' } + } else { + $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) } + } class { '::pacemaker': hacluster_pwd => hiera('hacluster_pwd'), } -> class { '::pacemaker::corosync': - cluster_members => $pacemaker_cluster_members, - setup_cluster => $pacemaker_master, + cluster_members => $pacemaker_cluster_members, + setup_cluster => $pacemaker_master, + cluster_setup_extras => $cluster_setup_extras, } class { '::pacemaker::stonith': disable => !$enable_fencing, @@ -76,6 +80,10 @@ if hiera('step') >= 1 { if $enable_fencing { include ::tripleo::fencing + # enable stonith after all Pacemaker resources have been created + Pcmk_resource<||> -> Class['tripleo::fencing'] + Pcmk_constraint<||> -> Class['tripleo::fencing'] + Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing'] # enable stonith after all fencing devices have been created Class['tripleo::fencing'] -> Class['pacemaker::stonith'] } @@ -87,42 +95,8 @@ if hiera('step') >= 1 { op_params => 'start timeout=200s stop timeout=200s', } - # Only configure RabbitMQ in this step, don't start it yet to - # avoid races where non-master nodes attempt to start without - # config (eg. 
binding on 0.0.0.0) - # The module ignores erlang_cookie if cluster_config is false - class { '::rabbitmq': - service_manage => false, - tcp_keepalive => false, - config_kernel_variables => hiera('rabbitmq_kernel_variables'), - config_variables => hiera('rabbitmq_config_variables'), - environment_variables => hiera('rabbitmq_environment'), - } -> - file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => file, - owner => 'rabbitmq', - group => 'rabbitmq', - mode => '0400', - content => hiera('rabbitmq::erlang_cookie'), - replace => true, - } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::globals - class { '::mongodb::server' : - service_manage => false, - } - } - - # Memcached - class {'::memcached' : - service_manage => false, - } - - # Redis - class { '::redis' : - service_manage => false, - notify_service => false, + include ::mongodb::params } # Galera @@ -134,6 +108,11 @@ if hiera('step') >= 1 { $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) $galera_nodes_count = count(split($galera_nodes, ',')) + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap + $mysql_bind_host = hiera('mysql_bind_host') $mysqld_options = { 'mysqld' => { 'skip-name-resolve' => '1', @@ -143,11 +122,13 @@ if hiera('step') >= 1 { 'innodb_locks_unsafe_for_binlog'=> '1', 'query_cache_size' => '0', 'query_cache_type' => '0', - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', + 'wsrep_on' => 'ON', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', 'wsrep_cluster_name' => 'galera_cluster', + 'wsrep_cluster_address' => "gcomm://${galera_nodes}", 'wsrep_slave_threads' => '1', 'wsrep_certify_nonPK' => '1', 'wsrep_max_ws_rows' => '131072', @@ -158,8 +139,8 @@ if hiera('step') >= 1 { 'wsrep_auto_increment_control' => '1', 'wsrep_drupal_282555_workaround'=> '0', 'wsrep_causal_reads' => '0', - 'wsrep_notify_cmd' => '', 'wsrep_sst_method' => 'rsync', + 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;", }, } @@ -177,197 +158,31 @@ if hiera('step') >= 1 { if hiera('step') >= 2 { + # NOTE(gfidente): the following vars are needed on all nodes so they - # need to stay out of pacemaker_master conditional - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + # need to stay out of pacemaker_master conditional. + # The addresses mangling will hopefully go away when we'll be able to + # configure the connection string via hostnames, until then, we need to pass + # the list of IPv6 addresses *with* port and without the brackets as 'members' + # argument for the 'mongodb_replset' resource. 
+ if str2bool(hiera('mongodb::server::ipv6', false)) { + $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') + $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') + $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') + } else { + $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') + } $mongodb_replset = hiera('mongodb::server::replset') if $pacemaker_master { - if $enable_load_balancer { - - include ::pacemaker::resource_defaults - - # FIXME: we should not have to access tripleo::loadbalancer class - # parameters here to configure pacemaker VIPs. The configuration - # of pacemaker VIPs could move into puppet-tripleo or we should - # make use of less specific hiera parameters here for the settings. - pacemaker::resource::service { 'haproxy': - clone_params => true, - } - - $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') - pacemaker::resource::ip { 'control_vip': - ip_address => $control_vip, - } - pacemaker::constraint::base { 'control_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${control_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['control_vip']], - } - pacemaker::constraint::colocation { 'control_vip-with-haproxy': - source => "ip-${control_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['control_vip']], - } - - $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') - if $public_vip and $public_vip != $control_vip { - pacemaker::resource::ip { 'public_vip': - ip_address => $public_vip, - } - pacemaker::constraint::base { 'public_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${public_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['public_vip']], - } - pacemaker::constraint::colocation { 'public_vip-with-haproxy': - source => "ip-${public_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['public_vip']], - } - } - - $redis_vip = hiera('redis_vip') - if $redis_vip and $redis_vip != $control_vip { - pacemaker::resource::ip { 'redis_vip': - ip_address => $redis_vip, - } - pacemaker::constraint::base { 'redis_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${redis_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['redis_vip']], - } - pacemaker::constraint::colocation { 'redis_vip-with-haproxy': - source => "ip-${redis_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['redis_vip']], - } - } - - $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') - if $internal_api_vip and $internal_api_vip != $control_vip { - pacemaker::resource::ip { 'internal_api_vip': - ip_address => $internal_api_vip, - } - 
pacemaker::constraint::base { 'internal_api_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${internal_api_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['internal_api_vip']], - } - pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy': - source => "ip-${internal_api_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['internal_api_vip']], - } - } - - $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') - if $storage_vip and $storage_vip != $control_vip { - pacemaker::resource::ip { 'storage_vip': - ip_address => $storage_vip, - } - pacemaker::constraint::base { 'storage_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${storage_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_vip']], - } - pacemaker::constraint::colocation { 'storage_vip-with-haproxy': - source => "ip-${storage_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_vip']], - } - } - - $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') - if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip { - pacemaker::resource::ip { 'storage_mgmt_vip': - ip_address => $storage_mgmt_vip, - } - pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${storage_mgmt_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_mgmt_vip']], - } - pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy': - source => "ip-${storage_mgmt_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_mgmt_vip']], - } - } + include ::pacemaker::resource_defaults - } - - pacemaker::resource::service { $::memcached::params::service_name : - clone_params => 'interleave=true', - require => Class['::memcached'], - } - - pacemaker::resource::ocf { 'rabbitmq': - ocf_agent_name => 'heartbeat:rabbitmq-cluster', - resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', - clone_params => 'ordered=true interleave=true', - require => Class['::rabbitmq'], - } - - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - pacemaker::resource::service { $::mongodb::params::service_name : - op_params => 'start timeout=370s stop timeout=200s', - clone_params => true, - require => Class['::mongodb::server'], - } - # NOTE (spredzy) : The replset can only be run - # once all the nodes have joined the cluster. - mongodb_conn_validator { $mongo_node_ips_with_port : - timeout => '600', - require => Pacemaker::Resource::Service[$::mongodb::params::service_name], - before => Mongodb_replset[$mongodb_replset], - } - mongodb_replset { $mongodb_replset : - members => $mongo_node_ips_with_port, - } + # Create an openstack-core dummy resource. 
See RHBZ 1290121 + pacemaker::resource::ocf { 'openstack-core': + ocf_agent_name => 'heartbeat:Dummy', + clone_params => true, } pacemaker::resource::ocf { 'galera' : @@ -379,15 +194,16 @@ if hiera('step') >= 2 { require => Class['::mysql::server'], before => Exec['galera-ready'], } - - pacemaker::resource::ocf { 'redis': - ocf_agent_name => 'heartbeat:redis', - master_params => '', - meta_params => 'notify=true ordered=true interleave=true', - resource_params => 'wait_last_known_master=true', - require => Class['::redis'], - } - + } + $mysql_root_password = hiera('mysql::server::root_password') + $mysql_clustercheck_password = hiera('mysql_clustercheck_password') + # This step is to create a sysconfig clustercheck file with the root user and empty password + # on the first install only (because later on the clustercheck db user will be used) + # We are using exec and not file in order to not have duplicate definition errors in puppet + # when we later set the the file to contain the clustercheck data + exec { 'create-root-sysconfig-clustercheck': + command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", + unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', } exec { 'galera-ready' : @@ -396,14 +212,7 @@ if hiera('step') >= 2 { tries => 180, try_sleep => 10, environment => ['AVAILABLE_WHEN_READONLY=0'], - require => File['/etc/sysconfig/clustercheck'], - } - - file { '/etc/sysconfig/clustercheck' : - ensure => file, - content => "MYSQL_USERNAME=root\n -MYSQL_PASSWORD=''\n -MYSQL_HOST=localhost\n", + require => Exec['create-root-sysconfig-clustercheck'], } xinetd::service { 'galera-monitor' : @@ -416,47 +225,60 @@ MYSQL_HOST=localhost\n", service_type => 'UNLISTED', user => 'root', group => 'root', - require => File['/etc/sysconfig/clustercheck'], + require => Exec['create-root-sysconfig-clustercheck'], } - - # Create all the database schemas - if $sync_db { - class { '::keystone::db::mysql': - require => Exec['galera-ready'], - } - class { '::glance::db::mysql': - require => Exec['galera-ready'], - } - class { '::nova::db::mysql': - require => Exec['galera-ready'], - } - class { '::neutron::db::mysql': - require => Exec['galera-ready'], - } - class { '::cinder::db::mysql': - require => Exec['galera-ready'], + # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck + # to it in a later step. We do this only on one node as it will replicate on + # the other members. 
We also make sure that the permissions are the minimum necessary + if $pacemaker_master { + mysql_user { 'clustercheck@localhost': + ensure => 'present', + password_hash => mysql_password($mysql_clustercheck_password), + require => Exec['galera-ready'], } - class { '::heat::db::mysql': - require => Exec['galera-ready'], + mysql_grant { 'clustercheck@localhost/*.*': + ensure => 'present', + options => ['GRANT'], + privileges => ['PROCESS'], + table => '*.*', + user => 'clustercheck@localhost', } + } + # Create all the database schemas + if $sync_db { if downcase(hiera('ceilometer_backend')) == 'mysql' { class { '::ceilometer::db::mysql': require => Exec['galera-ready'], } } - } - # pre-install swift here so we can build rings - include ::swift + if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { + class { '::gnocchi::db::mysql': + require => Exec['galera-ready'], + } + } + + class { '::aodh::db::mysql': + require => Exec['galera-ready'], + } + } # Ceph $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) if $enable_ceph { + $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } class { '::ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')), + mon_initial_members => $mon_initial_members, + mon_host => $mon_host, } + include ::ceph::conf include ::ceph::profile::mon } @@ -475,125 +297,52 @@ MYSQL_HOST=localhost\n", } -> Class['ceph::profile::osd'] } + include ::ceph::conf include ::ceph::profile::osd } if str2bool(hiera('enable_external_ceph', false)) { + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } + include ::ceph::conf include ::ceph::profile::client } } #END STEP 2 -if hiera('step') >= 3 { - - class { '::keystone': - sync_db => $sync_db, - manage_service => false, - enabled => false, - } - - #TODO: need a cleanup-keystone-tokens.sh solution here - keystone_config { - 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; - } - file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]: - ensure => 'directory', - owner => 'keystone', - group => 'keystone', - require => Package['keystone'], - } - file { '/etc/keystone/ssl/certs/signing_cert.pem': - content => hiera('keystone_signing_certificate'), - owner => 'keystone', - group => 'keystone', - notify => Service['keystone'], - require => File['/etc/keystone/ssl/certs'], - } - file { '/etc/keystone/ssl/private/signing_key.pem': - content => hiera('keystone_signing_key'), - owner => 'keystone', - group => 'keystone', - notify => Service['keystone'], - require => File['/etc/keystone/ssl/private'], - } - file { '/etc/keystone/ssl/certs/ca.pem': - content => hiera('keystone_ca_certificate'), - owner => 'keystone', - group => 'keystone', - notify => Service['keystone'], - require => File['/etc/keystone/ssl/certs'], - } - - $glance_backend = downcase(hiera('glance_backend', 'swift')) - case $glance_backend { - 'swift': { $backend_store = 'glance.store.swift.Store' } - 'file': { $backend_store = 'glance.store.filesystem.Store' } - 'rbd': { $backend_store = 'glance.store.rbd.Store' } - default: { fail('Unrecognized glance_backend parameter.') } - } - $http_store = ['glance.store.http.Store'] - $glance_store = concat($http_store, 
$backend_store) - - if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) { - $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"' - pacemaker::resource::filesystem { 'glance-fs': - device => hiera('glance_file_pcmk_device'), - directory => hiera('glance_file_pcmk_directory'), - fstype => hiera('glance_file_pcmk_fstype'), - fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','), - clone_params => '', - } +if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { + # At this stage we are guaranteed that the clustercheck db user exists + # so we switch the resource agent to use it. + file { '/etc/sysconfig/clustercheck' : + ensure => file, + mode => '0600', + owner => 'root', + group => 'root', + content => "MYSQL_USERNAME=clustercheck\n +MYSQL_PASSWORD='${mysql_clustercheck_password}'\n +MYSQL_HOST=localhost\n", } - # TODO: notifications, scrubber, etc. - include ::glance - class { '::glance::api': - known_stores => $glance_store, - manage_service => false, - enabled => false, - } - class { '::glance::registry' : - sync_db => $sync_db, - manage_service => false, - enabled => false, + $nova_ipv6 = hiera('nova::use_ipv6', false) + if $nova_ipv6 { + $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211') + } else { + $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211') } - include join(['::glance::backend::', $glance_backend]) class { '::nova' : - memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), + memcached_servers => $memcached_servers } include ::nova::config - class { '::nova::api' : - sync_db => $sync_db, - manage_service => false, - enabled => false, - } - class { '::nova::cert' : - manage_service => false, - enabled => false, - } - class { '::nova::conductor' : - manage_service => false, - enabled => false, - } - class { '::nova::consoleauth' : - manage_service => false, - enabled => false, - } - class { '::nova::vncproxy' : - manage_service => false, - enabled => false, - } - include ::nova::scheduler::filter - class { '::nova::scheduler' : - manage_service => false, - enabled => false, - } - include ::nova::network::neutron - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { # TODO(devvesa) provide non-controller ips for these services @@ -604,8 +353,9 @@ if hiera('step') >= 3 { if hiera('enable_zookeeper_on_controller') { class {'::tripleo::cluster::zookeeper': zookeeper_server_ips => $zookeeper_node_ips, - zookeeper_client_ip => $ipaddress, - zookeeper_hostnames => hiera('controller_node_names') + # TODO: create a 'bind' hiera key for zookeeper + zookeeper_client_ip => hiera('neutron::bind_host'), + zookeeper_hostnames => split(hiera('controller_node_names'), ',') } } @@ -613,7 +363,8 @@ if hiera('step') >= 3 { if hiera('enable_cassandra_on_controller') { class {'::tripleo::cluster::cassandra': cassandra_servers => $cassandra_node_ips, - cassandra_ip => $ipaddress + # TODO: create a 'bind' hiera key for cassandra + cassandra_ip => hiera('neutron::bind_host'), } } @@ -623,274 +374,41 @@ if hiera('step') >= 3 { } class {'::tripleo::network::midonet::api': - zookeeper_servers => hiera('neutron_api_node_ips'), - vip => $public_vip, - keystone_ip => $public_vip, + zookeeper_servers => $zookeeper_node_ips, + vip => hiera('public_virtual_ip'), + keystone_ip => hiera('public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), - bind_address => $ipaddress, + # TODO: create a 'bind' hiera key for api + bind_address => hiera('neutron::bind_host'), 
admin_password => hiera('admin_password') } # Configure Neutron + # TODO: when doing the composable midonet plugin, don't forget to + # set service_plugins to an empty array in Hiera. class {'::neutron': service_plugins => [] } } - else { - # Neutron class definitions - include ::neutron - } - class { '::neutron::server' : - sync_db => $sync_db, - manage_service => false, - enabled => false, - } - include ::neutron::server::notifications - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::neutron::plugins::nuage - } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': - midonet_api_ip => $public_vip, + midonet_api_ip => hiera('public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::auth_password') - } - } - if hiera('neutron::enable_dhcp_agent',true) { - class { '::neutron::agents::dhcp' : - manage_service => false, - enabled => false, - } - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], - } - } - if hiera('neutron::enable_l3_agent',true) { - class { '::neutron::agents::l3' : - manage_service => false, - enabled => false, - } - } - if hiera('neutron::enable_metadata_agent',true) { - class { '::neutron::agents::metadata': - manage_service => false, - enabled => false, - } - } - include ::neutron::plugins::ml2 - class { '::neutron::agents::ml2::ovs': - manage_service => false, - enabled => false, - } - - if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } - if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus - include ::neutron::plugins::ml2::cisco::type_nexus_vxlan - } - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus1000v - - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), - } - - class { '::n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), - } - } - - if hiera('neutron_enable_bigswitch_ml2', false) { - include ::neutron::plugins::ml2::bigswitch::restproxy - } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - - include ::cinder - class { '::cinder::api': - sync_db => $sync_db, - manage_service => false, - enabled => false, - } - class { '::cinder::scheduler' : - manage_service => false, - enabled => false, - } - class { '::cinder::volume' : - manage_service => false, - enabled => false, - } - include ::cinder::glance - class { '::cinder::setup_test_volume': - size => join([hiera('cinder_lvm_loop_device_size'), 'M']), - } - - $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) - if $cinder_enable_iscsi { - $cinder_iscsi_backend = 'tripleo_iscsi' - - cinder::backend::iscsi { $cinder_iscsi_backend : - iscsi_ip_address => hiera('cinder_iscsi_ip_address'), - iscsi_helper => hiera('cinder_iscsi_helper'), + keystone_password => hiera('neutron::server::password') } } if $enable_ceph { - $ceph_pools = hiera('ceph_pools') 
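      # Pool creation is keyed off $ceph_pools; the PG counts and replica size
      # are resolved from the ceph::profile::params hiera values used just below.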
ceph::pool { $ceph_pools : pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), size => hiera('ceph::profile::params::osd_pool_default_size'), } - - $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] - - } else { - $cinder_pool_requires = [] - } - - if hiera('cinder_enable_rbd_backend', false) { - $cinder_rbd_backend = 'tripleo_ceph' - - cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => hiera('cinder_rbd_pool_name'), - rbd_user => hiera('ceph_client_user_name'), - rbd_secret_uuid => hiera('ceph::profile::params::fsid'), - require => $cinder_pool_requires, - } - } - - if hiera('cinder_enable_eqlx_backend', false) { - $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - - cinder_config { - "${cinder_eqlx_backend}/host": value => 'hostgroup'; - } - - cinder::backend::eqlx { $cinder_eqlx_backend : - volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), - san_ip => hiera('cinder::backend::eqlx::san_ip', undef), - san_login => hiera('cinder::backend::eqlx::san_login', undef), - san_password => hiera('cinder::backend::eqlx::san_password', undef), - san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), - eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef), - eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), - eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), - eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), - } - } - - if hiera('cinder_enable_dellsc_backend', false) { - $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - - cinder_config { - "${cinder_dellsc_backend}/host": value => 'hostgroup'; - } - - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : - volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), - san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), - san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef), - san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef), - dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), - iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), - iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), - dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), - dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), - } - } - - if hiera('cinder_enable_netapp_backend', false) { - $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - - cinder_config { - "${cinder_netapp_backend}/host": value => 'hostgroup'; - } - - if hiera('cinder::backend::netapp::nfs_shares', undef) { - $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') - } - - cinder::backend::netapp { $cinder_netapp_backend : - netapp_login => hiera('cinder::backend::netapp::netapp_login', undef), - netapp_password => hiera('cinder::backend::netapp::netapp_password', undef), - netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef), - netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef), - 
netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef), - netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef), - netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef), - netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef), - netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef), - netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef), - netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef), - netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef), - nfs_shares => $cinder_netapp_nfs_shares, - nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef), - netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef), - netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef), - netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef), - netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef), - netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef), - netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef), - } } - if hiera('cinder_enable_nfs_backend', false) { - $cinder_nfs_backend = 'tripleo_nfs' - - if str2bool($::selinux) { - selboolean { 'virt_use_nfs': - value => on, - persistent => true, - } -> Package['nfs-utils'] - } - - package { 'nfs-utils': } -> - cinder::backend::nfs { $cinder_nfs_backend: - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options',''), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', - } - } - - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) - class { '::cinder::backends' : - enabled_backends => $cinder_enabled_backends, - } - - # swift proxy - class { '::swift::proxy' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - include ::swift::proxy::proxy_logging - include ::swift::proxy::healthcheck - include ::swift::proxy::cache - include ::swift::proxy::keystone - include ::swift::proxy::authtoken - include ::swift::proxy::staticweb - include ::swift::proxy::ratelimit - include ::swift::proxy::catch_errors - include ::swift::proxy::tempurl - include ::swift::proxy::formpost - # swift storage if str2bool(hiera('enable_swift_storage', true)) { class {'::swift::storage::all': @@ -955,46 +473,95 @@ if hiera('step') >= 3 { sync_db => $sync_db, } include ::ceilometer::agent::auth + include ::ceilometer::dispatcher::gnocchi Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - # Heat - class { '::heat' : - sync_db => $sync_db, + # httpd/apache and horizon + # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent + class { '::apache' : + service_enable => false, + # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
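+      # With service_enable => false, Puppet only configures httpd here;
+      # pacemaker is expected to start it via the cloned
+      # $::apache::params::service_name resource declared below
+      # (see "# Horizon and Keystone").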
+ } + include ::apache::mod::remoteip + include ::apache::mod::status + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + $_profile_support = 'cisco' + } else { + $_profile_support = 'None' + } + $neutron_options = {'profile_support' => $_profile_support } + + $memcached_ipv6 = hiera('memcached_ipv6', false) + if $memcached_ipv6 { + $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') + } else { + $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') + } + + class { '::horizon': + cache_server_ip => $horizon_memcached_servers, + neutron_options => $neutron_options, + } + + # Aodh + class { '::aodh' : + database_connection => hiera('aodh_mysql_conn_string'), } - class { '::heat::api' : + include ::aodh::config + include ::aodh::auth + include ::aodh::client + include ::aodh::wsgi::apache + class { '::aodh::api': manage_service => false, enabled => false, + service_name => 'httpd', } - class { '::heat::api_cfn' : + class { '::aodh::evaluator': manage_service => false, enabled => false, } - class { '::heat::api_cloudwatch' : + class { '::aodh::notifier': manage_service => false, enabled => false, } - class { '::heat::engine' : + class { '::aodh::listener': manage_service => false, enabled => false, } - # httpd/apache and horizon - # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent - class { '::apache' : - service_enable => false, - # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? + # Gnocchi + $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string') + include ::gnocchi::client + if $sync_db { + include ::gnocchi::db::sync } - include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' + include ::gnocchi::storage + $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift')) + case $gnocchi_backend { + 'swift': { include ::gnocchi::storage::swift } + 'file': { include ::gnocchi::storage::file } + 'rbd': { include ::gnocchi::storage::ceph } + default: { fail('Unrecognized gnocchi_backend parameter.') } } - $neutron_options = {'profile_support' => $_profile_support } - class { '::horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - neutron_options => $neutron_options, + class { '::gnocchi': + database_connection => $gnocchi_database_connection, + } + class { '::gnocchi::api' : + manage_service => false, + enabled => false, + service_name => 'httpd', + } + class { '::gnocchi::wsgi::apache' : + ssl => false, + } + class { '::gnocchi::metricd' : + manage_service => false, + enabled => false, + } + class { '::gnocchi::statsd' : + manage_service => false, + enabled => false, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -1004,336 +571,69 @@ if hiera('step') >= 3 { } class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 
'linkUpDownNotifications yes' ], } hiera_include('controller_classes') -} #END STEP 3 +} #END STEP 4 -if hiera('step') >= 4 { - $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true) - $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) +if hiera('step') >= 5 { + # We now make sure that the root db password is set to a random one + # At first installation /root/.my.cnf will be empty and we connect without a root + # password. On second runs or updates /root/.my.cnf will already be populated + # with proper credentials. This step happens on every node because this sql + # statement does not automatically replicate across nodes. + exec { 'galera-set-root-password': + command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root", + } + file { '/root/.my.cnf' : + ensure => file, + mode => '0600', + owner => 'root', + group => 'root', + content => "[client] +user=root +password=\"${mysql_root_password}\" - if $keystone_enable_db_purge { - include ::keystone::cron::token_flush +[mysql] +user=root +password=\"${mysql_root_password}\"", + require => Exec['galera-set-root-password'], } + + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) + if $nova_enable_db_purge { include ::nova::cron::archive_deleted_rows } - if $cinder_enable_db_purge { - include ::cinder::cron::db_purge - } if $pacemaker_master { - # Keystone - pacemaker::resource::service { $::keystone::params::service_name : - clone_params => 'interleave=true', - verify_on_create => true, - require => [File['/etc/keystone/ssl/certs/ca.pem'], - File['/etc/keystone/ssl/private/signing_key.pem'], - File['/etc/keystone/ssl/certs/signing_cert.pem']], - } - if $enable_load_balancer { - pacemaker::constraint::base { 'haproxy-then-keystone-constraint': - constraint_type => 'order', - first_resource => 'haproxy-clone', - second_resource => "${::keystone::params::service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], - } - } - pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': - constraint_type => 'order', - first_resource => 'rabbitmq-clone', - second_resource => "${::keystone::params::service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['rabbitmq'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], - } - pacemaker::constraint::base { 'memcached-then-keystone-constraint': + pacemaker::constraint::base { 'openstack-core-then-httpd-constraint': constraint_type => 'order', - first_resource => 'memcached-clone', - second_resource => "${::keystone::params::service_name}-clone", + first_resource => 'openstack-core-clone', + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service['memcached'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + require => [Pacemaker::Resource::Service[$::apache::params::service_name], + Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::constraint::base { 'galera-then-keystone-constraint': + pacemaker::constraint::base { 'galera-then-openstack-core-constraint': constraint_type => 'order', first_resource => 
'galera-master', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => 'openstack-core-clone', first_action => 'promote', second_action => 'start', require => [Pacemaker::Resource::Ocf['galera'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], - } - - # Cinder - pacemaker::resource::service { $::cinder::params::api_service : - clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } - pacemaker::resource::service { $::cinder::params::scheduler_service : - clone_params => 'interleave=true', + Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::resource::service { $::cinder::params::volume_service : } - pacemaker::constraint::base { 'keystone-then-cinder-api-constraint': - constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::cinder::params::api_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::keystone::params::service_name]], - } - pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': - constraint_type => 'order', - first_resource => "${::cinder::params::api_service}-clone", - second_resource => "${::cinder::params::scheduler_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], - } - pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation': - source => "${::cinder::params::scheduler_service}-clone", - target => "${::cinder::params::api_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], - } - pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint': - constraint_type => 'order', - first_resource => "${::cinder::params::scheduler_service}-clone", - second_resource => $::cinder::params::volume_service, - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], - } - pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation': - source => $::cinder::params::volume_service, - target => "${::cinder::params::scheduler_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], - } - - # Glance - pacemaker::resource::service { $::glance::params::registry_service_name : - clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } - pacemaker::resource::service { $::glance::params::api_service_name : - clone_params => 'interleave=true', - } - - pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': - constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::glance::params::registry_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - 
Pacemaker::Resource::Service[$::keystone::params::service_name]], - } - pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': - constraint_type => 'order', - first_resource => "${::glance::params::registry_service_name}-clone", - second_resource => "${::glance::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::glance::params::api_service_name]], - } - pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation': - source => "${::glance::params::api_service_name}-clone", - target => "${::glance::params::registry_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::glance::params::api_service_name]], - } - - if hiera('step') == 4 { - # Neutron - # NOTE(gfidente): Neutron will try to populate the database with some data - # as soon as neutron-server is started; to avoid races we want to make this - # happen only on one node, before normal Pacemaker initialization - # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec - # will try to start the service while it's already started by Pacemaker - # It would result to a deployment failure since systemd would return 1 to Puppet - # and the overcloud would fail to deploy (6 would be returned). - # This conditional prevents from a race condition during the deployment. - # https://bugzilla.redhat.com/show_bug.cgi?id=1290582 - exec { 'neutron-server-systemd-start-sleep' : - command => 'systemctl start neutron-server && /usr/bin/sleep 5', - path => '/usr/bin', - unless => '/sbin/pcs resource show neutron-server', - } -> - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name] - } - } else { - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name] - } - } - if hiera('neutron::enable_l3_agent', true) { - pacemaker::resource::service { $::neutron::params::l3_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_dhcp_agent', true) { - pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_ovs_agent', true) { - pacemaker::resource::service { $::neutron::params::ovs_agent_service: - clone_params => 'interleave=true', - } - } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { pacemaker::resource::service {'tomcat': clone_params => 'interleave=true', } } - if hiera('neutron::enable_metadata_agent', true) { - pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_ovs_agent', true) { - pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: - ocf_agent_name => 'neutron:OVSCleanup', - clone_params => 'interleave=true', - } - pacemaker::resource::ocf { 'neutron-netns-cleanup': - ocf_agent_name => 'neutron:NetnsCleanup', - clone_params => 'interleave=true', - } - - # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent - pacemaker::constraint::base { 
'neutron-ovs-cleanup-to-netns-cleanup-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::ovs_cleanup_service}-clone", - second_resource => 'neutron-netns-cleanup-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], - } - pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': - source => 'neutron-netns-cleanup-clone', - target => "${::neutron::params::ovs_cleanup_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], - } - pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => 'neutron-netns-cleanup-clone', - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': - source => "${::neutron::params::ovs_agent_service}-clone", - target => 'neutron-netns-cleanup-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - } - - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::neutron::params::server_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - if hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - } - if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::ovs_agent_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - - } - pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': - source => "${::neutron::params::dhcp_agent_service}-clone", - target => "${::neutron::params::ovs_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - } - if hiera('neutron::enable_dhcp_agent',true) and 
hiera('l3_agent_service',true) { - pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::l3_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': - source => "${::neutron::params::l3_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] - } - } - if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) { - pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::l3_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] - } - pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::l3_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] - } - } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': @@ -1373,36 +673,21 @@ if hiera('step') >= 4 { } # Nova - pacemaker::resource::service { $::nova::params::api_service_name : - clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - } - pacemaker::resource::service { $::nova::params::conductor_service_name : - clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - } - pacemaker::resource::service { $::nova::params::consoleauth_service_name : - clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } - pacemaker::resource::service { $::nova::params::vncproxy_service_name : - clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - } - pacemaker::resource::service { $::nova::params::scheduler_service_name : - clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - } - pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => 'openstack-core-clone', second_resource => "${::nova::params::consoleauth_service_name}-clone", first_action => 'start', second_action => 'start', require => 
[Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Ocf['openstack-core']], + } + pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core': + source => "${::nova::params::consoleauth_service_name}-clone", + target => 'openstack-core-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Ocf['openstack-core']], } pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': constraint_type => 'order', @@ -1469,19 +754,19 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } - # Ceilometer + # Ceilometer and Aodh case downcase(hiera('ceilometer_backend')) { /mysql/: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Ocf['openstack-core'], } } default: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: clone_params => 'interleave=true', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], + require => [Pacemaker::Resource::Ocf['openstack-core'], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], } } } @@ -1494,16 +779,13 @@ if hiera('step') >= 4 { pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } - pacemaker::resource::ocf { 'delay' : - ocf_agent_name => 'heartbeat:Delay', - clone_params => 'interleave=true', - resource_params => 'startdelay=10', - } # Fedora doesn't know `require-all` parameter for constraints yet if $::operatingsystem == 'Fedora' { $redis_ceilometer_constraint_params = undef + $redis_aodh_constraint_params = undef } else { $redis_ceilometer_constraint_params = 'require-all=false' + $redis_aodh_constraint_params = 'require-all=false' } pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': constraint_type => 'order', @@ -1515,14 +797,33 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Ocf['redis'], Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]], } + pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint': + constraint_type => 'order', + first_resource => 'redis-master', + second_resource => "${::aodh::params::evaluator_service_name}-clone", + first_action => 'promote', + second_action => 'start', + constraint_params => $redis_aodh_constraint_params, + require => [Pacemaker::Resource::Ocf['redis'], + Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]], + } pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => 'openstack-core-clone', second_resource => "${::ceilometer::params::agent_central_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - 
Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Ocf['openstack-core']], + } + pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint': + constraint_type => 'order', + first_resource => 'openstack-core-clone', + second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], + Pacemaker::Resource::Ocf['openstack-core']], } pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': constraint_type => 'order', @@ -1549,21 +850,47 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], } - pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint': + # Aodh + pacemaker::resource::service { $::aodh::params::evaluator_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::aodh::params::notifier_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::aodh::params::listener_service_name : + clone_params => 'interleave=true', + } + pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint': constraint_type => 'order', - first_resource => "${::ceilometer::params::api_service_name}-clone", - second_resource => 'delay-clone', + first_resource => "${::aodh::params::evaluator_service_name}-clone", + second_resource => "${::aodh::params::notifier_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], + require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], + Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]], } - pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation': - source => 'delay-clone', - target => "${::ceilometer::params::api_service_name}-clone", + pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation': + source => "${::aodh::params::notifier_service_name}-clone", + target => "${::aodh::params::evaluator_service_name}-clone", score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], + require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], + Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]], + } + pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint': + constraint_type => 'order', + first_resource => "${::aodh::params::evaluator_service_name}-clone", + second_resource => "${::aodh::params::listener_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], + Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], + } + pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation': + source => "${::aodh::params::listener_service_name}-clone", + target => "${::aodh::params::evaluator_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], + 
Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], } if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': @@ -1577,89 +904,37 @@ if hiera('step') >= 4 { } } - # Heat - pacemaker::resource::service { $::heat::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name : + # gnocchi + pacemaker::resource::service { $::gnocchi::params::metricd_service_name : clone_params => 'interleave=true', } - pacemaker::resource::service { $::heat::params::api_cfn_service_name : + pacemaker::resource::service { $::gnocchi::params::statsd_service_name : clone_params => 'interleave=true', } - pacemaker::resource::service { $::heat::params::engine_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'keystone-then-heat-api-constraint': + pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::heat::params::api_service_name}-clone", + first_resource => "${::gnocchi::params::metricd_service_name}-clone", + second_resource => "${::gnocchi::params::statsd_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name], + Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], } - pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_service_name}-clone", - second_resource => "${::heat::params::api_cfn_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], - } - pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': - source => "${::heat::params::api_cfn_service_name}-clone", - target => "${::heat::params::api_service_name}-clone", + pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation': + source => "${::gnocchi::params::statsd_service_name}-clone", + target => "${::gnocchi::params::metricd_service_name}-clone", score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], - Pacemaker::Resource::Service[$::heat::params::api_service_name]], - } - pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_cfn_service_name}-clone", - second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], - } - pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation': - source => "${::heat::params::api_cloudwatch_service_name}-clone", - target => "${::heat::params::api_cfn_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], - 
Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]], - } - pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_cloudwatch_service_name}-clone", - second_resource => "${::heat::params::engine_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], - } - pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation': - source => "${::heat::params::engine_service_name}-clone", - target => "${::heat::params::api_cloudwatch_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], - } - pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - second_resource => "${::heat::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]], + require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name], + Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], } - # Horizon - pacemaker::resource::service { $::horizon::params::http_service: - clone_params => 'interleave=true', + # Horizon and Keystone + pacemaker::resource::service { $::apache::params::service_name: + clone_params => 'interleave=true', + verify_on_create => true, + require => [File['/etc/keystone/ssl/certs/ca.pem'], + File['/etc/keystone/ssl/private/signing_key.pem'], + File['/etc/keystone/ssl/certs/signing_cert.pem']], } #VSM @@ -1689,21 +964,6 @@ if hiera('step') >= 4 { } -} #END STEP 4 - -if hiera('step') >= 5 { - - if $pacemaker_master { - - class {'::keystone::roles::admin' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class {'::keystone::endpoint' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } - - } - } #END STEP 5 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')]) diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 63ac396e..4fca8f5f 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,40 +16,43 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(sysctl::value, hiera('sysctl_settings'), {}) +if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) + create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone + include ::timezone -include ::swift -class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), } -if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], + +if hiera('step') >= 4 { + class { '::swift::storage::all': + mount_check => 
str2bool(hiera('swift_mount_check')), + } + if(!defined(File['/srv/node'])) { + file { '/srv/node': + ensure => directory, + owner => 'swift', + group => 'swift', + require => Package['openstack-swift'], + } } -} -$swift_components = ['account', 'container', 'object'] -swift::storage::filter::recon { $swift_components : } -swift::storage::filter::healthcheck { $swift_components : } + $swift_components = ['account', 'container', 'object'] + swift::storage::filter::recon { $swift_components : } + swift::storage::filter::healthcheck { $swift_components : } -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + $snmpd_user = hiera('snmpd_readonly_user_name') + snmp::snmpv3_user { $snmpd_user: + authtype => 'MD5', + authpass => hiera('snmpd_readonly_user_password'), + } + class { '::snmp': + agentaddress => ['udp:161','udp6:[::1]:161'], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + } + + hiera_include('object_classes') } -hiera_include('object_classes') package_manifest{'/var/lib/tripleo/installed-packages/overcloud_object': ensure => present} diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 5a69725a..134dc43b 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp @@ -42,7 +44,7 @@ if $cinder_enable_iscsi { $cinder_enabled_backends = any2array($cinder_iscsi_backend) class { '::cinder::backends' : - enabled_backends => $cinder_enabled_backends, + enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -52,7 +54,7 @@ snmp::snmpv3_user { $snmpd_user: } class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } hiera_include('volume_classes') diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index 2d880d33..2411ff84 
100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -include ::tripleo::packages - define add_devices( $swift_zones = '1' ){ @@ -91,6 +89,11 @@ class tripleo::ringbuilder ( } } -include ::tripleo::ringbuilder +if hiera('step') >= 2 { + # pre-install swift here so we can build rings + include ::swift +} -package_manifest{'/var/lib/tripleo/installed-packages/ringbuilder': ensure => present} +if hiera('step') >= 3 { + include ::tripleo::ringbuilder +}
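Taken together, the manifests above all converge on the same convention: resources are gated on hiera('step') so that a single manifest can be applied repeatedly as the deployment advances through numbered steps, with host-level prerequisites first and cluster-dependent services later. A minimal sketch of that convention, assuming only constructs already used in this diff (the ::my_profile class name is a hypothetical placeholder, not a name from these manifests):

    # Hypothetical sketch of the step-gating pattern used throughout these manifests.
    if hiera('step') >= 1 {
      # Host prerequisites: load kernel modules before applying sysctl settings.
      create_resources(kmod::load, hiera('kernel_modules'), {})
      create_resources(sysctl::value, hiera('sysctl_settings'), {})
      Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
    }
    if hiera('step') >= 3 {
      # Services that depend on shared state (rings, databases) start later.
      include ::my_profile  # hypothetical placeholder for a service class
    }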