Diffstat (limited to 'puppet')
-rw-r--r--  puppet/manifests/overcloud_compute.pp                 6
-rw-r--r--  puppet/manifests/overcloud_controller.pp             24
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  184
-rw-r--r--  puppet/services/database/mysql.yaml                  20
-rw-r--r--  puppet/services/pacemaker/database/mysql.yaml        20
5 files changed, 41 insertions(+), 213 deletions(-)
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 7255db83..ab7f846f 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -17,12 +17,6 @@ include ::tripleo::packages
include ::tripleo::firewall
if hiera('step') >= 4 {
-
- nova_config {
- 'DEFAULT/my_ip': value => $ipaddress;
- 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
- }
-
hiera_include('compute_classes')
}
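
With the hard-coded nova_config entries gone, equivalent nova.conf overrides have to arrive through hieradata instead. A minimal sketch of what that could look like, assuming the usual puppet-nova nova::config::nova_config hiera hook and the NovaComputeExtraConfig parameter (both are conventions of the surrounding codebase, not part of this diff):

    # Hypothetical environment file; the parameter and hiera key names
    # are assumptions, not shown in this commit.
    parameter_defaults:
      NovaComputeExtraConfig:
        nova::config::nova_config:
          DEFAULT/linuxnet_interface_driver:
            value: nova.network.linux_net.LinuxOVSInterfaceDriver
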
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index bc9a42a4..a635ef73 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -16,31 +16,7 @@
include ::tripleo::packages
include ::tripleo::firewall
-$enable_load_balancer = hiera('enable_load_balancer', true)
-
if hiera('step') >= 2 {
- if str2bool(hiera('enable_galera', true)) {
- $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
- } else {
- $mysql_config_file = '/etc/my.cnf.d/server.cnf'
- }
- # TODO Galera
- # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
- # set bind-address to a hostname instead of an ip address; to move Mysql
- # from internal_api to another network we'll have to customize both
- # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
- class { '::mysql::server':
- config_file => $mysql_config_file,
- override_options => {
- 'mysqld' => {
- 'bind-address' => $::hostname,
- 'max_connections' => hiera('mysql_max_connections'),
- 'open_files_limit' => '-1',
- },
- },
- remove_default_accounts => true,
- }
-
# FIXME: this should only occur on the bootstrap host (ditto for db syncs)
# Create all the database schemas
include ::aodh::db::mysql
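
The inline ::mysql::server class and its galera.cnf/server.cnf switch move out of this manifest and into the composable MySQL service added at the bottom of this diff. Since the removed code read its tunables from hiera (e.g. hiera('mysql_max_connections')), an operator override would still be plain hieradata, assuming the replacement profile keeps consulting the same key; a sketch:

    # Sketch only; assumes the new profile still reads mysql_max_connections
    parameter_defaults:
      ExtraConfig:
        mysql_max_connections: 4096
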
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 313aec6d..c0f219ca 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -15,8 +15,7 @@
# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
- tag == 'aodh-service' or
- tag == 'gnocchi-service'
+ tag == 'aodh-service'
|> {
hasrestart => true,
restart => '/bin/true',
@@ -35,162 +34,15 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$sync_db = false
}
-if hiera('step') >= 1 {
- # Galera
- if str2bool(hiera('enable_galera', true)) {
- $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
- } else {
- $mysql_config_file = '/etc/my.cnf.d/server.cnf'
- }
- $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
- $galera_nodes_count = count(split($galera_nodes, ','))
-
- # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
- # set bind-address to a hostname instead of an ip address; to move Mysql
- # from internal_api to another network we'll have to customize both
- # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
- $mysql_bind_host = hiera('mysql_bind_host')
- $mysqld_options = {
- 'mysqld' => {
- 'skip-name-resolve' => '1',
- 'binlog_format' => 'ROW',
- 'default-storage-engine' => 'innodb',
- 'innodb_autoinc_lock_mode' => '2',
- 'innodb_locks_unsafe_for_binlog'=> '1',
- 'query_cache_size' => '0',
- 'query_cache_type' => '0',
- 'bind-address' => $::hostname,
- 'max_connections' => hiera('mysql_max_connections'),
- 'open_files_limit' => '-1',
- 'wsrep_on' => 'ON',
- 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
- 'wsrep_cluster_name' => 'galera_cluster',
- 'wsrep_cluster_address' => "gcomm://${galera_nodes}",
- 'wsrep_slave_threads' => '1',
- 'wsrep_certify_nonPK' => '1',
- 'wsrep_max_ws_rows' => '131072',
- 'wsrep_max_ws_size' => '1073741824',
- 'wsrep_debug' => '0',
- 'wsrep_convert_LOCK_to_trx' => '0',
- 'wsrep_retry_autocommit' => '1',
- 'wsrep_auto_increment_control' => '1',
- 'wsrep_drupal_282555_workaround'=> '0',
- 'wsrep_causal_reads' => '0',
- 'wsrep_sst_method' => 'rsync',
- 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
- },
- }
-
- class { '::mysql::server':
- create_root_user => false,
- create_root_my_cnf => false,
- config_file => $mysql_config_file,
- override_options => $mysqld_options,
- remove_default_accounts => $pacemaker_master,
- service_manage => false,
- service_enabled => false,
- }
-
-}
-
if hiera('step') >= 2 {
-
-
- # NOTE(gfidente): the following vars are needed on all nodes so they
- # need to stay out of pacemaker_master conditional.
- # The addresses mangling will hopefully go away when we'll be able to
- # configure the connection string via hostnames, until then, we need to pass
- # the list of IPv6 addresses *with* port and without the brackets as 'members'
- # argument for the 'mongodb_replset' resource.
- if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
- $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
- } else {
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
- $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
- }
- $mongodb_replset = hiera('mongodb::server::replset')
-
if $pacemaker_master {
- pacemaker::resource::ocf { 'galera' :
- ocf_agent_name => 'heartbeat:galera',
- op_params => 'promote timeout=300s on-fail=block',
- master_params => '',
- meta_params => "master-max=${galera_nodes_count} ordered=true",
- resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
- require => Class['::mysql::server'],
- before => Exec['galera-ready'],
- }
-
- exec { 'galera-ready' :
- command => '/usr/bin/clustercheck >/dev/null',
- timeout => 30,
- tries => 180,
- try_sleep => 10,
- environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => Exec['create-root-sysconfig-clustercheck'],
- }
-
- # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
- # to it in a later step. We do this only on one node as it will replicate on
- # the other members. We also make sure that the permissions are the minimum necessary
- mysql_user { 'clustercheck@localhost':
- ensure => 'present',
- password_hash => mysql_password(hiera('mysql_clustercheck_password')),
- require => Exec['galera-ready'],
- }
-
- mysql_grant { 'clustercheck@localhost/*.*':
- ensure => 'present',
- options => ['GRANT'],
- privileges => ['PROCESS'],
- table => '*.*',
- user => 'clustercheck@localhost',
- }
-
class { '::aodh::db::mysql':
require => Exec['galera-ready'],
}
}
- # This step is to create a sysconfig clustercheck file with the root user and empty password
- # on the first install only (because later on the clustercheck db user will be used)
- # We are using exec and not file in order to not have duplicate definition errors in puppet
- # when we later set the file to contain the clustercheck data
- exec { 'create-root-sysconfig-clustercheck':
- command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
- unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
- }
-
- xinetd::service { 'galera-monitor' :
- port => '9200',
- server => '/usr/bin/clustercheck',
- per_source => 'UNLIMITED',
- log_on_success => '',
- log_on_failure => 'HOST',
- flags => 'REUSE',
- service_type => 'UNLISTED',
- user => 'root',
- group => 'root',
- require => Exec['create-root-sysconfig-clustercheck'],
- }
-
} #END STEP 2
if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
- # At this stage we are guaranteed that the clustercheck db user exists
- # so we switch the resource agent to use it.
- $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
- file { '/etc/sysconfig/clustercheck' :
- ensure => file,
- mode => '0600',
- owner => 'root',
- group => 'root',
- content => "MYSQL_USERNAME=clustercheck\n
-MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
-MYSQL_HOST=localhost\n",
- }
-
$nova_ipv6 = hiera('nova::use_ipv6', false)
if $nova_ipv6 {
$memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
@@ -235,32 +87,7 @@ MYSQL_HOST=localhost\n",
} #END STEP 4
if hiera('step') >= 5 {
- # We now make sure that the root db password is set to a random one
- # At first installation /root/.my.cnf will be empty and we connect without a root
- # password. On second runs or updates /root/.my.cnf will already be populated
- # with proper credentials. This step happens on every node because this sql
- # statement does not automatically replicate across nodes.
- $mysql_root_password = hiera('mysql::server::root_password')
- exec { 'galera-set-root-password':
- command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
- }
- file { '/root/.my.cnf' :
- ensure => file,
- mode => '0600',
- owner => 'root',
- group => 'root',
- content => "[client]
-user=root
-password=\"${mysql_root_password}\"
-
-[mysql]
-user=root
-password=\"${mysql_root_password}\"",
- require => Exec['galera-set-root-password'],
- }
-
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
-
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
}
@@ -276,15 +103,6 @@ password=\"${mysql_root_password}\"",
require => [Pacemaker::Resource::Service[$::apache::params::service_name],
Pacemaker::Resource::Ocf['openstack-core']],
}
- pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
- constraint_type => 'order',
- first_resource => 'galera-master',
- second_resource => 'openstack-core-clone',
- first_action => 'promote',
- second_action => 'start',
- require => [Pacemaker::Resource::Ocf['galera'],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
# Nova
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
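
Everything removed from this file (the Galera OCF resource, the galera-ready check, the clustercheck user and its xinetd monitor, the root-password handling, and the galera-then-openstack-core ordering constraint) is expected to be owned by tripleo::profile::pacemaker::database::mysql from now on. The step 5 logic that survives is still hiera-driven; for example, the nova archive cron guarded by nova_enable_db_purge could be switched off with a sketch like:

    # Sketch; nova_enable_db_purge is the hiera key read in the kept code above
    parameter_defaults:
      ExtraConfig:
        nova_enable_db_purge: false
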
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
new file mode 100644
index 00000000..3a23650b
--- /dev/null
+++ b/puppet/services/database/mysql.yaml
@@ -0,0 +1,20 @@
+heat_template_version: 2016-04-08
+
+description: >
+ MySQL service deployment using puppet
+
+parameters:
+ # Parameters not used: EndpointMap
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Service MySQL using composable services.
+ value:
+ config_settings:
+ step_config: |
+ include ::tripleo::profile::base::database::mysql
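
A new service template does nothing on its own until the resource registry maps a service name to it. A minimal sketch of the expected wiring, assuming the conventional OS::TripleO::Services::MySQL key (the key name does not appear in this diff):

    resource_registry:
      OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
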
diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml
new file mode 100644
index 00000000..1cecbfae
--- /dev/null
+++ b/puppet/services/pacemaker/database/mysql.yaml
@@ -0,0 +1,20 @@
+heat_template_version: 2016-04-08
+
+description: >
+ MySQL with Pacemaker service deployment using puppet
+
+parameters:
+ # Parameters not used: EndpointMap
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Service MySQL with Pacemaker using composable services.
+ value:
+ config_settings:
+ step_config: |
+ include ::tripleo::profile::pacemaker::database::mysql
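
HA deployments would point the same service name at this Pacemaker variant instead, typically from a pacemaker-specific environment file; again a sketch with an assumed registry key:

    resource_registry:
      OS::TripleO::Services::MySQL: puppet/services/pacemaker/database/mysql.yaml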