Diffstat (limited to 'puppet')
-rw-r--r--  puppet/all-nodes-config.yaml                         23
-rw-r--r--  puppet/cinder-storage-puppet.yaml                     7
-rw-r--r--  puppet/compute-puppet.yaml                            9
-rw-r--r--  puppet/controller-puppet.yaml                        16
-rw-r--r--  puppet/hieradata/common.yaml                          8
-rw-r--r--  puppet/hieradata/controller.yaml                      1
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp             2
-rw-r--r--  puppet/manifests/overcloud_compute.pp                 2
-rw-r--r--  puppet/manifests/overcloud_controller.pp              4
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  285
-rw-r--r--  puppet/manifests/overcloud_object.pp                  2
-rw-r--r--  puppet/manifests/overcloud_volume.pp                  2
12 files changed, 218 insertions(+), 143 deletions(-)
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index b703d5cb..963835e9 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -42,10 +42,6 @@ resources:
- list_join:
- "\n"
- {get_param: ceph_storage_hosts}
- sysctl:
- net.ipv4.tcp_keepalive_time: 5
- net.ipv4.tcp_keepalive_probes: 5
- net.ipv4.tcp_keepalive_intvl: 1
hiera:
datafiles:
all_nodes:
@@ -62,10 +58,14 @@ resources:
list_join:
- ','
- {get_param: controller_names}
- rabbit_node_ips:
- list_join:
- - ','
- - {get_param: controller_ips}
+ rabbit_node_ips: &rabbit_nodes_array
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: controller_ips}
mongo_node_ips:
list_join:
- ','
@@ -78,6 +78,13 @@ resources:
list_join:
- ','
- {get_param: controller_ips}
+ # NOTE(gfidente): interpolation with %{} in the
+ # hieradata file can't be used as it returns string
+ ceilometer::rabbit_hosts: *rabbit_nodes_array
+ cinder::rabbit_hosts: *rabbit_nodes_array
+ heat::rabbit_hosts: *rabbit_nodes_array
+ neutron::rabbit_hosts: *rabbit_nodes_array
+ nova::rabbit_hosts: *rabbit_nodes_array
outputs:
config_id:
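
The &rabbit_nodes_array anchor above renders rabbit_node_ips (and the five *::rabbit_hosts keys that alias it) as a bracketed, quoted list rather than a plain comma-separated string, so the manifests later in this diff can read it back as an array without calling split(). A minimal sketch of the consuming side, with placeholder addresses that are not part of this change:

    # Sketch only; 192.0.2.x are hypothetical controller IPs.
    $rabbit_nodes = hiera('rabbit_node_ips')    # e.g. ['192.0.2.10', '192.0.2.11']
    if count($rabbit_nodes) > 1 {
      notice("clustering RabbitMQ across ${join($rabbit_nodes, ', ')}")
    }
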
diff --git a/puppet/cinder-storage-puppet.yaml b/puppet/cinder-storage-puppet.yaml
index adf667ff..e373c928 100644
--- a/puppet/cinder-storage-puppet.yaml
+++ b/puppet/cinder-storage-puppet.yaml
@@ -149,11 +149,6 @@ resources:
size: {get_param: CinderLVMLoopDeviceSize}
cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
cinder_iscsi_helper: {get_param: CinderISCSIHelper}
- rabbit_hosts:
- str_replace:
- template: '["host"]'
- params:
- host: {get_param: VirtualIP}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
@@ -176,6 +171,7 @@ resources:
hierarchy:
- heat_config_%{::deploy_config_name}
- volume
+ - all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
datafiles:
@@ -191,7 +187,6 @@ resources:
cinder::setup_test_volume::size: {get_input: cinder_lvm_loop_device_size}
cinder_iscsi_helper: {get_input: cinder_iscsi_helper}
cinder::database_connection: {get_input: cinder_dsn}
- cinder::rabbit_hosts: {get_input: rabbit_hosts}
cinder::rabbit_userid: {get_input: rabbit_username}
cinder::rabbit_password: {get_input: rabbit_password}
cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
diff --git a/puppet/compute-puppet.yaml b/puppet/compute-puppet.yaml
index b5eb2781..b9106e60 100644
--- a/puppet/compute-puppet.yaml
+++ b/puppet/compute-puppet.yaml
@@ -291,6 +291,7 @@ resources:
- compute
- ceph_cluster # provided by CephClusterConfig
- ceph
+ - all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
datafiles:
@@ -304,7 +305,6 @@ resources:
nova::compute::vncserver_proxyclient_address: local-ipv4
mapped_data:
nova::debug: {get_input: debug}
- nova::rabbit_hosts: {get_input: rabbit_hosts}
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -316,7 +316,6 @@ resources:
nova_enable_rbd_backend: {get_input: nova_enable_rbd_backend}
nova_password: {get_input: nova_password}
ceilometer::debug: {get_input: debug}
- ceilometer::rabbit_hosts: {get_input: rabbit_hosts}
ceilometer::rabbit_userid: {get_input: rabbit_username}
ceilometer::rabbit_password: {get_input: rabbit_password}
ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -329,7 +328,6 @@ resources:
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
nova::glance_api_servers: {get_input: glance_api_servers}
neutron::debug: {get_input: debug}
- neutron::rabbit_hosts: {get_input: rabbit_hosts}
neutron::rabbit_password: {get_input: rabbit_password}
neutron::rabbit_user: {get_input: rabbit_user}
neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -418,11 +416,6 @@ resources:
- {get_param: NeutronHost}
- ':35357/v2.0'
admin_password: {get_param: AdminPassword}
- rabbit_hosts:
- str_replace:
- template: '["host"]'
- params:
- host: {get_param: RabbitHost}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
diff --git a/puppet/controller-puppet.yaml b/puppet/controller-puppet.yaml
index 73a866f4..bcbaf56e 100644
--- a/puppet/controller-puppet.yaml
+++ b/puppet/controller-puppet.yaml
@@ -226,10 +226,6 @@ parameters:
type: string
hidden: true
default: '' # Has to be here because of the ignored empty value bug
- MysqlClustercheckPassword:
- type: string
- hidden: true
- default: '' # Has to be here because of the ignored empty value bug
NeutronBridgeMappings:
description: >
The OVS logical->physical bridge mappings to use. See the Neutron
@@ -556,7 +552,6 @@ resources:
enable_swift_storage: {get_param: EnableSwiftStorage}
mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
mysql_root_password: {get_param: MysqlRootPassword}
- mysql_clustercheck_password: {get_param: MysqlClustercheckPassword}
mysql_cluster_name:
str_replace:
template: tripleo-CLUSTER
@@ -616,11 +611,6 @@ resources:
- {get_param: VirtualIP}
- '/nova'
pcsd_password: {get_param: PcsdPassword}
- rabbit_hosts:
- str_replace:
- template: '["host"]'
- params:
- host: {get_param: VirtualIP}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
rabbit_cookie: {get_param: RabbitCookie}
@@ -702,7 +692,6 @@ resources:
cinder::api::auth_uri: {get_input: keystone_auth_uri}
cinder::api::identity_uri: {get_input: keystone_identity_uri}
cinder::api::bind_host: {get_input: controller_host}
- cinder::rabbit_hosts: {get_input: rabbit_hosts}
cinder::rabbit_userid: {get_input: rabbit_username}
cinder::rabbit_password: {get_input: rabbit_password}
cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -742,7 +731,6 @@ resources:
heat::engine::heat_metadata_server_url: {get_input: heat.metadata_server_url}
heat::engine::heat_waitcondition_server_url: {get_input: heat.waitcondition_server_url}
heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key}
- heat::rabbit_hosts: {get_input: rabbit_hosts}
heat::rabbit_userid: {get_input: rabbit_username}
heat::rabbit_password: {get_input: rabbit_password}
heat::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -778,11 +766,9 @@ resources:
mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size}
mysql::server::root_password: {get_input: mysql_root_password}
mysql_cluster_name: {get_input: mysql_cluster_name}
- mysql_clustercheck_password: {get_input: mysql_clustercheck_password}
# Neutron
neutron::bind_host: {get_input: controller_host}
- neutron::rabbit_hosts: {get_input: rabbit_hosts}
neutron::rabbit_password: {get_input: rabbit_password}
neutron::rabbit_user: {get_input: rabbit_user}
neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -818,7 +804,6 @@ resources:
ceilometer_backend: {get_input: ceilometer_backend}
ceilometer_mysql_conn_string: {get_input: ceilometer_dsn}
ceilometer::metering_secret: {get_input: ceilometer_metering_secret}
- ceilometer::rabbit_hosts: {get_input: rabbit_hosts}
ceilometer::rabbit_userid: {get_input: rabbit_username}
ceilometer::rabbit_password: {get_input: rabbit_password}
ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
@@ -833,7 +818,6 @@ resources:
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
# Nova
- nova::rabbit_hosts: {get_input: rabbit_hosts}
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
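
The cinder-storage, compute and controller templates all follow the same pattern: the per-role rabbit_hosts input (a one-element list built from VirtualIP or RabbitHost) is removed, and the *::rabbit_hosts keys instead resolve from the all_nodes datafile populated by allNodesConfig at the top of this diff (the cinder-storage and compute hierarchies gain an all_nodes entry for this). Roughly how the service classes see the value through automatic parameter lookup, as a sketch with placeholder addresses:

    # Equivalent of what data binding now supplies to e.g. puppet-nova (sketch only).
    class { '::nova':
      rabbit_hosts => hiera('nova::rabbit_hosts'),  # e.g. ['192.0.2.10', '192.0.2.11', '192.0.2.12']
    }
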
diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
index b7fb84ab..c15d43ea 100644
--- a/puppet/hieradata/common.yaml
+++ b/puppet/hieradata/common.yaml
@@ -12,3 +12,11 @@ nova::network::neutron::neutron_admin_username: 'neutron'
nova::network::neutron::vif_plugging_is_fatal: false
nova::network::neutron::vif_plugging_timeout: 30
nova::network::neutron::dhcp_domain: ''
+
+sysctl_settings:
+ net.ipv4.tcp_keepalive_intvl:
+ value: 1
+ net.ipv4.tcp_keepalive_probes:
+ value: 5
+ net.ipv4.tcp_keepalive_time:
+    value: 5
\ No newline at end of file
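
The keepalive tunables dropped from all-nodes-config.yaml now live here, keyed by sysctl name, and every role manifest below applies them with create_resources(sysctl::value, hiera('sysctl_settings'), {}). Roughly what that call expands to, assuming a sysctl module that ships a sysctl::value defined type:

    # Sketch of the create_resources expansion; not literal code from this change.
    sysctl::value { 'net.ipv4.tcp_keepalive_intvl':  value => 1 }
    sysctl::value { 'net.ipv4.tcp_keepalive_probes': value => 5 }
    sysctl::value { 'net.ipv4.tcp_keepalive_time':   value => 5 }
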
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 02b7c42a..1748ccdb 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -109,7 +109,6 @@ tripleo::loadbalancer::nova_osapi: true
tripleo::loadbalancer::nova_metadata: true
tripleo::loadbalancer::nova_novncproxy: true
tripleo::loadbalancer::mysql: true
-tripleo::loadbalancer::rabbitmq: true
tripleo::loadbalancer::redis: true
tripleo::loadbalancer::swift_proxy_server: true
tripleo::loadbalancer::ceilometer: true
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index ce2ab3af..b645f9fe 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -24,6 +24,8 @@ if !str2bool(hiera('enable_package_install', 'false')) {
}
}
+create_resources(sysctl::value, hiera('sysctl_settings'), {})
+
if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index ffa55cdb..caca89a8 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -24,6 +24,8 @@ if !str2bool(hiera('enable_package_install', 'false')) {
}
}
+create_resources(sysctl::value, hiera('sysctl_settings'), {})
+
if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 2b4e2052..27272643 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -26,6 +26,8 @@ if !str2bool(hiera('enable_package_install', 'false')) {
if hiera('step') >= 1 {
+ create_resources(sysctl::value, hiera('sysctl_settings'), {})
+
$controller_node_ips = split(hiera('controller_node_ips'), ',')
class { '::tripleo::loadbalancer' :
@@ -160,7 +162,7 @@ if hiera('step') >= 2 {
}
}
- $rabbit_nodes = split(hiera('rabbit_node_ips'), ',')
+ $rabbit_nodes = hiera('rabbit_node_ips')
if count($rabbit_nodes) > 1 {
class { '::rabbitmq':
config_cluster => true,
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index c66460d9..ce8e81ee 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -31,12 +31,25 @@ if !str2bool(hiera('enable_package_install', 'false')) {
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$pacemaker_master = true
+ $sync_db = true
} else {
$pacemaker_master = false
+ $sync_db = false
}
+# When to start and enable services which haven't been Pacemakerized
+# FIXME: remove when we start all OpenStack services using Pacemaker
+# (occurrences of this variable will be gradually replaced with false)
+$non_pcmk_start = hiera('step') >= 4
+
if hiera('step') >= 1 {
+ create_resources(sysctl::value, hiera('sysctl_settings'), {})
+
+ if count(hiera('ntp::servers')) > 0 {
+ include ::ntp
+ }
+
$controller_node_ips = split(hiera('controller_node_ips'), ',')
$controller_node_names = split(downcase(hiera('controller_node_names')), ',')
class { '::tripleo::loadbalancer' :
@@ -46,7 +59,7 @@ if hiera('step') >= 1 {
haproxy_service_manage => false,
}
- $pacemaker_cluster_members = regsubst(hiera('controller_node_ips'), ',', ' ', 'G')
+ $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
user { 'hacluster':
ensure => present,
} ->
@@ -60,20 +73,6 @@ if hiera('step') >= 1 {
class { '::pacemaker::stonith':
disable => true,
}
- if $pacemaker_master {
- $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- pacemaker::resource::ip { 'control_vip':
- ip_address => $control_vip,
- }
- $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
- }
- }
-
- Class['::pacemaker::corosync'] -> Pacemaker::Resource::Ip <| |>
- Class['::pacemaker::corosync'] -> Pacemaker::Resource::Ocf <| |>
- Class['::pacemaker::corosync'] -> Pacemaker::Resource::Service <| |>
# Only configure RabbitMQ in this step, don't start it yet to
# avoid races where non-master nodes attempt to start without
@@ -113,8 +112,6 @@ if hiera('step') >= 1 {
}
$galera_nodes = downcase(hiera('galera_node_names', $::hostname))
$galera_nodes_count = count(split($galera_nodes, ','))
- $clustercheck_password = hiera('mysql_clustercheck_password')
- $mysql_root_password = hiera('mysql::server::root_password')
$mysqld_options = {
'mysqld' => {
@@ -158,25 +155,26 @@ if hiera('step') >= 1 {
if hiera('step') >= 2 {
if $pacemaker_master {
+ $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
+ pacemaker::resource::ip { 'control_vip':
+ ip_address => $control_vip,
+ }
+ $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
+ pacemaker::resource::ip { 'public_vip':
+ ip_address => $public_vip,
+ }
pacemaker::resource::service { 'haproxy':
clone_params => true,
}
- }
- if count(hiera('ntp::servers')) > 0 {
- include ::ntp
- }
-
- # MongoDB
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
- $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
- $mongo_node_string = join($mongo_node_ips_with_port, ',')
-
- $mongodb_replset = hiera('mongodb::server::replset')
- $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
- if downcase(hiera('bootstrap_nodeid')) == $::hostname {
+ pacemaker::resource::ocf { 'rabbitmq':
+ ocf_agent_name => 'heartbeat:rabbitmq-cluster',
+ resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
+ clone_params => 'ordered=true interleave=true',
+ require => Class['::rabbitmq'],
+ }
+ if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::resource::service { $::mongodb::params::service_name :
op_params => 'start timeout=120s',
clone_params => true,
@@ -185,19 +183,35 @@ if hiera('step') >= 2 {
}
# NOTE (spredzy) : The replset can only be run
# once all the nodes have joined the cluster.
+ $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
+ $mongo_node_string = join($mongo_node_ips_with_port, ',')
+ $mongodb_replset = hiera('mongodb::server::replset')
$mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
exec { 'mongodb-ready' :
command => $mongodb_cluster_ready_command,
- timeout => 600,
- tries => 60,
+ timeout => 30,
+ tries => 180,
try_sleep => 10,
- before => Mongodb_replset[$mongodb_replset],
}
-
mongodb_replset { $mongodb_replset :
members => $mongo_node_ips_with_port,
+ require => Exec['mongodb-ready'],
}
}
+
+ pacemaker::resource::ocf { 'galera' :
+ ocf_agent_name => 'heartbeat:galera',
+ op_params => 'promote timeout=300s on-fail=block --master',
+ meta_params => "master-max=${galera_nodes_count} ordered=true",
+ resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
+ require => Class['::mysql::server'],
+ before => Exec['galera-ready'],
+ }
+ mysql_user { 'clustercheckuser@localhost' :
+ password_hash => mysql_password($clustercheck_password),
+ require => Exec['galera-ready'],
+ }
}
# Redis
@@ -221,42 +235,20 @@ if hiera('step') >= 2 {
}
}
- # Galera
- if $pacemaker_master {
- $sync_db = true
-
- pacemaker::resource::ocf { 'galera' :
- ocf_agent_name => 'heartbeat:galera',
- op_params => 'promote timeout=300s on-fail=block --master',
- meta_params => "master-max=${galera_nodes_count} ordered=true",
- resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
- require => Class['::mysql::server'],
- before => Exec['galera-ready'],
- }
-
- mysql_user { 'clustercheckuser@localhost' :
- password_hash => mysql_password($clustercheck_password),
- require => Exec['galera-ready'],
- }
- } else {
- $sync_db = false
- }
-
exec { 'galera-ready' :
- command => '/bin/mysql -e "SHOW GLOBAL VARIABLES LIKE \'read_only\'" | /bin/grep -i off',
- timeout => 3600,
- tries => 60,
- try_sleep => 60,
- environment => 'HOME=/root',
- require => Class['::mysql::server'],
+ command => '/usr/bin/clustercheck >/dev/null',
+ timeout => 30,
+ tries => 180,
+ try_sleep => 10,
+ environment => ["AVAILABLE_WHEN_READONLY=0"],
+ require => File['/etc/sysconfig/clustercheck'],
}
file { '/etc/sysconfig/clustercheck' :
ensure => file,
- content => "MYSQL_USERNAME=clustercheckuser\n
-MYSQL_PASSWORD=${clustercheck_password}\n
+ content => "MYSQL_USERNAME=root\n
+MYSQL_PASSWORD=''\n
MYSQL_HOST=localhost\n",
- require => Exec['galera-ready'],
}
xinetd::service { 'galera-monitor' :
@@ -272,7 +264,6 @@ MYSQL_HOST=localhost\n",
require => File['/etc/sysconfig/clustercheck'],
}
- # FIXME: this should only occur on the bootstrap host (ditto for db syncs)
# Create all the database schemas
# Example DSN format: mysql://user:password@host/dbname
if $sync_db {
@@ -344,16 +335,6 @@ MYSQL_HOST=localhost\n",
}
}
- # RabbitMQ
- if $pacemaker_master {
- pacemaker::resource::ocf { 'rabbitmq':
- ocf_agent_name => 'heartbeat:rabbitmq-cluster',
- resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
- clone_params => true,
- require => Class['::rabbitmq'],
- }
- }
-
# pre-install swift here so we can build rings
include ::swift
@@ -373,11 +354,18 @@ MYSQL_HOST=localhost\n",
include ::ceph::profile::osd
}
+ # Memcached
+ include ::memcached
+
} #END STEP 2
-if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
+if hiera('step') >= 3 {
- include ::keystone
+ class { '::keystone':
+ sync_db => $sync_db,
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
#TODO: need a cleanup-keystone-tokens.sh solution here
keystone_config {
@@ -422,10 +410,14 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
# TODO: notifications, scrubber, etc.
include ::glance
class { 'glance::api':
- known_stores => [$glance_store]
+ known_stores => [$glance_store],
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
class { '::glance::registry' :
sync_db => $sync_db,
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
include join(['::glance::backend::', $glance_backend])
@@ -435,20 +427,45 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
class { '::nova::api' :
sync_db => $sync_db,
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::nova::cert' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::nova::conductor' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::nova::consoleauth' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::nova::vncproxy' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::nova::scheduler' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
- include ::nova::cert
- include ::nova::conductor
- include ::nova::consoleauth
include ::nova::network::neutron
- include ::nova::vncproxy
- include ::nova::scheduler
include ::neutron
class { '::neutron::server' :
sync_db => $sync_db,
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::neutron::agents::dhcp' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::neutron::agents::l3' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
- include ::neutron::agents::dhcp
- include ::neutron::agents::l3
file { '/etc/neutron/dnsmasq-neutron.conf':
content => hiera('neutron_dnsmasq_options'),
@@ -465,11 +482,15 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
}
class { 'neutron::agents::ml2::ovs':
+ # manage_service => $non_pcmk_start, -- not implemented
+ enabled => $non_pcmk_start,
bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
tunnel_types => split(hiera('neutron_tunnel_types'), ','),
}
class { 'neutron::agents::metadata':
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
auth_url => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']),
}
@@ -479,10 +500,20 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
Service['neutron-server'] -> Service['neutron-metadata']
include ::cinder
- include ::cinder::api
+ class { '::cinder::api':
+ sync_db => $sync_db,
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::cinder::scheduler' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::cinder::volume' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
include ::cinder::glance
- include ::cinder::scheduler
- include ::cinder::volume
class {'cinder::setup_test_volume':
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
}
@@ -530,8 +561,10 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
}
# swift proxy
- include ::memcached
- include ::swift::proxy
+ class { '::swift::proxy' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
include ::swift::proxy::proxy_logging
include ::swift::proxy::healthcheck
include ::swift::proxy::cache
@@ -546,9 +579,21 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
# swift storage
if str2bool(hiera('enable_swift_storage', 'true')) {
- class {'swift::storage::all':
+ class {'::swift::storage::all':
mount_check => str2bool(hiera('swift_mount_check'))
}
+ class {'::swift::storage::account':
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class {'::swift::storage::container':
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class {'::swift::storage::object':
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
if(!defined(File['/srv/node'])) {
file { '/srv/node':
ensure => directory,
@@ -569,17 +614,35 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
$ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
}
default : {
- $ceilometer_database_connection = $ceilometer_mongodb_conn_string
+ $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
}
}
include ::ceilometer
- include ::ceilometer::api
- include ::ceilometer::agent::notification
- include ::ceilometer::agent::central
- include ::ceilometer::alarm::notifier
- include ::ceilometer::alarm::evaluator
+ class { '::ceilometer::api' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::ceilometer::agent::notification' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::ceilometer::agent::central' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::ceilometer::alarm::notifier' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::ceilometer::alarm::evaluator' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::ceilometer::collector' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
include ::ceilometer::expirer
- include ::ceilometer::collector
class { '::ceilometer::db' :
database_connection => $ceilometer_database_connection,
sync_db => $sync_db,
@@ -594,10 +657,22 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
class { '::heat' :
sync_db => $sync_db,
}
- include ::heat::api
- include ::heat::api_cfn
- include ::heat::api_cloudwatch
- include ::heat::engine
+ class { '::heat::api' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::heat::api_cfn' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::heat::api_cloudwatch' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
+ class { '::heat::engine' :
+ manage_service => $non_pcmk_start,
+ enabled => $non_pcmk_start,
+ }
# Horizon
$vhost_params = { add_listen => false }
@@ -617,3 +692,7 @@ if (hiera('step') >= 3 and $pacemaker_master) or hiera('step') >= 4 {
}
} #END STEP 3
+
+if hiera('step') >= 4 {
+ # TODO: pacemaker::resource::service for OpenStack services go here
+} #END STEP 4
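
Most of the churn in overcloud_controller_pacemaker.pp is the same gate applied to every OpenStack service class: $non_pcmk_start is only true from step 4 onward, so steps 1 through 3 configure the services but leave them stopped and disabled, and the FIXME near the top notes that each occurrence will flip to false once the service is driven by Pacemaker instead. The pattern in isolation:

    # Sketch of the gating pattern used throughout step 3.
    $non_pcmk_start = hiera('step') >= 4

    class { '::nova::scheduler':
      manage_service => $non_pcmk_start,   # Puppet leaves the service alone before step 4
      enabled        => $non_pcmk_start,   # and keeps it disabled until then
    }
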
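The new, still-empty step 4 block is where those Pacemakerized services will eventually land. A hypothetical sketch of the kind of resource the TODO refers to, modeled on the haproxy and mongod declarations earlier in the manifest (not part of this change; the service name is illustrative):

    if hiera('step') >= 4 and $pacemaker_master {
      pacemaker::resource::service { 'openstack-keystone':
        clone_params => true,
      }
    }
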
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 8d0ad783..24799c8c 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -24,6 +24,8 @@ if !str2bool(hiera('enable_package_install', 'false')) {
}
}
+create_resources(sysctl::value, hiera('sysctl_settings'), {})
+
if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 3d7cb490..80cf6a21 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -24,6 +24,8 @@ if str2bool(hiera('disable_package_install', 'false')) {
}
}
+create_resources(sysctl::value, hiera('sysctl_settings'), {})
+
if count(hiera('ntp::servers')) > 0 {
include ::ntp
}