 environments/puppet-pacemaker-no-restart.yaml        |   3
 environments/puppet-pacemaker.yaml                   |   2
 extraconfig/tasks/post_puppet_pacemaker.yaml         |  15
 extraconfig/tasks/post_puppet_pacemaker_restart.yaml |  28
 overcloud-resource-registry-puppet.yaml              |   1
 overcloud.yaml                                       |   1
 puppet/all-nodes-config.yaml                         |   2
 puppet/hieradata/README.rst                          |   1
 puppet/hieradata/RedHat.yaml                         |   9
 puppet/hieradata/common.yaml                         |  50
 puppet/hieradata/compute.yaml                        |  18
 puppet/hieradata/controller.yaml                     | 186
 puppet/hieradata/database.yaml                       |   1
 puppet/hieradata/object.yaml                         |  19
 puppet/hieradata/volume.yaml                         |  13
 puppet/manifests/overcloud_controller_pacemaker.pp   |  82
 puppet/services/ceilometer-api.yaml                  |   1
 puppet/services/ceilometer-base.yaml                 |   5
 puppet/services/cinder-api.yaml                      |   7
 puppet/services/cinder-base.yaml                     |   4
 puppet/services/cinder-scheduler.yaml                |   4
 puppet/services/database/mongodb-base.yaml           |   3
 puppet/services/database/mysql.yaml                  |   7
 puppet/services/database/redis-base.yaml             |  13
 puppet/services/glance-api.yaml                      |   6
 puppet/services/glance-registry.yaml                 |   2
 puppet/services/gnocchi-api.yaml                     |   1
 puppet/services/gnocchi-base.yaml                    |   2
 puppet/services/haproxy.yaml                         |  21
 puppet/services/heat-base.yaml                       |  11
 puppet/services/heat-engine.yaml                     |   2
 puppet/services/horizon.yaml                         |   6
 puppet/services/kernel.yaml                          |  21
 puppet/services/keystone.yaml                        |  12
 puppet/services/neutron-base.yaml                    |   5
 puppet/services/neutron-l3.yaml                      |   1
 puppet/services/neutron-metadata.yaml                |   1
 puppet/services/neutron-server.yaml                  |   3
 puppet/services/nova-api.yaml                        |   5
 puppet/services/nova-base.yaml                       |  23
 puppet/services/nova-compute.yaml                    |  10
 puppet/services/nova-vncproxy.yaml                   |   4
 puppet/services/pacemaker.yaml                       |   5
 puppet/services/pacemaker/core.yaml                  |  20
 puppet/services/pacemaker/glance-api.yaml            |   1
 puppet/services/rabbitmq.yaml                        |  15
 puppet/services/sahara-base.yaml                     |   2
 puppet/services/sahara-engine.yaml                   |   4
 puppet/services/swift-proxy.yaml                     |  26
 puppet/services/swift-storage.yaml                   |  12
 50 files changed, 299 insertions(+), 397 deletions(-)
diff --git a/environments/puppet-pacemaker-no-restart.yaml b/environments/puppet-pacemaker-no-restart.yaml
new file mode 100644
index 00000000..67d8692d
--- /dev/null
+++ b/environments/puppet-pacemaker-no-restart.yaml
@@ -0,0 +1,3 @@
+# use this file *in addition* to puppet-pacemaker.yaml
+resource_registry:
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: OS::Heat::None
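
The layering intended by the comment above works through the Heat resource registry: environment files passed later take precedence, so puppet-pacemaker.yaml first maps the restart task to the real template and puppet-pacemaker-no-restart.yaml then remaps it to a no-op. A minimal sketch of the effective registry entries when both environments are applied in that order (paths taken from this change):

    # after applying puppet-pacemaker.yaml, then puppet-pacemaker-no-restart.yaml
    resource_registry:
      OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
      # overridden by puppet-pacemaker-no-restart.yaml: the resource restart step is skipped
      OS::TripleO::Tasks::ControllerPostPuppetRestart: OS::Heat::None
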
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index 8674e009..4622bc4d 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -4,6 +4,7 @@ resource_registry:
OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
# custom pacemaker services
# NOTE: For now we will need to specify overrides to all services
@@ -12,6 +13,7 @@ resource_registry:
OS::TripleO::Services::CinderApi: ../puppet/services/pacemaker/cinder-api.yaml
OS::TripleO::Services::CinderScheduler: ../puppet/services/pacemaker/cinder-scheduler.yaml
OS::TripleO::Services::CinderVolume: ../puppet/services/pacemaker/cinder-volume.yaml
+ OS::TripleO::Services::Core: ../puppet/services/pacemaker/core.yaml
OS::TripleO::Services::Keystone: ../puppet/services/pacemaker/keystone.yaml
OS::TripleO::Services::GlanceApi: ../puppet/services/pacemaker/glance-api.yaml
OS::TripleO::Services::GlanceRegistry: ../puppet/services/pacemaker/glance-registry.yaml
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
index fbed9ce5..b62502f8 100644
--- a/extraconfig/tasks/post_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -29,20 +29,9 @@ resources:
config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
input_values: {get_param: input_values}
- ControllerPostPuppetRestartConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: pacemaker_resource_restart.sh
-
- ControllerPostPuppetRestartDeployment:
- type: OS::Heat::SoftwareDeployments
+ ControllerPostPuppetRestart:
+ type: OS::TripleO::Tasks::ControllerPostPuppetRestart
depends_on: ControllerPostPuppetMaintenanceModeDeployment
properties:
servers: {get_param: servers}
- config: {get_resource: ControllerPostPuppetRestartConfig}
input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
new file mode 100644
index 00000000..52760c87
--- /dev/null
+++ b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2014-10-16
+description: 'Post-Puppet restart config for Pacemaker deployments'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+resources:
+
+ ControllerPostPuppetRestartConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: pacemaker_resource_restart.sh
+
+ ControllerPostPuppetRestartDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPostPuppetRestartConfig}
+ input_values: {get_param: input_values}
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 9f253024..c71ced2d 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -133,6 +133,7 @@ resource_registry:
OS::TripleO::Services::CinderApi: puppet/services/cinder-api.yaml
OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml
OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml
+ OS::TripleO::Services::Core: OS::Heat::None
OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
OS::TripleO::Services::GlanceRegistry: puppet/services/glance-registry.yaml
diff --git a/overcloud.yaml b/overcloud.yaml
index 38944d72..077ffacc 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -359,6 +359,7 @@ parameters:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Core
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
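
Note on the new Core entry: the default registry maps OS::TripleO::Services::Core to OS::Heat::None, so on non-pacemaker deployments it is a no-op; environments/puppet-pacemaker.yaml (earlier in this diff) remaps it to the new puppet/services/pacemaker/core.yaml, whose step_config presumably manages the openstack-core dummy resource that the constraints left in overcloud_controller_pacemaker.pp still depend on. The two mappings, copied from the hunks in this change:

    # overcloud-resource-registry-puppet.yaml (default: no-op)
    OS::TripleO::Services::Core: OS::Heat::None

    # environments/puppet-pacemaker.yaml (pacemaker deployments)
    OS::TripleO::Services::Core: ../puppet/services/pacemaker/core.yaml
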
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 55120912..803a5d49 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -106,8 +106,6 @@ resources:
- {get_param: ceph_storage_hosts}
hiera:
datafiles:
- RedHat:
- raw_data: {get_file: hieradata/RedHat.yaml}
bootstrap_node:
mapped_data:
bootstrap_nodeid: {get_input: bootstrap_nodeid}
diff --git a/puppet/hieradata/README.rst b/puppet/hieradata/README.rst
new file mode 100644
index 00000000..64a60229
--- /dev/null
+++ b/puppet/hieradata/README.rst
@@ -0,0 +1 @@
+Do not add more hieradata in this directory, and use composable services.
diff --git a/puppet/hieradata/RedHat.yaml b/puppet/hieradata/RedHat.yaml
deleted file mode 100644
index 25902828..00000000
--- a/puppet/hieradata/RedHat.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# RedHat specific overrides go here
-rabbitmq::package_provider: 'yum'
-
-# The Galera package should work in cluster and
-# non-cluster modes based on the config file.
-# We set the package name here explicitly so
-# that it matches what we pre-install
-# in tripleo-puppet-elements.
-mysql::server::package_name: 'mariadb-galera-server'
diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
index 65cf9577..3bda874f 100644
--- a/puppet/hieradata/common.yaml
+++ b/puppet/hieradata/common.yaml
@@ -1,51 +1,3 @@
-# Common Hiera data gets applied to all nodes
-ssh::server::storeconfigs_enabled: false
-
-# ceilometer settings used by compute and controller ceilo auth settings
-ceilometer::agent::auth::auth_region: 'regionOne'
-ceilometer::agent::auth::auth_tenant_name: 'service'
-
+# TODO(emilien) move it to composable aodh roles later
aodh::auth::auth_region: 'regionOne'
aodh::auth::auth_tenant_name: 'service'
-
-gnocchi::auth::auth_region: 'regionOne'
-gnocchi::auth::auth_tenant_name: 'service'
-
-nova::api::admin_tenant_name: 'service'
-nova::network::neutron::neutron_project_name: 'service'
-nova::network::neutron::neutron_username: 'neutron'
-nova::network::neutron::dhcp_domain: ''
-
-neutron::allow_overlapping_ips: true
-neutron::server::project_name: 'service'
-
-kernel_modules:
- nf_conntrack: {}
-
-sysctl_settings:
- net.ipv4.tcp_keepalive_intvl:
- value: 1
- net.ipv4.tcp_keepalive_probes:
- value: 5
- net.ipv4.tcp_keepalive_time:
- value: 5
- net.nf_conntrack_max:
- value: 500000
- net.netfilter.nf_conntrack_max:
- value: 500000
- # prevent neutron bridges from autoconfiguring ipv6 addresses
- net.ipv6.conf.default.accept_ra:
- value: 0
- net.ipv6.conf.default.autoconf:
- value: 0
- net.core.netdev_max_backlog:
- value: 10000
-
-nova::rabbit_heartbeat_timeout_threshold: 60
-neutron::rabbit_heartbeat_timeout_threshold: 60
-cinder::rabbit_heartbeat_timeout_threshold: 60
-ceilometer::rabbit_heartbeat_timeout_threshold: 60
-heat::rabbit_heartbeat_timeout_threshold: 60
-keystone::rabbit_heartbeat_timeout_threshold: 60
-
-nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index fe203be7..219f0d0a 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -1,21 +1,3 @@
# Hiera data here applies to all compute nodes
-nova::notify_on_state_change: 'vm_and_task_state'
-nova::notification_driver: messagingv2
-nova::compute::instance_usage_audit: true
-nova::compute::instance_usage_audit_period: 'hour'
-
-nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
-
-nova::network::neutron::neutron_auth_type: 'v3password'
-
-# Changing the default from 512MB. The current templates can not deploy
-# overclouds with swap. On an idle compute node, we see ~1024MB of RAM
-# used. 2048 is suggested to account for other possible operations for
-# example openvswitch.
-nova::compute::reserved_host_memory: 2048
-
-ceilometer::agent::auth::auth_tenant_name: 'service'
-ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
-
compute_classes: []
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 3ec656dc..71c53b47 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -1,190 +1,16 @@
# Hiera data here applies to all controller nodes
-nova::api::enabled: true
-nova::vncproxy::enabled: true
-
-# gnocchi
-gnocchi::storage::swift::swift_user: 'service:gnocchi'
-gnocchi::storage::swift::swift_auth_version: 2
-gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
-gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3'
-gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616'
-gnocchi::statsd::flush_delay: 10
-gnocchi::statsd::archive_policy_name: 'low'
-
-# rabbitmq
-rabbitmq::delete_guest_user: false
-rabbitmq::wipe_db_on_cookie_change: true
-rabbitmq::port: '5672'
-rabbitmq::package_source: undef
-rabbitmq::repos_ensure: false
-rabbitmq_environment:
- RABBITMQ_NODENAME: "rabbit@%{::hostname}"
- RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
-rabbitmq_kernel_variables:
- inet_dist_listen_min: '35672'
- inet_dist_listen_max: '35672'
-rabbitmq_config_variables:
- tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
- cluster_partition_handling: 'pause_minority'
- loopback_users: '[]'
-
-mongodb::server::replset: tripleo
-mongodb::server::journal: false
-
-redis::port: 6379
-redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
-redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
-redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
-
-# keystone
-keystone::roles::admin::email: 'root@localhost'
-
-# service tenant
-glance::api::keystone_tenant: 'service'
+# TODO(emilien) move it to composable aodh roles later
aodh::api::keystone_tenant: 'service'
-glance::registry::keystone_tenant: 'service'
-neutron::server::auth_tenant: 'service'
-neutron::agents::metadata::auth_tenant: 'service'
-neutron::agents::l3::router_delete_namespaces: True
-cinder::api::keystone_tenant: 'service'
-swift::proxy::authtoken::admin_tenant_name: 'service'
-ceilometer::api::keystone_tenant: 'service'
-gnocchi::api::keystone_tenant: 'service'
-heat::keystone_tenant: 'service'
-sahara::admin_tenant_name: 'service'
aodh::keystone::auth::tenant: 'service'
-ceilometer::keystone::auth::tenant: 'service'
-cinder::keystone::auth::tenant: 'service'
-glance::keystone::auth::tenant: 'service'
-gnocchi::keystone::auth::tenant: 'service'
-heat::keystone::auth::tenant: 'service'
-neutron::keystone::auth::tenant: 'service'
-nova::keystone::auth::tenant: 'service'
-sahara::keystone::auth::tenant: 'service'
-swift::keystone::auth::tenant: 'service'
-
-# keystone
-keystone::cron::token_flush::maxdelay: 3600
-keystone::roles::admin::service_tenant: 'service'
-keystone::roles::admin::admin_tenant: 'admin'
-keystone::cron::token_flush::destination: '/dev/null'
-keystone::config::keystone_config:
- DEFAULT/secure_proxy_ssl_header:
- value: 'HTTP_X_FORWARDED_PROTO'
- ec2/driver:
- value: 'keystone.contrib.ec2.backends.sql.Ec2'
-keystone::service_name: 'httpd'
-keystone::wsgi::apache::ssl: false
-
-#swift
-swift::proxy::pipeline:
- - 'catch_errors'
- - 'healthcheck'
- - 'proxy-logging'
- - 'cache'
- - 'ratelimit'
- - 'bulk'
- - 'tempurl'
- - 'formpost'
- - 'authtoken'
- - 'keystone'
- - 'staticweb'
- - 'proxy-logging'
- - 'proxy-server'
-
-swift::proxy::account_autocreate: true
-swift::keystone::auth::configure_s3_endpoint: false
-swift::keystone::auth::operator_roles:
- - admin
- - swiftoperator
-
-# glance
-glance::api::pipeline: 'keystone'
-glance::api::show_image_direct_url: true
-glance::registry::pipeline: 'keystone'
-glance::backend::swift::swift_store_create_container_on_put: true
-glance_file_pcmk_directory: '/var/lib/glance/images'
-
-# neutron
-neutron::server::sync_db: true
-
-# nova
-nova::notify_on_state_change: 'vm_and_task_state'
-nova::api::default_floating_pool: 'public'
-nova::api::sync_db_api: true
-nova::api::enable_proxy_headers_parsing: true
-nova::notification_driver: messaging
-# ceilometer
-ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
-
-# cinder
-cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
-cinder::cron::db_purge::destination: '/dev/null'
-cinder::host: hostgroup
-
-# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable
-# service.
-cinder::api::enable_proxy_headers_parsing: true
-
-# heat
-heat::engine::configure_delegated_roles: false
-heat::engine::trusts_delegated_roles: []
-heat::instance_user: ''
-heat::cron::purge_deleted::age: 30
-heat::cron::purge_deleted::age_type: 'days'
-heat::cron::purge_deleted::maxdelay: 3600
-heat::cron::purge_deleted::destination: '/dev/null'
-heat::keystone::domain::domain_name: 'heat_stack'
-heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
-heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
-heat::auth_plugin: 'password'
-
-# pacemaker
-pacemaker::corosync::cluster_name: 'tripleo_cluster'
-pacemaker::corosync::manage_fw: false
-pacemaker::resource_defaults::defaults:
- resource-stickiness: { value: INFINITY }
-corosync_token_timeout: 10000
-
-# horizon
-horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
-horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
-horizon::vhost_extra_params:
- add_listen: false
- priority: 10
- access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
-
-# mysql
-mysql::server::manage_config_file: true
-
-
-tripleo::haproxy::keystone_admin: true
-tripleo::haproxy::keystone_public: true
-tripleo::haproxy::neutron: true
-tripleo::haproxy::cinder: true
-tripleo::haproxy::glance_api: true
-tripleo::haproxy::glance_registry: true
-tripleo::haproxy::nova_osapi: true
-tripleo::haproxy::nova_metadata: true
-tripleo::haproxy::nova_novncproxy: true
-tripleo::haproxy::mysql: true
-tripleo::haproxy::redis: true
-tripleo::haproxy::sahara: true
-tripleo::haproxy::swift_proxy_server: true
-tripleo::haproxy::ceilometer: true
-tripleo::haproxy::aodh: true
-tripleo::haproxy::gnocchi: true
-tripleo::haproxy::heat_api: true
-tripleo::haproxy::heat_cloudwatch: true
-tripleo::haproxy::heat_cfn: true
-tripleo::haproxy::horizon: true
-
-controller_classes: []
-# firewall
+# TODO(emilien) move it to composable roles later
+# Already WIP with https://review.openstack.org/330785
+# and https://review.openstack.org/338527
tripleo::firewall::firewall_rules:
'128 aodh':
dport:
- 8042
- 13042
+
+controller_classes: []
diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml
index 5591f7e0..d93817e7 100644
--- a/puppet/hieradata/database.yaml
+++ b/puppet/hieradata/database.yaml
@@ -1,4 +1,5 @@
# Aodh
+# TODO(emilien) move it to composable aodh roles later
aodh::db::mysql::user: aodh
aodh::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
aodh::db::mysql::dbname: aodh
diff --git a/puppet/hieradata/object.yaml b/puppet/hieradata/object.yaml
index d4a0e81d..da526e39 100644
--- a/puppet/hieradata/object.yaml
+++ b/puppet/hieradata/object.yaml
@@ -1,21 +1,2 @@
# Hiera data for swift storage nodes
-swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
-swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
-
-swift::storage::all::object_pipeline:
- - healthcheck
- - recon
- - object-server
-swift::storage::all::container_pipeline:
- - healthcheck
- - container-server
-swift::storage::all::account_pipeline:
- - healthcheck
- - account-server
-
-swift::proxy::keystone::operator_roles:
- - admin
- - swiftoperator
- - ResellerAdmin
-
object_classes: []
diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml
index 8640c0a7..dd0582fa 100644
--- a/puppet/hieradata/volume.yaml
+++ b/puppet/hieradata/volume.yaml
@@ -1,14 +1,3 @@
# Hiera data here applies to all volume storage nodes
-# cinder
-cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
-
-cinder::config::cinder_config:
- DEFAULT/nova_catalog_info:
- value: 'compute:Compute Service:internalURL'
- DEFAULT/swift_catalog_info:
- value: 'object-store:swift:internalURL'
-
-cinder_user_enabled_backends: []
-
-volume_classes: []
\ No newline at end of file
+volume_classes: []
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 9ebfb6d5..ca24c443 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -99,88 +99,6 @@ if hiera('step') >= 5 {
Pacemaker::Resource::Ocf['openstack-core']],
}
- # Nova
- pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::nova::params::consoleauth_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core':
- source => "${::nova::params::consoleauth_service_name}-clone",
- target => 'openstack-core-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
- constraint_type => 'order',
- first_resource => "${::nova::params::consoleauth_service_name}-clone",
- second_resource => "${::nova::params::vncproxy_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
- Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
- }
- pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
- source => "${::nova::params::vncproxy_service_name}-clone",
- target => "${::nova::params::consoleauth_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
- Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
- }
- pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
- constraint_type => 'order',
- first_resource => "${::nova::params::vncproxy_service_name}-clone",
- second_resource => "${::nova::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
- Pacemaker::Resource::Service[$::nova::params::api_service_name]],
- }
- pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
- source => "${::nova::params::api_service_name}-clone",
- target => "${::nova::params::vncproxy_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
- Pacemaker::Resource::Service[$::nova::params::api_service_name]],
- }
- pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
- constraint_type => 'order',
- first_resource => "${::nova::params::api_service_name}-clone",
- second_resource => "${::nova::params::scheduler_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
- Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
- }
- pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
- source => "${::nova::params::scheduler_service_name}-clone",
- target => "${::nova::params::api_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
- Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
- }
- pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
- constraint_type => 'order',
- first_resource => "${::nova::params::scheduler_service_name}-clone",
- second_resource => "${::nova::params::conductor_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
- Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
- }
- pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
- source => "${::nova::params::conductor_service_name}-clone",
- target => "${::nova::params::scheduler_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
- Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
- }
-
# Fedora doesn't know `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
$redis_aodh_constraint_params = undef
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index b4db4fe9..c5c143b0 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -30,5 +30,6 @@ outputs:
dport:
- 8777
- 13777
+ - ceilometer::api::keystone_tenant: 'service'
step_config: |
include ::tripleo::profile::base::ceilometer::api
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index aa0c76c9..db5a82b1 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -83,6 +83,9 @@ outputs:
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
+ ceilometer::agent::auth::auth_region: 'regionOne'
+ ceilometer::agent::auth::auth_tenant_name: 'service'
+ ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::db::mysql::password: {get_param: CeilometerPassword}
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
@@ -94,6 +97,7 @@ outputs:
ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
+ ceilometer::keystone::auth::tenant: 'service'
ceilometer::rabbit_userid: {get_param: RabbitUserName}
ceilometer::rabbit_password: {get_param: RabbitPassword}
ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -104,3 +108,4 @@ outputs:
ceilometer::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ ceilometer::rabbit_heartbeat_timeout_threshold: 60
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index ba2131c2..5e58dee9 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -37,6 +37,13 @@ outputs:
- cinder::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
cinder::api::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
cinder::api::keystone_password: {get_param: CinderPassword}
+ cinder::api::keystone_tenant: 'service'
+ cinder::api::enable_proxy_headers_parsing: true
+ cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL'
+ # TODO(emilien) move it to puppet-cinder
+ cinder::config:
+ DEFAULT/swift_catalog_info:
+ value: 'object-store:swift:internalURL'
cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
tripleo.cinder_api.firewall_rules:
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index e3658543..b224cd65 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -63,3 +63,7 @@ outputs:
cinder::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ cinder::rabbit_heartbeat_timeout_threshold: 60
+ cinder::keystone::auth::tenant: 'service'
+ cinder::host: hostgroup
+ cinder::cron::db_purge::destination: '/dev/null'
diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml
index 28dc6945..e03090a2 100644
--- a/puppet/services/cinder-scheduler.yaml
+++ b/puppet/services/cinder-scheduler.yaml
@@ -23,6 +23,8 @@ outputs:
value:
service_name: cinder_scheduler
config_settings:
- get_attr: [CinderBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [CinderBase, role_data, config_settings]
+ - cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
step_config: |
include ::tripleo::profile::base::cinder::scheduler
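
The cinder-scheduler hunk above shows the pattern this series applies across the service templates: a config_settings value that used to be a single get_attr becomes a map_merge of the base template's role_data and the hieradata keys being moved out of puppet/hieradata/. A generic sketch of the shape (the ExampleBase / example:: names are illustrative only, not taken from any file in this change):

    outputs:
      role_data:
        value:
          service_name: example_service
          config_settings:
            map_merge:
              # settings shared via the service's *-base.yaml template
              - get_attr: [ExampleBase, role_data, config_settings]
              # settings previously kept in puppet/hieradata/*.yaml
              - example::scheduler::scheduler_driver: 'example.FilterScheduler'
          step_config: |
            include ::tripleo::profile::base::example
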
diff --git a/puppet/services/database/mongodb-base.yaml b/puppet/services/database/mongodb-base.yaml
index 88d45706..b8761320 100644
--- a/puppet/services/database/mongodb-base.yaml
+++ b/puppet/services/database/mongodb-base.yaml
@@ -27,5 +27,6 @@ outputs:
service_name: mongodb_base
config_settings:
mongodb::server::nojournal: {get_param: MongoDbNoJournal}
+ mongodb::server::journal: false
mongodb::server::ipv6: {get_param: MongoDbIPv6}
- mongodb::server::replset: {get_param: MongoDbReplset}
\ No newline at end of file
+ mongodb::server::replset: {get_param: MongoDbReplset}
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 0a19b2a7..6f8f91b5 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -17,6 +17,13 @@ outputs:
value:
service_name: mysql
config_settings:
+ # The Galera package should work in cluster and
+ # non-cluster modes based on the config file.
+ # We set the package name here explicitly so
+ # that it matches what we pre-install
+ # in tripleo-puppet-elements.
+ mysql::server::package_name: 'mariadb-galera-server'
+ mysql::server::manage_config_file: true
tripleo.mysql.firewall_rules:
'104 mysql galera':
dport:
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 2fa624f8..fe8c0659 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -15,8 +15,11 @@ outputs:
value:
service_name: redis_base
config_settings:
- redis::requirepass: {get_param: RedisPassword}
- redis::masterauth: {get_param: RedisPassword}
- redis::sentinel_auth_pass: {get_param: RedisPassword}
- tripleo::loadbalancer::redis_password: {get_param: RedisPassword}
-
+ redis::requirepass: {get_param: RedisPassword}
+ redis::masterauth: {get_param: RedisPassword}
+ redis::sentinel_auth_pass: {get_param: RedisPassword}
+ redis::port: 6379
+ redis::sentinel::master_name: '"%{hiera(\"bootstrap_nodeid\")}"'
+ redis::sentinel::redis_host: '"%{hiera(\"bootstrap_nodeid_ip\")}"'
+ redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+ tripleo::loadbalancer::redis_password: {get_param: RedisPassword}
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 5df12209..b0eeadeb 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -92,6 +92,7 @@ outputs:
glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::swift::swift_store_create_container_on_put: true
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
glance_backend: {get_param: GlanceBackend}
@@ -109,5 +110,10 @@ outputs:
dport:
- 9292
- 13292
+ glance::keystone::auth::tenant: 'service'
+ glance::api::keystone_tenant: 'service'
+ glance::api::pipeline: 'keystone'
+ glance::api::show_image_direct_url: true
+
step_config: |
include ::tripleo::profile::base::glance::api
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
index d2a6f4fa..5ad4bb9a 100644
--- a/puppet/services/glance-registry.yaml
+++ b/puppet/services/glance-registry.yaml
@@ -38,6 +38,8 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/glance'
glance::registry::keystone_password: {get_param: GlancePassword}
+ glance::registry::keystone_tenant: 'service'
+ glance::registry::pipeline: 'keystone'
glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
glance::registry::debug: {get_param: Debug}
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index 891a60ec..d97626a6 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -29,5 +29,6 @@ outputs:
dport:
- 8041
- 13041
+ - gnocchi::api::keystone_tenant: 'service'
step_config: |
include ::tripleo::profile::base::gnocchi::api
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index cb1aad89..5c1e015e 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -90,3 +90,5 @@ outputs:
gnocchi::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ gnocchi::auth::auth_region: 'regionOne'
+ gnocchi::auth::auth_tenant_name: 'service'
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 1a629c1d..902a1c3f 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -19,5 +19,26 @@ outputs:
tripleo.haproxy.firewall_rules:
'107 haproxy stats':
dport: 1993
+ # TODO(emilien) make it composable to find which services are actually running
+ tripleo::haproxy::keystone_admin: true
+ tripleo::haproxy::keystone_public: true
+ tripleo::haproxy::neutron: true
+ tripleo::haproxy::cinder: true
+ tripleo::haproxy::glance_api: true
+ tripleo::haproxy::glance_registry: true
+ tripleo::haproxy::nova_osapi: true
+ tripleo::haproxy::nova_metadata: true
+ tripleo::haproxy::nova_novncproxy: true
+ tripleo::haproxy::mysql: true
+ tripleo::haproxy::redis: true
+ tripleo::haproxy::sahara: true
+ tripleo::haproxy::swift_proxy_server: true
+ tripleo::haproxy::ceilometer: true
+ tripleo::haproxy::aodh: true
+ tripleo::haproxy::gnocchi: true
+ tripleo::haproxy::heat_api: true
+ tripleo::haproxy::heat_cloudwatch: true
+ tripleo::haproxy::heat_cfn: true
+ tripleo::haproxy::horizon: true
step_config: |
include ::tripleo::profile::base::haproxy
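
The TODO above anticipates the same composable-service direction as the rest of this change: instead of haproxy.yaml hard-coding a flag for every service, each service template would eventually advertise whether its endpoint should be load-balanced. A purely hypothetical sketch of that shape, not implemented by this patch (the key placement is an assumption):

    # hypothetical follow-up: a service template carries its own flag ...
    config_settings:
      map_merge:
        - get_attr: [ExampleServiceBase, role_data, config_settings]
        - tripleo::haproxy::example_service: true
    # ... and puppet/services/haproxy.yaml drops the hard-coded list above.
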
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 11818cb1..c40136f5 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -45,3 +45,14 @@ outputs:
context_is_admin:
key: 'context_is_admin'
value: 'role:admin'
+ heat::rabbit_heartbeat_timeout_threshold: 60
+ heat::keystone_tenant: 'service'
+ heat::keystone::auth::tenant: 'service'
+ heat::keystone::domain::domain_name: 'heat_stack'
+ heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
+ heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
+ heat::auth_plugin: 'password'
+ heat::cron::purge_deleted::age: 30
+ heat::cron::purge_deleted::age_type: 'days'
+ heat::cron::purge_deleted::maxdelay: 3600
+ heat::cron::purge_deleted::destination: '/dev/null'
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index dea1eeae..13555a62 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -40,6 +40,8 @@ outputs:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- heat::engine::num_engine_workers: {get_param: HeatWorkers}
+ heat::engine::configure_delegated_roles: false
+ heat::engine::trusts_delegated_roles: []
tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
heat::database_connection:
list_join:
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index dc7ba8c9..64cf450a 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -36,5 +36,11 @@ outputs:
dport:
- 80
- 443
+ horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
+ horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
+ horizon::vhost_extra_params:
+ add_listen: false
+ priority: 10
+ access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
step_config: |
include ::tripleo::profile::base::horizon
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 9e8a53f0..50ebe925 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -15,5 +15,26 @@ outputs:
description: Role data for the Kernel modules
value:
service_name: kernel
+ config_settings:
+ kernel_modules:
+ nf_conntrack: {}
+ sysctl_settings:
+ net.ipv4.tcp_keepalive_intvl:
+ value: 1
+ net.ipv4.tcp_keepalive_probes:
+ value: 5
+ net.ipv4.tcp_keepalive_time:
+ value: 5
+ net.nf_conntrack_max:
+ value: 500000
+ net.netfilter.nf_conntrack_max:
+ value: 500000
+ # prevent neutron bridges from autoconfiguring ipv6 addresses
+ net.ipv6.conf.default.accept_ra:
+ value: 0
+ net.ipv6.conf.default.autoconf:
+ value: 0
+ net.core.netdev_max_backlog:
+ value: 10000
step_config: |
include ::tripleo::profile::base::kernel
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index de920de3..48e74875 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -132,6 +132,18 @@ outputs:
keystone::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ keystone::rabbit_heartbeat_timeout_threshold: 60
+ keystone::cron::token_flush::maxdelay: 3600
+ keystone::roles::admin::service_tenant: 'service'
+ keystone::roles::admin::admin_tenant: 'admin'
+ keystone::cron::token_flush::destination: '/dev/null'
+ keystone::config::keystone_config:
+ DEFAULT/secure_proxy_ssl_header:
+ value: 'HTTP_X_FORWARDED_PROTO'
+ ec2/driver:
+ value: 'keystone.contrib.ec2.backends.sql.Ec2'
+ keystone::service_name: 'httpd'
+ keystone::wsgi::apache::ssl: false
keystone::wsgi::apache::workers: {get_param: KeystoneWorkers}
# override via extraconfig:
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 81da08f8..c1134824 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -61,4 +61,7 @@ outputs:
params:
PLUGINS: {get_param: NeutronServicePlugins}
neutron::debug: {get_param: Debug}
- neutron::host: '"%{::fqdn}"'
+ neutron::allow_overlapping_ips: true
+ neutron::rabbit_heartbeat_timeout_threshold: 60
+ neutron::host: '"%{::fqdn}"' #NOTE: extra quoting is needed
+ neutron::keystone::auth::tenant: 'service'
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index f62d2022..a7232a39 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -31,5 +31,6 @@ outputs:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
+ neutron::agents::l3::router_delete_namespaces: True
step_config: |
include tripleo::profile::base::neutron::l3
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
index 68ccea67..73d8c3da 100644
--- a/puppet/services/neutron-metadata.yaml
+++ b/puppet/services/neutron-metadata.yaml
@@ -39,5 +39,6 @@ outputs:
neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ neutron::agents::metadata::auth_tenant: 'service'
step_config: |
include tripleo::profile::base::neutron::metadata
diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml
index 8c6a2537..017fbea4 100644
--- a/puppet/services/neutron-server.yaml
+++ b/puppet/services/neutron-server.yaml
@@ -54,6 +54,7 @@ outputs:
- '/ovs_neutron?charset=utf8'
neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ neutron::server::auth_tenant: 'service'
neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
@@ -65,6 +66,8 @@ outputs:
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
+ neutron::server::project_name: 'service'
+ neutron::server::sync_db: true
neutron::db::mysql::password: {get_param: NeutronPassword}
neutron::db::mysql::user: neutron
neutron::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index 402c39e1..045a8614 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -42,5 +42,10 @@ outputs:
- 8774
- 13774
- 8775
+ nova::api::admin_tenant_name: 'service'
+ nova::api::enabled: true
+ nova::api::default_floating_pool: 'public'
+ nova::api::sync_db_api: true
+ nova::api::enable_proxy_headers_parsing: true
step_config: |
include tripleo::profile::base::nova::api
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index 16862615..21dbacd2 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -79,4 +79,25 @@ outputs:
- '%'
- "%{hiera('mysql_bind_host')}"
nova::debug: {get_param: Debug}
- nova::host: '"%{::fqdn}"'
+ nova::network::neutron::neutron_project_name: 'service'
+ nova::network::neutron::neutron_username: 'neutron'
+ nova::network::neutron::dhcp_domain: ''
+ nova::rabbit_heartbeat_timeout_threshold: 60
+ nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
+ nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed.
+ nova::notify_on_state_change: 'vm_and_task_state'
+ nova::notification_driver: messagingv2
+ nova::network::neutron::neutron_auth_type: 'v3password'
+ nova::keystone::auth::tenant: 'service'
+ nova::db::mysql::user: nova
+ nova::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
+ nova::db::mysql::dbname: nova
+ nova::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ nova::db::mysql_api::user: nova_api
+ nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
+ nova::db::mysql_api::dbname: nova_api
+ nova::db::mysql_api::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 0bf27d13..bcc3a232 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -31,7 +31,7 @@ outputs:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::compute::libvirt::manage_libvirt_services: false
- # we manage migration in nova common puppet profile
+ # we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
tripleo::profile::base::nova::nova_compute_enabled: true
@@ -42,6 +42,14 @@ outputs:
- '.'
- - 'client'
- {get_param: CephClientUserName}
+ nova::compute::rbd::libvirt_rbd_secret_uuid: '"%{hiera(\"ceph::profile::params::fsid\")}"'
+ nova::compute::instance_usage_audit: true
+ nova::compute::instance_usage_audit_period: 'hour'
+ # Changing the default from 512MB. The current templates can not deploy
+ # overclouds with swap. On an idle compute node, we see ~1024MB of RAM
+ # used. 2048 is suggested to account for other possible operations for
+ # example openvswitch.
+ nova::compute::reserved_host_memory: 2048
step_config: |
# TODO(emilien): figure how to deal with libvirt profile.
# We'll probably threat it like we do with Neutron plugins.
diff --git a/puppet/services/nova-vncproxy.yaml b/puppet/services/nova-vncproxy.yaml
index f6812103..0b9cef38 100644
--- a/puppet/services/nova-vncproxy.yaml
+++ b/puppet/services/nova-vncproxy.yaml
@@ -22,6 +22,8 @@ outputs:
value:
service_name: nova_vncproxy
config_settings:
- get_attr: [NovaBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [NovaBase, role_data, config_settings]
+ - nova::vncproxy::enabled: true
step_config: |
include tripleo::profile::base::nova::vncproxy
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index 9520cb9c..b0ebb7d4 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -16,6 +16,11 @@ outputs:
value:
service_name: pacemaker
config_settings:
+ pacemaker::corosync::cluster_name: 'tripleo_cluster'
+ pacemaker::corosync::manage_fw: false
+ pacemaker::resource_defaults::defaults:
+ resource-stickiness: { value: INFINITY }
+ corosync_token_timeout: 10000
tripleo.pacemaker.firewall_rules:
'130 pacemaker tcp':
proto: 'tcp'
diff --git a/puppet/services/pacemaker/core.yaml b/puppet/services/pacemaker/core.yaml
new file mode 100644
index 00000000..1c0c043c
--- /dev/null
+++ b/puppet/services/pacemaker/core.yaml
@@ -0,0 +1,20 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Core (fake) service with Pacemaker configured with Puppet.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Core role.
+ value:
+ service_name: core
+ config_settings: {}
+ step_config: |
+ include ::tripleo::profile::pacemaker::core
\ No newline at end of file
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
index 4e27fd56..0fc17b22 100644
--- a/puppet/services/pacemaker/glance-api.yaml
+++ b/puppet/services/pacemaker/glance-api.yaml
@@ -53,6 +53,7 @@ outputs:
glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype}
glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage}
glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions}
+ glance_file_pcmk_directory: '/var/lib/glance/images'
glance::api::manage_service: false
glance::api::enabled: false
step_config: |
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 3c5909ca..4c02f359 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -42,5 +42,20 @@ outputs:
- 4369
- 5672
- 35672
+ rabbitmq::delete_guest_user: false
+ rabbitmq::wipe_db_on_cookie_change: true
+ rabbitmq::port: '5672'
+ rabbitmq::package_source: undef
+ rabbitmq::repos_ensure: false
+ rabbitmq_environment:
+ RABBITMQ_NODENAME: "rabbit@%{::hostname}"
+ RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ rabbitmq_kernel_variables:
+ inet_dist_listen_min: '35672'
+ inet_dist_listen_max: '35672'
+ rabbitmq_config_variables:
+ tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
+ cluster_partition_handling: 'pause_minority'
+ loopback_users: '[]'
step_config: |
include ::tripleo::profile::base::rabbitmq
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index 981b7c05..3e320128 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -47,3 +47,5 @@ outputs:
- spark
- storm
sahara::rpc_backend: rabbit
+ sahara::admin_tenant_name: 'service'
+ sahara::keystone::auth::tenant: 'service'
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 548d961f..074f83c7 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -30,11 +30,11 @@ outputs:
- sahara_dsn: &sahara_dsn
list_join:
- ''
- - - {get_param: [EndpointMap, MysqlVirtual, protocol]}
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- '://sahara:'
- {get_param: SaharaPassword}
- '@'
- - {get_param: [EndpointMap, MysqlVirtual, host]}
+ - {get_param: [EndpointMap, MysqlInternal, host]}
- '/sahara'
sahara::database_connection: *sahara_dsn
sahara::db::mysql::password: {get_param: SaharaPassword}
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index dcd2c2a4..99af7499 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -41,6 +41,7 @@ outputs:
swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
+ swift::proxy::authtoken::admin_tenant_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
@@ -56,5 +57,30 @@ outputs:
dport:
- 8080
- 13808
+ swift::keystone::auth::tenant: 'service'
+ swift::keystone::auth::configure_s3_endpoint: false
+ swift::keystone::auth::operator_roles:
+ - admin
+ - swiftoperator
+ - ResellerAdmin
+ swift::proxy::keystone::operator_roles:
+ - admin
+ - swiftoperator
+ - ResellerAdmin
+ swift::proxy::pipeline:
+ - 'catch_errors'
+ - 'healthcheck'
+ - 'proxy-logging'
+ - 'cache'
+ - 'ratelimit'
+ - 'bulk'
+ - 'tempurl'
+ - 'formpost'
+ - 'authtoken'
+ - 'keystone'
+ - 'staticweb'
+ - 'proxy-logging'
+ - 'proxy-server'
+ swift::proxy::account_autocreate: true
step_config: |
include ::tripleo::profile::base::swift::proxy
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index effdaf63..74679231 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -48,5 +48,17 @@ outputs:
- 6000
- 6001
- 6002
+ swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
+ swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
+ swift::storage::all::object_pipeline:
+ - healthcheck
+ - recon
+ - object-server
+ swift::storage::all::container_pipeline:
+ - healthcheck
+ - container-server
+ swift::storage::all::account_pipeline:
+ - healthcheck
+ - account-server
step_config: |
include ::tripleo::profile::base::swift::storage