From 315fa319630fa22ee9e31a058a2a17376b5f1ea3 Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Wed, 13 Jul 2016 11:29:43 -0400 Subject: Migrate Puppet Hieradata to composable services Migrate puppet/hieradata/*.yaml parameters to puppet/services/*.yaml except for some services that are not composable yet. Co-Authored-By: Juan Antonio Osorio Robles Change-Id: I7e5f8b18ee9aa63a1dffc6facaf88315b07d5fd7 --- puppet/all-nodes-config.yaml | 2 - puppet/hieradata/README.rst | 1 + puppet/hieradata/RedHat.yaml | 9 -- puppet/hieradata/common.yaml | 50 +------- puppet/hieradata/compute.yaml | 18 --- puppet/hieradata/controller.yaml | 186 +---------------------------- puppet/hieradata/database.yaml | 1 + puppet/hieradata/object.yaml | 19 --- puppet/hieradata/volume.yaml | 13 +- puppet/services/ceilometer-api.yaml | 1 + puppet/services/ceilometer-base.yaml | 5 + puppet/services/cinder-api.yaml | 7 ++ puppet/services/cinder-base.yaml | 4 + puppet/services/cinder-scheduler.yaml | 4 +- puppet/services/database/mongodb-base.yaml | 3 +- puppet/services/database/mysql.yaml | 7 ++ puppet/services/database/redis-base.yaml | 13 +- puppet/services/glance-api.yaml | 6 + puppet/services/glance-registry.yaml | 2 + puppet/services/gnocchi-api.yaml | 1 + puppet/services/gnocchi-base.yaml | 2 + puppet/services/haproxy.yaml | 21 ++++ puppet/services/heat-base.yaml | 11 ++ puppet/services/heat-engine.yaml | 2 + puppet/services/horizon.yaml | 6 + puppet/services/kernel.yaml | 21 ++++ puppet/services/keystone.yaml | 12 ++ puppet/services/neutron-base.yaml | 5 +- puppet/services/neutron-l3.yaml | 1 + puppet/services/neutron-metadata.yaml | 1 + puppet/services/neutron-server.yaml | 3 + puppet/services/nova-api.yaml | 5 + puppet/services/nova-base.yaml | 23 +++- puppet/services/nova-compute.yaml | 10 +- puppet/services/nova-vncproxy.yaml | 4 +- puppet/services/pacemaker.yaml | 5 + puppet/services/pacemaker/glance-api.yaml | 1 + puppet/services/rabbitmq.yaml | 15 +++ puppet/services/sahara-base.yaml | 2 + puppet/services/sahara-engine.yaml | 4 +- puppet/services/swift-proxy.yaml | 26 ++++ puppet/services/swift-storage.yaml | 12 ++ 42 files changed, 242 insertions(+), 302 deletions(-) create mode 100644 puppet/hieradata/README.rst delete mode 100644 puppet/hieradata/RedHat.yaml diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 55120912..803a5d49 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -106,8 +106,6 @@ resources: - {get_param: ceph_storage_hosts} hiera: datafiles: - RedHat: - raw_data: {get_file: hieradata/RedHat.yaml} bootstrap_node: mapped_data: bootstrap_nodeid: {get_input: bootstrap_nodeid} diff --git a/puppet/hieradata/README.rst b/puppet/hieradata/README.rst new file mode 100644 index 00000000..64a60229 --- /dev/null +++ b/puppet/hieradata/README.rst @@ -0,0 +1 @@ +Do not add more hieradata in this directory, and use composable services. diff --git a/puppet/hieradata/RedHat.yaml b/puppet/hieradata/RedHat.yaml deleted file mode 100644 index 25902828..00000000 --- a/puppet/hieradata/RedHat.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# RedHat specific overrides go here -rabbitmq::package_provider: 'yum' - -# The Galera package should work in cluster and -# non-cluster modes based on the config file. -# We set the package name here explicitly so -# that it matches what we pre-install -# in tripleo-puppet-elements. 
-mysql::server::package_name: 'mariadb-galera-server' diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 65cf9577..3bda874f 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -1,51 +1,3 @@ -# Common Hiera data gets applied to all nodes -ssh::server::storeconfigs_enabled: false - -# ceilometer settings used by compute and controller ceilo auth settings -ceilometer::agent::auth::auth_region: 'regionOne' -ceilometer::agent::auth::auth_tenant_name: 'service' - +# TODO(emilien) move it to composable aodh roles later aodh::auth::auth_region: 'regionOne' aodh::auth::auth_tenant_name: 'service' - -gnocchi::auth::auth_region: 'regionOne' -gnocchi::auth::auth_tenant_name: 'service' - -nova::api::admin_tenant_name: 'service' -nova::network::neutron::neutron_project_name: 'service' -nova::network::neutron::neutron_username: 'neutron' -nova::network::neutron::dhcp_domain: '' - -neutron::allow_overlapping_ips: true -neutron::server::project_name: 'service' - -kernel_modules: - nf_conntrack: {} - -sysctl_settings: - net.ipv4.tcp_keepalive_intvl: - value: 1 - net.ipv4.tcp_keepalive_probes: - value: 5 - net.ipv4.tcp_keepalive_time: - value: 5 - net.nf_conntrack_max: - value: 500000 - net.netfilter.nf_conntrack_max: - value: 500000 - # prevent neutron bridges from autoconfiguring ipv6 addresses - net.ipv6.conf.default.accept_ra: - value: 0 - net.ipv6.conf.default.autoconf: - value: 0 - net.core.netdev_max_backlog: - value: 10000 - -nova::rabbit_heartbeat_timeout_threshold: 60 -neutron::rabbit_heartbeat_timeout_threshold: 60 -cinder::rabbit_heartbeat_timeout_threshold: 60 -ceilometer::rabbit_heartbeat_timeout_threshold: 60 -heat::rabbit_heartbeat_timeout_threshold: 60 -keystone::rabbit_heartbeat_timeout_threshold: 60 - -nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index fe203be7..219f0d0a 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -1,21 +1,3 @@ # Hiera data here applies to all compute nodes -nova::notify_on_state_change: 'vm_and_task_state' -nova::notification_driver: messagingv2 -nova::compute::instance_usage_audit: true -nova::compute::instance_usage_audit_period: 'hour' - -nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" - -nova::network::neutron::neutron_auth_type: 'v3password' - -# Changing the default from 512MB. The current templates can not deploy -# overclouds with swap. On an idle compute node, we see ~1024MB of RAM -# used. 2048 is suggested to account for other possible operations for -# example openvswitch. 
-nova::compute::reserved_host_memory: 2048 - -ceilometer::agent::auth::auth_tenant_name: 'service' -ceilometer::agent::auth::auth_endpoint_type: 'internalURL' - compute_classes: [] diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 3ec656dc..71c53b47 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -1,190 +1,16 @@ # Hiera data here applies to all controller nodes -nova::api::enabled: true -nova::vncproxy::enabled: true - -# gnocchi -gnocchi::storage::swift::swift_user: 'service:gnocchi' -gnocchi::storage::swift::swift_auth_version: 2 -gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26' -gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3' -gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616' -gnocchi::statsd::flush_delay: 10 -gnocchi::statsd::archive_policy_name: 'low' - -# rabbitmq -rabbitmq::delete_guest_user: false -rabbitmq::wipe_db_on_cookie_change: true -rabbitmq::port: '5672' -rabbitmq::package_source: undef -rabbitmq::repos_ensure: false -rabbitmq_environment: - RABBITMQ_NODENAME: "rabbit@%{::hostname}" - RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' -rabbitmq_kernel_variables: - inet_dist_listen_min: '35672' - inet_dist_listen_max: '35672' -rabbitmq_config_variables: - tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]' - cluster_partition_handling: 'pause_minority' - loopback_users: '[]' - -mongodb::server::replset: tripleo -mongodb::server::journal: false - -redis::port: 6379 -redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}" -redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}" -redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' - -# keystone -keystone::roles::admin::email: 'root@localhost' - -# service tenant -glance::api::keystone_tenant: 'service' +# TODO(emilien) move it to composable aodh roles later aodh::api::keystone_tenant: 'service' -glance::registry::keystone_tenant: 'service' -neutron::server::auth_tenant: 'service' -neutron::agents::metadata::auth_tenant: 'service' -neutron::agents::l3::router_delete_namespaces: True -cinder::api::keystone_tenant: 'service' -swift::proxy::authtoken::admin_tenant_name: 'service' -ceilometer::api::keystone_tenant: 'service' -gnocchi::api::keystone_tenant: 'service' -heat::keystone_tenant: 'service' -sahara::admin_tenant_name: 'service' aodh::keystone::auth::tenant: 'service' -ceilometer::keystone::auth::tenant: 'service' -cinder::keystone::auth::tenant: 'service' -glance::keystone::auth::tenant: 'service' -gnocchi::keystone::auth::tenant: 'service' -heat::keystone::auth::tenant: 'service' -neutron::keystone::auth::tenant: 'service' -nova::keystone::auth::tenant: 'service' -sahara::keystone::auth::tenant: 'service' -swift::keystone::auth::tenant: 'service' - -# keystone -keystone::cron::token_flush::maxdelay: 3600 -keystone::roles::admin::service_tenant: 'service' -keystone::roles::admin::admin_tenant: 'admin' -keystone::cron::token_flush::destination: '/dev/null' -keystone::config::keystone_config: - DEFAULT/secure_proxy_ssl_header: - value: 'HTTP_X_FORWARDED_PROTO' - ec2/driver: - value: 'keystone.contrib.ec2.backends.sql.Ec2' -keystone::service_name: 'httpd' -keystone::wsgi::apache::ssl: false - -#swift 
-swift::proxy::pipeline: - - 'catch_errors' - - 'healthcheck' - - 'proxy-logging' - - 'cache' - - 'ratelimit' - - 'bulk' - - 'tempurl' - - 'formpost' - - 'authtoken' - - 'keystone' - - 'staticweb' - - 'proxy-logging' - - 'proxy-server' - -swift::proxy::account_autocreate: true -swift::keystone::auth::configure_s3_endpoint: false -swift::keystone::auth::operator_roles: - - admin - - swiftoperator - -# glance -glance::api::pipeline: 'keystone' -glance::api::show_image_direct_url: true -glance::registry::pipeline: 'keystone' -glance::backend::swift::swift_store_create_container_on_put: true -glance_file_pcmk_directory: '/var/lib/glance/images' - -# neutron -neutron::server::sync_db: true - -# nova -nova::notify_on_state_change: 'vm_and_task_state' -nova::api::default_floating_pool: 'public' -nova::api::sync_db_api: true -nova::api::enable_proxy_headers_parsing: true -nova::notification_driver: messaging -# ceilometer -ceilometer::agent::auth::auth_endpoint_type: 'internalURL' - -# cinder -cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler -cinder::cron::db_purge::destination: '/dev/null' -cinder::host: hostgroup - -# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable -# service. -cinder::api::enable_proxy_headers_parsing: true - -# heat -heat::engine::configure_delegated_roles: false -heat::engine::trusts_delegated_roles: [] -heat::instance_user: '' -heat::cron::purge_deleted::age: 30 -heat::cron::purge_deleted::age_type: 'days' -heat::cron::purge_deleted::maxdelay: 3600 -heat::cron::purge_deleted::destination: '/dev/null' -heat::keystone::domain::domain_name: 'heat_stack' -heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' -heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' -heat::auth_plugin: 'password' - -# pacemaker -pacemaker::corosync::cluster_name: 'tripleo_cluster' -pacemaker::corosync::manage_fw: false -pacemaker::resource_defaults::defaults: - resource-stickiness: { value: INFINITY } -corosync_token_timeout: 10000 - -# horizon -horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache -horizon::django_session_engine: 'django.contrib.sessions.backends.cache' -horizon::vhost_extra_params: - add_listen: false - priority: 10 - access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' - -# mysql -mysql::server::manage_config_file: true - - -tripleo::haproxy::keystone_admin: true -tripleo::haproxy::keystone_public: true -tripleo::haproxy::neutron: true -tripleo::haproxy::cinder: true -tripleo::haproxy::glance_api: true -tripleo::haproxy::glance_registry: true -tripleo::haproxy::nova_osapi: true -tripleo::haproxy::nova_metadata: true -tripleo::haproxy::nova_novncproxy: true -tripleo::haproxy::mysql: true -tripleo::haproxy::redis: true -tripleo::haproxy::sahara: true -tripleo::haproxy::swift_proxy_server: true -tripleo::haproxy::ceilometer: true -tripleo::haproxy::aodh: true -tripleo::haproxy::gnocchi: true -tripleo::haproxy::heat_api: true -tripleo::haproxy::heat_cloudwatch: true -tripleo::haproxy::heat_cfn: true -tripleo::haproxy::horizon: true - -controller_classes: [] -# firewall +# TODO(emilien) move it to composable roles later +# Already WIP with https://review.openstack.org/330785 +# and https://review.openstack.org/338527 tripleo::firewall::firewall_rules: '128 aodh': dport: - 8042 - 13042 + +controller_classes: [] diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 5591f7e0..d93817e7 100644 --- 
a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -1,4 +1,5 @@ # Aodh +# TODO(emilien) move it to composable aodh roles later aodh::db::mysql::user: aodh aodh::db::mysql::host: "%{hiera('mysql_virtual_ip')}" aodh::db::mysql::dbname: aodh diff --git a/puppet/hieradata/object.yaml b/puppet/hieradata/object.yaml index d4a0e81d..da526e39 100644 --- a/puppet/hieradata/object.yaml +++ b/puppet/hieradata/object.yaml @@ -1,21 +1,2 @@ # Hiera data for swift storage nodes -swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' -swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' - -swift::storage::all::object_pipeline: - - healthcheck - - recon - - object-server -swift::storage::all::container_pipeline: - - healthcheck - - container-server -swift::storage::all::account_pipeline: - - healthcheck - - account-server - -swift::proxy::keystone::operator_roles: - - admin - - swiftoperator - - ResellerAdmin - object_classes: [] diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml index 8640c0a7..dd0582fa 100644 --- a/puppet/hieradata/volume.yaml +++ b/puppet/hieradata/volume.yaml @@ -1,14 +1,3 @@ # Hiera data here applies to all volume storage nodes -# cinder -cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler - -cinder::config::cinder_config: - DEFAULT/nova_catalog_info: - value: 'compute:Compute Service:internalURL' - DEFAULT/swift_catalog_info: - value: 'object-store:swift:internalURL' - -cinder_user_enabled_backends: [] - -volume_classes: [] \ No newline at end of file +volume_classes: [] diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml index d0f3767d..3e7198f9 100644 --- a/puppet/services/ceilometer-api.yaml +++ b/puppet/services/ceilometer-api.yaml @@ -30,5 +30,6 @@ outputs: dport: - 8777 - 13777 + - ceilometer::api::keystone_tenant: 'service' step_config: | include ::tripleo::profile::base::ceilometer::api diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml index 40060a11..4ba3afcb 100644 --- a/puppet/services/ceilometer-base.yaml +++ b/puppet/services/ceilometer-base.yaml @@ -83,6 +83,9 @@ outputs: ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents} + ceilometer::agent::auth::auth_region: 'regionOne' + ceilometer::agent::auth::auth_tenant_name: 'service' + ceilometer::agent::auth::auth_endpoint_type: 'internalURL' ceilometer::db::mysql::password: {get_param: CeilometerPassword} ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher} ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]} @@ -94,6 +97,7 @@ outputs: ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} ceilometer::keystone::auth::password: {get_param: CeilometerPassword} ceilometer::keystone::auth::region: {get_param: KeystoneRegion} + ceilometer::keystone::auth::tenant: 'service' ceilometer::rabbit_userid: {get_param: RabbitUserName} ceilometer::rabbit_password: {get_param: RabbitPassword} ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL} @@ -104,3 +108,4 @@ outputs: ceilometer::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + ceilometer::rabbit_heartbeat_timeout_threshold: 60 diff --git a/puppet/services/cinder-api.yaml 
b/puppet/services/cinder-api.yaml index 0cefb380..d93183ef 100644 --- a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -37,6 +37,13 @@ outputs: - cinder::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} cinder::api::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} cinder::api::keystone_password: {get_param: CinderPassword} + cinder::api::keystone_tenant: 'service' + cinder::api::enable_proxy_headers_parsing: true + cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL' + # TODO(emilien) move it to puppet-cinder + cinder::config: + DEFAULT/swift_catalog_info: + value: 'object-store:swift:internalURL' cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge} tripleo.cinder_api.firewall_rules: diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml index adacc737..c3126903 100644 --- a/puppet/services/cinder-base.yaml +++ b/puppet/services/cinder-base.yaml @@ -63,3 +63,7 @@ outputs: cinder::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + cinder::rabbit_heartbeat_timeout_threshold: 60 + cinder::keystone::auth::tenant: 'service' + cinder::host: hostgroup + cinder::cron::db_purge::destination: '/dev/null' diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml index 2e1e852d..d509118d 100644 --- a/puppet/services/cinder-scheduler.yaml +++ b/puppet/services/cinder-scheduler.yaml @@ -23,6 +23,8 @@ outputs: value: service_name: cinder-scheduler config_settings: - get_attr: [CinderBase, role_data, config_settings] + map_merge: + - get_attr: [CinderBase, role_data, config_settings] + - cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler step_config: | include ::tripleo::profile::base::cinder::scheduler diff --git a/puppet/services/database/mongodb-base.yaml b/puppet/services/database/mongodb-base.yaml index ed0b92af..ec87e215 100644 --- a/puppet/services/database/mongodb-base.yaml +++ b/puppet/services/database/mongodb-base.yaml @@ -27,5 +27,6 @@ outputs: service_name: mongodb-base config_settings: mongodb::server::nojournal: {get_param: MongoDbNoJournal} + mongodb::server::journal: false mongodb::server::ipv6: {get_param: MongoDbIPv6} - mongodb::server::replset: {get_param: MongoDbReplset} \ No newline at end of file + mongodb::server::replset: {get_param: MongoDbReplset} diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml index 0a19b2a7..6f8f91b5 100644 --- a/puppet/services/database/mysql.yaml +++ b/puppet/services/database/mysql.yaml @@ -17,6 +17,13 @@ outputs: value: service_name: mysql config_settings: + # The Galera package should work in cluster and + # non-cluster modes based on the config file. + # We set the package name here explicitly so + # that it matches what we pre-install + # in tripleo-puppet-elements. 
+ mysql::server::package_name: 'mariadb-galera-server' + mysql::server::manage_config_file: true tripleo.mysql.firewall_rules: '104 mysql galera': dport: diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml index c7b083fa..24175d56 100644 --- a/puppet/services/database/redis-base.yaml +++ b/puppet/services/database/redis-base.yaml @@ -15,8 +15,11 @@ outputs: value: service_name: redis-base config_settings: - redis::requirepass: {get_param: RedisPassword} - redis::masterauth: {get_param: RedisPassword} - redis::sentinel_auth_pass: {get_param: RedisPassword} - tripleo::loadbalancer::redis_password: {get_param: RedisPassword} - + redis::requirepass: {get_param: RedisPassword} + redis::masterauth: {get_param: RedisPassword} + redis::sentinel_auth_pass: {get_param: RedisPassword} + redis::port: 6379 + redis::sentinel::master_name: '"%{hiera(\"bootstrap_nodeid\")}"' + redis::sentinel::redis_host: '"%{hiera(\"bootstrap_nodeid_ip\")}"' + redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' + tripleo::loadbalancer::redis_password: {get_param: RedisPassword} diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index ee4c17c7..420cb4b0 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -92,6 +92,7 @@ outputs: glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] } glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_param: GlancePassword} + glance::backend::swift::swift_store_create_container_on_put: true glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} glance_backend: {get_param: GlanceBackend} @@ -109,5 +110,10 @@ outputs: dport: - 9292 - 13292 + glance::keystone::auth::tenant: 'service' + glance::api::keystone_tenant: 'service' + glance::api::pipeline: 'keystone' + glance::api::show_image_direct_url: true + step_config: | include ::tripleo::profile::base::glance::api diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml index f9d9dd6b..50a0bfc8 100644 --- a/puppet/services/glance-registry.yaml +++ b/puppet/services/glance-registry.yaml @@ -38,6 +38,8 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/glance' glance::registry::keystone_password: {get_param: GlancePassword} + glance::registry::keystone_tenant: 'service' + glance::registry::pipeline: 'keystone' glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } glance::registry::debug: {get_param: Debug} diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml index bf23cda1..0c0a1621 100644 --- a/puppet/services/gnocchi-api.yaml +++ b/puppet/services/gnocchi-api.yaml @@ -29,5 +29,6 @@ outputs: dport: - 8041 - 13041 + - gnocchi::api::keystone_tenant: 'service' step_config: | include ::tripleo::profile::base::gnocchi::api diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml index fa00f736..e39cdeaf 100644 --- a/puppet/services/gnocchi-base.yaml +++ b/puppet/services/gnocchi-base.yaml @@ -90,3 +90,5 @@ outputs: gnocchi::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + gnocchi::auth::auth_region: 'regionOne' + gnocchi::auth::auth_tenant_name: 'service' diff --git a/puppet/services/haproxy.yaml 
b/puppet/services/haproxy.yaml index 1a629c1d..902a1c3f 100644 --- a/puppet/services/haproxy.yaml +++ b/puppet/services/haproxy.yaml @@ -19,5 +19,26 @@ outputs: tripleo.haproxy.firewall_rules: '107 haproxy stats': dport: 1993 + # TODO(emilien) make it composable to find which services are actually running + tripleo::haproxy::keystone_admin: true + tripleo::haproxy::keystone_public: true + tripleo::haproxy::neutron: true + tripleo::haproxy::cinder: true + tripleo::haproxy::glance_api: true + tripleo::haproxy::glance_registry: true + tripleo::haproxy::nova_osapi: true + tripleo::haproxy::nova_metadata: true + tripleo::haproxy::nova_novncproxy: true + tripleo::haproxy::mysql: true + tripleo::haproxy::redis: true + tripleo::haproxy::sahara: true + tripleo::haproxy::swift_proxy_server: true + tripleo::haproxy::ceilometer: true + tripleo::haproxy::aodh: true + tripleo::haproxy::gnocchi: true + tripleo::haproxy::heat_api: true + tripleo::haproxy::heat_cloudwatch: true + tripleo::haproxy::heat_cfn: true + tripleo::haproxy::horizon: true step_config: | include ::tripleo::profile::base::haproxy diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml index 2a7aeabc..72f414f9 100644 --- a/puppet/services/heat-base.yaml +++ b/puppet/services/heat-base.yaml @@ -45,3 +45,14 @@ outputs: context_is_admin: key: 'context_is_admin' value: 'role:admin' + heat::rabbit_heartbeat_timeout_threshold: 60 + heat::keystone_tenant: 'service' + heat::keystone::auth::tenant: 'service' + heat::keystone::domain::domain_name: 'heat_stack' + heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' + heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' + heat::auth_plugin: 'password' + heat::cron::purge_deleted::age: 30 + heat::cron::purge_deleted::age_type: 'days' + heat::cron::purge_deleted::maxdelay: 3600 + heat::cron::purge_deleted::destination: '/dev/null' diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index 2fd01885..bd3409ea 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -40,6 +40,8 @@ outputs: map_merge: - get_attr: [HeatBase, role_data, config_settings] - heat::engine::num_engine_workers: {get_param: HeatWorkers} + heat::engine::configure_delegated_roles: false + heat::engine::trusts_delegated_roles: [] tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge} heat::database_connection: list_join: diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index dc7ba8c9..64cf450a 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -36,5 +36,11 @@ outputs: dport: - 80 - 443 + horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache + horizon::django_session_engine: 'django.contrib.sessions.backends.cache' + horizon::vhost_extra_params: + add_listen: false + priority: 10 + access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' step_config: | include ::tripleo::profile::base::horizon diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml index 9e8a53f0..50ebe925 100644 --- a/puppet/services/kernel.yaml +++ b/puppet/services/kernel.yaml @@ -15,5 +15,26 @@ outputs: description: Role data for the Kernel modules value: service_name: kernel + config_settings: + kernel_modules: + nf_conntrack: {} + sysctl_settings: + net.ipv4.tcp_keepalive_intvl: + value: 1 + net.ipv4.tcp_keepalive_probes: + value: 5 + net.ipv4.tcp_keepalive_time: + value: 5 + net.nf_conntrack_max: + 
value: 500000 + net.netfilter.nf_conntrack_max: + value: 500000 + # prevent neutron bridges from autoconfiguring ipv6 addresses + net.ipv6.conf.default.accept_ra: + value: 0 + net.ipv6.conf.default.autoconf: + value: 0 + net.core.netdev_max_backlog: + value: 10000 step_config: | include ::tripleo::profile::base::kernel diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index de920de3..48e74875 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -132,6 +132,18 @@ outputs: keystone::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + keystone::rabbit_heartbeat_timeout_threshold: 60 + keystone::cron::token_flush::maxdelay: 3600 + keystone::roles::admin::service_tenant: 'service' + keystone::roles::admin::admin_tenant: 'admin' + keystone::cron::token_flush::destination: '/dev/null' + keystone::config::keystone_config: + DEFAULT/secure_proxy_ssl_header: + value: 'HTTP_X_FORWARDED_PROTO' + ec2/driver: + value: 'keystone.contrib.ec2.backends.sql.Ec2' + keystone::service_name: 'httpd' + keystone::wsgi::apache::ssl: false keystone::wsgi::apache::workers: {get_param: KeystoneWorkers} # override via extraconfig: diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index 301759c7..a1757461 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -61,4 +61,7 @@ outputs: params: PLUGINS: {get_param: NeutronServicePlugins} neutron::debug: {get_param: Debug} - neutron::host: '"%{::fqdn}"' + neutron::allow_overlapping_ips: true + neutron::rabbit_heartbeat_timeout_threshold: 60 + neutron::host: '"%{::fqdn}"' #NOTE: extra quoting is needed + neutron::keystone::auth::tenant: 'service' diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml index 0e568614..974a0df8 100644 --- a/puppet/services/neutron-l3.yaml +++ b/puppet/services/neutron-l3.yaml @@ -31,5 +31,6 @@ outputs: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} + neutron::agents::l3::router_delete_namespaces: True step_config: | include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml index 04c80e01..abbb760c 100644 --- a/puppet/services/neutron-metadata.yaml +++ b/puppet/services/neutron-metadata.yaml @@ -39,5 +39,6 @@ outputs: neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers} neutron::agents::metadata::auth_password: {get_param: NeutronPassword} neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + neutron::agents::metadata::auth_tenant: 'service' step_config: | include tripleo::profile::base::neutron::metadata diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml index 253a6bfe..6343a5e0 100644 --- a/puppet/services/neutron-server.yaml +++ b/puppet/services/neutron-server.yaml @@ -54,6 +54,7 @@ outputs: - '/ovs_neutron?charset=utf8' neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + neutron::server::auth_tenant: 'service' neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } neutron::server::api_workers: {get_param: NeutronWorkers} neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} @@ -65,6 +66,8 @@ outputs: 
neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_param: NovaPassword} + neutron::server::project_name: 'service' + neutron::server::sync_db: true neutron::db::mysql::password: {get_param: NeutronPassword} neutron::db::mysql::user: neutron neutron::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index 0dd8fd51..a64c01f8 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -42,5 +42,10 @@ outputs: - 8774 - 13774 - 8775 + nova::api::admin_tenant_name: 'service' + nova::api::enabled: true + nova::api::default_floating_pool: 'public' + nova::api::sync_db_api: true + nova::api::enable_proxy_headers_parsing: true step_config: | include tripleo::profile::base::nova::api diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml index c94e0246..e7f3edac 100644 --- a/puppet/services/nova-base.yaml +++ b/puppet/services/nova-base.yaml @@ -79,4 +79,25 @@ outputs: - '%' - "%{hiera('mysql_bind_host')}" nova::debug: {get_param: Debug} - nova::host: '"%{::fqdn}"' + nova::network::neutron::neutron_project_name: 'service' + nova::network::neutron::neutron_username: 'neutron' + nova::network::neutron::dhcp_domain: '' + nova::rabbit_heartbeat_timeout_threshold: 60 + nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' + nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed. + nova::notify_on_state_change: 'vm_and_task_state' + nova::notification_driver: messagingv2 + nova::network::neutron::neutron_auth_type: 'v3password' + nova::keystone::auth::tenant: 'service' + nova::db::mysql::user: nova + nova::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + nova::db::mysql::dbname: nova + nova::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + nova::db::mysql_api::user: nova_api + nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + nova::db::mysql_api::dbname: nova_api + nova::db::mysql_api::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml index a7226bd0..89a22468 100644 --- a/puppet/services/nova-compute.yaml +++ b/puppet/services/nova-compute.yaml @@ -31,7 +31,7 @@ outputs: map_merge: - get_attr: [NovaBase, role_data, config_settings] - nova::compute::libvirt::manage_libvirt_services: false - # we manage migration in nova common puppet profile + # we manage migration in nova common puppet profile nova::compute::libvirt::migration_support: false tripleo::profile::base::nova::manage_migration: true tripleo::profile::base::nova::nova_compute_enabled: true @@ -42,6 +42,14 @@ outputs: - '.' - - 'client' - {get_param: CephClientUserName} + nova::compute::rbd::libvirt_rbd_secret_uuid: '"%{hiera(\"ceph::profile::params::fsid\")}"' + nova::compute::instance_usage_audit: true + nova::compute::instance_usage_audit_period: 'hour' + # Changing the default from 512MB. The current templates can not deploy + # overclouds with swap. On an idle compute node, we see ~1024MB of RAM + # used. 2048 is suggested to account for other possible operations for + # example openvswitch. + nova::compute::reserved_host_memory: 2048 step_config: | # TODO(emilien): figure how to deal with libvirt profile. # We'll probably threat it like we do with Neutron plugins. 
diff --git a/puppet/services/nova-vncproxy.yaml b/puppet/services/nova-vncproxy.yaml index d8e04cc8..3e2a3d88 100644 --- a/puppet/services/nova-vncproxy.yaml +++ b/puppet/services/nova-vncproxy.yaml @@ -22,6 +22,8 @@ outputs: value: service_name: nova-vncproxy config_settings: - get_attr: [NovaBase, role_data, config_settings] + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + - nova::vncproxy::enabled: true step_config: | include tripleo::profile::base::nova::vncproxy diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml index 9520cb9c..b0ebb7d4 100644 --- a/puppet/services/pacemaker.yaml +++ b/puppet/services/pacemaker.yaml @@ -16,6 +16,11 @@ outputs: value: service_name: pacemaker config_settings: + pacemaker::corosync::cluster_name: 'tripleo_cluster' + pacemaker::corosync::manage_fw: false + pacemaker::resource_defaults::defaults: + resource-stickiness: { value: INFINITY } + corosync_token_timeout: 10000 tripleo.pacemaker.firewall_rules: '130 pacemaker tcp': proto: 'tcp' diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml index bc611b0a..1d8809ad 100644 --- a/puppet/services/pacemaker/glance-api.yaml +++ b/puppet/services/pacemaker/glance-api.yaml @@ -53,6 +53,7 @@ outputs: glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} + glance_file_pcmk_directory: '/var/lib/glance/images' glance::api::manage_service: false glance::api::enabled: false step_config: | diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index 3c5909ca..4c02f359 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -42,5 +42,20 @@ outputs: - 4369 - 5672 - 35672 + rabbitmq::delete_guest_user: false + rabbitmq::wipe_db_on_cookie_change: true + rabbitmq::port: '5672' + rabbitmq::package_source: undef + rabbitmq::repos_ensure: false + rabbitmq_environment: + RABBITMQ_NODENAME: "rabbit@%{::hostname}" + RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' + rabbitmq_kernel_variables: + inet_dist_listen_min: '35672' + inet_dist_listen_max: '35672' + rabbitmq_config_variables: + tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]' + cluster_partition_handling: 'pause_minority' + loopback_users: '[]' step_config: | include ::tripleo::profile::base::rabbitmq diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml index 72fc33a3..c131f1c3 100644 --- a/puppet/services/sahara-base.yaml +++ b/puppet/services/sahara-base.yaml @@ -47,3 +47,5 @@ outputs: - spark - storm sahara::rpc_backend: rabbit + sahara::admin_tenant_name: 'service' + sahara::keystone::auth::tenant: 'service' diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml index b799e27c..7574b7dc 100644 --- a/puppet/services/sahara-engine.yaml +++ b/puppet/services/sahara-engine.yaml @@ -30,11 +30,11 @@ outputs: - sahara_dsn: &sahara_dsn list_join: - '' - - - {get_param: [EndpointMap, MysqlVirtual, protocol]} + - - {get_param: [EndpointMap, MysqlInternal, protocol]} - '://sahara:' - {get_param: SaharaPassword} - '@' - - {get_param: [EndpointMap, MysqlVirtual, host]} + - {get_param: [EndpointMap, MysqlInternal, host]} - 
'/sahara' sahara::database_connection: *sahara_dsn sahara::db::mysql::password: {get_param: SaharaPassword} diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index 12165cc1..5d172709 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -41,6 +41,7 @@ outputs: swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} swift::proxy::authtoken::admin_password: {get_param: SwiftPassword} + swift::proxy::authtoken::admin_tenant_name: 'service' swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout} swift::proxy::workers: {get_param: SwiftWorkers} swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]} @@ -56,5 +57,30 @@ outputs: dport: - 8080 - 13808 + swift::keystone::auth::tenant: 'service' + swift::keystone::auth::configure_s3_endpoint: false + swift::keystone::auth::operator_roles: + - admin + - swiftoperator + - ResellerAdmin + swift::proxy::keystone::operator_roles: + - admin + - swiftoperator + - ResellerAdmin + swift::proxy::pipeline: + - 'catch_errors' + - 'healthcheck' + - 'proxy-logging' + - 'cache' + - 'ratelimit' + - 'bulk' + - 'tempurl' + - 'formpost' + - 'authtoken' + - 'keystone' + - 'staticweb' + - 'proxy-logging' + - 'proxy-server' + swift::proxy::account_autocreate: true step_config: | include ::tripleo::profile::base::swift::proxy diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml index d63dc87c..0c610392 100644 --- a/puppet/services/swift-storage.yaml +++ b/puppet/services/swift-storage.yaml @@ -48,5 +48,17 @@ outputs: - 6000 - 6001 - 6002 + swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' + swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' + swift::storage::all::object_pipeline: + - healthcheck + - recon + - object-server + swift::storage::all::container_pipeline: + - healthcheck + - container-server + swift::storage::all::account_pipeline: + - healthcheck + - account-server step_config: | include ::tripleo::profile::base::swift::storage -- cgit 1.2.3-korg
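
For illustration, the composable-service pattern this commit follows moves each static key out of puppet/hieradata/*.yaml and into the config_settings output of the matching puppet/services/*.yaml template, layering it over a base template with map_merge where one exists (as cinder-scheduler.yaml and nova-vncproxy.yaml do above). Below is a minimal sketch of that shape only; the service name "example-api", the base template "example-base.yaml", and the example::* hiera keys are hypothetical and not part of this commit.

  heat_template_version: 2016-04-08

  description: >
    Illustrative-only example API service configured with Puppet.

  parameters:
    EndpointMap:
      default: {}
      description: Mapping of service endpoint -> protocol:ip:port
      type: json

  resources:
    # Hypothetical base template that carries settings shared by all
    # roles of this service (messaging, database, keystone auth, ...).
    ExampleBase:
      type: ./example-base.yaml
      properties:
        EndpointMap: {get_param: EndpointMap}

  outputs:
    role_data:
      description: Role data for the example API service.
      value:
        service_name: example-api
        config_settings:
          map_merge:
            # inherit the shared settings from the base template
            - get_attr: [ExampleBase, role_data, config_settings]
            # keys that previously lived in puppet/hieradata/*.yaml become
            # static entries in this service's config_settings
            - example::api::keystone_tenant: 'service'
              example::rabbit_heartbeat_timeout_threshold: 60
        step_config: |
          include ::tripleo::profile::base::example::api

Keeping the static hiera values next to the parameter-driven ones in config_settings is what lets a role pick up exactly the hieradata of the services it composes, instead of the global controller.yaml/compute.yaml files removed above.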