Diffstat (limited to 'puppet/hieradata')
-rw-r--r--  puppet/hieradata/ceph.yaml       |   1
-rw-r--r--  puppet/hieradata/common.yaml     |  29
-rw-r--r--  puppet/hieradata/compute.yaml    |  10
-rw-r--r--  puppet/hieradata/controller.yaml | 174
-rw-r--r--  puppet/hieradata/database.yaml   |  31
-rw-r--r--  puppet/hieradata/volume.yaml     |   2
6 files changed, 187 insertions, 60 deletions
diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml
index 1e480e60..b29b91cf 100644
--- a/puppet/hieradata/ceph.yaml
+++ b/puppet/hieradata/ceph.yaml
@@ -1,4 +1,3 @@
-ceph::profile::params::osd_journal_size: 1024
 ceph::profile::params::osd_pool_default_pg_num: 32
 ceph::profile::params::osd_pool_default_pgp_num: 32
 ceph::profile::params::osd_pool_default_size: 3
diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
index b4b51abf..65cf9577 100644
--- a/puppet/hieradata/common.yaml
+++ b/puppet/hieradata/common.yaml
@@ -3,16 +3,24 @@ ssh::server::storeconfigs_enabled: false
 # ceilometer settings used by compute and controller ceilo auth settings
 ceilometer::agent::auth::auth_region: 'regionOne'
-# FIXME: Might be better to use 'service' tenant here but this requires
-# changes in the tripleo-incubator keystone role setup
-ceilometer::agent::auth::auth_tenant_name: 'admin'
+ceilometer::agent::auth::auth_tenant_name: 'service'
+
+aodh::auth::auth_region: 'regionOne'
+aodh::auth::auth_tenant_name: 'service'
+
+gnocchi::auth::auth_region: 'regionOne'
+gnocchi::auth::auth_tenant_name: 'service'
 
 nova::api::admin_tenant_name: 'service'
-nova::network::neutron::neutron_admin_tenant_name: 'service'
-nova::network::neutron::neutron_admin_username: 'neutron'
+nova::network::neutron::neutron_project_name: 'service'
+nova::network::neutron::neutron_username: 'neutron'
 nova::network::neutron::dhcp_domain: ''
 
 neutron::allow_overlapping_ips: true
+neutron::server::project_name: 'service'
+
+kernel_modules:
+  nf_conntrack: {}
 
 sysctl_settings:
   net.ipv4.tcp_keepalive_intvl:
@@ -21,6 +29,17 @@ sysctl_settings:
     value: 5
   net.ipv4.tcp_keepalive_time:
     value: 5
+  net.nf_conntrack_max:
+    value: 500000
+  net.netfilter.nf_conntrack_max:
+    value: 500000
+  # prevent neutron bridges from autoconfiguring ipv6 addresses
+  net.ipv6.conf.default.accept_ra:
+    value: 0
+  net.ipv6.conf.default.autoconf:
+    value: 0
+  net.core.netdev_max_backlog:
+    value: 10000
 
 nova::rabbit_heartbeat_timeout_threshold: 60
 neutron::rabbit_heartbeat_timeout_threshold: 60
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index fa8dcc81..2d928cbf 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -1,17 +1,17 @@
 # Hiera data here applies to all compute nodes
 
+nova::host: "%{::fqdn}"
 nova::notify_on_state_change: 'vm_and_task_state'
-
-nova::compute::enabled: true
+nova::notification_driver: messagingv2
 nova::compute::instance_usage_audit: true
 nova::compute::instance_usage_audit_period: 'hour'
-nova::compute::vnc_enabled: true
-nova::compute::libvirt::vncserver_listen: '0.0.0.0'
 nova::compute::libvirt::migration_support: true
 
 nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
 
+nova::network::neutron::neutron_auth_type: 'v3password'
+
 # Changing the default from 512MB. The current templates can not deploy
 # overclouds with swap. On an idle compute node, we see ~1024MB of RAM
 # used. 2048 is suggested to account for other possible operations for
@@ -21,4 +21,6 @@ nova::compute::reserved_host_memory: 2048
 
 ceilometer::agent::auth::auth_tenant_name: 'service'
 ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
+neutron::host: "%{::fqdn}"
+
 compute_classes: []
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 229f9a65..7db2b5de 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -1,9 +1,17 @@
 # Hiera data here applies to all controller nodes
+
 nova::api::enabled: true
-nova::conductor::enabled: true
-nova::consoleauth::enabled: true
 nova::vncproxy::enabled: true
-nova::scheduler::enabled: true
+
+# gnocchi
+gnocchi::db::sync::extra_opts: '--skip-storage'
+gnocchi::storage::swift::swift_user: 'service:gnocchi'
+gnocchi::storage::swift::swift_auth_version: 2
+gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
+gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3'
+gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616'
+gnocchi::statsd::flush_delay: 10
+gnocchi::statsd::archive_policy_name: 'low'
 
 # rabbitmq
 rabbitmq::delete_guest_user: false
@@ -20,6 +28,7 @@ rabbitmq_kernel_variables:
 rabbitmq_config_variables:
   tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
   cluster_partition_handling: 'pause_minority'
+  loopback_users: '[]'
 
 mongodb::server::replset: tripleo
 mongodb::server::journal: false
@@ -29,21 +38,45 @@ redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
 redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
 redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
 
+# keystone
+keystone::roles::admin::email: 'root@localhost'
+
 # service tenant
 glance::api::keystone_tenant: 'service'
+aodh::api::keystone_tenant: 'service'
 glance::registry::keystone_tenant: 'service'
 neutron::server::auth_tenant: 'service'
 neutron::agents::metadata::auth_tenant: 'service'
+neutron::agents::l3::router_delete_namespaces: True
 cinder::api::keystone_tenant: 'service'
 swift::proxy::authtoken::admin_tenant_name: 'service'
 ceilometer::api::keystone_tenant: 'service'
+gnocchi::api::keystone_tenant: 'service'
 heat::keystone_tenant: 'service'
+sahara::admin_tenant_name: 'service'
+aodh::keystone::auth::tenant: 'service'
+ceilometer::keystone::auth::tenant: 'service'
+cinder::keystone::auth::tenant: 'service'
+glance::keystone::auth::tenant: 'service'
+gnocchi::keystone::auth::tenant: 'service'
+heat::keystone::auth::tenant: 'service'
+neutron::keystone::auth::tenant: 'service'
+nova::keystone::auth::tenant: 'service'
+sahara::keystone::auth::tenant: 'service'
+swift::keystone::auth::tenant: 'service'
 
 # keystone
 keystone::cron::token_flush::maxdelay: 3600
 keystone::roles::admin::service_tenant: 'service'
 keystone::roles::admin::admin_tenant: 'admin'
 keystone::cron::token_flush::destination: '/dev/null'
+keystone::config::keystone_config:
+  DEFAULT/secure_proxy_ssl_header:
+    value: 'HTTP_X_FORWARDED_PROTO'
+  ec2/driver:
+    value: 'keystone.contrib.ec2.backends.sql.Ec2'
+keystone::service_name: 'httpd'
+keystone::wsgi::apache::ssl: false
 
 #swift
 swift::proxy::pipeline:
@@ -60,26 +93,29 @@ swift::proxy::pipeline:
   - 'proxy-server'
 
 swift::proxy::account_autocreate: true
+swift::keystone::auth::configure_s3_endpoint: false
+swift::keystone::auth::operator_roles:
+  - admin
+  - swiftoperator
 
 # glance
 glance::api::pipeline: 'keystone'
 glance::api::show_image_direct_url: true
 glance::registry::pipeline: 'keystone'
 glance::backend::swift::swift_store_create_container_on_put: true
-glance::backend::rbd::rbd_store_user: 'openstack'
 glance_file_pcmk_directory: '/var/lib/glance/images'
 
 # neutron
 neutron::server::sync_db: true
-neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
 
 # nova
 nova::notify_on_state_change: 'vm_and_task_state'
 nova::api::default_floating_pool: 'public'
-nova::api::osapi_v3: true
-nova::scheduler::filter::ram_allocation_ratio: '1.0'
+nova::api::sync_db_api: true
+nova::api::enable_proxy_headers_parsing: true
 nova::cron::archive_deleted_rows::hour: '*/12'
 nova::cron::archive_deleted_rows::destination: '/dev/null'
+nova::notification_driver: messaging
 
 # ceilometer
 ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
@@ -87,17 +123,31 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
 # cinder
 cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
 cinder::cron::db_purge::destination: '/dev/null'
+cinder::host: hostgroup
+
+# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable
+# service.
+cinder::api::enable_proxy_headers_parsing: true
 
 # heat
 heat::engine::configure_delegated_roles: false
 heat::engine::trusts_delegated_roles: []
 heat::instance_user: ''
+heat::cron::purge_deleted::age: 30
+heat::cron::purge_deleted::age_type: 'days'
+heat::cron::purge_deleted::maxdelay: 3600
+heat::cron::purge_deleted::destination: '/dev/null'
+heat::keystone::domain::domain_name: 'heat_stack'
+heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
+heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
+heat::auth_plugin: 'password'
 
 # pacemaker
 pacemaker::corosync::cluster_name: 'tripleo_cluster'
 pacemaker::corosync::manage_fw: false
 pacemaker::resource_defaults::defaults:
   resource-stickiness: { value: INFINITY }
+corosync_token_timeout: 10000
 
 # horizon
 horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
@@ -105,41 +155,44 @@ horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
 horizon::vhost_extra_params:
   add_listen: false
   priority: 10
+  access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
 
 # mysql
 mysql::server::manage_config_file: true
 
-tripleo::loadbalancer::keystone_admin: true
-tripleo::loadbalancer::keystone_public: true
-tripleo::loadbalancer::neutron: true
-tripleo::loadbalancer::cinder: true
-tripleo::loadbalancer::glance_api: true
-tripleo::loadbalancer::glance_registry: true
-tripleo::loadbalancer::nova_ec2: true
-tripleo::loadbalancer::nova_osapi: true
-tripleo::loadbalancer::nova_metadata: true
-tripleo::loadbalancer::nova_novncproxy: true
-tripleo::loadbalancer::mysql: true
-tripleo::loadbalancer::redis: true
-tripleo::loadbalancer::swift_proxy_server: true
-tripleo::loadbalancer::ceilometer: true
-tripleo::loadbalancer::heat_api: true
-tripleo::loadbalancer::heat_cloudwatch: true
-tripleo::loadbalancer::heat_cfn: true
-tripleo::loadbalancer::horizon: true
+tripleo::haproxy::keystone_admin: true
+tripleo::haproxy::keystone_public: true
+tripleo::haproxy::neutron: true
+tripleo::haproxy::cinder: true
+tripleo::haproxy::glance_api: true
+tripleo::haproxy::glance_registry: true
+tripleo::haproxy::nova_osapi: true
+tripleo::haproxy::nova_metadata: true
+tripleo::haproxy::nova_novncproxy: true
+tripleo::haproxy::mysql: true
+tripleo::haproxy::redis: true
+tripleo::haproxy::sahara: true
+tripleo::haproxy::swift_proxy_server: true
+tripleo::haproxy::ceilometer: true
+tripleo::haproxy::aodh: true
+tripleo::haproxy::gnocchi: true
+tripleo::haproxy::heat_api: true
+tripleo::haproxy::heat_cloudwatch: true
+tripleo::haproxy::heat_cfn: true
+tripleo::haproxy::horizon: true
 
 controller_classes: []
 
 # firewall
 tripleo::firewall::firewall_rules:
   '101 mongodb_config':
-    port: 27019
+    dport: 27019
   '102 mongodb_sharding':
-    port: 27018
+    dport: 27018
   '103 mongod':
-    port: 27017
+    dport: 27017
   '104 mysql galera':
-    port:
+    dport:
       - 873
       - 3306
       - 4444
@@ -147,37 +200,37 @@ tripleo::firewall::firewall_rules:
       - 4568
       - 9200
   '105 ntp':
-    port: 123
+    dport: 123
     proto: udp
   '106 vrrp':
     proto: vrrp
   '107 haproxy stats':
-    port: 1993
+    dport: 1993
   '108 redis':
-    port:
+    dport:
       - 6379
       - 26379
   '109 rabbitmq':
-    port:
+    dport:
       - 5672
      - 35672
   '110 ceph':
-    port:
+    dport:
       - 6789
      - '6800-6810'
   '111 keystone':
-    port:
+    dport:
       - 5000
       - 13000
       - 35357
       - 13357
   '112 glance':
-    port:
+    dport:
       - 9292
       - 9191
       - 13292
   '113 nova':
-    port:
+    dport:
       - 6080
       - 13080
       - 8773
@@ -186,43 +239,43 @@
       - 13774
       - 8775
   '114 neutron server':
-    port:
+    dport:
       - 9696
       - 13696
   '115 neutron dhcp input':
     proto: 'udp'
-    port: 67
+    dport: 67
   '116 neutron dhcp output':
     proto: 'udp'
     chain: 'OUTPUT'
-    port: 68
+    dport: 68
   '118 neutron vxlan networks':
     proto: 'udp'
-    port: 4789
+    dport: 4789
   '119 cinder':
-    port:
+    dport:
       - 8776
       - 13776
   '120 iscsi initiator':
-    port: 3260
+    dport: 3260
   '121 memcached':
-    port: 11211
+    dport: 11211
   '122 swift proxy':
-    port:
+    dport:
       - 8080
       - 13808
   '123 swift storage':
-    port:
+    dport:
       - 873
       - 6000
       - 6001
       - 6002
   '124 ceilometer':
-    port:
+    dport:
       - 8777
       - 13777
   '125 heat':
-    port:
+    dport:
       - 8000
       - 13800
       - 8003
@@ -230,9 +283,30 @@
       - 8004
       - 13004
   '126 horizon':
-    port:
+    dport:
       - 80
       - 443
   '127 snmp':
-    port: 161
+    dport: 161
+    proto: 'udp'
+  '128 aodh':
+    dport:
+      - 8042
+      - 13042
+  '129 gnocchi-api':
+    dport:
+      - 8041
+      - 13041
+  '130 pacemaker tcp':
+    proto: 'tcp'
+    dport:
+      - 2224
+      - 3121
+      - 21064
+  '131 pacemaker udp':
     proto: 'udp'
+    dport: 5405
+  '132 sahara':
+    dport:
+      - 8386
+      - 13386
diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml
index 7e925d90..f2c95de6 100644
--- a/puppet/hieradata/database.yaml
+++ b/puppet/hieradata/database.yaml
@@ -6,6 +6,13 @@ nova::db::mysql::allowed_hosts:
   - '%'
   - "%{hiera('mysql_bind_host')}"
 
+nova::db::mysql_api::user: nova_api
+nova::db::mysql_api::host: "%{hiera('mysql_virtual_ip')}"
+nova::db::mysql_api::dbname: nova_api
+nova::db::mysql_api::allowed_hosts:
+  - '%'
+  - "%{hiera('mysql_bind_host')}"
+
 # Glance
 glance::db::mysql::user: glance
 glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
@@ -53,3 +60,27 @@ ceilometer::db::mysql::dbname: ceilometer
 ceilometer::db::mysql::allowed_hosts:
   - '%'
   - "%{hiera('mysql_bind_host')}"
+
+# Gnocchi
+gnocchi::db::mysql::user: gnocchi
+gnocchi::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
+gnocchi::db::mysql::dbname: gnocchi
+gnocchi::db::mysql::allowed_hosts:
+  - '%'
+  - "%{hiera('mysql_bind_host')}"
+
+# Aodh
+aodh::db::mysql::user: aodh
+aodh::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
+aodh::db::mysql::dbname: aodh
+aodh::db::mysql::allowed_hosts:
+  - '%'
+  - "%{hiera('mysql_bind_host')}"
+
+
+sahara::db::mysql::user: sahara
+sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
+sahara::db::mysql::dbname: sahara
+sahara::db::mysql::allowed_hosts:
+  - '%'
+  - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml
index f4cd78a9..8640c0a7 100644
--- a/puppet/hieradata/volume.yaml
+++ b/puppet/hieradata/volume.yaml
@@ -9,4 +9,6 @@ cinder::config::cinder_config:
   DEFAULT/swift_catalog_info:
     value: 'object-store:swift:internalURL'
 
+cinder_user_enabled_backends: []
+
 volume_classes: []
\ No newline at end of file
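
Notes on the changes above (illustrative sketches, not part of the commit):

The common.yaml hunk adds a kernel_modules entry for nf_conntrack alongside new net.nf_conntrack_max / net.netfilter.nf_conntrack_max sysctl values; those sysctls only exist once the conntrack module is loaded. As a rough sketch of how a hiera hash like sysctl_settings can be applied, the Puppet snippet below assumes a sysctl::value defined type (for example from the duritong/puppet-sysctl module) and uses a hypothetical class name; the TripleO manifests use their own equivalent wrapper, so treat this as illustrative only.

  # Hypothetical wrapper class, shown only to illustrate how the
  # 'sysctl_settings' hash from common.yaml could be consumed.
  class illustrative_sysctl_settings {
    # Each key is a kernel parameter name; each value is a hash such as
    # {'value' => 500000}, which maps onto the sysctl::value 'value' parameter.
    $settings = hiera_hash('sysctl_settings', {})
    create_resources('sysctl::value', $settings)
  }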
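The controller.yaml firewall hunks rename every rule attribute from port to dport, which matches the destination-port attribute of puppetlabs-firewall resources. A minimal sketch of how a hash shaped like tripleo::firewall::firewall_rules can be expanded into such resources is shown below; the real puppet-tripleo module wraps this in its own defined type, so the defaults chosen here are assumptions for illustration.

  # Illustrative only: expand the hiera hash into puppetlabs-firewall resources.
  # Attributes such as dport, proto and chain pass straight through to the
  # 'firewall' type, which is why the 'port' -> 'dport' rename matters here.
  $firewall_rules = hiera_hash('tripleo::firewall::firewall_rules', {})
  create_resources('firewall', $firewall_rules, {
    'ensure' => 'present',
    'proto'  => 'tcp',
    'action' => 'accept',
  })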