path: root/spec/fixtures/hieradata/default.yaml
---
my_hash:
  network: '127.0.0.1'
not_hash: string
# aodh profile required hieradata
aodh::rabbit_password: 'password'
aodh_redis_password: 'password'
redis_vip: '127.0.0.1'
redis::bind: '10.0.0.1'
aodh::auth::auth_password: 'password'
aodh::db::mysql::password: 'password'
aodh::keystone::authtoken::password: 'password'
# barbican profile required hieradata
barbican::api::rabbit_password: 'password'
barbican::db::mysql::password: 'password'
barbican::keystone::authtoken::password: 'password'
# ceilometer related items
ceilometer::rabbit_password: 'password'
ceilometer::keystone::authtoken::password: 'password'
# ceph related items
ceph::profile::params::mon_key: 'password'
# NOTE(gfidente): we want to use the Keystone v3 API for RGW, so the following
# are needed to satisfy the `if` condition at:
# https://github.com/openstack/puppet-ceph/blob/master/manifests/rgw/keystone.pp#L111
ceph::profile::params::rgw_keystone_admin_domain: 'keystone_domain'
ceph::profile::params::rgw_keystone_admin_project: 'keystone_project'
ceph::profile::params::rgw_keystone_admin_user: 'keystone_admin_user'
ceph::profile::params::rgw_keystone_admin_password: 'keystone_admin_password'
# cinder related items
cinder::rabbit_password: 'password'
cinder::keystone::authtoken::password: 'password'
# gnocchi related items
gnocchi::keystone::authtoken::password: 'password'
gnocchi::storage::ceph::ceph_username: 'gnocchi'
gnocchi::storage::ceph::ceph_secret: 'password'
# haproxy related items
mysql_enabled: true
controller_node_ips: '10.1.0.1,10.1.0.2'
# nova related items
nova::rabbit_password: 'password'
nova::keystone::authtoken::password: 'password'
nova::network::neutron::neutron_password: 'password'
# memcache related items
memcached_node_ips_v6:
  - '::1'
memcached_node_ips:
  - '127.0.0.1'
# octavia related items
octavia::rabbit_password: 'password'
# horizon related items
horizon::secret_key: 'secrete'
# neutron related items
neutron::rabbit_password: 'password'
---
_param:
  cluster_vip_address: ${_param:infra_kvm_address}
  cluster_node01_address: ${_param:infra_kvm_node01_address}
  cluster_node02_address: ${_param:infra_kvm_node02_address}
  cluster_node03_address: ${_param:infra_kvm_node03_address}
  keepalived_vip_interface: br-ctl
  keepalived_vip_virtual_router_id: 69
  deploy_nic: enp6s0
salt:
  control:
    size: # RAM 4096,8192,16384,32768,65536
      # Default production sizing
      openstack.control:
        cpu: 4
        ram: 12288
        disk_profile: small
        net_profile: default
      openstack.database:
        cpu: 4
        ram: 6144
        disk_profile: large
        net_profile: default
      openstack.message_queue:
        cpu: 4
        ram: 2048
        disk_profile: small
        net_profile: default
      openstack.telemetry:
        cpu: 2
        ram: 3072
        disk_profile: xxlarge
        net_profile: default
      openstack.proxy:
        cpu: 2
        ram: 2048
        disk_profile: small
        net_profile: default
      # stacklight.log:
      #   cpu: 2
      #   ram: 4096
      #   disk_profile: xxlarge
      #   net_profile: default
      # stacklight.server:
      #   cpu: 2
      #   ram: 4096
      #   disk_profile: small
      #   net_profile: default
      # stacklight.telemetry:
      #   cpu: 2
      #   ram: 4096
      #   disk_profile: xxlarge
      #   net_profile: default
    cluster:
      internal:
        node:
          mdb01:
            image: ${_param:salt_control_xenial_image}
          mdb02:
            image: ${_param:salt_control_xenial_image}
          mdb03:
            image: ${_param:salt_control_xenial_image}
          ctl01:
            image: ${_param:salt_control_xenial_image}
          ctl02:
            image: ${_param:salt_control_xenial_image}
          ctl03:
            image: ${_param:salt_control_xenial_image}
          dbs01:
            image: ${_param:salt_control_xenial_image}
          dbs02:
            image: ${_param:salt_control_xenial_image}
          dbs03:
            image: ${_param:salt_control_xenial_image}
          msg01:
            image: ${_param:salt_control_xenial_image}
          msg02:
            image: ${_param:salt_control_xenial_image}
          msg03:
            image: ${_param:salt_control_xenial_image}
          prx01:
            image: ${_param:salt_control_xenial_image}
          prx02:
            image: ${_param:salt_control_xenial_image}
virt:
  nic:
    default:
      eth1:
        bridge: br-mgmt
        model: virtio
      eth0:
        bridge: br-ctl
        model: virtio
glusterfs:
  server:
    volumes:
      nova_instances:
        storage: /srv/glusterfs/nova_instances
        replica: 3
        bricks:
          - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
          - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
          - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
        options:
          cluster.readdir-optimize: 'On'
          nfs.disable: 'On'
          network.remote-dio: 'On'
          diagnostics.client-log-level: WARNING
          diagnostics.brick-log-level: WARNING
linux:
  network:
    interface:
      eth3:
        enabled: true
        type: eth
        proto: manual
        address: 0.0.0.0
        netmask: 255.255.255.0
        name: ${_param:deploy_nic}
        noifupdown: true
      br-mgmt:
        enabled: true
        proto: dhcp
        type: bridge
        name_servers:
          - 8.8.8.8
          - 8.8.4.4
        use_interfaces:
          - ${_param:deploy_nic}
        noifupdown: true
      vlan300:
        enabled: true
        proto: manual
        type: vlan
        name: ${_param:deploy_nic}.300
        use_interfaces:
          - ${_param:deploy_nic}
      br-ctl:
        enabled: true
        type: bridge
        proto: static
        address: ${_param:single_address}
        netmask: 255.255.255.0
        use_interfaces:
          - ${_param:deploy_nic}.300