aboutsummaryrefslogtreecommitdiffstats
path: root/scenarios/os-odl-sfc/role/os-odl-sfc/files
diff options
context:
space:
mode:
Diffstat (limited to 'scenarios/os-odl-sfc/role/os-odl-sfc/files')
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements.yml57
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_variables.yml)0
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_variables.yml)0
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_variables.yml)2
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all.yml487
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml263
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml271
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml175
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml177
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services.yml238
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml63
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml25
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml36
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml36
-rw-r--r--scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml163
15 files changed, 1965 insertions, 28 deletions
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements.yml
index 61848f82..66c37c48 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements.yml
@@ -1,8 +1,7 @@
----
- name: ansible-hardening
scm: git
src: https://git.openstack.org/openstack/ansible-hardening
- version: f9299c59730b0c5332e80ec88c0296ccc5c207f4
+ version: 957c0bc39680118862a8aab66a189041827590b5
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
@@ -10,47 +9,47 @@
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: 2bc7e27c2b36e8cd0265053b446ee5f5cc4f4f1d
+ version: 767b923ba94df9ce067d7d782c1ea40bf423d764
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 1d3cdcd33c75a668ac3be046ac53fe1842780058
+ version: abf0fe82fdfec45d3680fbf4b13c9ba26d685e0f
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: c6d36689655586971fbd4be8a6cba2f54b5f44b3
+ version: 21aed2dde52c29cb550707658bb9a2260d076f61
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 029abd28ded1e52300cfd3093060e1fc5e9574fd
+ version: 3cb26707ace29be5fc1be631faf26e060cfb6ea5
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: 4874ef7e218e0bc29f5ccca3c16f704a85f0c0b0
+ version: 749d22fc362f2453088bccc5e4975c285643d410
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: 3.0.1
+ version: 3.0.2
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 05ed7dfa57b7ead13f63edb72918241e8ef1916e
+ version: 258dad41ced7f9511d4e388470a759f46d9509fa
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: d22208afede2fdf1b070f3236016ea3f1e3fa49c
+ version: fc561e5acef6d6aef40fff9c194252824981890e
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: 5363432f58334823f7e6c6c88617bb908ca48359
+ version: 53e845e54ebc3cabf71e6612eaef199325b1721b
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: 8a9dd8cf281f5d1199fc3cbaa635dde91b1037a0
+ version: b3d55e49ff80eba6616b19ab6d99a2fc85aad85d
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: 4ed06e106a83160d76b56edda09c7ced1caf104a
+ version: dfb6fa3d84455f2e17b58db20e4bcf67efc014c9
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
@@ -70,7 +69,7 @@
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 3e8a5bb467fbef16cc2ea3ac5217b4dd0a0e0a8c
+ version: efda0771083f23c7869e6a52585e8382f570a800
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
@@ -82,11 +81,11 @@
- name: os_heat
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 300f15ee65d2e157a0304caa10fcac279fbd5f18
+ version: 87582ad58070ffb964f982749f71253e0f9148c4
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: e2e28afd60f92238be9d42abaa08260ade5c60eb
+ version: e4ff13c56a38707e08cfc00b6eb377b2469fb750
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
@@ -102,11 +101,11 @@
- name: os_neutron
scm: git
src: https://github.com/mardim91/openstack-ansible-os_neutron
- version: ocata-backport-automated-twentyfourth-august
+ version: ocata-backport-automated-twentyninth-september
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 991d3295fc2f8ecb3cfd46f680ceb1816c763291
+ version: e6f2295b369b579633f89938fd48c7075ff789af
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
@@ -118,27 +117,27 @@
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: 5233a587f242ae5ccab7d7563a33a9f96556a9d6
+ version: 0946e99dbdcde84a21d87a3876139bc31b6afbda
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: de52855d1215a96fde4c060725502fd172c3d949
+ version: 18dc9333cbbad6c4a22d7f9ddd4bb58fbf377a9e
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: 9c26a443a977e73524d9e4c30e018596ce5e48bd
+ version: b4a5996c0e163bf39e399045d372e5877339a72b
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: b7247285e563e24ca31be1decb6f5b3bf88de69d
+ version: f3389273c4795b55ccc800ef1fd977fba1ba0783
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: ad719d992b366be8fad11493edcb92e644d92ecc
+ version: a40a69f81832484e9c9deb7a13f63f0d0283611d
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 96739ea44eeb8153676433311fc1824e2a763c72
+ version: 2c4a2b3ebd9ba4994b86250cf7b789ed8b09a8f6
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
@@ -174,7 +173,7 @@
- name: ceph.ceph-common
scm: git
src: https://github.com/ceph/ansible-ceph-common
- version: v2.2.4
+ version: v2.2.11
- name: ceph.ceph-docker-common
scm: git
src: https://github.com/ceph/ansible-ceph-docker-common
@@ -182,12 +181,16 @@
- name: ceph-mon
scm: git
src: https://github.com/ceph/ansible-ceph-mon
- version: v2.2.4
+ version: v2.2.11
- name: ceph-osd
scm: git
src: https://github.com/ceph/ansible-ceph-osd
- version: v2.2.4
+ version: v2.2.11
- name: opendaylight
scm: git
src: https://git.opendaylight.org/gerrit/p/integration/packaging/ansible-opendaylight.git
version: cf095a4f71ff054f305f14ffdef7cdd7233e3d71
+- name: os_tacker
+ scm: git
+ src: git://git.openstack.org/openstack/openstack-ansible-os_tacker
+ version: 58855a0e63179ee5603035a8f607a39a4b99c2a6
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_variables.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables.yml
index 1f2370eb..1f2370eb 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_variables.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables.yml
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_variables.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables.yml
index 3111a0f0..3111a0f0 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_variables.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables.yml
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_variables.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables.yml
index e37d6906..3111a0f0 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_variables.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables.yml
@@ -30,7 +30,7 @@ gnocchi_db_sync_options: ""
ovs_nsh_support: true
# Ensure the openvswitch kernel module is loaded
-#openstack_host_specific_kernel_modules:
+# openstack_host_specific_kernel_modules:
# - name: "openvswitch"
# pattern: "CONFIG_OPENVSWITCH"
# group: "network_hosts"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all.yml
new file mode 100644
index 00000000..2af1ba0a
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all.yml
@@ -0,0 +1,487 @@
+
+---
+# Copyright 2016, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## OpenStack Source Code Release
+openstack_release: 15.1.7
+
+## Verbosity Options
+debug: False
+
+## SSH connection wait time
+ssh_delay: 5
+
+# Set the package install state for distribution packages
+# Options are 'present' and 'latest'
+package_state: "latest"
+
+# Set "/var/log" to be a bind mount to the physical host.
+default_bind_mount_logs: true
+
+# Set distro variable
+os_distro_version: "{{ ansible_distribution | lower }}-{{ ansible_distribution_version.split('.')[:2] | join('.') }}-{{ ansible_architecture | lower }}"
+
+# Ensure that the package state matches the global setting
+ceph_client_package_state: "{{ package_state }}"
+galera_client_package_state: "{{ package_state }}"
+pip_install_package_state: "{{ package_state }}"
+rsyslog_client_package_state: "{{ package_state }}"
+
+## OpenStack source options
+openstack_repo_url: "http://{{ internal_lb_vip_address }}:{{ repo_server_port }}"
+openstack_repo_git_url: "git://{{ internal_lb_vip_address }}"
+
+# URL for the frozen internal openstack repo.
+repo_server_port: 8181
+repo_pkg_cache_enabled: true
+repo_pkg_cache_port: 3142
+repo_pkg_cache_url: "http://{{ internal_lb_vip_address }}:{{ repo_pkg_cache_port }}"
+repo_release_path: "{{ openstack_repo_url }}/os-releases/{{ openstack_release }}/{{ os_distro_version }}"
+
+# These are pinned to ensure exactly the same behaviour forever!
+# These pins are updated through the sources-branch-updater script
+pip_packages:
+ - pip==9.0.1
+ - setuptools==33.1.1
+ - wheel==0.29.0
+
+pip_links:
+ - { name: "openstack_release", link: "{{ repo_release_path }}/" }
+pip_lock_to_internal_repo: "{{ (pip_links | length) >= 1 }}"
+
+# The upper constraints to apply to all pip installations
+pip_install_upper_constraints: "{{ repo_release_path }}/requirements_absolute_requirements.txt"
+
+# The URL to retrieve the get-pip.py installation script
+pip_upstream_url: "{{ (pip_offline_install | bool) | ternary('https://bootstrap.pypa.io/get-pip.py', repo_release_path ~ '/get-pip.py') }}"
+
+## kernel modules for specific group hosts
+# :param name: name of the kernel module
+# :param pattern: pattern to search for in /boot/config-$kernel_version to check how module is configured inside kernel
+# :param group: group of hosts where the module will be loaded
+openstack_host_specific_kernel_modules:
+ - { name: "ebtables", pattern: "CONFIG_BRIDGE_NF_EBTABLES", group: "network_hosts" }
+
+## DNS resolution (resolvconf) options
+#Group containing resolvers to configure
+resolvconf_resolver_group: unbound
+
+## Memcached options
+memcached_port: 11211
+memcached_servers: "{% for host in groups['memcached'] %}{{ hostvars[host]['ansible_host'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"
+
+## Galera
+galera_address: "{{ internal_lb_vip_address }}"
+galera_root_user: "root"
+
+## RabbitMQ
+rabbitmq_host_group: "rabbitmq_all"
+rabbitmq_port: "{{ (rabbitmq_use_ssl | bool) | ternary(5671, 5672) }}"
+
+rabbitmq_use_ssl: True
+rabbitmq_servers: "{% for host in groups[rabbitmq_host_group] %}{{ hostvars[host]['ansible_host'] }}{% if not loop.last %},{% endif %}{% endfor %}"
+
+## Enable external SSL handling for general OpenStack services
+openstack_external_ssl: true
+
+## OpenStack global Endpoint Protos
+openstack_service_publicuri_proto: https
+#openstack_service_adminuri_proto: http
+#openstack_service_internaluri_proto: http
+
+## SSL
+# These do not need to be configured unless you're creating certificates for
+# services running behind Apache (currently, Horizon and Keystone).
+ssl_protocol: "ALL -SSLv2 -SSLv3"
+# Cipher suite string from https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+ssl_cipher_suite: "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS"
+
+## Region Name
+service_region: RegionOne
+
+## OpenStack Domain
+openstack_domain: openstack.local
+lxc_container_domain: "{{ openstack_domain }}"
+
+## DHCP Domain Name
+dhcp_domain: openstacklocal
+
+## LDAP enabled toggle
+service_ldap_backend_enabled: "{{ keystone_ldap is defined and keystone_ldap.Default is defined }}"
+
+## Base venv configuration
+venv_tag: "{{ openstack_release }}"
+venv_base_download_url: "{{ openstack_repo_url }}/venvs/{{ openstack_release }}/{{ os_distro_version }}"
+
+## Aodh
+aodh_service_region: "{{ service_region }}"
+aodh_galera_user: aodh
+aodh_galera_database: aodh
+aodh_galera_address: "{{ internal_lb_vip_address }}"
+aodh_connection_string: "mysql+pymysql://{{ aodh_galera_user }}:{{ aodh_container_db_password }}@{{ aodh_galera_address }}/{{ aodh_galera_database }}?charset=utf8"
+aodh_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+
+## Ceilometer
+ceilometer_service_user_name: ceilometer
+ceilometer_service_tenant_name: service
+
+# These are here rather than in ceilometer_all because
+# both the os_ceilometer and os_swift roles require them
+ceilometer_rabbitmq_userid: ceilometer
+ceilometer_rabbitmq_vhost: /ceilometer
+ceilometer_rabbitmq_port: "{{ rabbitmq_port }}"
+ceilometer_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+ceilometer_rabbitmq_servers: "{{ rabbitmq_servers }}"
+ceilometer_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+
+## Cinder
+cinder_service_region: "{{ service_region }}"
+cinder_service_port: 8776
+# If there are Swift hosts in the environment, then enable cinder backups to it
+cinder_service_backup_program_enabled: "{{ groups['swift_all'] is defined and groups['swift_all'] | length > 0 }}"
+cinder_ceph_client: cinder
+
+# These are here rather than in cinder_all because
+# both the os_ceilometer and os_cinder roles require them
+
+# RPC
+cinder_rabbitmq_userid: cinder
+cinder_rabbitmq_vhost: /cinder
+cinder_rabbitmq_port: "{{ rabbitmq_port }}"
+cinder_rabbitmq_servers: "{{ rabbitmq_servers }}"
+cinder_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+cinder_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+cinder_rabbitmq_telemetry_userid: "{{ cinder_rabbitmq_userid }}"
+cinder_rabbitmq_telemetry_password: "{{ cinder_rabbitmq_password }}"
+cinder_rabbitmq_telemetry_vhost: "{{ cinder_rabbitmq_vhost }}"
+cinder_rabbitmq_telemetry_port: "{{ cinder_rabbitmq_port }}"
+cinder_rabbitmq_telemetry_servers: "{{ cinder_rabbitmq_servers }}"
+cinder_rabbitmq_telemetry_use_ssl: "{{ cinder_rabbitmq_use_ssl }}"
+cinder_rabbitmq_telemetry_host_group: "{{ cinder_rabbitmq_host_group }}"
+
+# If there are any Ceilometer hosts in the environment, then enable its usage
+cinder_ceilometer_enabled: "{{ (groups['cinder_all'] is defined) and (groups['cinder_all'] | length > 0) and (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"
+
+## Glance
+glance_service_port: 9292
+glance_service_proto: http
+glance_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(glance_service_proto) }}"
+glance_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(glance_service_proto) }}"
+glance_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(glance_service_proto) }}"
+glance_service_publicuri: "{{ glance_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ glance_service_port }}"
+glance_service_publicurl: "{{ glance_service_publicuri }}"
+glance_service_internaluri: "{{ glance_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ glance_service_port }}"
+glance_service_internalurl: "{{ glance_service_internaluri }}"
+glance_service_adminuri: "{{ glance_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ glance_service_port }}"
+glance_service_adminurl: "{{ glance_service_adminuri }}"
+glance_api_servers: "{{ glance_service_internaluri }}"
+glance_service_user_name: glance
+
+# These are here rather than in glance_all because
+# both the os_ceilometer and os_glance roles require them
+
+## Gnocchi
+# Used in both Gnocchi and Swift roles.
+gnocchi_service_project_name: "{{ (gnocchi_storage_driver is defined and gnocchi_storage_driver == 'swift') | ternary('gnocchi_swift', 'service') }}"
+
+# RPC
+glance_rabbitmq_userid: glance
+glance_rabbitmq_vhost: /glance
+glance_rabbitmq_port: "{{ rabbitmq_port }}"
+glance_rabbitmq_servers: "{{ rabbitmq_servers }}"
+glance_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+glance_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+glance_rabbitmq_telemetry_userid: "{{ glance_rabbitmq_userid }}"
+glance_rabbitmq_telemetry_password: "{{ glance_rabbitmq_password }}"
+glance_rabbitmq_telemetry_vhost: "{{ glance_rabbitmq_vhost }}"
+glance_rabbitmq_telemetry_port: "{{ glance_rabbitmq_port }}"
+glance_rabbitmq_telemetry_servers: "{{ glance_rabbitmq_servers }}"
+glance_rabbitmq_telemetry_use_ssl: "{{ glance_rabbitmq_use_ssl }}"
+glance_rabbitmq_telemetry_host_group: "{{ glance_rabbitmq_host_group }}"
+
+# If there are any Ceilometer hosts in the environment, then enable its usage
+glance_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"
+
+## Heat
+
+# These are here rather than in heat_all because
+# both the os_ceilometer and os_heat roles require them
+
+# RPC
+heat_rabbitmq_userid: heat
+heat_rabbitmq_vhost: /heat
+heat_rabbitmq_port: "{{ rabbitmq_port }}"
+heat_rabbitmq_servers: "{{ rabbitmq_servers }}"
+heat_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+heat_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+heat_rabbitmq_telemetry_userid: "{{ heat_rabbitmq_userid }}"
+heat_rabbitmq_telemetry_password: "{{ heat_rabbitmq_password }}"
+heat_rabbitmq_telemetry_vhost: "{{ heat_rabbitmq_vhost }}"
+heat_rabbitmq_telemetry_port: "{{ heat_rabbitmq_port }}"
+heat_rabbitmq_telemetry_servers: "{{ heat_rabbitmq_servers }}"
+heat_rabbitmq_telemetry_use_ssl: "{{ heat_rabbitmq_use_ssl }}"
+heat_rabbitmq_telemetry_host_group: "{{ heat_rabbitmq_host_group }}"
+
+# If there are any Ceilometer hosts in the environment, then enable its usage
+heat_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"
+
+## Ironic
+ironic_keystone_auth_plugin: password
+ironic_rabbitmq_userid: ironic
+ironic_rabbitmq_vhost: /ironic
+ironic_rabbitmq_port: "{{ rabbitmq_port }}"
+ironic_rabbitmq_servers: "{{ rabbitmq_servers }}"
+ironic_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+ironic_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+ironic_service_name: ironic
+ironic_service_user_name: ironic
+ironic_service_proto: http
+ironic_service_port: 6385
+ironic_service_project_name: service
+ironic_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(ironic_service_proto) }}"
+ironic_service_adminurl: "{{ ironic_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ ironic_service_port }}"
+
+## Keystone
+keystone_admin_user_name: admin
+keystone_admin_tenant_name: admin
+keystone_admin_port: 35357
+keystone_service_port: 5000
+keystone_service_proto: http
+keystone_service_region: "{{ service_region }}"
+
+# These are here rather than in keystone_all because
+# both the os_ceilometer and os_keystone roles require them
+
+# RPC
+keystone_rabbitmq_userid: keystone
+keystone_rabbitmq_vhost: /keystone
+keystone_rabbitmq_port: "{{ rabbitmq_port }}"
+keystone_rabbitmq_servers: "{{ rabbitmq_servers }}"
+keystone_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+keystone_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+keystone_rabbitmq_telemetry_userid: "{{ keystone_rabbitmq_userid }}"
+keystone_rabbitmq_telemetry_password: "{{ keystone_rabbitmq_password }}"
+keystone_rabbitmq_telemetry_vhost: "{{ keystone_rabbitmq_vhost }}"
+keystone_rabbitmq_telemetry_port: "{{ keystone_rabbitmq_port }}"
+keystone_rabbitmq_telemetry_servers: "{{ keystone_rabbitmq_servers }}"
+keystone_rabbitmq_telemetry_use_ssl: "{{ keystone_rabbitmq_use_ssl }}"
+keystone_rabbitmq_telemetry_host_group: "{{ keystone_rabbitmq_host_group }}"
+
+# If there are any Ceilometer hosts in the environment, then enable its usage
+keystone_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"
+
+keystone_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(keystone_service_proto) }}"
+keystone_service_adminuri_insecure: "{% if keystone_service_adminuri_proto == 'https' and (keystone_user_ssl_cert is not defined or haproxy_user_ssl_cert is not defined) | bool %}true{% else %}false{% endif %}"
+keystone_service_adminuri: "{{ keystone_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_admin_port }}"
+keystone_service_adminurl: "{{ keystone_service_adminuri }}/v3"
+
+keystone_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(keystone_service_proto) }}"
+keystone_service_internaluri_insecure: "{% if keystone_service_internaluri_proto == 'https' and (keystone_user_ssl_cert is not defined or haproxy_user_ssl_cert is not defined) | bool %}true{% else %}false{% endif %}"
+keystone_service_internaluri: "{{ keystone_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_service_port }}"
+keystone_service_internalurl: "{{ keystone_service_internaluri }}/v3"
+
+## Neutron
+neutron_service_port: 9696
+neutron_service_proto: http
+neutron_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(neutron_service_proto) }}"
+neutron_service_adminuri: "{{ neutron_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ neutron_service_port }}"
+neutron_service_adminurl: "{{ neutron_service_adminuri }}"
+neutron_service_user_name: neutron
+neutron_service_project_name: service
+neutron_service_region: "{{ service_region }}"
+
+# These are here rather than in neutron_all because
+# both the os_ceilometer and os_neutron roles require them
+
+# RPC
+neutron_rabbitmq_userid: neutron
+neutron_rabbitmq_vhost: /neutron
+neutron_rabbitmq_port: "{{ rabbitmq_port }}"
+neutron_rabbitmq_servers: "{{ rabbitmq_servers }}"
+neutron_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+neutron_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+neutron_rabbitmq_telemetry_userid: "{{ neutron_rabbitmq_userid }}"
+neutron_rabbitmq_telemetry_password: "{{ neutron_rabbitmq_password }}"
+neutron_rabbitmq_telemetry_vhost: "{{ neutron_rabbitmq_vhost }}"
+neutron_rabbitmq_telemetry_port: "{{ neutron_rabbitmq_port }}"
+neutron_rabbitmq_telemetry_servers: "{{ neutron_rabbitmq_servers }}"
+neutron_rabbitmq_telemetry_use_ssl: "{{ neutron_rabbitmq_use_ssl }}"
+neutron_rabbitmq_telemetry_host_group: "{{ neutron_rabbitmq_host_group }}"
+
+# If there are any Designate hosts in the environment, then enable its usage
+neutron_designate_enabled: "{{ (groups['designate_all'] is defined) and (groups['designate_all'] | length > 0) }}"
+# If there are any Ceilometer hosts in the environment, then enable its usage
+neutron_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"
+
+neutron_plugin_type: ml2.lxb
+
+## Nova
+nova_service_port: 8774
+nova_metadata_port: 8775
+nova_service_proto: http
+nova_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(nova_service_proto) }}"
+nova_service_adminuri: "{{ nova_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
+nova_service_adminurl: "{{ nova_service_adminuri }}/v2.1/%(tenant_id)s"
+nova_service_region: "{{ service_region }}"
+nova_service_user_name: nova
+nova_service_project_name: service
+nova_service_project_domain_id: default
+nova_service_user_domain_id: default
+nova_keystone_auth_plugin: password
+nova_console_type: spice
+nova_novncproxy_port: 6080
+nova_spice_html5proxy_base_port: 6082
+nova_console_port: "{% if nova_console_type == 'spice' %}{{ nova_spice_html5proxy_base_port }}{% else %}{{ nova_novncproxy_port }}{% endif %}"
+
+# These are here rather than in nova_all because
+# both the os_ceilometer and os_nova roles require them
+
+# RPC
+nova_rabbitmq_userid: nova
+nova_rabbitmq_vhost: /nova
+nova_rabbitmq_port: "{{ rabbitmq_port }}"
+nova_rabbitmq_servers: "{{ rabbitmq_servers }}"
+nova_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+nova_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+nova_rabbitmq_telemetry_userid: "{{ nova_rabbitmq_userid }}"
+nova_rabbitmq_telemetry_password: "{{ nova_rabbitmq_password }}"
+nova_rabbitmq_telemetry_vhost: "{{ nova_rabbitmq_vhost }}"
+nova_rabbitmq_telemetry_port: "{{ nova_rabbitmq_port }}"
+nova_rabbitmq_telemetry_servers: "{{ nova_rabbitmq_servers }}"
+nova_rabbitmq_telemetry_use_ssl: "{{ nova_rabbitmq_use_ssl }}"
+nova_rabbitmq_telemetry_host_group: "{{ nova_rabbitmq_host_group }}"
+
+# If there are any Designate hosts in the environment, then enable its usage
+nova_designate_enabled: "{{ (groups['designate_all'] is defined) and (groups['designate_all'] | length > 0) }}"
+# If there are any Ceilometer hosts in the environment, then enable its usage
+nova_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"
+# If there are any Barbican hosts in the environment, then enable its usage
+nova_barbican_enabled: "{{ (groups['barbican_all'] is defined) and (groups['barbican_all'] | length > 0) }}"
+
+## Sahara
+#RPC
+sahara_rabbitmq_userid: sahara
+sahara_rabbitmq_vhost: /sahara
+sahara_rabbitmq_port: "{{ rabbitmq_port }}"
+sahara_rabbitmq_servers: "{{ rabbitmq_servers }}"
+sahara_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+sahara_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+# Telemetry notifications
+sahara_rabbitmq_telemetry_userid: "{{ sahara_rabbitmq_userid }}"
+sahara_rabbitmq_telemetry_password: "{{ sahara_rabbitmq_password }}"
+sahara_rabbitmq_telemetry_vhost: "{{ sahara_rabbitmq_vhost }}"
+sahara_rabbitmq_telemetry_port: "{{ sahara_rabbitmq_port }}"
+sahara_rabbitmq_telemetry_servers: "{{ sahara_rabbitmq_servers }}"
+sahara_rabbitmq_telemetry_use_ssl: "{{ sahara_rabbitmq_use_ssl }}"
+sahara_rabbitmq_telemetry_host_group: "{{ sahara_rabbitmq_host_group }}"
+
+# If there are any Ceilometer and Sahara hosts in the environment, then enable its usage
+sahara_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['sahara_all'] is defined) and (groups['ceilometer_all'] | length > 0) and (groups['sahara_all'] | length > 0) }}"
+
+## Swift
+swift_proxy_port: 8080
+swift_system_user_name: swift
+swift_system_shell: /bin/bash
+swift_system_comment: swift system user
+swift_system_home_folder: "/var/lib/{{ swift_system_user_name }}"
+
+# Swift Telemetry notifications
+swift_rabbitmq_telemetry_userid: "swift"
+swift_rabbitmq_telemetry_vhost: "/swift"
+swift_rabbitmq_telemetry_servers: "{{ rabbitmq_servers }}"
+swift_rabbitmq_telemetry_host_group: "{{ rabbitmq_host_group }}"
+
+# If there are any Ceilometer and Swift hosts in the environment, then enable its usage
+swift_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['swift_proxy'] is defined) and (groups['ceilometer_all'] | length > 0) and (groups['swift_proxy'] | length > 0) }}"
+
+## OpenStack Openrc
+openrc_os_auth_url: "{{ keystone_service_internalurl }}"
+openrc_os_password: "{{ keystone_auth_admin_password }}"
+openrc_os_domain_name: "Default"
+openrc_region_name: "{{ service_region }}"
+
+## Host security hardening
+# The openstack-ansible-security role provides security hardening for hosts
+# by applying security configurations from the STIG. Hardening is enabled by
+# default, but an option to opt out is available by setting the following
+# variable to 'false'.
+# Docs: http://docs.openstack.org/developer/openstack-ansible-security/
+apply_security_hardening: true
+
+## Ansible ssh configuration
+ansible_ssh_extra_args: >
+ -o UserKnownHostsFile=/dev/null
+ -o StrictHostKeyChecking=no
+ -o ServerAliveInterval=64
+ -o ServerAliveCountMax=1024
+ -o Compression=no
+ -o TCPKeepAlive=yes
+ -o VerifyHostKeyDNS=no
+ -o ForwardX11=no
+ -o ForwardAgent=yes
+ -T
+
+# Toggle whether the service is deployed in a container or not
+is_metal: "{{ properties.is_metal | default(false) }}"
+
+## ceph-ansible configuration
+mon_group_name: ceph-mon
+osd_group_name: ceph-osd
+ceph_stable: true
+# The _stable_release var is used by both the OSA ceph_client role and the
+# ceph-ansible roles. It is defaulted in ceph_client but set here to keep the
+# OSA/ceph-ansible integrations in sync.
+ceph_stable_release: jewel
+fetch_directory: /etc/openstack_deploy/ceph-fetch/
+# tries to create /var/log/ceph as a directory and fails if the log link already
+# exists. we handle the log dir creation so this is not something we need
+# ceph-common to prepare for us.
+rbd_client_directories: false
+
+# Magnum
+magnum_bind_port: 9511
+magnum_service_proto: http
+magnum_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(magnum_service_proto) }}"
+magnum_service_publicurl: "{{ magnum_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ magnum_bind_port }}"
+magnum_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(magnum_service_proto) }}"
+magnum_service_internalurl: "{{ magnum_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ magnum_bind_port }}"
+magnum_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(magnum_service_proto) }}"
+magnum_service_adminurl: "{{ magnum_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ magnum_bind_port }}"
+
+# Tacker
+tacker_service_user_name: tacker
+tacker_service_tenant_name: service
+
+tacker_rabbitmq_userid: tacker
+tacker_rabbitmq_vhost: /tacker
+tacker_rabbitmq_port: "{{ rabbitmq_port }}"
+tacker_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
+tacker_rabbitmq_servers: "{{ rabbitmq_servers }}"
+tacker_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml
new file mode 100644
index 00000000..6d2b490a
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml
@@ -0,0 +1,263 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml
new file mode 100644
index 00000000..386a63e3
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml
@@ -0,0 +1,271 @@
+haproxy_default_services:
+ - service:
+ haproxy_service_name: galera
+ haproxy_backend_nodes: "{{ [groups['galera_all'][0]] | default([]) }}" # list expected
+ haproxy_backup_nodes: "{{ groups['galera_all'][1:] | default([]) }}"
+ haproxy_bind: "{{ [internal_lb_vip_address] }}"
+ haproxy_port: 3306
+ haproxy_balance_type: tcp
+ haproxy_timeout_client: 5000s
+ haproxy_timeout_server: 5000s
+ haproxy_backend_options:
+ - "mysql-check user {{ galera_monitoring_user }}"
+ haproxy_whitelist_networks: "{{ haproxy_galera_whitelist_networks }}"
+ - service:
+ haproxy_service_name: repo_git
+ haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
+ haproxy_bind: "{{ [internal_lb_vip_address] }}"
+ haproxy_port: 9418
+ haproxy_balance_type: tcp
+ haproxy_backend_options:
+ - tcp-check
+ haproxy_whitelist_networks: "{{ haproxy_repo_git_whitelist_networks }}"
+ - service:
+ haproxy_service_name: repo_all
+ haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
+ haproxy_bind: "{{ [internal_lb_vip_address] }}"
+ haproxy_port: 8181
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: repo_cache
+ haproxy_backend_nodes: "{{ [groups['repo_all'][0]] | default([]) }}" # list expected
+ haproxy_backup_nodes: "{{ groups['repo_all'][1:] | default([]) }}"
+ haproxy_bind: "{{ [internal_lb_vip_address] }}"
+ haproxy_port: "{{ repo_pkg_cache_port }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /acng-report.html"
+ haproxy_whitelist_networks: "{{ haproxy_repo_cache_whitelist_networks }}"
+ - service:
+ haproxy_service_name: glance_api
+ haproxy_backend_nodes: "{{ groups['glance_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 9292
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk /healthcheck"
+ - service:
+ haproxy_service_name: glance_registry
+ haproxy_backend_nodes: "{{ groups['glance_registry'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 9191
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk /healthcheck"
+ haproxy_whitelist_networks: "{{ haproxy_glance_registry_whitelist_networks }}"
+ - service:
+ haproxy_service_name: gnocchi
+ haproxy_backend_nodes: "{{ groups['gnocchi_all'] | default([]) }}"
+ haproxy_port: 8041
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk /healthcheck"
+ - service:
+ haproxy_service_name: heat_api_cfn
+ haproxy_backend_nodes: "{{ groups['heat_api_cfn'] | default([]) }}"
+ haproxy_port: 8000
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: heat_api_cloudwatch
+ haproxy_backend_nodes: "{{ groups['heat_api_cloudwatch'] | default([]) }}"
+ haproxy_port: 8003
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: heat_api
+ haproxy_backend_nodes: "{{ groups['heat_api'] | default([]) }}"
+ haproxy_port: 8004
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: keystone_service
+ haproxy_backend_nodes: "{{ groups['keystone_all'] | default([]) }}"
+ haproxy_port: 5000
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: "http"
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: keystone_admin
+ haproxy_backend_nodes: "{{ groups['keystone_all'] | default([]) }}"
+ haproxy_port: 35357
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: "http"
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ haproxy_whitelist_networks: "{{ haproxy_keystone_admin_whitelist_networks }}"
+ - service:
+ haproxy_service_name: neutron_server
+ haproxy_backend_nodes: "{{ groups['neutron_server'] | default([]) }}"
+ haproxy_port: 9696
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: nova_api_metadata
+ haproxy_backend_nodes: "{{ groups['nova_api_metadata'] | default([]) }}"
+ haproxy_port: 8775
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ haproxy_whitelist_networks: "{{ haproxy_nova_metadata_whitelist_networks }}"
+ - service:
+ haproxy_service_name: nova_api_os_compute
+ haproxy_backend_nodes: "{{ groups['nova_api_os_compute'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 8774
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: nova_api_placement
+ haproxy_backend_nodes: "{{ groups['nova_api_placement'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 8780
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: nova_console
+ haproxy_backend_nodes: "{{ groups['nova_console'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: "{{ nova_console_port }}"
+ haproxy_balance_type: http
+ haproxy_timeout_client: 60m
+ haproxy_timeout_server: 60m
+ haproxy_balance_alg: source
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ haproxy_backend_httpcheck_options:
+ - "expect status 404"
+ - service:
+ haproxy_service_name: cinder_api
+ haproxy_backend_nodes: "{{ groups['cinder_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 8776
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: horizon
+ haproxy_backend_nodes: "{{ groups['horizon_all'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_ssl_all_vips: true
+ haproxy_port: "{{ haproxy_ssl | ternary(443,80) }}"
+ haproxy_backend_port: 80
+ haproxy_redirect_http_port: 80
+ haproxy_balance_type: http
+ haproxy_balance_alg: source
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: sahara_api
+ haproxy_backend_nodes: "{{ groups['sahara_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_alg: source
+ haproxy_port: 8386
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk /healthcheck"
+ - service:
+ haproxy_service_name: swift_proxy
+ haproxy_backend_nodes: "{{ groups['swift_proxy'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_alg: source
+ haproxy_port: 8080
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk /healthcheck"
+ - service:
+ haproxy_service_name: ceilometer_api
+ haproxy_backend_nodes: "{{ groups['ceilometer_api_container'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 8777
+ haproxy_balance_type: tcp
+ haproxy_backend_options:
+ - tcp-check
+ - service:
+ haproxy_service_name: aodh_api
+ haproxy_backend_nodes: "{{ groups['aodh_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 8042
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ haproxy_backend_httpcheck_options:
+ - "expect status 401"
+ - service:
+ haproxy_service_name: ironic_api
+ haproxy_backend_nodes: "{{ groups['ironic_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 6385
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk GET /"
+ - service:
+ haproxy_service_name: rabbitmq_mgmt
+ haproxy_backend_nodes: "{{ groups['rabbitmq'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 15672
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ haproxy_whitelist_networks: "{{ haproxy_rabbitmq_management_whitelist_networks }}"
+ - service:
+ haproxy_service_name: magnum
+ haproxy_backend_nodes: "{{ groups['magnum_all'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 9511
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk GET /"
+ - service:
+ haproxy_service_name: trove
+ haproxy_backend_nodes: "{{ groups['trove_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 8779
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ - service:
+ haproxy_service_name: barbican
+ haproxy_backend_nodes: "{{ groups['barbican_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 9311
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD /"
+ haproxy_backend_httpcheck_options:
+ - "expect status 401"
+ - service:
+ haproxy_service_name: designate_api
+ haproxy_backend_nodes: "{{ groups['designate_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: 9001
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "forwardfor"
+ - "httpchk /versions"
+ - "httplog"
+ - service:
+ haproxy_service_name: tacker
+ haproxy_backend_nodes: "{{ groups['tacker_all'] | default([]) }}"
+ haproxy_port: 9890
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "forwardfor"
+ - "httpchk"
+ - "httplog"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml
new file mode 100644
index 00000000..ac17d89d
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml
@@ -0,0 +1,175 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..ee8889d2
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml
@@ -0,0 +1,177 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services.yml
new file mode 100644
index 00000000..8cecf968
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services.yml
@@ -0,0 +1,238 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+## NOTICE on items in this file:
+## * If you use anything in the *._git_install_branch field that is not a TAG
+## make sure to leave an in-line comment as to "why".
+
+## For the sake of anyone else editing this file:
+## * If you add services to this file please do so in alphabetical order.
+## * Every entry should be name spaced with the name of the client followed by an "_"
+## * All items with this file should be separated by `name_` note that the name of the
+## package should be one long name with no additional `_` separating it.
+
+
+### Before this is shipped all of these services should have a tag set as the branch,
+### or have a comment / reason attached to them as to why a tag can not work.
+
+
+## Global Requirements
+requirements_git_repo: https://git.openstack.org/openstack/requirements
+requirements_git_install_branch: 0651044b20227dad1d8247be47614a2447c21793 # HEAD of "stable/ocata" as of 25.08.2017
+requirements_git_dest: "/opt/requirements_{{ requirements_git_install_branch | replace('/', '_') }}"
+
+
+## Aodh service
+aodh_git_repo: https://git.openstack.org/openstack/aodh
+aodh_git_install_branch: ab45afcfe9a531127ab6f06a128ec5db9cfe7a06 # HEAD of "stable/ocata" as of 25.08.2017
+aodh_git_dest: "/opt/aodh_{{ aodh_git_install_branch | replace('/', '_') }}"
+aodh_git_project_group: aodh_all
+
+
+## Barbican service
+barbican_git_repo: https://git.openstack.org/openstack/barbican
+barbican_git_install_branch: 67abd259f56a5f991455a68dd9fe321985e195be # HEAD of "stable/ocata" as of 25.08.2017
+barbican_git_dest: "/opt/barbican_{{ barbican_git_install_branch | replace('/', '_') }}"
+barbican_git_project_group: barbican_all
+
+
+## Ceilometer service
+ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
+ceilometer_git_install_branch: c154befc4226183d0856533583342e004df3bda7 # HEAD of "stable/ocata" as of 25.08.2017
+ceilometer_git_dest: "/opt/ceilometer_{{ ceilometer_git_install_branch | replace('/', '_') }}"
+ceilometer_git_project_group: ceilometer_all
+
+
+## Cinder service
+cinder_git_repo: https://git.openstack.org/openstack/cinder
+cinder_git_install_branch: 78d093b864e46dc847e507342c7b1f7d01233fea # HEAD of "stable/ocata" as of 25.08.2017
+cinder_git_dest: "/opt/cinder_{{ cinder_git_install_branch | replace('/', '_') }}"
+cinder_git_project_group: cinder_all
+
+
+## Designate service
+designate_git_repo: https://git.openstack.org/openstack/designate
+designate_git_install_branch: b0b2878b3c356d057f9e2468d55dc0cd8f42ec42 # HEAD of "stable/ocata" as of 25.08.2017
+designate_git_dest: "/opt/designate_{{ designate_git_install_branch | replace('/', '_') }}"
+designate_git_project_group: designate_all
+
+
+## Horizon Designate dashboard plugin
+designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
+designate_dashboard_git_install_branch: 02819244c054577656ac4db745b7bf6f47c26b9d # HEAD of "stable/ocata" as of 25.08.2017
+designate_dashboard_git_dest: "/opt/designate_dashboard_{{ designate_dashboard_git_install_branch | replace('/', '_') }}"
+designate_dashboard_git_project_group: horizon_all
+
+
+## Dragonflow service
+dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
+dragonflow_git_install_branch: b72ffa06721faf95f42c9702f60a9acc3a7d8b61 # HEAD of "stable/ocata" as of 25.08.2017
+dragonflow_git_dest: "/opt/dragonflow_{{ dragonflow_git_install_branch | replace('/', '_') }}"
+dragonflow_git_project_group: neutron_all
+
+
+## Glance service
+glance_git_repo: https://git.openstack.org/openstack/glance
+glance_git_install_branch: 0a2074ecef67beb7a3b3531534c9d97b1c3ea828 # HEAD of "stable/ocata" as of 25.08.2017
+glance_git_dest: "/opt/glance_{{ glance_git_install_branch | replace('/', '_') }}"
+glance_git_project_group: glance_all
+
+
+## Heat service
+heat_git_repo: https://git.openstack.org/openstack/heat
+heat_git_install_branch: 1e57d98611b661795d2977d00405385dec609dba # HEAD of "stable/ocata" as of 25.08.2017
+heat_git_dest: "/opt/heat_{{ heat_git_install_branch | replace('/', '_') }}"
+heat_git_project_group: heat_all
+
+
+## Horizon service
+horizon_git_repo: https://git.openstack.org/openstack/horizon
+horizon_git_install_branch: d4f4c17a7f3ca3ee3c9b9e0dbdc8412b256a5f78 # HEAD of "stable/ocata" as of 25.08.2017
+horizon_git_dest: "/opt/horizon_{{ horizon_git_install_branch | replace('/', '_') }}"
+horizon_git_project_group: horizon_all
+
+## Horizon Ironic dashboard plugin
+ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
+ironic_dashboard_git_install_branch: 93b9054d21a8ee6c270f648383828959818734c1 # HEAD of "stable/ocata" as of 25.08.2017
+ironic_dashboard_git_dest: "/opt/ironic_dashboard_{{ ironic_dashboard_git_install_branch | replace('/', '_') }}"
+ironic_dashboard_git_project_group: horizon_all
+
+## Horizon Magnum dashboard plugin
+magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
+magnum_dashboard_git_install_branch: 369d3e0bf9bb562c28bb975736694b598109310a # HEAD of "stable/ocata" as of 25.08.2017
+magnum_dashboard_git_dest: "/opt/magnum_dashboard_{{ magnum_dashboard_git_install_branch | replace('/', '_') }}"
+magnum_dashboard_git_project_group: horizon_all
+
+## Horizon LBaaS dashboard plugin
+neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
+neutron_lbaas_dashboard_git_install_branch: 834ea9dd7aeaefed397d6d510b7527ce0fa5bd33 # HEAD of "stable/ocata" as of 25.08.2017
+neutron_lbaas_dashboard_git_dest: "/opt/neutron_lbaas_dashboard_{{ neutron_lbaas_dashboard_git_install_branch | replace('/', '_') }}"
+neutron_lbaas_dashboard_git_project_group: horizon_all
+
+## Horizon Sahara dashboard plugin
+sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
+sahara_dashboard_git_install_branch: 655b666d760e1e2f005c9711a21dadb11b459e97 # HEAD of "stable/ocata" as of 25.08.2017
+sahara_dashboard_git_dest: "/opt/sahara_dashboard_{{ sahara_dashboard_git_install_branch | replace('/', '_') }}"
+sahara_dashboard_git_project_group: horizon_all
+
+
+## Keystone service
+keystone_git_repo: https://git.openstack.org/openstack/keystone
+keystone_git_install_branch: 16d8f0d11f0ab9678b7e99f063fcce23d32c3c3b # HEAD of "stable/ocata" as of 25.08.2017
+keystone_git_dest: "/opt/keystone_{{ keystone_git_install_branch | replace('/', '_') }}"
+keystone_git_project_group: keystone_all
+
+
+## Neutron service
+neutron_git_repo: https://git.openstack.org/openstack/neutron
+neutron_git_install_branch: fbd60b0991af7a02ebd958f86000dabd59e221b0 # HEAD of "stable/ocata" as of 25.08.2017
+neutron_git_dest: "/opt/neutron_{{ neutron_git_install_branch | replace('/', '_') }}"
+neutron_git_project_group: neutron_all
+
+neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
+neutron_lbaas_git_install_branch: b3479a1a2652bb45fbdc0b78da9a4a65c6ee9a0e # HEAD of "stable/ocata" as of 25.08.2017
+neutron_lbaas_git_dest: "/opt/neutron_lbaas_{{ neutron_lbaas_git_install_branch | replace('/', '_') }}"
+neutron_lbaas_git_project_group: neutron_all
+
+neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
+neutron_vpnaas_git_install_branch: 6fece0ee069c5dd036b4712cfd74855844aa7f91 # HEAD of "stable/ocata" as of 25.08.2017
+neutron_vpnaas_git_dest: "/opt/neutron_vpnaas_{{ neutron_vpnaas_git_install_branch | replace('/', '_') }}"
+neutron_vpnaas_git_project_group: neutron_all
+
+neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
+neutron_fwaas_git_install_branch: 1f76429d87327e1d1d70a7a0211098c76f9f4688 # HEAD of "stable/ocata" as of 25.08.2017
+neutron_fwaas_git_dest: "/opt/neutron_fwaas_{{ neutron_fwaas_git_install_branch | replace('/', '_') }}"
+neutron_fwaas_git_project_group: neutron_all
+
+neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
+neutron_dynamic_routing_git_install_branch: 4b6afd41961743353400f78ddcbc4a5d34665468 # HEAD of "stable/ocata" as of 25.08.2017
+neutron_dynamic_routing_git_dest: "/opt/neutron_dynamic_routing_{{ neutron_dynamic_routing_git_install_branch | replace('/', '_') }}"
+neutron_dynamic_routing_git_project_group: neutron_all
+
+networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
+networking_calico_git_install_branch: e794848060e7ab3edf320b1847151de4eb6af142 # HEAD of "master" as of 29.06.2017
+networking_calico_git_project_group: neutron_all
+
+## Nova service
+nova_git_repo: https://git.openstack.org/openstack/nova
+nova_git_install_branch: bcf110fe7ca2d989a956303313f413614e7a6548 # HEAD of "stable/ocata" as of 25.08.2017
+nova_git_dest: "/opt/nova_{{ nova_git_install_branch | replace('/', '_') }}"
+nova_git_project_group: nova_all
+
+
+## PowerVM Virt Driver
+nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
+nova_powervm_git_install_branch: a7df8c69b1a3cafbcbccd2a0010b1d21351ba01b # HEAD of "stable/ocata" as of 25.08.2017
+nova_powervm_git_dest: "/opt/nova_powervm_{{ nova_powervm_git_install_branch | replace('/', '_') }}"
+nova_powervm_git_project_group: nova_all
+
+
+## LXD Virt Driver
+nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
+nova_lxd_git_install_branch: 2a452c54ea6cf525e2ef9ff1e29776d2ab618311 # HEAD of "stable/ocata" as of 25.08.2017
+nova_lxd_git_dest: "/opt/nova_lxd_{{ nova_lxd_git_install_branch | replace('/', '_') }}"
+nova_lxd_git_project_group: nova_all
+
+
+## Sahara service
+sahara_git_repo: https://git.openstack.org/openstack/sahara
+sahara_git_install_branch: 4a17aa318b12c93d228cafdd95c892681744b91d # HEAD of "stable/ocata" as of 25.08.2017
+sahara_git_dest: "/opt/sahara_{{ sahara_git_install_branch | replace('/', '_') }}"
+sahara_git_project_group: sahara_all
+
+
+## Swift service
+swift_git_repo: https://git.openstack.org/openstack/swift
+swift_git_install_branch: 72ed8f23a78f11e1ca1688ba086590bb7062a8c7 # HEAD of "stable/ocata" as of 25.08.2017
+swift_git_dest: "/opt/swift_{{ swift_git_install_branch | replace('/', '_') }}"
+swift_git_project_group: swift_all
+
+
+## Swift3 middleware
+swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
+swift_swift3_git_install_branch: a8bbdd66464b735a247159ee6c68e0d71bcf27d6 # HEAD of "master" as of 30.06.2017
+swift_swift3_git_project_group: swift_all
+
+
+## Ironic service
+ironic_git_repo: https://git.openstack.org/openstack/ironic
+ironic_git_install_branch: ac7b8067db9b4b8d9cd9d87767e6760f3af6e1e7 # HEAD of "stable/ocata" as of 25.08.2017
+ironic_git_dest: "/opt/ironic_{{ ironic_git_install_branch | replace('/', '_') }}"
+ironic_git_project_group: ironic_all
+
+## Magnum service
+magnum_git_repo: https://git.openstack.org/openstack/magnum
+magnum_git_install_branch: a422f534101ff9f733dd65d9c1a6e7f8a2003693 # HEAD of "stable/ocata" as of 25.08.2017
+magnum_git_dest: "/opt/magnum_{{ magnum_git_install_branch | replace('/', '_') }}"
+magnum_git_project_group: magnum_all
+
+## Trove service
+trove_git_repo: https://git.openstack.org/openstack/trove
+trove_git_install_branch: 898d364fdf2cb6faad9735cdd01cba16b67f19b7 # HEAD of "stable/ocata" as of 25.08.2017
+trove_git_dest: "/opt/trove_{{ trove_git_install_branch | replace('/', '_') }}"
+trove_git_project_group: trove_all
+
+## Horizon Trove dashboard plugin
+trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
+trove_dashboard_git_install_branch: e88344200a0b3e03f644a98723ff5fa1b670df7a # HEAD of "stable/ocata" as of 25.08.2017
+trove_dashboard_git_dest: "/opt/trove_dashboard_{{ trove_dashboard_git_install_branch | replace('/', '_') }}"
+trove_dashboard_git_project_group: horizon_all
+
+## Tacker service
+tacker_git_repo: https://github.com/manuelbuil/tacker
+tacker_git_install_branch: ocata-insecured-bug-fixed  # NOTE: mutable branch on a personal fork — pin to a commit SHA (like the other services above) once the fix lands upstream
+tacker_git_project_group: tacker_all
+
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml
new file mode 100644
index 00000000..dd965951
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml
@@ -0,0 +1,63 @@
+---
+# Copyright 2017, SUSE LINUX GmbH.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Install the tacker components
+ hosts: tacker_all
+ gather_facts: "{{ gather_facts | default(True) }}"
+ max_fail_percentage: 20
+ user: root
+ pre_tasks:
+ - include: common-tasks/os-lxc-container-setup.yml
+ - include: common-tasks/rabbitmq-vhost-user.yml
+ static: no
+ vars:
+ user: "{{ tacker_rabbitmq_userid }}"
+ password: "{{ tacker_rabbitmq_password }}"
+ vhost: "{{ tacker_rabbitmq_vhost }}"
+ _rabbitmq_host_group: "{{ tacker_rabbitmq_host_group }}"
+ when:
+ - inventory_hostname == groups['tacker_all'][0]
+ - groups[tacker_rabbitmq_host_group] | length > 0
+ - include: common-tasks/os-log-dir-setup.yml
+ vars:
+ log_dirs:
+ - src: "/openstack/log/{{ inventory_hostname }}-tacker"
+ dest: "/var/log/tacker"
+ - include: common-tasks/mysql-db-user.yml
+ static: no
+ vars:
+ user_name: "{{ tacker_galera_user }}"
+ password: "{{ tacker_container_mysql_password }}"
+ login_host: "{{ tacker_galera_address }}"
+ db_name: "{{ tacker_galera_database }}"
+ when: inventory_hostname == groups['tacker_all'][0]
+ - include: common-tasks/package-cache-proxy.yml
+ roles:
+ - role: "os_tacker"
+ - role: "openstack_openrc"
+ tags:
+ - openrc
+ - role: "rsyslog_client"
+ rsyslog_client_log_rotate_file: tacker_log_rotate
+ rsyslog_client_log_dir: "/var/log/tacker"
+ rsyslog_client_config_name: "99-tacker-rsyslog-client.conf"
+ tags:
+ - rsyslog
+ vars:
+ is_metal: "{{ properties.is_metal|default(false) }}"
+ tacker_galera_address: "{{ internal_lb_vip_address }}"
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - tacker
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml
new file mode 100644
index 00000000..456a8add
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml
@@ -0,0 +1,25 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- include: os-keystone-install.yml
+- include: os-glance-install.yml
+- include: os-cinder-install.yml
+- include: os-nova-install.yml
+- include: os-neutron-install.yml
+- include: os-heat-install.yml
+- include: os-horizon-install.yml
+- include: os-swift-install.yml
+- include: os-tacker-install.yml
+- include: os-ironic-install.yml
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml
new file mode 100644
index 00000000..9ceabbc2
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml
@@ -0,0 +1,36 @@
+---
+# Copyright 2017, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+component_skel:
+ tacker_server:
+ belongs_to:
+ - tacker_all
+
+
+container_skel:
+ tacker_container:
+ belongs_to:
+ - mano_containers
+ contains:
+ - tacker_server
+
+
+physical_skel:
+ mano_containers:
+ belongs_to:
+ - all_containers
+ mano_hosts:
+ belongs_to:
+ - hosts
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml
new file mode 100644
index 00000000..4016aa11
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml
@@ -0,0 +1,36 @@
+---
+# Copyright 2017, SUSE LINUX GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+tacker_service_publicuri: "{{ openstack_service_publicuri_proto|default(tacker_service_proto) }}://{{ external_lb_vip_address }}:{{ tacker_service_port }}"
+tacker_service_adminurl: "{{ tacker_service_adminuri }}/"
+tacker_service_region: "{{ service_region }}"
+tacker_service_in_ldap: "{{ service_ldap_backend_enabled }}"
+
+tacker_aodh_enabled: "{{ groups['aodh_all'] is defined and groups['aodh_all'] | length > 0 }}"
+tacker_gnocchi_enabled: "{{ groups['gnocchi_all'] is defined and groups['gnocchi_all'] | length > 0 }}"
+
+# NOTE: this note and the commented-out swift_* vars below appear copied from swift_all.yml; move them back to all.yml once swift gets proper SSL support — verify they are actually needed in tacker_all.yml
+# swift_rabbitmq_telemetry_port: "{{ rabbitmq_port }}"
+# swift_rabbitmq_telemetry_use_ssl: "{{ rabbitmq_use_ssl }}"
+
+# Ensure that the package state matches the global setting
+tacker_package_state: "{{ package_state }}"
+
+# venv fetch configuration
+tacker_venv_tag: "{{ venv_tag }}"
+tacker_venv_download_url: "{{ venv_base_download_url }}/tacker-{{ openstack_release }}-{{ ansible_architecture | lower }}.tgz"
+
+# locations for fetching the default files from the git source
+tacker_git_config_lookup_location: "{{ openstack_repo_url }}/openstackgit/tacker"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml
new file mode 100644
index 00000000..50c7c0e8
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml
@@ -0,0 +1,163 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+############################# WARNING ########################################
+# The playbooks do not currently manage changing passwords in an existing
+# environment. Changing passwords and re-running the playbooks will fail
+# and may break your OpenStack environment.
+############################# WARNING ########################################
+
+
+## Rabbitmq Options
+rabbitmq_cookie_token:
+rabbitmq_monitoring_password:
+
+## Tokens
+memcached_encryption_key:
+
+## Galera Options
+galera_root_password:
+
+## Keystone Options
+keystone_container_mysql_password:
+keystone_auth_admin_password:
+keystone_service_password:
+keystone_rabbitmq_password:
+
+## Ceilometer Options:
+ceilometer_container_db_password:
+ceilometer_service_password:
+ceilometer_telemetry_secret:
+ceilometer_rabbitmq_password:
+
+## Aodh Options:
+aodh_container_db_password:
+aodh_service_password:
+aodh_rabbitmq_password:
+
+## Cinder Options
+cinder_container_mysql_password:
+cinder_service_password:
+cinder_profiler_hmac_key:
+cinder_rabbitmq_password:
+
+## Ceph/rbd: a UUID to be used by libvirt to refer to the client.cinder user
+cinder_ceph_client_uuid:
+
+## Glance Options
+glance_container_mysql_password:
+glance_service_password:
+glance_profiler_hmac_key:
+glance_rabbitmq_password:
+
+## Gnocchi Options:
+gnocchi_container_mysql_password:
+gnocchi_service_password:
+
+## Heat Options
+heat_stack_domain_admin_password:
+heat_container_mysql_password:
+### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
+heat_auth_encryption_key:
+### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
+heat_service_password:
+heat_rabbitmq_password:
+
+## Ironic options
+ironic_rabbitmq_password:
+ironic_container_mysql_password:
+ironic_service_password:
+ironic_swift_temp_url_secret_key:
+
+## Horizon Options
+horizon_container_mysql_password:
+horizon_secret_key:
+
+## Neutron Options
+neutron_container_mysql_password:
+neutron_service_password:
+neutron_rabbitmq_password:
+neutron_ha_vrrp_auth_password:
+
+## Nova Options
+nova_container_mysql_password:
+nova_api_container_mysql_password:
+nova_metadata_proxy_secret:
+nova_service_password:
+nova_rabbitmq_password:
+nova_placement_service_password:
+nova_placement_container_mysql_password:
+
+# LXD Options for nova compute
+lxd_trust_password:
+
+## Octavia Options
+octavia_container_mysql_password:
+octavia_service_password:
+octavia_health_hmac_key:
+octavia_rabbitmq_password:
+
+## Sahara Options
+sahara_container_mysql_password:
+sahara_rabbitmq_password:
+sahara_service_password:
+
+## Swift Options:
+swift_service_password:
+swift_dispersion_password:
+### Once the swift cluster has been setup DO NOT change these hash values!
+swift_hash_path_suffix:
+swift_hash_path_prefix:
+# Swift needs a telemetry password when using ceilometer
+swift_rabbitmq_telemetry_password:
+
+## haproxy stats password
+haproxy_stats_password:
+haproxy_keepalived_authentication_password:
+
+## Magnum Options
+magnum_service_password:
+magnum_galera_password:
+magnum_rabbitmq_password:
+magnum_trustee_password:
+
+## Rally Options:
+rally_galera_password:
+
+## Trove Options
+trove_galera_password:
+trove_rabbitmq_password:
+trove_service_password:
+trove_admin_user_password:
+trove_taskmanager_rpc_encr_key:
+trove_inst_rpc_key_encr_key:
+
+## Barbican Options
+barbican_galera_password:
+barbican_rabbitmq_password:
+barbican_service_password:
+
+## Designate Options
+designate_galera_password:
+designate_rabbitmq_password:
+designate_service_password:
+
+## Molteniron Options:
+molteniron_container_mysql_password:
+
+## Tacker options
+tacker_rabbitmq_password:
+tacker_service_password:
+tacker_container_mysql_password: