path: root/xci/installer
author      Markos Chandras <mchandras@suse.de>    2018-01-08 10:52:57 +0000
committer   Markos Chandras <mchandras@suse.de>    2018-01-08 23:24:10 +0000
commit      5eaa9da62dabbf31fc83f0c43c6bc56f68468f9c (patch)
tree        e04ab5e994f5f35a6164ef0f0ce527c2d9012739 /xci/installer
parent      b4a0a044240a09e621f51d1f12f8f02148870c39 (diff)
xci: Rename 'nfvi' to 'installer'
Using 'installer' to describe the tool that will deploy the foundations of a
particular XCI scenario is more appropriate than NFVI, which normally describes
both the physical and virtual resources needed by an NFV deployment.

Change-Id: Ib8b1aac58673bf705ce2ff053574fd10cb390d71
Signed-off-by: Markos Chandras <mchandras@suse.de>
Diffstat (limited to 'xci/installer')
-rwxr-xr-x  xci/installer/osa/deploy.sh  192
-rw-r--r--  xci/installer/osa/files/aio/flavor-vars.yml  3
-rw-r--r--  xci/installer/osa/files/aio/inventory  2
-rw-r--r--  xci/installer/osa/files/ansible-role-requirements.yml  227
-rw-r--r--  xci/installer/osa/files/cinder.yml  13
-rw-r--r--  xci/installer/osa/files/global-requirement-pins.txt  14
-rw-r--r--  xci/installer/osa/files/ha/ceph.yml  15
-rw-r--r--  xci/installer/osa/files/ha/flavor-vars.yml  39
-rw-r--r--  xci/installer/osa/files/ha/inventory  11
-rw-r--r--  xci/installer/osa/files/ha/openstack_user_config.yml  255
-rw-r--r--  xci/installer/osa/files/ha/user_ceph.yml  16
-rw-r--r--  xci/installer/osa/files/ha/user_variables.yml  165
-rw-r--r--  xci/installer/osa/files/ha/user_variables_ceph.yml  32
-rw-r--r--  xci/installer/osa/files/mini/ceph.yml  9
-rw-r--r--  xci/installer/osa/files/mini/flavor-vars.yml  21
-rw-r--r--  xci/installer/osa/files/mini/inventory  8
-rw-r--r--  xci/installer/osa/files/mini/openstack_user_config.yml  170
-rw-r--r--  xci/installer/osa/files/mini/user_ceph.yml  16
-rw-r--r--  xci/installer/osa/files/mini/user_variables.yml  165
-rw-r--r--  xci/installer/osa/files/mini/user_variables_ceph.yml  32
-rw-r--r--  xci/installer/osa/files/noha/ceph.yml  11
-rw-r--r--  xci/installer/osa/files/noha/flavor-vars.yml  27
-rw-r--r--  xci/installer/osa/files/noha/inventory  9
-rw-r--r--  xci/installer/osa/files/noha/openstack_user_config.yml  172
-rw-r--r--  xci/installer/osa/files/noha/user_ceph.yml  16
-rw-r--r--  xci/installer/osa/files/noha/user_variables.yml  165
-rw-r--r--  xci/installer/osa/files/noha/user_variables_ceph.yml  32
-rw-r--r--  xci/installer/osa/files/openstack_services.yml  222
-rw-r--r--  xci/installer/osa/files/setup-openstack.yml  27
-rw-r--r--  xci/installer/osa/playbooks/bootstrap-scenarios.yml  23
-rw-r--r--  xci/installer/osa/playbooks/configure-localhost.yml  75
-rw-r--r--  xci/installer/osa/playbooks/configure-opnfvhost.yml  185
-rw-r--r--  xci/installer/osa/playbooks/configure-targethosts.yml  49
-rw-r--r--  xci/installer/osa/playbooks/inventory  10
34 files changed, 2428 insertions, 0 deletions
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
new file mode 100755
index 00000000..b8637f22
--- /dev/null
+++ b/xci/installer/osa/deploy.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+OSA_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+if [[ ${OPENSTACK_OSA_VERSION} =~ (stable/|master) ]]; then
+ echo ""
+ echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
+ echo "WARNING: We have detected that you are trying to OpenStack-Ansible from stable or master branch."
+ echo "This will likely not work because, unless you know what you are doing, you are going"
+ echo "to be trying something that has not been verified by XCI or upstream fully."
+ echo "This is _NOT_ supported in any way but we can try to make it work for you."
+ echo "Either way you are on your own so please do not report bugs as they will be considered invalid."
+ echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
+ echo ""
+ sleep 15
+ trap - ERR
+ ${XCI_PATH}/xci/scripts/update-osa-version-files.sh ${OPENSTACK_OSA_VERSION}
+fi
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - creates log directory
+# - copies flavor files such as the playbook, inventory, and vars file
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $OSA_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i inventory configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost host for openstack-ansible"
+
+#-------------------------------------------------------------------------------
+# Configure openstack-ansible deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci and openstack/openstack-ansible repositories
+# - configures network
+# - generates/prepares ssh keys
+# - bootstraps ansible
+# - copies flavor files to be used by openstack-ansible
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+cd $OSA_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for openstack-ansible"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for openstack-ansible
+#-------------------------------------------------------------------------------
+# This playbook is only run for all flavors except aio, since aio is configured
+# by an upstream script.
+
+# This playbook
+# - adds public keys to target hosts
+# - configures network
+# - configures nfs
+#-------------------------------------------------------------------------------
+if [[ $XCI_FLAVOR != "aio" ]]; then
+ echo "Info: Configuring target hosts for openstack-ansible"
+ echo "-----------------------------------------------------------------------"
+ cd $OSA_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts"
+fi
+
+#-------------------------------------------------------------------------------
+# Set up target hosts for openstack-ansible
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Setting up target hosts for openstack-ansible"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ releng-xci/.cache/repos/openstack-ansible/playbooks/setup-hosts.yml | tee setup-hosts.log "
+scp root@$OPNFV_HOST_IP:~/setup-hosts.log $LOG_PATH/setup-hosts.log
+echo "-----------------------------------------------------------------------"
+echo "Info: Set up target hosts for openstack-ansible successfuly"
+
+# TODO: Check this with upstream and issue a documentation fix if the
+# problem is valid.
+#-------------------------------------------------------------------------------
+# Gather facts for all the hosts and containers
+#-------------------------------------------------------------------------------
+# This is needed in order to gather facts for the containers. An upstream change
+# to the way host facts are gathered causes the setup-infrastructure.yml playbook
+# to fail when the facts for the lxc containers are missing.
+#
+# OSA gate also executes this command. See the link
+# http://logs.openstack.org/64/494664/1/check/gate-openstack-ansible-openstack-ansible-aio-ubuntu-xenial/2a0700e/console.html
+#-------------------------------------------------------------------------------
+echo "Info: Gathering facts"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/openstack-ansible/playbooks; \
+ ansible ${XCI_ANSIBLE_VERBOSITY} -m setup -a gather_subset=network,hardware,virtual all"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Set up infrastructure
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Setting up infrastructure"
+echo "-----------------------------------------------------------------------"
+echo "xci: running ansible playbook setup-infrastructure.yml"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ releng-xci/.cache/repos/openstack-ansible/playbooks/setup-infrastructure.yml | tee setup-infrastructure.log"
+scp root@$OPNFV_HOST_IP:~/setup-infrastructure.log $LOG_PATH/setup-infrastructure.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
+ echo "Error: OpenStack node setup failed!"
+ exit 1
+fi
+
+#-------------------------------------------------------------------------------
+# Verify database cluster
+#-------------------------------------------------------------------------------
+echo "Info: Verifying database cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; ansible --ssh-extra-args='-o StrictHostKeyChecking=no' \
+ -i releng-xci/.cache/repos/openstack-ansible/playbooks/inventory/ galera_container -m shell \
+ -a \"mysql -h localhost -e \\\"show status like '%wsrep_cluster_%';\\\"\" | tee galera.log"
+scp root@$OPNFV_HOST_IP:~/galera.log $LOG_PATH/galera.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'FAILED\|UNREACHABLE' $LOG_PATH/galera.log; then
+ echo "Error: Database cluster verification failed!"
+ exit 1
+fi
+echo "Info: Database cluster verification successful!"
+
+#-------------------------------------------------------------------------------
+# Install OpenStack
+#-------------------------------------------------------------------------------
+# This is an openstack-ansible playbook. Check the upstream documentation for details.
+#-------------------------------------------------------------------------------
+echo "Info: Installing OpenStack on target hosts"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ releng-xci/.cache/repos/openstack-ansible/playbooks/setup-openstack.yml | tee opnfv-setup-openstack.log"
+scp root@$OPNFV_HOST_IP:~/opnfv-setup-openstack.log $LOG_PATH/opnfv-setup-openstack.log
+echo "-----------------------------------------------------------------------"
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
+ echo "Error: OpenStack installation failed!"
+ exit 1
+fi
+echo "Info: OpenStack installation is successfully completed!"
+
+#-------------------------------------------------------------------------------
+# Get OpenStack login information
+#-------------------------------------------------------------------------------
+echo "Info: Openstack login details"
+echo "-----------------------------------------------------------------------"
+OS_USER_CONFIG=$XCI_FLAVOR_ANSIBLE_FILE_PATH/openstack_user_config.yml
+python -c \
+"import yaml
+if '$XCI_FLAVOR' == 'aio':
+ print 'Info: Horizon UI is available at https://$OPNFV_HOST_IP'
+else:
+ host_info = open('$OS_USER_CONFIG', 'r')
+ net_config = yaml.safe_load(host_info)
+ print 'Info: Horizon UI is available at https://{}' \
+ .format(net_config['global_overrides']['external_lb_vip_address'])"
+USERNAME=$(ssh -q root@$OPNFV_HOST_IP awk "/OS_USERNAME=./" openrc)
+PASSWORD=$(ssh -q root@$OPNFV_HOST_IP awk "/OS_PASSWORD=./" openrc)
+echo "Info: Admin username - ${USERNAME##*=}"
+echo "Info: Admin password - ${PASSWORD##*=}"
+echo "Info: It is recommended to change the default password."
+
+# vim: set ts=4 sw=4 expandtab:
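deploy.sh assumes its environment has already been prepared by the top-level XCI deployment script. A minimal sketch of that environment, with illustrative values only (the paths and flavor are assumptions, not mandated by the script itself):

    # Illustrative only: deploy.sh expects these variables to be exported beforehand
    export XCI_PATH=${HOME}/releng-xci                 # hypothetical clone location
    export XCI_FLAVOR=mini                             # aio, mini, noha or ha
    export XCI_FLAVOR_ANSIBLE_FILE_PATH=${XCI_PATH}/xci/installer/osa/files/${XCI_FLAVOR}
    export OPNFV_HOST_IP=192.168.122.2                 # matches the static inventories below
    export LOG_PATH=${XCI_PATH}/xci/logs               # hypothetical log directory
    export XCI_ANSIBLE_VERBOSITY=""                    # e.g. "-vvv" for debugging
    export OPENSTACK_OSA_VERSION=7b3aac28a0a87e5966527829f6b0abcbc2303cc7
    ${XCI_PATH}/xci/installer/osa/deploy.sh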
diff --git a/xci/installer/osa/files/aio/flavor-vars.yml b/xci/installer/osa/files/aio/flavor-vars.yml
new file mode 100644
index 00000000..6ac1e0fe
--- /dev/null
+++ b/xci/installer/osa/files/aio/flavor-vars.yml
@@ -0,0 +1,3 @@
+---
+# This file is added intentionally in order to simplify putting files in place.
+# In the future, it might contain vars specific to this flavor.
diff --git a/xci/installer/osa/files/aio/inventory b/xci/installer/osa/files/aio/inventory
new file mode 100644
index 00000000..9a3dd9ee
--- /dev/null
+++ b/xci/installer/osa/files/aio/inventory
@@ -0,0 +1,2 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
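The static inventory above can be exercised with a quick Ansible ad-hoc ping before any playbook runs; a sketch, assuming root SSH access to the opnfv VM is already in place:

    cd xci/installer/osa/files/aio
    ansible -i inventory -u root -m ping opnfv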
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
new file mode 100644
index 00000000..329d24a0
--- /dev/null
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -0,0 +1,227 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# these versions are based on the osa commit 7b3aac28a0a87e5966527829f6b0abcbc2303cc7 on 2017-12-11
+# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=7b3aac28a0a87e5966527829f6b0abcbc2303cc7
+- name: ansible-hardening
+ scm: git
+ src: https://git.openstack.org/openstack/ansible-hardening
+ version: 46a94c72518f83d27b25a5fa960dde7130956215
+- name: apt_package_pinning
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
+ version: eba07d7dd7962d90301c49fc088551f9b35f367a
+- name: pip_install
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-pip_install
+ version: 32c27505c6e0ee00ea0fb4a1c62240c60f17a0e3
+- name: galera_client
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-galera_client
+ version: 9a8302cbba24ea4e5907567e5f93e874d30d79df
+- name: galera_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-galera_server
+ version: aa452989d7295111962f67a3f3a96d96bc408846
+- name: ceph_client
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
+ version: 34a04f7b24c80297866bc5ab56618e2211b1d5f9
+- name: haproxy_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
+ version: 9966fd96fede46c3b00c9e069e402eae90c66f17
+- name: keepalived
+ scm: git
+ src: https://github.com/evrardjp/ansible-keepalived
+ version: 5deafcab39de162ac1550c58246963974e8dcf4e
+- name: lxc_container_create
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
+ version: 68f81c679be88577633f98e8b9252a62bdcef754
+- name: lxc_hosts
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
+ version: 6b529de0315fe6cd12f6e78c00a5f2f2d3a01e28
+- name: memcached_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
+ version: ae6f721dc0342e1e7b45ff2448ab51f7539dc01f
+- name: openstack_hosts
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
+ version: 05c7f09d181de1809fd596cc0d879c49e3f86bbf
+- name: os_keystone
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
+ version: cd9d4ef7d8614d241fa40ba33c1c205fd2b47fa1
+- name: openstack_openrc
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
+ version: d594c2debc249daa5b7f6f2890f546093efd1ee5
+- name: os_aodh
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
+ version: ce871dee75511f94bfd24dde8f97e573cf6d3ead
+- name: os_barbican
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
+ version: c3e191037d0978479e3cb95a59b2986adab28c69
+- name: os_ceilometer
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
+ version: 55bb04eaad4dd5c7fdad742b3557dc30dc9d45bf
+- name: os_cinder
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
+ version: 536dd3446e0fc7fc68ab42b982ac9affc4215787
+- name: os_designate
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_designate
+ version: a65d7a3394aef340ff94587dd0bb48133ed00763
+- name: os_glance
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_glance
+ version: 43aa00424f233a6125f7a9216cec42da1d8ca4c5
+- name: os_gnocchi
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
+ version: b1f7574dc529f8298a983d8d0e09520e90b571a8
+- name: os_heat
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_heat
+ version: 0b3eb9348d55d6b1cf077a2c45b297f9a1be730d
+- name: os_horizon
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
+ version: da72526dc1757688ecec8914344e330aaa0be720
+- name: os_ironic
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
+ version: a90558f7a216e5e661c5d1a4048dbe30559542d1
+- name: os_magnum
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
+ version: 736d1707339cb99396578018a6bda7af9184fb02
+- name: os_molteniron
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
+ version: 9b4c104a252c453bcd798fec9dbae7224b3d8001
+- name: os_neutron
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
+ version: 962cd92243641092412b6ef09a41bbf5e698c4a1
+- name: os_nova
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_nova
+ version: 53df001c9034f198b9349def3c9158f8bbe43ff3
+- name: os_octavia
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
+ version: 02ad3c68802287a1ba54cf10de085dcd14c324d8
+- name: os_rally
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_rally
+ version: bc9075dba204e64d11cb397017d32b0c2297eed0
+- name: os_sahara
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
+ version: 3c45121050ba21bd284f054d7b82a338f347157f
+- name: os_swift
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_swift
+ version: f31217bb097519f15755f2337165657d7eb6b014
+- name: os_tacker
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
+ version: d95902891c4e6200510509c066006c921cfff8df
+- name: os_tempest
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
+ version: 866dedbcba180ca82c3c93823cef3db2d3241d1b
+- name: os_trove
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_trove
+ version: b425fa316999d0863a44126f239a33d8c3fec3a6
+- name: plugins
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-plugins
+ version: d2f60237761646968a4b39b15185fb5c84e7386f
+- name: rabbitmq_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
+ version: 311f76890c8f99cb0b46958775d84de614609323
+- name: repo_build
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-repo_build
+ version: 59a3f444c263235d8f0f584da8768656179fa02a
+- name: repo_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-repo_server
+ version: 7889f37cdd2a90b4b98e8ef2e886f1fd4950fc0a
+- name: rsyslog_client
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
+ version: 310cfe9506d3742be10790533ad0d16100d81498
+- name: rsyslog_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
+ version: ba7bb699c0c874c7977add86ca308ca18be8f9a8
+- name: sshd
+ scm: git
+ src: https://github.com/willshersystems/ansible-sshd
+ version: 537b9b2bc2fd7f23301222098344727f8161993c
+- name: bird
+ scm: git
+ src: https://github.com/logan2211/ansible-bird
+ version: 5033c412398cf6f98097a9ac274a6f12810c807e
+- name: etcd
+ scm: git
+ src: https://github.com/logan2211/ansible-etcd
+ version: 3933355dfe51477822db517d3c07ad561fb61318
+- name: unbound
+ scm: git
+ src: https://github.com/logan2211/ansible-unbound
+ version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
+- name: resolvconf
+ scm: git
+ src: https://github.com/logan2211/ansible-resolvconf
+ version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
+- name: ceph-defaults
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-defaults
+ version: 19884aaac1bc58921952af955c66602ccca89e93
+- name: ceph-common
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-common
+ version: 08804bd46dff42ebff64e7f27c86f2265fe4d6fc
+- name: ceph-config
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-config
+ version: e070537f443c3ae5d262835c8b0a7a992850283b
+- name: ceph-mon
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-mon
+ version: 309b7e339e057d56d9dd38bdd61998b900f45ba8
+- name: ceph-mgr
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-mgr
+ version: fe8f0864500b54cc7c9f897b871ba2cdf1d37096
+- name: ceph-osd
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-osd
+ version: e022d6773bc827e75ad051b429dec786a75d68f4
+- name: opendaylight
+ scm: git
+ src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
+ version: ef1367ad15ad10ac8cc9416f6fd49fd8b350d377
+- name: haproxy_endpoints
+ scm: git
+ src: https://github.com/logan2211/ansible-haproxy-endpoints
+ version: 49901861b16b8afaa9bccdbc649ac956610ff22b
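These role pins are normally resolved by the OSA bootstrap, but the same file can be fed to ansible-galaxy by hand to pre-fetch or inspect the pinned versions. A sketch, using the roles path exported in deploy.sh:

    ansible-galaxy install -r xci/installer/osa/files/ansible-role-requirements.yml \
        --roles-path "${HOME}/.ansible/roles" --force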
diff --git a/xci/installer/osa/files/cinder.yml b/xci/installer/osa/files/cinder.yml
new file mode 100644
index 00000000..e40b3925
--- /dev/null
+++ b/xci/installer/osa/files/cinder.yml
@@ -0,0 +1,13 @@
+---
+# This file contains an example to show how to set
+# the cinder-volume service to run in a container.
+#
+# Important note:
+# When using LVM or any iSCSI-based cinder backends, such as NetApp with
+# iSCSI protocol, the cinder-volume service *must* run on metal.
+# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
+
+container_skel:
+ cinder_volumes_container:
+ properties:
+ is_metal: false
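This snippet only takes effect once it lands in the env.d override directory on the deployment host; the copy is handled by the XCI playbooks, but a hypothetical manual equivalent looks like this:

    # /etc/openstack_deploy/env.d is the standard OSA environment override location
    mkdir -p /etc/openstack_deploy/env.d
    cp xci/installer/osa/files/cinder.yml /etc/openstack_deploy/env.d/cinder.yml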
diff --git a/xci/installer/osa/files/global-requirement-pins.txt b/xci/installer/osa/files/global-requirement-pins.txt
new file mode 100644
index 00000000..aa3b1169
--- /dev/null
+++ b/xci/installer/osa/files/global-requirement-pins.txt
@@ -0,0 +1,14 @@
+# This file should only be used to set python package pins that are
+# not present in OpenStack's upper-constraints. Any pins present in
+# this file will override any requirements set in *requirements.txt,
+# upper-constraints and any roles/vars.
+#
+# Use this file with caution!
+#
+###
+### These are pinned to ensure exactly the same behaviour forever! ###
+### These pins are updated through the sources-branch-updater script ###
+###
+pip==9.0.1
+setuptools==36.6.0
+wheel==0.30.0
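The pins above are consumed by the OSA repo build, but the same file also works as a plain pip constraints file, which is a handy way to reproduce the pinned tooling locally. A sketch, assuming a throwaway virtualenv:

    virtualenv /tmp/xci-pins && . /tmp/xci-pins/bin/activate
    pip install -c xci/installer/osa/files/global-requirement-pins.txt pip setuptools wheel
    pip list | grep -Ei '^(pip|setuptools|wheel)'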
diff --git a/xci/installer/osa/files/ha/ceph.yml b/xci/installer/osa/files/ha/ceph.yml
new file mode 100644
index 00000000..1567c492
--- /dev/null
+++ b/xci/installer/osa/files/ha/ceph.yml
@@ -0,0 +1,15 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
diff --git a/xci/installer/osa/files/ha/flavor-vars.yml b/xci/installer/osa/files/ha/flavor-vars.yml
new file mode 100644
index 00000000..167502c9
--- /dev/null
+++ b/xci/installer/osa/files/ha/flavor-vars.yml
@@ -0,0 +1,39 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'controller01': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+ 'controller02': {
+ 'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
+ 'STORAGE_IP': '172.29.244.13'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.6',
+ 'MGMT_IP': '172.29.236.14',
+ 'VXLAN_IP': '172.29.240.14',
+ 'STORAGE_IP': '172.29.244.14'
+ },
+ 'compute01': {
+ 'VLAN_IP': '192.168.122.7',
+ 'MGMT_IP': '172.29.236.15',
+ 'VXLAN_IP': '172.29.240.15',
+ 'STORAGE_IP': '172.29.244.15'
+ }
+}
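The host_info map above is consumed by the XCI network configuration templates. Individual values can be rendered ad hoc for debugging; a sketch, assuming the ha inventory from the next file sits in the same directory:

    cd xci/installer/osa/files/ha
    ansible -i inventory controller00 -m debug \
        -e @flavor-vars.yml -a "msg={{host_info['controller00']['MGMT_IP']}}"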
diff --git a/xci/installer/osa/files/ha/inventory b/xci/installer/osa/files/ha/inventory
new file mode 100644
index 00000000..94b1d074
--- /dev/null
+++ b/xci/installer/osa/files/ha/inventory
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
diff --git a/xci/installer/osa/files/ha/openstack_user_config.yml b/xci/installer/osa/files/ha/openstack_user_config.yml
new file mode 100644
index 00000000..360aa5cb
--- /dev/null
+++ b/xci/installer/osa/files/ha/openstack_user_config.yml
@@ -0,0 +1,255 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+ - "172.29.236.222"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
diff --git a/xci/installer/osa/files/ha/user_ceph.yml b/xci/installer/osa/files/ha/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/installer/osa/files/ha/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+ "RBD":
+ volume_driver: cinder.volume.drivers.rbd.RBDDriver
+ rbd_pool: volumes
+ rbd_ceph_conf: /etc/ceph/ceph.conf
+ rbd_store_chunk_size: 8
+ volume_backend_name: rbddriver
+ rbd_user: cinder
+ rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+ report_discard_supported: true
diff --git a/xci/installer/osa/files/ha/user_variables.yml b/xci/installer/osa/files/ha/user_variables.yml
new file mode 100644
index 00000000..72960a01
--- /dev/null
+++ b/xci/installer/osa/files/ha/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from aio to ensure we can bump the OSA SHA with the
+# current RAM allocation. Higher values will be tested once the bump is done.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+ - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.220:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
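Once the stack is up, the endpoint settings at the end of this file can be smoke-tested from the deployment host. A sketch, assuming the self-signed xci certificate configured above (hence the -k):

    # Keystone should answer on the external VIP used in openrc_os_auth_url
    curl -ks https://192.168.122.220:5000/v3 | python -m json.tool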
diff --git a/xci/installer/osa/files/ha/user_variables_ceph.yml b/xci/installer/osa/files/ha/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/installer/osa/files/ha/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
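The devices list above points ceph-ansible at loop devices rather than physical disks. Preparing those devices is handled by the XCI playbooks on the OSD hosts; a hypothetical manual equivalent, with sizes chosen purely for illustration:

    # Hypothetical: back /dev/loop0-2 with sparse files for the Ceph OSDs
    for i in 0 1 2; do
        truncate -s 10G /var/lib/ceph-loop${i}.img
        losetup /dev/loop${i} /var/lib/ceph-loop${i}.img
    done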
diff --git a/xci/installer/osa/files/mini/ceph.yml b/xci/installer/osa/files/mini/ceph.yml
new file mode 100644
index 00000000..5c09b471
--- /dev/null
+++ b/xci/installer/osa/files/mini/ceph.yml
@@ -0,0 +1,9 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+ compute00:
+ ip: 172.29.236.12
diff --git a/xci/installer/osa/files/mini/flavor-vars.yml b/xci/installer/osa/files/mini/flavor-vars.yml
new file mode 100644
index 00000000..0d446ba2
--- /dev/null
+++ b/xci/installer/osa/files/mini/flavor-vars.yml
@@ -0,0 +1,21 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+}
diff --git a/xci/installer/osa/files/mini/inventory b/xci/installer/osa/files/mini/inventory
new file mode 100644
index 00000000..eb73e5e3
--- /dev/null
+++ b/xci/installer/osa/files/mini/inventory
@@ -0,0 +1,8 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
diff --git a/xci/installer/osa/files/mini/openstack_user_config.yml b/xci/installer/osa/files/mini/openstack_user_config.yml
new file mode 100644
index 00000000..f9ccee24
--- /dev/null
+++ b/xci/installer/osa/files/mini/openstack_user_config.yml
@@ -0,0 +1,170 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/xci/installer/osa/files/mini/user_ceph.yml b/xci/installer/osa/files/mini/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/installer/osa/files/mini/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+ "RBD":
+ volume_driver: cinder.volume.drivers.rbd.RBDDriver
+ rbd_pool: volumes
+ rbd_ceph_conf: /etc/ceph/ceph.conf
+ rbd_store_chunk_size: 8
+ volume_backend_name: rbddriver
+ rbd_user: cinder
+ rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+ report_discard_supported: true
diff --git a/xci/installer/osa/files/mini/user_variables.yml b/xci/installer/osa/files/mini/user_variables.yml
new file mode 100644
index 00000000..9ec9e405
--- /dev/null
+++ b/xci/installer/osa/files/mini/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from aio since this flavor is mostly
+# for short CI loops and users with lower requirements.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+ - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/mini/user_variables_ceph.yml b/xci/installer/osa/files/mini/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/installer/osa/files/mini/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
diff --git a/xci/installer/osa/files/noha/ceph.yml b/xci/installer/osa/files/noha/ceph.yml
new file mode 100644
index 00000000..0deb522e
--- /dev/null
+++ b/xci/installer/osa/files/noha/ceph.yml
@@ -0,0 +1,11 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
diff --git a/xci/installer/osa/files/noha/flavor-vars.yml b/xci/installer/osa/files/noha/flavor-vars.yml
new file mode 100644
index 00000000..3c69a34b
--- /dev/null
+++ b/xci/installer/osa/files/noha/flavor-vars.yml
@@ -0,0 +1,27 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+ 'compute01': {
+ 'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
+ 'STORAGE_IP': '172.29.244.13'
+ }
+}
diff --git a/xci/installer/osa/files/noha/inventory b/xci/installer/osa/files/noha/inventory
new file mode 100644
index 00000000..b4f9f6d0
--- /dev/null
+++ b/xci/installer/osa/files/noha/inventory
@@ -0,0 +1,9 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
+compute01 ansible_ssh_host=192.168.122.5
diff --git a/xci/installer/osa/files/noha/openstack_user_config.yml b/xci/installer/osa/files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..fb12655e
--- /dev/null
+++ b/xci/installer/osa/files/noha/openstack_user_config.yml
@@ -0,0 +1,172 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
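+
+# Note: the NFS exports referenced above (172.29.244.12:/images and
+# 172.29.244.12:/volumes) are expected to be served by compute00, whose storage
+# address this is; compute00 is assigned the configure-nfs role in
+# configure-targethosts.yml.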
diff --git a/xci/installer/osa/files/noha/user_ceph.yml b/xci/installer/osa/files/noha/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/installer/osa/files/noha/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
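+# For illustration only: with the ceph mons running on controller00 this
+# renders to something like "[ '192.168.122.3' ]"; the exact entries depend on
+# the mon group membership and each host's ansible_host value.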
+cinder_backends:
+ "RBD":
+ volume_driver: cinder.volume.drivers.rbd.RBDDriver
+ rbd_pool: volumes
+ rbd_ceph_conf: /etc/ceph/ceph.conf
+ rbd_store_chunk_size: 8
+ volume_backend_name: rbddriver
+ rbd_user: cinder
+ rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+ report_discard_supported: true
diff --git a/xci/installer/osa/files/noha/user_variables.yml b/xci/installer/osa/files/noha/user_variables.yml
new file mode 100644
index 00000000..66573428
--- /dev/null
+++ b/xci/installer/osa/files/noha/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from the AIO configuration so that the OSA SHA can
+# be bumped within the current RAM allocation. Higher values will be tested once
+# the bump is done.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+ - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
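+
+# Quick sanity check (illustrative): once the opnfv host has a generated openrc,
+# the public endpoints configured above can be exercised with the OpenStack
+# client, e.g. ". ~/openrc && openstack endpoint list".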
diff --git a/xci/installer/osa/files/noha/user_variables_ceph.yml b/xci/installer/osa/files/noha/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/installer/osa/files/noha/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
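+# Note: the loop devices listed under "devices" are assumed to be prepared on
+# the storage nodes beforehand (presumably by the configure-ceph role applied
+# in configure-targethosts.yml); adjust the list when real block devices are
+# used.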
diff --git a/xci/installer/osa/files/openstack_services.yml b/xci/installer/osa/files/openstack_services.yml
new file mode 100644
index 00000000..86501634
--- /dev/null
+++ b/xci/installer/osa/files/openstack_services.yml
@@ -0,0 +1,222 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+## NOTICE on items in this file:
+## * If you use anything in the *._git_install_branch field that is not a TAG,
+##   make sure to leave an in-line comment as to "why".
+
+## For the sake of anyone else editing this file:
+## * If you add services to this file please do so in alphabetical order.
+## * Every entry should be namespaced with the name of the client followed by an "_"
+## * All items within this file should be separated by `name_`; note that the name of the
+##   package should be one long name with no additional `_` separating it.
+
+
+### Before this is shipped, all of these services should have a tag set as the branch,
+### or have a comment / reason attached to them as to why a tag cannot work.
+
+
+## Global Requirements
+requirements_git_repo: https://git.openstack.org/openstack/requirements
+requirements_git_install_branch: 691711c0effddd9cbaaadba3d494c15bc422fdd5 # HEAD of "master" as of 24.11.2017
+
+
+## Aodh service
+aodh_git_repo: https://git.openstack.org/openstack/aodh
+aodh_git_install_branch: 359043dc774be847cb539d18d13e336d40453e72 # HEAD of "master" as of 24.11.2017
+aodh_git_project_group: aodh_all
+
+
+## Barbican service
+barbican_git_repo: https://git.openstack.org/openstack/barbican
+barbican_git_install_branch: 5617d605f2e12840933e4a9d6417912cdbb811d5 # HEAD of "master" as of 24.11.2017
+barbican_git_project_group: barbican_all
+
+
+## Ceilometer service
+ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
+ceilometer_git_install_branch: bd464f1f572ba150f52e284de430d13045dc6c18 # HEAD of "master" as of 24.11.2017
+ceilometer_git_project_group: ceilometer_all
+
+
+## Cinder service
+cinder_git_repo: https://git.openstack.org/openstack/cinder
+cinder_git_install_branch: 80558687d0fa55f2adf699e7369ebe3dbc3591bf # HEAD of "master" as of 24.11.2017
+cinder_git_project_group: cinder_all
+
+
+## Designate service
+designate_git_repo: https://git.openstack.org/openstack/designate
+designate_git_install_branch: 2f75586379e8d611f37e06d385e79d0bc2c84ca1 # HEAD of "master" as of 24.11.2017
+designate_git_project_group: designate_all
+
+
+## Horizon Designate dashboard plugin
+designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
+designate_dashboard_git_install_branch: 571e127e5f853aa4dbdd377d831e32f8ff81eafe # HEAD of "master" as of 24.11.2017
+designate_dashboard_git_project_group: horizon_all
+
+
+## Dragonflow service
+dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
+dragonflow_git_install_branch: 7bf00cf315659252f03f6c65f6159a924da6f978 # HEAD of "master" as of 24.11.2017
+dragonflow_git_project_group: neutron_all
+
+
+## Glance service
+glance_git_repo: https://git.openstack.org/openstack/glance
+glance_git_install_branch: d88bd2ca8ef95810441dae640d3c6b9e79eca353 # HEAD of "master" as of 24.11.2017
+glance_git_project_group: glance_all
+
+
+## Heat service
+heat_git_repo: https://git.openstack.org/openstack/heat
+heat_git_install_branch: f4a06c2a92a361dbb401107b4ea1ab60972f473e # HEAD of "master" as of 24.11.2017
+heat_git_project_group: heat_all
+
+
+## Horizon service
+horizon_git_repo: https://git.openstack.org/openstack/horizon
+horizon_git_install_branch: 846d269d90e01e463b510474040e0ad984a5679f # HEAD of "master" as of 24.11.2017
+horizon_git_project_group: horizon_all
+
+## Horizon Ironic dashboard plugin
+ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
+ironic_dashboard_git_install_branch: d6199d51171e6c8700663b0b0618ee0adf033b4d # HEAD of "master" as of 24.11.2017
+ironic_dashboard_git_project_group: horizon_all
+
+## Horizon Magnum dashboard plugin
+magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
+magnum_dashboard_git_install_branch: 6160d903fae9c652b459c93c218e0ea75924a85d # HEAD of "master" as of 24.11.2017
+magnum_dashboard_git_project_group: horizon_all
+
+## Horizon LBaaS dashboard plugin
+neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
+neutron_lbaas_dashboard_git_install_branch: ef650294bcc7447d441e6a710c39d64e384e1b27 # HEAD of "master" as of 24.11.2017
+neutron_lbaas_dashboard_git_project_group: horizon_all
+
+## Horizon FWaaS dashboard plugin
+neutron_fwaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-fwaas-dashboard
+neutron_fwaas_dashboard_git_install_branch: 6de122d4753a6db24d2dc4c22a71e702ed980e82 # HEAD of "master" as of 24.11.2017
+neutron_fwaas_dashboard_git_project_group: horizon_all
+
+## Horizon Sahara dashboard plugin
+sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
+sahara_dashboard_git_install_branch: 3e5c59e6229dac8b303029058fcee9d61200ebc8 # HEAD of "master" as of 24.11.2017
+sahara_dashboard_git_project_group: horizon_all
+
+
+## Keystone service
+keystone_git_repo: https://git.openstack.org/openstack/keystone
+keystone_git_install_branch: 70fe4ec09b55def21361a32c8fa7f12e7c891ab1 # HEAD of "master" as of 24.11.2017
+keystone_git_project_group: keystone_all
+
+
+## Neutron service
+neutron_git_repo: https://git.openstack.org/openstack/neutron
+neutron_git_install_branch: d1277c1630570ca45b490c48371e3f7e97be78c3 # HEAD of "master" as of 24.11.2017
+neutron_git_project_group: neutron_all
+
+neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
+neutron_lbaas_git_install_branch: b1123e7a759248dfa63afdf8b86aafd692572ebd # HEAD of "master" as of 24.11.2017
+neutron_lbaas_git_project_group: neutron_all
+
+neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
+neutron_vpnaas_git_install_branch: 79e4eb81dd05588bcf68b92d46c62f0d26153542 # HEAD of "master" as of 24.11.2017
+neutron_vpnaas_git_project_group: neutron_all
+
+neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
+neutron_fwaas_git_install_branch: 74eac2ca2980e6162d9c88ee6bd48830386c392a # HEAD of "master" as of 24.11.2017
+neutron_fwaas_git_project_group: neutron_all
+
+neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
+neutron_dynamic_routing_git_install_branch: 183c3fa4840d22be1974534eb9e1b28b552f4a42 # HEAD of "master" as of 24.11.2017
+neutron_dynamic_routing_git_project_group: neutron_all
+
+networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
+networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 24.11.2017
+networking_calico_git_project_group: neutron_all
+
+## Nova service
+nova_git_repo: https://git.openstack.org/openstack/nova
+nova_git_install_branch: 22a790ef45b0523e8cf2ed97d14e050431c90fd9 # HEAD of "master" as of 24.11.2017
+nova_git_project_group: nova_all
+
+
+## PowerVM Virt Driver
+nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
+nova_powervm_git_install_branch: f2de4441e39b0f66cf31f854b228e9e7037f04de # HEAD of "master" as of 24.11.2017
+nova_powervm_git_project_group: nova_all
+
+
+## LXD Virt Driver
+nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
+nova_lxd_git_install_branch: e498de603b31c189fd32a6067d45a36575b96b0a # HEAD of "master" as of 24.11.2017
+nova_lxd_git_project_group: nova_all
+
+
+## Sahara service
+sahara_git_repo: https://git.openstack.org/openstack/sahara
+sahara_git_install_branch: 395856c513b1efad82db8fa78fb1cbfe0f3a6749 # HEAD of "master" as of 24.11.2017
+sahara_git_project_group: sahara_all
+
+
+## Swift service
+swift_git_repo: https://git.openstack.org/openstack/swift
+swift_git_install_branch: 3135878d2fe9909f49fcadeeb9cc6c6933d06127 # HEAD of "master" as of 24.11.2017
+swift_git_project_group: swift_all
+
+
+## Swift3 middleware
+swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
+swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 24.11.2017
+swift_swift3_git_project_group: swift_all
+
+
+## Ironic service
+ironic_git_repo: https://git.openstack.org/openstack/ironic
+ironic_git_install_branch: 27ce77142bfb9ac56e85db37e0923a0eb47f2f7a # HEAD of "master" as of 24.11.2017
+ironic_git_project_group: ironic_all
+
+## Magnum service
+magnum_git_repo: https://git.openstack.org/openstack/magnum
+magnum_git_install_branch: 4bf3b3263870a4ec81cf372713cacec446b3ee84 # HEAD of "master" as of 24.11.2017
+magnum_git_project_group: magnum_all
+
+## Trove service
+trove_git_repo: https://git.openstack.org/openstack/trove
+trove_git_install_branch: b09d0eb3135047891a369d3c0eb2c6e9ae649f5b # HEAD of "master" as of 24.11.2017
+trove_git_project_group: trove_all
+
+## Horizon Trove dashboard plugin
+trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
+trove_dashboard_git_install_branch: 14a4609606d42cae827b8fc6b44453caea258976 # HEAD of "master" as of 24.11.2017
+trove_dashboard_git_project_group: horizon_all
+
+## Octavia service
+octavia_git_repo: https://git.openstack.org/openstack/octavia
+octavia_git_install_branch: bb9bb2d05b268cff9846e0a09ad3940be5fe5a80 # HEAD of "master" as of 24.11.2017
+octavia_git_project_group: octavia_all
+
+## Molteniron service
+molteniron_git_repo: https://git.openstack.org/openstack/molteniron
+molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 24.11.2017
+molteniron_git_project_group: molteniron_all
+
+## Tacker service
+tacker_git_repo: https://git.openstack.org/openstack/tacker
+tacker_git_install_branch: cc03b5d952527b8cad2e2e309a97d55afb1ca559 # HEAD of "master" as of 24.11.2017
+tacker_git_project_group: tacker_all
diff --git a/xci/installer/osa/files/setup-openstack.yml b/xci/installer/osa/files/setup-openstack.yml
new file mode 100644
index 00000000..c2cb1c79
--- /dev/null
+++ b/xci/installer/osa/files/setup-openstack.yml
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- include: os-keystone-install.yml
+- include: os-glance-install.yml
+- include: os-cinder-install.yml
+- include: os-nova-install.yml
+- include: os-neutron-install.yml
+- include: os-heat-install.yml
+- include: os-horizon-install.yml
+- include: os-swift-install.yml
+- include: os-ironic-install.yml
+- include: os-tacker-install.yml
+- include: os-tempest-install.yml
+ when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
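+# The tempest playbook only runs when tempest_install or tempest_run evaluates
+# to true; configure-opnfvhost.yml sets both in user_variables.yml based on
+# RUN_TEMPEST.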
diff --git a/xci/installer/osa/playbooks/bootstrap-scenarios.yml b/xci/installer/osa/playbooks/bootstrap-scenarios.yml
new file mode 100644
index 00000000..98acf73b
--- /dev/null
+++ b/xci/installer/osa/playbooks/bootstrap-scenarios.yml
@@ -0,0 +1,23 @@
+---
+#
+# This file is meant to be used by scenarios to plug into XCI.
+# Ideally, all a scenario needs to do at this point is include its
+# role using a statement like the following one:
+#
+# - name: Include foobar role
+# include_role:
+# name: "foobar"
+# when: DEPLOY_SCENARIO == "foobar"
+
+- name: Prepare everything to run the os-nosdn-nofeature scenario
+ include_role:
+ name: "os-nosdn-nofeature"
+ when: DEPLOY_SCENARIO == 'os-nosdn-nofeature'
+- name: Prepare everything to run the os-odl-nofeature scenario
+ include_role:
+ name: "os-odl-nofeature"
+ when: DEPLOY_SCENARIO == 'os-odl-nofeature'
+- name: Prepare everything to run the os-odl-sfc scenario
+ include_role:
+ name: "os-odl-sfc"
+ when: DEPLOY_SCENARIO == 'os-odl-sfc'
diff --git a/xci/installer/osa/playbooks/configure-localhost.yml b/xci/installer/osa/playbooks/configure-localhost.yml
new file mode 100644
index 00000000..caa5d673
--- /dev/null
+++ b/xci/installer/osa/playbooks/configure-localhost.yml
@@ -0,0 +1,75 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ failed_when: false
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+
+ - name: cleanup leftovers of previous deployment
+ file:
+ path: "{{ item }}"
+ state: absent
+ recurse: no
+ with_items:
+ - "{{ XCI_CACHE }}/repos"
+ - "{{ LOG_PATH }} "
+ - "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
+
+ roles:
+ - role: clone-repository
+ project: "openstack/openstack-ansible-openstack_openrc"
+ repo: "{{ OPENSTACK_OSA_OPENRC_GIT_URL }}"
+ dest: roles/openstack-ansible-openstack_openrc
+ version: "master"
+ - role: clone-repository
+ project: "openstack/openstack-ansible"
+ repo: "{{ OPENSTACK_OSA_GIT_URL }}"
+ dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
+ version: "{{ OPENSTACK_OSA_VERSION }}"
+
+ tasks:
+ - name: create log directory {{LOG_PATH}}
+ file:
+ path: "{{LOG_PATH}}"
+ state: directory
+ recurse: no
+ - name: check if certificate directory /etc/ssl/certs exists already
+ stat: path=/etc/ssl/certs
+ register: check_etc_ssl_certs
+ - name: create certificate directory /etc/ssl/certs
+ become: true
+ file:
+ path: "/etc/ssl/certs"
+ state: directory
+      when: not check_etc_ssl_certs.stat.exists
+ - name: create key directory /etc/ssl/private
+ become: true
+ file:
+ path: "/etc/ssl/private"
+ state: directory
+ - name: generate self signed certificate
+ command: openssl req -new -nodes -x509 -subj "{{ XCI_SSL_SUBJECT }}" -days 3650 -keyout "/etc/ssl/private/xci.key" -out "/etc/ssl/certs/xci.crt" -extensions v3_ca
+ become: true
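+    # The self-signed pair generated above is what haproxy_user_ssl_cert and
+    # haproxy_user_ssl_key point at in user_variables.yml; it can be inspected
+    # with, for example: openssl x509 -in /etc/ssl/certs/xci.crt -noout -subject -dates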
+ - name: Synchronize local development OSA repository to XCI paths
+      # the synchronize module is much faster than the copy module
+ synchronize:
+ src: "{{ OPENSTACK_OSA_DEV_PATH }}"
+ dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
+ recursive: yes
+ delete: yes
+ when:
+ - OPENSTACK_OSA_DEV_PATH != ""
diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..de922d3c
--- /dev/null
+++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,185 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+ - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{XCI_INSTALLER}}/files/{{ XCI_FLAVOR }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: configure-network
+ when: XCI_FLAVOR != "aio"
+
+ tasks:
+ - name: generate SSH keys
+ shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+ args:
+ creates: "{{ ansible_env.HOME }}/.ssh/id_rsa"
+ - name: fetch public key
+ fetch:
+ src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
+ dest: "{{ XCI_PATH }}/xci/files/authorized_keys"
+ flat: yes
+ - name: Copy releng-xci to remote host
+ synchronize:
+ src: "{{ XCI_PATH }}/"
+ dest: "{{ remote_xci_path }}"
+ recursive: yes
+ delete: yes
+ - name: copy flavor inventory
+ shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/inventory {{ remote_xci_playbooks }}"
+ - name: copy openstack_deploy
+ shell: "/bin/cp -rf {{OPENSTACK_OSA_PATH}}/etc/openstack_deploy {{OPENSTACK_OSA_ETC_PATH}}"
+ - name: copy openstack_user_config.yml
+ shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/openstack_user_config.yml {{OPENSTACK_OSA_ETC_PATH}}"
+ failed_when: false
+ - name: copy all user override files
+ shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
+ failed_when: false
+ - name: copy cinder.yml
+ shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
+ - name: Configure AIO tempest
+ lineinfile:
+ path: "{{ OPENSTACK_OSA_ETC_PATH }}/user_variables.yml"
+ line: "{{ item }}: {{ RUN_TEMPEST | bool }}"
+ state: present
+ with_items:
+ - "tempest_install"
+ - "tempest_run"
+ - block:
+ - name: copy ceph.yml
+ shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/conf.d/"
+ - name: copy user_ceph.yml
+ shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/user_ceph.yml"
+ - name: copy user_variables_ceph.yml
+ shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_variables_ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/user_variables_ceph.yml"
+ when: XCI_CEPH_ENABLED == "true"
+ # TODO: We need to get rid of this as soon as the issue is fixed upstream
+ - name: change the haproxy state from disable to enable
+ replace:
+ dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-keystone-install.yml"
+ regexp: '(\s+)haproxy_state: disabled'
+ replace: '\1haproxy_state: enabled'
+ - name: copy OPNFV OpenStack playbook
+ shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
+ - name: copy pinned versions of OSA Roles and global requirements
+ shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/{{ item }} {{OPENSTACK_OSA_PATH}}/{{ item }}"
+ with_items:
+ - "ansible-role-requirements.yml"
+ - "global-requirement-pins.txt"
+ when:
+ - OPENSTACK_OSA_VERSION != "master"
+ - name: copy pinned versions of OpenStack services
+ shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/openstack_services.yml {{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
+ when:
+ - OPENSTACK_OSA_VERSION != "master"
+ - include: bootstrap-scenarios.yml
+ - name: bootstrap ansible on opnfv host
+ command: "/bin/bash ./scripts/bootstrap-ansible.sh"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}"
+ - name: install python Crypto module
+ package:
+ name: "{{ python_crypto_package_name }}"
+ - name: install PyYAML
+ pip:
+ name: pyyaml
+ state: present
+ - name: generate password token
+ command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
+ args:
+ chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
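+    # pw-token-gen.py fills the empty credential entries in user_secrets.yml
+    # with randomly generated values.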
+ - name: check if certificate directory /etc/ssl/certs exists already
+ stat: path=/etc/ssl/certs
+ register: check_etc_ssl_certs
+ - name: create certificate directory /etc/ssl/certs
+ file:
+ path: "/etc/ssl/certs"
+ state: directory
+      when: not check_etc_ssl_certs.stat.exists
+ - name: create key directory /etc/ssl/private
+ file:
+ path: "/etc/ssl/private"
+ state: directory
+ - name: copy certificate to /etc/ssl/certs
+ copy:
+ src: "/etc/ssl/certs/xci.crt"
+ dest: "/etc/ssl/certs/"
+ - name: read remote key from /etc/ssl/private
+ set_fact:
+ xci_ssl_key: "{{ lookup('pipe', 'sudo cat /etc/ssl/private/xci.key' ) }}"
+ - name: copy key to /etc/ssl/private
+ copy:
+ content: "{{ xci_ssl_key }}"
+ dest: "/etc/ssl/private/xci.key"
+ become: true
+ - name: install opnfv required packages
+ package:
+ name: "{{ opnfv_required_packages }}"
+ state: latest
+ # Docker is needed for functest
+ - name: Ensure Docker service is started and enabled
+ service:
+ name: "{{ docker_service_name }}"
+ state: started
+ enabled: yes
+ - name: install opnfv required pip packages
+ pip:
+ name: "{{ opnfv_required_pip }}"
+ state: present
+
+- hosts: localhost
+ remote_user: root
+
+ tasks:
+ - name: Append public keys to authorized_keys
+ shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> {{ XCI_PATH }}/xci/files/authorized_keys"
+
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ failed_when: false
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+ - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+ - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/user_variables.yml"
+ roles:
+ - role: "openstack-ansible-openstack_openrc"
+
+ tasks:
+ - name: add extra insecure flag to generated openrc
+ blockinfile:
+ dest: "{{ ansible_env.HOME }}/openrc"
+ block: |
+ export OS_INSECURE=true
+
+ - name: fetch generated openrc
+ fetch:
+ src: "{{ ansible_env.HOME }}/openrc"
+ dest: "{{ XCI_PATH }}/.cache/openrc"
+ flat: true
diff --git a/xci/installer/osa/playbooks/configure-targethosts.yml b/xci/installer/osa/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..fb43a920
--- /dev/null
+++ b/xci/installer/osa/playbooks/configure-targethosts.yml
@@ -0,0 +1,49 @@
+---
+- hosts: all
+ remote_user: root
+ tasks:
+ - name: add public key to host
+ copy:
+ src: "{{ XCI_PATH }}/xci/files/authorized_keys"
+ dest: /root/.ssh/authorized_keys
+
+- hosts: controller
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+ - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+ roles:
+ - role: configure-network
+    # force time synchronization with ntp, otherwise the nodes' clocks will drift apart
+ - role: synchronize-time
+
+- hosts: compute
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+ - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+ roles:
+ - role: configure-network
+    # force time synchronization with ntp, otherwise the nodes' clocks will drift apart
+ - role: synchronize-time
+ - role: configure-ceph
+ when: XCI_CEPH_ENABLED == "true"
+
+- hosts: compute00
+ remote_user: root
+ roles:
+ - role: configure-nfs
diff --git a/xci/installer/osa/playbooks/inventory b/xci/installer/osa/playbooks/inventory
new file mode 100644
index 00000000..fd9af901
--- /dev/null
+++ b/xci/installer/osa/playbooks/inventory
@@ -0,0 +1,10 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2