diff options
Diffstat (limited to 'prototypes')
50 files changed, 2406 insertions, 186 deletions
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/bifrost-provision.sh index 83cf1cc1b..d3b28ee10 100755 --- a/prototypes/bifrost/scripts/test-bifrost-deployment.sh +++ b/prototypes/bifrost/scripts/bifrost-provision.sh @@ -7,42 +7,45 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - set -eux set -o pipefail + export PYTHONUNBUFFERED=1 SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)" BIFROST_HOME=$SCRIPT_HOME/.. ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack} +ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-"-vvvv"} ENABLE_VENV="false" USE_DHCP="false" USE_VENV="false" BUILD_IMAGE=true PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600} -BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'} - -# Set defaults for ansible command-line options to drive the different -# tests. - -# NOTE(TheJulia/cinerama): The variables defined on the command line -# for the default and DHCP tests are to drive the use of Cirros as the -# deployed operating system, and as such sets the test user to cirros, -# and writes a debian style interfaces file out to the configuration -# drive as cirros does not support the network_info.json format file -# placed in the configuration drive. The "build image" test does not -# use cirros. 
- -TEST_VM_NUM_NODES=3 -export TEST_VM_NODE_NAMES="xcimaster controller00 compute00" -export VM_DOMAIN_TYPE="kvm" + +# Ensure the right inventory files is used based on branch +CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD) +if [ $CURRENT_BIFROST_BRANCH = "master" ]; then + BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'} + INVENTORY_FILE_FORMAT="baremetal_json_file" +else + BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.csv'} + INVENTORY_FILE_FORMAT="baremetal_csv_file" +fi +export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE + +# Default settings for VMs +export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3} +export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"opnfv controller00 compute00"} +export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm} export VM_CPU=${VM_CPU:-4} export VM_DISK=${VM_DISK:-100} +export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192} export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe} + +# Settings for bifrost TEST_PLAYBOOK="opnfv-virtual.yaml" USE_INSPECTOR=true USE_CIRROS=false TESTING_USER=root -VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192} DOWNLOAD_IPA=true CREATE_IPA_IMAGE=false INSPECT_NODES=true @@ -50,27 +53,21 @@ INVENTORY_DHCP=false INVENTORY_DHCP_STATIC_IP=false WRITE_INTERFACES_FILE=true -# Set BIFROST_INVENTORY_SOURCE -export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json - -# settings for console access +# Settings for console access export DIB_DEV_USER_PWDLESS_SUDO=yes export DIB_DEV_USER_PASSWORD=devuser -# settings for distro: trusty/ubuntu-minimal, 7/centos7 +# Settings for distro: trusty/ubuntu-minimal, 7/centos7, 42.2/suse export DIB_OS_RELEASE=${DIB_OS_RELEASE:-trusty} export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal} -# for centos 7: "vim,less,bridge-utils,iputils,rsyslog,curl" +# DIB OS packages export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl"} # Additional dib elements export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"} # Source 
Ansible -# NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep -# under the hood which -x will detect, so for this step, we need to suspend -# and then re-enable the feature. set +x +o nounset $SCRIPT_HOME/env-setup.sh source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup @@ -87,29 +84,29 @@ cd $BIFROST_HOME/playbooks # Syntax check of dynamic inventory test path for task in syntax-check list-tasks; do - ${ANSIBLE} -vvvv \ + ${ANSIBLE} ${ANSIBLE_VERBOSITY} \ -i inventory/localhost \ test-bifrost-create-vm.yaml \ --${task} - ${ANSIBLE} -vvvv \ + ${ANSIBLE} ${ANSIBLE_VERBOSITY} \ -i inventory/localhost \ ${TEST_PLAYBOOK} \ --${task} \ -e testing_user=${TESTING_USER} done -# Create the test VMS -${ANSIBLE} -vvvv \ +# Create the VMS +${ANSIBLE} ${ANSIBLE_VERBOSITY} \ -i inventory/localhost \ test-bifrost-create-vm.yaml \ -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \ -e test_vm_memory_size=${VM_MEMORY_SIZE} \ -e enable_venv=${ENABLE_VENV} \ -e test_vm_domain_type=${VM_DOMAIN_TYPE} \ - -e baremetal_json_file=${BAREMETAL_DATA_FILE} + -e ${INVENTORY_FILE_FORMAT}=${BAREMETAL_DATA_FILE} -# Execute the installation and VM startup test. -${ANSIBLE} -vvvv \ +# Execute the installation and VM startup test +${ANSIBLE} ${ANSIBLE_VERBOSITY} \ -i inventory/bifrost_inventory.py \ ${TEST_PLAYBOOK} \ -e use_cirros=${USE_CIRROS} \ @@ -128,9 +125,9 @@ ${ANSIBLE} -vvvv \ EXITCODE=$? if [ $EXITCODE != 0 ]; then - echo "****************************" - echo "Test failed. See logs folder" - echo "****************************" + echo "************************************" + echo "Provisioning failed. 
See logs folder" + echo "************************************" fi exit $EXITCODE diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh index 7d3db90b0..d570f10ad 100755 --- a/prototypes/bifrost/scripts/destroy-env.sh +++ b/prototypes/bifrost/scripts/destroy-env.sh @@ -14,14 +14,13 @@ if [[ $(whoami) != "root" ]]; then exit 1 fi -# Delete all VMs on the slave since proposed patchsets -# may leave undesired VM leftovers -for vm in $(virsh list --all --name); do +# Start fresh +rm -rf /opt/stack + +# Delete all libvirt VMs and hosts from vbmc (look for a port number) +for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do virsh destroy $vm || true virsh undefine $vm || true -done -# Delete all hosts from vbmc (look for a port number) -for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do vbmc delete $vm done diff --git a/prototypes/bifrost/scripts/osa-bifrost-deployment.sh b/prototypes/bifrost/scripts/osa-bifrost-deployment.sh deleted file mode 100755 index fb66ae9e3..000000000 --- a/prototypes/bifrost/scripts/osa-bifrost-deployment.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/bin/bash -# SPDX-license-identifier: Apache-2.0 -############################################################################## -# Copyright (c) 2016 Ericsson AB and others. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -set -eux -set -o pipefail -export PYTHONUNBUFFERED=1 -SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)" -BIFROST_HOME=$SCRIPT_HOME/.. 
-ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack} -ENABLE_VENV="false" -USE_DHCP="false" -USE_VENV="false" -BUILD_IMAGE=true -PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600} - -# ensure the right inventory files is used based on branch -CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD) -if [ $CURRENT_BIFROST_BRANCH = "master" ]; then - export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.json'} -else - export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'} -fi - -# Set defaults for ansible command-line options to drive the different -# tests. - -# NOTE(TheJulia/cinerama): The variables defined on the command line -# for the default and DHCP tests are to drive the use of Cirros as the -# deployed operating system, and as such sets the test user to cirros, -# and writes a debian style interfaces file out to the configuration -# drive as cirros does not support the network_info.json format file -# placed in the configuration drive. The "build image" test does not -# use cirros. - -TEST_VM_NUM_NODES=6 -export TEST_VM_NODE_NAMES="xcimaster controller00 controller01 controller02 compute00 compute01" -export VM_DOMAIN_TYPE="kvm" -# 8 vCPU, 60 GB HDD are minimum equipment -export VM_CPU=${VM_CPU:-8} -export VM_DISK=${VM_DISK:-100} -export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe} -TEST_PLAYBOOK="opnfv-virtual.yaml" -USE_INSPECTOR=true -USE_CIRROS=false -TESTING_USER=root -# seting the memory to 16 GB to make more easily success -# 8 GB RAM is minimum equipment, but it work with at least 12 GB. 
-VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-16384} -DOWNLOAD_IPA=true -CREATE_IPA_IMAGE=false -INSPECT_NODES=true -INVENTORY_DHCP=false -INVENTORY_DHCP_STATIC_IP=false -WRITE_INTERFACES_FILE=true - - -# settings for console access -export DIB_DEV_USER_PWDLESS_SUDO=yes -export DIB_DEV_USER_PASSWORD=devuser - -# settings for distro: trusty/ubuntu-minimal, 7/centos7 -export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial} -export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal} - -# for centos 7: "vim,less,bridge-utils,iputils,rsyslog,curl" -export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony"} - -# Additional dib elements -export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"} - -# Source Ansible -# NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep -# under the hood which -x will detect, so for this step, we need to suspend -# and then re-enable the feature. -set +x +o nounset -$SCRIPT_HOME/env-setup.sh -source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup -ANSIBLE=$(which ansible-playbook) -set -x -o nounset - -logs_on_exit() { - $SCRIPT_HOME/collect-test-info.sh -} -trap logs_on_exit EXIT - -# Change working directory -cd $BIFROST_HOME/playbooks - -# Syntax check of dynamic inventory test path -for task in syntax-check list-tasks; do - ${ANSIBLE} \ - -i inventory/localhost \ - test-bifrost-create-vm.yaml \ - --${task} - ${ANSIBLE} \ - -i inventory/localhost \ - ${TEST_PLAYBOOK} \ - --${task} \ - -e testing_user=${TESTING_USER} -done - -# Create the test VMS -${ANSIBLE} \ - -i inventory/localhost \ - test-bifrost-create-vm.yaml \ - -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \ - -e test_vm_memory_size=${VM_MEMORY_SIZE} \ - -e enable_venv=${ENABLE_VENV} \ - -e test_vm_domain_type=${VM_DOMAIN_TYPE} - -# Execute the installation and VM startup test. 
-${ANSIBLE} \ - -i inventory/bifrost_inventory.py \ - ${TEST_PLAYBOOK} \ - -e use_cirros=${USE_CIRROS} \ - -e testing_user=${TESTING_USER} \ - -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \ - -e inventory_dhcp=${INVENTORY_DHCP} \ - -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \ - -e enable_venv=${ENABLE_VENV} \ - -e enable_inspector=${USE_INSPECTOR} \ - -e inspect_nodes=${INSPECT_NODES} \ - -e download_ipa=${DOWNLOAD_IPA} \ - -e create_ipa_image=${CREATE_IPA_IMAGE} \ - -e write_interfaces_file=${WRITE_INTERFACES_FILE} \ - -e ipv4_gateway=192.168.122.1 \ - -e wait_timeout=${PROVISION_WAIT_TIMEOUT} -EXITCODE=$? - -if [ $EXITCODE != 0 ]; then - echo "****************************" - echo "Test failed. See logs folder" - echo "****************************" -fi - -exit $EXITCODE diff --git a/prototypes/xci/README.rst b/prototypes/xci/README.rst new file mode 100644 index 000000000..8318cdb52 --- /dev/null +++ b/prototypes/xci/README.rst @@ -0,0 +1,217 @@ +########################### +OPNFV XCI Developer Sandbox +########################### + +The XCI Developer Sandbox is created by the OPNFV community for the OPNFV +community in order to + +- provide means for OPNFV developers to work with OpenStack master branch, + cutting the time it takes to develop new features significantly and testing + them on OPNFV Infrastructure +- enable OPNFV developers to identify bugs earlier, issue fixes faster, and + get feedback on a daily basis +- establish mechanisms to run additional testing on OPNFV Infrastructure to + provide feedback to OpenStack community +- make the solutions we put in place available to other LF Networking Projects + OPNFV works with closely + +More information about OPNFV XCI and the sandbox can be seen on +`OPNFV Wiki <https://wiki.opnfv.org/pages/viewpage.action?pageId=8687635>`_. 
+ +=================================== +Components of XCI Developer Sandbox +=================================== + +The sandbox uses OpenStack projects for VM node creation, provisioning +and OpenStack installation. + +- **openstack/bifrost:** Bifrost (pronounced bye-frost) is a set of Ansible + playbooks that automates the task of deploying a base image onto a set + of known hardware using ironic. It provides modular utility for one-off + operating system deployment with as few operational requirements as + reasonably possible. Bifrost supports different operating systems such as + Ubuntu, CentOS, and openSUSE. + More information about this project can be seen on + `Bifrost documentation <https://docs.openstack.org/developer/bifrost/>`_. + +- **openstack/openstack-ansible:** OpenStack-Ansible is an official OpenStack + project which aims to deploy production environments from source in a way + that makes it scalable while also being simple to operate, upgrade, and grow. + More information about this project can be seen on + `OpenStack Ansible documentation <https://docs.openstack.org/developer/openstack-ansible/>`_. + +- **opnfv/releng:** OPNFV Releng Project provides additional scripts, Ansible + playbooks and configuration options in order for developers to have easy + way of using openstack/bifrost and openstack/openstack-ansible by just + setting couple of environment variables and executing a single script. + More infromation about this project can be seen on + `OPNFV Releng documentation <https://wiki.opnfv.org/display/releng>_`. + +========== +Basic Flow +========== + +Here are the steps that take place upon the execution of the sandbox script +``xci-deploy.sh``: + +1. Sources environment variables in order to set things up properly. +2. Installs ansible on the host where sandbox script is executed. +3. Creates and provisions VM nodes based on the flavor chosen by the user. +4. Configures the host where the sandbox script is executed. +5. 
Configures the deployment host which the OpenStack installation will + be driven from. +6. Configures the target hosts where OpenStack will be installed. +7. Configures the target hosts as controller(s) and compute(s) nodes. +8. Starts the OpenStack installation. + +===================== +Sandbox Prerequisites +===================== + +In order to use this sandbox, the host must have certain packages installed. + +- libvirt +- python +- pip +- git +- <fix the list with all the dependencies> +- passwordless sudo + +The host must also have enough CPU/RAM/Disk in order to host number of VM +nodes that will be created based on the chosen flavor. See the details from +`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-Prerequisites>`_. + +=========================== +Flavors Provided by Sandbox +=========================== + +OPNFV XCI Sandbox provides different flavors such as all in one (aio) which +puts much lower requirements on the host machine and full-blown HA. + +* aio: Single node which acts as the deployment host, controller and compute. +* mini: One deployment host, 1 controller node and 1 compute node. +* noha: One deployment host, 1 controller node and 2 compute nodes. +* ha: One deployment host, 3 controller nodes and 2 compute nodes. + +See the details of the flavors from +`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-AvailableFlavors>`_. + +========== +How to Use +========== + +Basic Usage +----------- + +clone OPNFV Releng repository + + git clone https://gerrit.opnfv.org/gerrit/releng.git + +change into directory where the sandbox script is located + + cd releng/prototypes/xci + +execute sandbox script + + sudo -E ./xci-deploy.sh + +Issuing above command will start aio sandbox deployment and the sandbox +should be ready between 1,5 and 2 hours depending on the host machine. 
+ +Advanced Usage +-------------- + +The flavor to deploy, the versions of upstream components to use can +be configured by developers by setting certain environment variables. +Below example deploys noha flavor using the latest of openstack-ansible +master branch and stores logs in different location than what is configured. + +clone OPNFV Releng repository + + git clone https://gerrit.opnfv.org/gerrit/releng.git + +change into directory where the sandbox script is located + + cd releng/prototypes/xci + +set the sandbox flavor + + export XCI_FLAVOR=noha + +set the version to use for openstack-ansible + + export OPENSTACK_OSA_VERSION=master + +set where the logs should be stored + + export LOG_PATH=/home/jenkins/xcilogs + +execute sandbox script + + sudo -E ./xci-deploy.sh + +Warning:: + + Please encure you always execute the sandbox script using **sudo -E** + in order to make the environment variables you set available to the + sandbox script or you end up with the default settings. + +=============== +User Variables +=============== + +All user variables can be set from command line by exporting them before +executing the script. The current user variables can be seen from +``releng/prototypes/xci/config/user-vars``. + +The variables can also be set directly within the file before executing +the sandbox script. + +=============== +Pinned Versions +=============== + +As explained above, the users can pick and choose which versions to use. If +you want to be on the safe side, you can use the pinned versions the sandbox +provides. They can be seen from ``releng/prototypes/xci/config/pinned-versions``. + +How Pinned Versions are Determined +---------------------------------- + +OPNFV runs periodic jobs against upstream projects openstack/bifrost and +openstack/ansible using latest on master and stable/ocata branches, +continuously chasing the HEAD of corresponding branches. 
+ +Once a working version is identified, the versions of the upstream components +are then bumped in releng repo. + +=========================================== +Limitations, Known Issues, and Improvements +=========================================== + +The list can be seen using `this link <https://jira.opnfv.org/issues/?filter=11616>`_. + +========= +Changelog +========= + +Changelog can be seen using `this link <https://jira.opnfv.org/issues/?filter=11625>`_. + +======= +Testing +======= + +Sandbox is continuously tested by OPNFV CI to ensure the changes do not impact +users. In fact, OPNFV CI itself uses the sandbox scripts to run daily platform +verification jobs. + +======= +Support +======= + +OPNFV XCI issues are tracked on OPNFV JIRA Releng project. If you encounter +and issue or identify a bug, please submit an issue to JIRA using +`this link <https://jira.opnfv.org/projects/RELENG>_`. + +If you have questions or comments, you can ask them on ``#opnfv-pharos`` IRC +channel on Freenode. diff --git a/prototypes/xci/config/aio-vars b/prototypes/xci/config/aio-vars new file mode 100755 index 000000000..f28ecff1b --- /dev/null +++ b/prototypes/xci/config/aio-vars @@ -0,0 +1,18 @@ +#------------------------------------------------------------------------------- +# XCI Flavor Configuration +#------------------------------------------------------------------------------- +# You are free to modify parts of the configuration to fit into your environment. +# But before doing that, please ensure you checked other flavors to see if one +# them can be used instead, saving you some time. 
+#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# Configure VM Nodes +#------------------------------------------------------------------------------- +export TEST_VM_NUM_NODES=1 +export TEST_VM_NODE_NAMES=opnfv +export VM_DOMAIN_TYPE=kvm +export VM_CPU=8 +export VM_DISK=80 +export VM_MEMORY_SIZE=8192 +export VM_DISK_CACHE=unsafe diff --git a/prototypes/xci/config/env-vars b/prototypes/xci/config/env-vars new file mode 100755 index 000000000..052be2ace --- /dev/null +++ b/prototypes/xci/config/env-vars @@ -0,0 +1,21 @@ +#------------------------------------------------------------------------------- +# !!! Changing or overriding these will most likely break everything altogether !!! +# Please do not change these settings if you are not developing for XCI! +#------------------------------------------------------------------------------- +export OPNFV_RELENG_GIT_URL=https://gerrit.opnfv.org/gerrit/releng.git +export OPENSTACK_BIFROST_GIT_URL=https://git.openstack.org/openstack/bifrost +export OPENSTACK_OSA_GIT_URL=https://git.openstack.org/openstack/openstack-ansible +export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy +export CLEAN_DIB_IMAGES=false +export OPNFV_HOST_IP=192.168.122.2 +export XCI_FLAVOR_ANSIBLE_FILE_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR +export JOB_NAME=${JOB_NAME:-false} +# TODO: this currently matches to bifrost ansible version +# there is perhaps better way to do this +export XCI_ANSIBLE_PIP_VERSION=2.1.5.0 +export ANSIBLE_HOST_KEY_CHECKING=False +export DISTRO=${DISTRO:-ubuntu} +export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial} +export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal} +export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables"} +export 
EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"} diff --git a/prototypes/xci/config/ha-vars b/prototypes/xci/config/ha-vars new file mode 100755 index 000000000..1ba45890b --- /dev/null +++ b/prototypes/xci/config/ha-vars @@ -0,0 +1,18 @@ +#------------------------------------------------------------------------------- +# XCI Flavor Configuration +#------------------------------------------------------------------------------- +# You are free to modify parts of the configuration to fit into your environment. +# But before doing that, please ensure you checked other flavors to see if one +# them can be used instead, saving you some time. +#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# Configure VM Nodes +#------------------------------------------------------------------------------- +export TEST_VM_NUM_NODES=6 +export TEST_VM_NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01" +export VM_DOMAIN_TYPE=kvm +export VM_CPU=8 +export VM_DISK=80 +export VM_MEMORY_SIZE=16384 +export VM_DISK_CACHE=unsafe diff --git a/prototypes/xci/config/mini-vars b/prototypes/xci/config/mini-vars new file mode 100755 index 000000000..8f1e83cd8 --- /dev/null +++ b/prototypes/xci/config/mini-vars @@ -0,0 +1,18 @@ +#------------------------------------------------------------------------------- +# XCI Flavor Configuration +#------------------------------------------------------------------------------- +# You are free to modify parts of the configuration to fit into your environment. +# But before doing that, please ensure you checked other flavors to see if one +# them can be used instead, saving you some time. 
+#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# Configure VM Nodes +#------------------------------------------------------------------------------- +export TEST_VM_NUM_NODES=3 +export TEST_VM_NODE_NAMES="opnfv controller00 compute00" +export VM_DOMAIN_TYPE=kvm +export VM_CPU=8 +export VM_DISK=80 +export VM_MEMORY_SIZE=12288 +export VM_DISK_CACHE=unsafe diff --git a/prototypes/xci/config/noha-vars b/prototypes/xci/config/noha-vars new file mode 100755 index 000000000..935becb27 --- /dev/null +++ b/prototypes/xci/config/noha-vars @@ -0,0 +1,18 @@ +#------------------------------------------------------------------------------- +# XCI Flavor Configuration +#------------------------------------------------------------------------------- +# You are free to modify parts of the configuration to fit into your environment. +# But before doing that, please ensure you checked other flavors to see if one +# them can be used instead, saving you some time. 
+#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# Configure VM Nodes +#------------------------------------------------------------------------------- +export TEST_VM_NUM_NODES=4 +export TEST_VM_NODE_NAMES="opnfv controller00 compute00 compute01" +export VM_DOMAIN_TYPE=kvm +export VM_CPU=8 +export VM_DISK=80 +export VM_MEMORY_SIZE=12288 +export VM_DISK_CACHE=unsafe diff --git a/prototypes/xci/config/pinned-versions b/prototypes/xci/config/pinned-versions new file mode 100755 index 000000000..1cd33813c --- /dev/null +++ b/prototypes/xci/config/pinned-versions @@ -0,0 +1,27 @@ +#------------------------------------------------------------------------------- +# Pinned Component Versions +#------------------------------------------------------------------------------- +# You are free to override these versions in user-vars to experiment with +# different branches or with different commits but be aware that things might +# not work as expected. You can set the versions you want to use before running +# the main script on your shell as shown on the examples below. +# +# It is important to be consistent between branches you use for OpenStack +# projects OPNFV XCI uses. 
+# +# Examples: +# export OPENSTACK_BIFROST_VERSION="stable/ocata" +# export OPENSTACK_OSA_VERSION="stable/ocata" +# or +# export OPENSTACK_BIFROST_VERSION="master" +# export OPENSTACK_OSA_VERSION="master" +# or +# export OPENSTACK_BIFROST_VERSION="a87f7ce6c8725b3bbffec7b2efa1e466796848a9" +# export OPENSTACK_OSA_VERSION="4713cf45e11b4ebca9fbed25d1389854602213d8" +#------------------------------------------------------------------------------- +# use releng from master until the development work with the sandbox is complete +export OPNFV_RELENG_VERSION="master" +# HEAD of "master" as of 28.03.2017 +export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"2600d546ed7116f5aad81972b0987a269f3c45b4"} +# HEAD of "master" as of 26.03.2017 +export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"baba7b317a5898cd73b4a11c4ce364c7e2d3d77f"} diff --git a/prototypes/xci/config/user-vars b/prototypes/xci/config/user-vars new file mode 100755 index 000000000..d910405a7 --- /dev/null +++ b/prototypes/xci/config/user-vars @@ -0,0 +1,54 @@ +#------------------------------------------------------------------------------- +# Set Deployment Flavor +#------------------------------------------------------------------------------- +# OPNFV XCI currently supports 4 different types of flavors: +# - all in one (aio): 1 opnfv VM which acts as controller and compute node +# - mini: 3 VMs, 1 opnfv VM deployment host, 1 controller, and 1 compute nodes +# - noha: 4 VMs, 1 opnfv VM deployment host, 1 controller, and 2 compute nodes +# - ha: 6 VMs, 1 opnfv VM deployment host, 3 controllers, and 2 compute nodes +# +# Apart from having different number of nodes, CPU, RAM, and disk allocations +# also differ from each other. Please take a look at the env-vars files for +# each of these flavors. 
+# +# Examples: +# export XCI_FLAVOR="aio" +# or +# export XCI_FLAVOR="mini" +# or +# export XCI_FLAVOR="noha" +# or +# export XCI_FLAVOR="ha" +#------------------------------------------------------------------------------- +export XCI_FLAVOR=${XCI_FLAVOR:-aio} + +#------------------------------------------------------------------------------- +# Set Paths to where git repositories of XCI Components will be cloned +#------------------------------------------------------------------------------- +# OPNFV XCI Sandbox is not verified to be used as non-root user as of yet so +# changing these paths might break things. +#------------------------------------------------------------------------------- +export OPNFV_RELENG_PATH=/opt/releng +export OPENSTACK_BIFROST_PATH=/opt/bifrost +export OPENSTACK_OSA_PATH=/opt/openstack-ansible + +#------------------------------------------------------------------------------- +# Set the playbook to use for OpenStack deployment +#------------------------------------------------------------------------------- +# The variable can be overriden in order to install additional OpenStack services +# supported by OpenStack Ansible or exclude certain OpenStack services. 
+#------------------------------------------------------------------------------- +export OPNFV_OSA_PLAYBOOK=${OPNFV_OSA_PLAYBOOK:-"$OPENSTACK_OSA_PATH/playbooks/setup-openstack.yml"} + +#------------------------------------------------------------------------------- +# Configure some other stuff +#------------------------------------------------------------------------------- +# Set the verbosity for ansible +# +# Examples: +# ANSIBLE_VERBOSITY="-v" +# or +# ANSIBLE_VERBOSITY="-vvvv" +export ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-""} +export LOG_PATH=${LOG_PATH:-/opt/opnfv/logs} +export RUN_TEMPEST=${RUN_TEMPEST:-false} diff --git a/prototypes/xci/file/aio/configure-opnfvhost.yml b/prototypes/xci/file/aio/configure-opnfvhost.yml new file mode 100644 index 000000000..5c66d40c7 --- /dev/null +++ b/prototypes/xci/file/aio/configure-opnfvhost.yml @@ -0,0 +1,22 @@ +--- +- hosts: opnfv + remote_user: root + vars_files: + vars_files: + - ../var/opnfv.yml + roles: + - role: remove-folders + - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" } + tasks: + - name: bootstrap ansible on opnfv host + command: "/bin/bash ./scripts/bootstrap-ansible.sh" + args: + chdir: "{{OPENSTACK_OSA_PATH}}" + - name: bootstrap opnfv host as aio + command: "/bin/bash ./scripts/bootstrap-aio.sh" + args: + chdir: "{{OPENSTACK_OSA_PATH}}" + - name: install OpenStack on opnfv host - this command doesn't log anything to console + command: "/bin/bash ./scripts/run-playbooks.sh" + args: + chdir: "{{OPENSTACK_OSA_PATH}}" diff --git a/prototypes/xci/file/aio/flavor-vars.yml b/prototypes/xci/file/aio/flavor-vars.yml new file mode 100644 index 000000000..6ac1e0fe9 --- /dev/null +++ b/prototypes/xci/file/aio/flavor-vars.yml @@ -0,0 +1,3 @@ +--- +# this file is added intentionally in order to simplify putting files in place +# in future, it might contain vars specific to this flavor 
diff --git a/prototypes/xci/file/aio/inventory b/prototypes/xci/file/aio/inventory new file mode 100644 index 000000000..9a3dd9ee3 --- /dev/null +++ b/prototypes/xci/file/aio/inventory @@ -0,0 +1,2 @@ +[opnfv] +opnfv ansible_ssh_host=192.168.122.2 diff --git a/prototypes/xci/file/ansible-role-requirements.yml b/prototypes/xci/file/ansible-role-requirements.yml new file mode 100644 index 000000000..4faab1950 --- /dev/null +++ b/prototypes/xci/file/ansible-role-requirements.yml @@ -0,0 +1,205 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- name: apt_package_pinning + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning + version: master +- name: pip_install + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-pip_install + version: master +- name: galera_client + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-galera_client + version: master +- name: galera_server + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-galera_server + version: master +- name: ceph_client + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-ceph_client + version: master +- name: haproxy_server + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server + version: master +- name: keepalived + scm: git + src: https://github.com/evrardjp/ansible-keepalived + version: master +- name: lxc_container_create + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create + 
version: master +- name: lxc_hosts + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts + version: master +- name: memcached_server + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-memcached_server + version: master +- name: openstack-ansible-security + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-security + version: master +- name: openstack_hosts + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts + version: master +- name: os_keystone + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_keystone + version: master +- name: openstack_openrc + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc + version: master +- name: os_aodh + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_aodh + version: master +- name: os_barbican + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_barbican + version: master +- name: os_ceilometer + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer + version: master +- name: os_cinder + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_cinder + version: master +- name: os_glance + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_glance + version: master +- name: os_gnocchi + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi + version: master +- name: os_heat + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_heat + version: master +- name: os_horizon + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_horizon + version: master +- name: os_ironic + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_ironic + version: master +- name: os_magnum + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_magnum + version: master +- name: 
os_trove + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_trove + version: master +- name: os_neutron + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_neutron + version: master +- name: os_nova + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_nova + version: master +- name: os_rally + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_rally + version: master +- name: os_sahara + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_sahara + version: master +- name: os_swift + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_swift + version: master +- name: os_tempest + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_tempest + version: master +- name: plugins + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-plugins + version: master +- name: rabbitmq_server + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server + version: master +- name: repo_build + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-repo_build + version: master +- name: repo_server + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-repo_server + version: master +- name: rsyslog_client + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client + version: master +- name: rsyslog_server + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server + version: master +- name: sshd + scm: git + src: https://github.com/willshersystems/ansible-sshd + version: master +- name: bird + scm: git + src: https://github.com/logan2211/ansible-bird + version: master +- name: etcd + scm: git + src: https://github.com/logan2211/ansible-etcd + version: master +- name: unbound + scm: git + src: https://github.com/logan2211/ansible-unbound + version: master +- name: resolvconf + scm: git + src: 
https://github.com/logan2211/ansible-resolvconf + version: master +- name: os_designate + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_designate + version: master +- name: ceph.ceph-common + scm: git + src: https://github.com/ceph/ansible-ceph-common + version: master +- name: ceph.ceph-docker-common + scm: git + src: https://github.com/ceph/ansible-ceph-docker-common + version: master +- name: ceph-mon + scm: git + src: https://github.com/ceph/ansible-ceph-mon + version: master +- name: ceph-osd + scm: git + src: https://github.com/ceph/ansible-ceph-osd + version: master +- name: os_octavia + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_octavia + version: master +- name: os_molteniron + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron + version: master diff --git a/prototypes/xci/file/cinder.yml b/prototypes/xci/file/cinder.yml new file mode 100644 index 000000000..e40b39256 --- /dev/null +++ b/prototypes/xci/file/cinder.yml @@ -0,0 +1,13 @@ +--- +# This file contains an example to show how to set +# the cinder-volume service to run in a container. +# +# Important note: +# When using LVM or any iSCSI-based cinder backends, such as NetApp with +# iSCSI protocol, the cinder-volume service *must* run on metal. +# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855 + +container_skel: + cinder_volumes_container: + properties: + is_metal: false diff --git a/prototypes/xci/file/exports b/prototypes/xci/file/exports new file mode 100644 index 000000000..af64d618d --- /dev/null +++ b/prototypes/xci/file/exports @@ -0,0 +1,14 @@ +# /etc/exports: the access control list for filesystems which may be exported +# to NFS clients. See exports(5). 
+# +# Example for NFSv2 and NFSv3: +# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check) +# +# Example for NFSv4: +# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check) +# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check) +# +# glance images are stored on compute host and made available to image hosts via nfs +# see image_hosts section in openstack_user_config.yml for details +/images *(rw,sync,no_subtree_check,no_root_squash) + diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/file/ha/configure-targethosts.yml new file mode 100644 index 000000000..6dc147f3b --- /dev/null +++ b/prototypes/xci/file/ha/configure-targethosts.yml @@ -0,0 +1,36 @@ +--- +- hosts: all + remote_user: root + tasks: + - name: add public key to host + copy: + src: ../file/authorized_keys + dest: /root/.ssh/authorized_keys + - name: configure modules + copy: + src: ../file/modules + dest: /etc/modules + +- hosts: controller + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + roles: + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } + +- hosts: compute + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + roles: + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } + +- hosts: compute01 + remote_user: root + # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros + roles: + - role: configure-nfs diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml new file mode 100644 index 
000000000..3cd1d6246 --- /dev/null +++ b/prototypes/xci/file/ha/flavor-vars.yml @@ -0,0 +1,37 @@ +--- +host_info: { + 'opnfv': { + 'MGMT_IP': '172.29.236.10', + 'VLAN_IP': '192.168.122.2', + 'STORAGE_IP': '172.29.244.10' + }, + 'controller00': { + 'MGMT_IP': '172.29.236.11', + 'VLAN_IP': '192.168.122.3', + 'STORAGE_IP': '172.29.244.11' + }, + 'controller01': { + 'MGMT_IP': '172.29.236.12', + 'VLAN_IP': '192.168.122.4', + 'STORAGE_IP': '172.29.244.12' + }, + 'controller02': { + 'MGMT_IP': '172.29.236.13', + 'VLAN_IP': '192.168.122.5', + 'STORAGE_IP': '172.29.244.13' + }, + 'compute00': { + 'MGMT_IP': '172.29.236.14', + 'VLAN_IP': '192.168.122.6', + 'STORAGE_IP': '172.29.244.14', + 'VLAN_IP_SECOND': '173.29.241.1', + 'VXLAN_IP': '172.29.240.14' + }, + 'compute01': { + 'MGMT_IP': '172.29.236.15', + 'VLAN_IP': '192.168.122.7', + 'STORAGE_IP': '172.29.244.15', + 'VLAN_IP_SECOND': '173.29.241.2', + 'VXLAN_IP': '172.29.240.15' + } +} diff --git a/prototypes/xci/file/ha/inventory b/prototypes/xci/file/ha/inventory new file mode 100644 index 000000000..94b1d074d --- /dev/null +++ b/prototypes/xci/file/ha/inventory @@ -0,0 +1,11 @@ +[opnfv] +opnfv ansible_ssh_host=192.168.122.2 + +[controller] +controller00 ansible_ssh_host=192.168.122.3 +controller01 ansible_ssh_host=192.168.122.4 +controller02 ansible_ssh_host=192.168.122.5 + +[compute] +compute00 ansible_ssh_host=192.168.122.6 +compute01 ansible_ssh_host=192.168.122.7 diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml new file mode 100644 index 000000000..43e88c0d0 --- /dev/null +++ b/prototypes/xci/file/ha/openstack_user_config.yml @@ -0,0 +1,278 @@ +--- +cidr_networks: + container: 172.29.236.0/22 + tunnel: 172.29.240.0/22 + storage: 172.29.244.0/22 + +used_ips: + - "172.29.236.1,172.29.236.50" + - "172.29.240.1,172.29.240.50" + - "172.29.244.1,172.29.244.50" + - "172.29.248.1,172.29.248.50" + +global_overrides: + internal_lb_vip_address: 172.29.236.222 + 
external_lb_vip_address: 192.168.122.220 + tunnel_bridge: "br-vxlan" + management_bridge: "br-mgmt" + provider_networks: + - network: + container_bridge: "br-mgmt" + container_type: "veth" + container_interface: "eth1" + ip_from_q: "container" + type: "raw" + group_binds: + - all_containers + - hosts + is_container_address: true + is_ssh_address: true + - network: + container_bridge: "br-vxlan" + container_type: "veth" + container_interface: "eth10" + ip_from_q: "tunnel" + type: "vxlan" + range: "1:1000" + net_name: "vxlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth12" + host_bind_override: "eth12" + type: "flat" + net_name: "flat" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth11" + type: "vlan" + range: "1:1" + net_name: "vlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-storage" + container_type: "veth" + container_interface: "eth2" + ip_from_q: "storage" + type: "raw" + group_binds: + - glance_api + - cinder_api + - cinder_volume + - nova_compute + +# ## +# ## Infrastructure +# ## + +# galera, memcache, rabbitmq, utility +shared-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# repository (apt cache, python packages, etc) +repo-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# load balancer +# Ideally the load balancer should not use the Infrastructure hosts. +# Dedicated hardware is best for improved performance and security. 
+haproxy_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# rsyslog server +# log_hosts: +# log1: +# ip: 172.29.236.14 + +# ## +# ## OpenStack +# ## + +# keystone +identity_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# cinder api services +storage-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# glance +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. +image_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.15" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + controller01: + ip: 172.29.236.12 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.15" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + controller02: + ip: 172.29.236.13 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.15" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + +# nova api, conductor, etc services +compute-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# heat +orchestration_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# horizon +dashboard_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# neutron server, agents (L3, etc) +network_hosts: + controller00: + 
ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# ceilometer (telemetry API) +metering-infra_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# aodh (telemetry alarm service) +metering-alarm_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# gnocchi (telemetry metrics storage) +metrics_hosts: + controller00: + ip: 172.29.236.11 + controller01: + ip: 172.29.236.12 + controller02: + ip: 172.29.236.13 + +# nova hypervisors +compute_hosts: + compute00: + ip: 172.29.236.14 + compute01: + ip: 172.29.236.15 + +# ceilometer compute agent (telemetry) +metering-compute_hosts: + compute00: + ip: 172.29.236.14 + compute01: + ip: 172.29.236.15 +# cinder volume hosts (NFS-backed) +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. 
+storage_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.11" + controller01: + ip: 172.29.236.12 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.12" + controller02: + ip: 172.29.236.13 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.13" diff --git a/prototypes/xci/file/ha/user_variables.yml b/prototypes/xci/file/ha/user_variables.yml new file mode 100644 index 000000000..094cc8cd6 --- /dev/null +++ b/prototypes/xci/file/ha/user_variables.yml @@ -0,0 +1,28 @@ +--- +# Copyright 2014, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ## +# ## This file contains commonly used overrides for convenience. Please inspect +# ## the defaults for each role to find additional override options. +# ## + +# # Debug and Verbose options. 
+debug: false + +haproxy_keepalived_external_vip_cidr: "192.168.122.220/32" +haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32" +haproxy_keepalived_external_interface: br-vlan +haproxy_keepalived_internal_interface: br-mgmt +gnocchi_db_sync_options: "" diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml new file mode 100644 index 000000000..395f44a64 --- /dev/null +++ b/prototypes/xci/file/mini/configure-targethosts.yml @@ -0,0 +1,32 @@ +--- +- hosts: all + remote_user: root + tasks: + - name: add public key to host + copy: + src: ../file/authorized_keys + dest: /root/.ssh/authorized_keys + - name: configure modules + copy: + src: ../file/modules + dest: /etc/modules + +- hosts: controller + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + roles: + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } + +- hosts: compute + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + roles: + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } + # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros + - role: configure-nfs diff --git a/prototypes/xci/file/mini/flavor-vars.yml b/prototypes/xci/file/mini/flavor-vars.yml new file mode 100644 index 000000000..01fba7129 --- /dev/null +++ b/prototypes/xci/file/mini/flavor-vars.yml @@ -0,0 +1,20 @@ +--- +host_info: { + 'opnfv': { + 'MGMT_IP': '172.29.236.10', + 'VLAN_IP': '192.168.122.2', + 'STORAGE_IP': '172.29.244.10' + }, + 'controller00': { + 'MGMT_IP': 
'172.29.236.11', + 'VLAN_IP': '192.168.122.3', + 'STORAGE_IP': '172.29.244.11' + }, + 'compute00': { + 'MGMT_IP': '172.29.236.12', + 'VLAN_IP': '192.168.122.4', + 'VLAN_IP_SECOND': '173.29.241.1', + 'VXLAN_IP': '172.29.240.12', + 'STORAGE_IP': '172.29.244.12' + }, +} diff --git a/prototypes/xci/file/mini/inventory b/prototypes/xci/file/mini/inventory new file mode 100644 index 000000000..eb73e5e34 --- /dev/null +++ b/prototypes/xci/file/mini/inventory @@ -0,0 +1,8 @@ +[opnfv] +opnfv ansible_ssh_host=192.168.122.2 + +[controller] +controller00 ansible_ssh_host=192.168.122.3 + +[compute] +compute00 ansible_ssh_host=192.168.122.4 diff --git a/prototypes/xci/file/mini/openstack_user_config.yml b/prototypes/xci/file/mini/openstack_user_config.yml new file mode 100644 index 000000000..c41f4329d --- /dev/null +++ b/prototypes/xci/file/mini/openstack_user_config.yml @@ -0,0 +1,186 @@ +--- +cidr_networks: + container: 172.29.236.0/22 + tunnel: 172.29.240.0/22 + storage: 172.29.244.0/22 + +used_ips: + - "172.29.236.1,172.29.236.50" + - "172.29.240.1,172.29.240.50" + - "172.29.244.1,172.29.244.50" + - "172.29.248.1,172.29.248.50" + +global_overrides: + internal_lb_vip_address: 172.29.236.11 + external_lb_vip_address: 192.168.122.3 + tunnel_bridge: "br-vxlan" + management_bridge: "br-mgmt" + provider_networks: + - network: + container_bridge: "br-mgmt" + container_type: "veth" + container_interface: "eth1" + ip_from_q: "container" + type: "raw" + group_binds: + - all_containers + - hosts + is_container_address: true + is_ssh_address: true + - network: + container_bridge: "br-vxlan" + container_type: "veth" + container_interface: "eth10" + ip_from_q: "tunnel" + type: "vxlan" + range: "1:1000" + net_name: "vxlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth12" + host_bind_override: "eth12" + type: "flat" + net_name: "flat" + group_binds: + - neutron_linuxbridge_agent + - network: + 
container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth11" + type: "vlan" + range: "1:1" + net_name: "vlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-storage" + container_type: "veth" + container_interface: "eth2" + ip_from_q: "storage" + type: "raw" + group_binds: + - glance_api + - cinder_api + - cinder_volume + - nova_compute + +# ## +# ## Infrastructure +# ## + +# galera, memcache, rabbitmq, utility +shared-infra_hosts: + controller00: + ip: 172.29.236.11 + +# repository (apt cache, python packages, etc) +repo-infra_hosts: + controller00: + ip: 172.29.236.11 + +# load balancer +# Ideally the load balancer should not use the Infrastructure hosts. +# Dedicated hardware is best for improved performance and security. +haproxy_hosts: + controller00: + ip: 172.29.236.11 + +# rsyslog server +# log_hosts: +# log1: +# ip: 172.29.236.14 + +# ## +# ## OpenStack +# ## + +# keystone +identity_hosts: + controller00: + ip: 172.29.236.11 + +# cinder api services +storage-infra_hosts: + controller00: + ip: 172.29.236.11 + +# glance +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. 
+image_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.12" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + +# nova api, conductor, etc services +compute-infra_hosts: + controller00: + ip: 172.29.236.11 + +# heat +orchestration_hosts: + controller00: + ip: 172.29.236.11 + +# horizon +dashboard_hosts: + controller00: + ip: 172.29.236.11 + +# neutron server, agents (L3, etc) +network_hosts: + controller00: + ip: 172.29.236.11 + +# ceilometer (telemetry API) +metering-infra_hosts: + controller00: + ip: 172.29.236.11 + +# aodh (telemetry alarm service) +metering-alarm_hosts: + controller00: + ip: 172.29.236.11 + +# gnocchi (telemetry metrics storage) +metrics_hosts: + controller00: + ip: 172.29.236.11 + +# nova hypervisors +compute_hosts: + compute00: + ip: 172.29.236.12 + +# ceilometer compute agent (telemetry) +metering-compute_hosts: + compute00: + ip: 172.29.236.12 +# cinder volume hosts (NFS-backed) +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. +storage_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.11" diff --git a/prototypes/xci/file/mini/user_variables.yml b/prototypes/xci/file/mini/user_variables.yml new file mode 100644 index 000000000..7a0b8064d --- /dev/null +++ b/prototypes/xci/file/mini/user_variables.yml @@ -0,0 +1,28 @@ +--- +# Copyright 2014, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ## +# ## This file contains commonly used overrides for convenience. Please inspect +# ## the defaults for each role to find additional override options. +# ## + +# # Debug and Verbose options. +debug: false + +haproxy_keepalived_external_vip_cidr: "192.168.122.3/32" +haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32" +haproxy_keepalived_external_interface: br-vlan +haproxy_keepalived_internal_interface: br-mgmt +gnocchi_db_sync_options: "" diff --git a/prototypes/xci/file/modules b/prototypes/xci/file/modules new file mode 100644 index 000000000..60a517f18 --- /dev/null +++ b/prototypes/xci/file/modules @@ -0,0 +1,8 @@ +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. 
+ +bonding +8021q diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml new file mode 100644 index 000000000..6dc147f3b --- /dev/null +++ b/prototypes/xci/file/noha/configure-targethosts.yml @@ -0,0 +1,36 @@ +--- +- hosts: all + remote_user: root + tasks: + - name: add public key to host + copy: + src: ../file/authorized_keys + dest: /root/.ssh/authorized_keys + - name: configure modules + copy: + src: ../file/modules + dest: /etc/modules + +- hosts: controller + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + roles: + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } + +- hosts: compute + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + roles: + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } + +- hosts: compute01 + remote_user: root + # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros + roles: + - role: configure-nfs diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml new file mode 100644 index 000000000..7f52d343a --- /dev/null +++ b/prototypes/xci/file/noha/flavor-vars.yml @@ -0,0 +1,27 @@ +--- +host_info: { + 'opnfv': { + 'MGMT_IP': '172.29.236.10', + 'VLAN_IP': '192.168.122.2', + 'STORAGE_IP': '172.29.244.10' + }, + 'controller00': { + 'MGMT_IP': '172.29.236.11', + 'VLAN_IP': '192.168.122.3', + 'STORAGE_IP': '172.29.244.11' + }, + 'compute00': { + 'MGMT_IP': '172.29.236.12', + 'VLAN_IP': '192.168.122.4', + 'VLAN_IP_SECOND': '173.29.241.1', 
+ 'VXLAN_IP': '172.29.240.12', + 'STORAGE_IP': '172.29.244.12' + }, + 'compute01': { + 'MGMT_IP': '172.29.236.13', + 'VLAN_IP': '192.168.122.5', + 'VLAN_IP_SECOND': '173.29.241.2', + 'VXLAN_IP': '172.29.240.13', + 'STORAGE_IP': '172.29.244.13' + } +} diff --git a/prototypes/xci/file/noha/inventory b/prototypes/xci/file/noha/inventory new file mode 100644 index 000000000..b4f9f6d0c --- /dev/null +++ b/prototypes/xci/file/noha/inventory @@ -0,0 +1,9 @@ +[opnfv] +opnfv ansible_ssh_host=192.168.122.2 + +[controller] +controller00 ansible_ssh_host=192.168.122.3 + +[compute] +compute00 ansible_ssh_host=192.168.122.4 +compute01 ansible_ssh_host=192.168.122.5 diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml new file mode 100644 index 000000000..999741580 --- /dev/null +++ b/prototypes/xci/file/noha/openstack_user_config.yml @@ -0,0 +1,190 @@ +--- +cidr_networks: + container: 172.29.236.0/22 + tunnel: 172.29.240.0/22 + storage: 172.29.244.0/22 + +used_ips: + - "172.29.236.1,172.29.236.50" + - "172.29.240.1,172.29.240.50" + - "172.29.244.1,172.29.244.50" + - "172.29.248.1,172.29.248.50" + +global_overrides: + internal_lb_vip_address: 172.29.236.11 + external_lb_vip_address: 192.168.122.3 + tunnel_bridge: "br-vxlan" + management_bridge: "br-mgmt" + provider_networks: + - network: + container_bridge: "br-mgmt" + container_type: "veth" + container_interface: "eth1" + ip_from_q: "container" + type: "raw" + group_binds: + - all_containers + - hosts + is_container_address: true + is_ssh_address: true + - network: + container_bridge: "br-vxlan" + container_type: "veth" + container_interface: "eth10" + ip_from_q: "tunnel" + type: "vxlan" + range: "1:1000" + net_name: "vxlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth12" + host_bind_override: "eth12" + type: "flat" + net_name: "flat" + group_binds: + - 
neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth11" + type: "vlan" + range: "1:1" + net_name: "vlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-storage" + container_type: "veth" + container_interface: "eth2" + ip_from_q: "storage" + type: "raw" + group_binds: + - glance_api + - cinder_api + - cinder_volume + - nova_compute + +# ## +# ## Infrastructure +# ## + +# galera, memcache, rabbitmq, utility +shared-infra_hosts: + controller00: + ip: 172.29.236.11 + +# repository (apt cache, python packages, etc) +repo-infra_hosts: + controller00: + ip: 172.29.236.11 + +# load balancer +# Ideally the load balancer should not use the Infrastructure hosts. +# Dedicated hardware is best for improved performance and security. +haproxy_hosts: + controller00: + ip: 172.29.236.11 + +# rsyslog server +# log_hosts: +# log1: +# ip: 172.29.236.14 + +# ## +# ## OpenStack +# ## + +# keystone +identity_hosts: + controller00: + ip: 172.29.236.11 + +# cinder api services +storage-infra_hosts: + controller00: + ip: 172.29.236.11 + +# glance +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. 
+image_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.13" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + +# nova api, conductor, etc services +compute-infra_hosts: + controller00: + ip: 172.29.236.11 + +# heat +orchestration_hosts: + controller00: + ip: 172.29.236.11 + +# horizon +dashboard_hosts: + controller00: + ip: 172.29.236.11 + +# neutron server, agents (L3, etc) +network_hosts: + controller00: + ip: 172.29.236.11 + +# ceilometer (telemetry API) +metering-infra_hosts: + controller00: + ip: 172.29.236.11 + +# aodh (telemetry alarm service) +metering-alarm_hosts: + controller00: + ip: 172.29.236.11 + +# gnocchi (telemetry metrics storage) +metrics_hosts: + controller00: + ip: 172.29.236.11 + +# nova hypervisors +compute_hosts: + compute00: + ip: 172.29.236.12 + compute01: + ip: 172.29.236.13 + +# ceilometer compute agent (telemetry) +metering-compute_hosts: + compute00: + ip: 172.29.236.12 + compute01: + ip: 172.29.236.13 +# cinder volume hosts (NFS-backed) +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. +storage_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + cinder_backends: + limit_container_types: cinder_volume + lvm: + volume_group: cinder-volumes + volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver + volume_backend_name: LVM_iSCSI + iscsi_ip_address: "172.29.244.11" diff --git a/prototypes/xci/file/noha/user_variables.yml b/prototypes/xci/file/noha/user_variables.yml new file mode 100644 index 000000000..7a0b8064d --- /dev/null +++ b/prototypes/xci/file/noha/user_variables.yml @@ -0,0 +1,28 @@ +--- +# Copyright 2014, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ## +# ## This file contains commonly used overrides for convenience. Please inspect +# ## the defaults for each role to find additional override options. +# ## + +# # Debug and Verbose options. +debug: false + +haproxy_keepalived_external_vip_cidr: "192.168.122.3/32" +haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32" +haproxy_keepalived_external_interface: br-vlan +haproxy_keepalived_internal_interface: br-mgmt +gnocchi_db_sync_options: "" diff --git a/prototypes/xci/file/setup-openstack.yml b/prototypes/xci/file/setup-openstack.yml new file mode 100644 index 000000000..bd5d5cd93 --- /dev/null +++ b/prototypes/xci/file/setup-openstack.yml @@ -0,0 +1,36 @@ +--- +# Copyright 2014, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- include: os-keystone-install.yml +- include: os-glance-install.yml +- include: os-cinder-install.yml +- include: os-nova-install.yml +- include: os-neutron-install.yml +- include: os-heat-install.yml +- include: os-horizon-install.yml +- include: os-ceilometer-install.yml +- include: os-aodh-install.yml +- include: os-designate-install.yml +#NOTE(stevelle) Ensure Gnocchi identities exist before Swift +- include: os-gnocchi-install.yml + when: + - gnocchi_storage_driver is defined + - gnocchi_storage_driver == 'swift' + vars: + gnocchi_identity_only: True +- include: os-swift-install.yml +- include: os-gnocchi-install.yml +- include: os-ironic-install.yml +- include: os-tempest-install.yml diff --git a/prototypes/xci/playbooks/configure-localhost.yml b/prototypes/xci/playbooks/configure-localhost.yml new file mode 100644 index 000000000..2a559645e --- /dev/null +++ b/prototypes/xci/playbooks/configure-localhost.yml @@ -0,0 +1,43 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: localhost + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/opnfv.yml + roles: + - role: remove-folders + - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" } + tasks: + - name: create log directory {{LOG_PATH}} + file: + path: "{{LOG_PATH}}" + state: directory + recurse: no + # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts + - name: copy multihost playbook + copy: + src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml" + dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks" + when: XCI_FLAVOR != "aio" + # when the deployment is aio, we overwrite and use playbook, configure-opnfvhost.yml, since everything gets installed on opnfv host + - name: copy aio playbook + copy: + src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-opnfvhost.yml" + dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks" + when: XCI_FLAVOR == "aio" + - name: copy flavor inventory + copy: + src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/inventory" + dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks" + - name: copy flavor vars + copy: + src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/flavor-vars.yml" + dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/var" diff --git a/prototypes/xci/playbooks/configure-opnfvhost.yml b/prototypes/xci/playbooks/configure-opnfvhost.yml new file mode 100644 index 000000000..06e27e7fc --- /dev/null +++ b/prototypes/xci/playbooks/configure-opnfvhost.yml @@ -0,0 +1,65 @@ +--- +# SPDX-license-identifier: Apache-2.0 
+############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: opnfv + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/flavor-vars.yml + - ../var/opnfv.yml + roles: + - role: remove-folders + - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" } + - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" } + # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros + - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/opnfv.interface.j2", dest: "/etc/network/interfaces" } + tasks: + - name: generate SSH keys + shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N "" + args: + creates: /root/.ssh/id_rsa + - name: fetch public key + fetch: src="/root/.ssh/id_rsa.pub" dest="/" + - name: copy flavor inventory + shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/inventory {{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks" + - name: copy flavor vars + shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/flavor-vars.yml {{OPNFV_RELENG_PATH}}/prototypes/xci/var" + - name: copy openstack_deploy + shell: "/bin/cp -rf {{OPENSTACK_OSA_PATH}}/etc/openstack_deploy {{OPENSTACK_OSA_ETC_PATH}}" + - name: copy openstack_user_config.yml + shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/openstack_user_config.yml {{OPENSTACK_OSA_ETC_PATH}}" + - name: copy 
user_variables.yml + shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}" + - name: copy cinder.yml + shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d" + - name: bootstrap ansible on opnfv host + command: "/bin/bash ./scripts/bootstrap-ansible.sh" + args: + chdir: "{{OPENSTACK_OSA_PATH}}" + - name: generate password token + command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml" + args: + chdir: "{{OPENSTACK_OSA_PATH}}/scripts" + # TODO: We need to get rid of this as soon as the issue is fixed upstream + - name: change the haproxy state from disable to enable + replace: + dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-keystone-install.yml" + regexp: '(\s+)haproxy_state: disabled' + replace: '\1haproxy_state: enabled' + - name: copy OPNFV OpenStack playbook + shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks" + - name: copy OPNFV role requirements + shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}" +- hosts: localhost + remote_user: root + tasks: + - name: Generate authorized_keys + shell: "/bin/cat /opnfv/root/.ssh/id_rsa.pub >> ../file/authorized_keys" + - name: Append public keys to authorized_keys + shell: "/bin/cat /root/.ssh/id_rsa.pub >> ../file/authorized_keys" diff --git a/prototypes/xci/playbooks/inventory b/prototypes/xci/playbooks/inventory new file mode 100644 index 000000000..fd9af9016 --- /dev/null +++ b/prototypes/xci/playbooks/inventory @@ -0,0 +1,10 @@ +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +[opnfv] +opnfv ansible_ssh_host=192.168.122.2 diff --git a/prototypes/xci/playbooks/provision-vm-nodes.yml b/prototypes/xci/playbooks/provision-vm-nodes.yml new file mode 100644 index 000000000..9a32d0bfc --- /dev/null +++ b/prototypes/xci/playbooks/provision-vm-nodes.yml @@ -0,0 +1,32 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: localhost + remote_user: root + vars_files: + - ../var/{{ ansible_os_family }}.yml + - ../var/opnfv.yml + roles: + # using these roles here ensures that we can reuse this playbook in different context + - role: remove-folders + - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" } + - { role: clone-repository, project: "opnfv/bifrost", repo: "{{ OPENSTACK_BIFROST_GIT_URL }}", dest: "{{ OPENSTACK_BIFROST_PATH }}", version: "{{ OPENSTACK_BIFROST_VERSION }}" } + tasks: + - name: combine opnfv/releng and openstack/bifrost scripts/playbooks + copy: + src: "{{ OPNFV_RELENG_PATH }}/prototypes/bifrost/" + dest: "{{ OPENSTACK_BIFROST_PATH }}" + - name: destroy VM nodes created by previous deployment + command: "/bin/bash ./scripts/destroy-env.sh" + args: + chdir: "{{ 
OPENSTACK_BIFROST_PATH }}" + - name: create and provision VM nodes for the flavor {{ XCI_FLAVOR }} + command: "/bin/bash ./scripts/bifrost-provision.sh" + args: + chdir: "{{ OPENSTACK_BIFROST_PATH }}" diff --git a/prototypes/xci/playbooks/roles/clone-repository/tasks/main.yml b/prototypes/xci/playbooks/roles/clone-repository/tasks/main.yml new file mode 100644 index 000000000..3f7e09103 --- /dev/null +++ b/prototypes/xci/playbooks/roles/clone-repository/tasks/main.yml @@ -0,0 +1,14 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- name: clone "{{ project }}" and checkout "{{ version }}" + git: + repo: "{{ repo }}" + dest: "{{ dest }}" + version: "{{ version }}" diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml new file mode 100644 index 000000000..8bc84822c --- /dev/null +++ b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# TODO: this role needs to be adjusted for different distros +- name: configure network for {{ ansible_os_family }} on interface {{ interface }} + template: + src: "{{ src }}" + dest: "{{ dest }}" +- name: restart ubuntu xenial network service + shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a" diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml new file mode 100644 index 000000000..b188f4dbb --- /dev/null +++ b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml @@ -0,0 +1,36 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# TODO: this is for xenial and needs to be adjusted for different distros +- block: + - name: make NFS dir + file: + dest: /images + mode: 777 + state: directory + - name: configure NFS service + lineinfile: + dest: /etc/services + state: present + create: yes + line: "{{ item }}" + with_items: + - "nfs 2049/tcp" + - "nfs 2049/udp" + - name: configure NFS exports on ubuntu xenial + copy: + src: ../file/exports + dest: /etc/exports + when: ansible_distribution_release == "xenial" + # TODO: the service name might be different on other distros and needs to be adjusted + - name: restart ubuntu xenial NFS service + service: + name: nfs-kernel-server + state: restarted + when: ansible_distribution_release == "xenial" diff --git a/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml b/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml new file mode 100644 index 000000000..ac8c0f7dc --- /dev/null +++ b/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml @@ -0,0 +1,20 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- name: cleanup leftovers of previous deployment + file: + path: "{{ item }}" + state: absent + recurse: no + with_items: + - "{{ OPNFV_RELENG_PATH }}" + - "{{ OPENSTACK_BIFROST_PATH }}" + - "{{ OPENSTACK_OSA_PATH }}" + - "{{ OPENSTACK_OSA_ETC_PATH }}" + - "{{ LOG_PATH }} " diff --git a/prototypes/xci/template/compute.interface.j2 b/prototypes/xci/template/compute.interface.j2 new file mode 100644 index 000000000..0c5147c45 --- /dev/null +++ b/prototypes/xci/template/compute.interface.j2 @@ -0,0 +1,86 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + +# The loopback network interface +auto lo +iface lo inet loopback + + +# Physical interface +auto {{ interface }} +iface {{ interface }} inet manual + +# Container/Host management VLAN interface +auto {{ interface }}.10 +iface {{ interface }}.10 inet manual + vlan-raw-device {{ interface }} + +# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface +auto {{ interface }}.30 +iface {{ interface }}.30 inet manual + vlan-raw-device {{ interface }} + +# Storage network VLAN interface (optional) +auto {{ interface }}.20 +iface {{ interface }}.20 inet manual + vlan-raw-device {{ interface }} + +# Container/Host management bridge +auto br-mgmt +iface br-mgmt inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.10 + address {{host_info[inventory_hostname].MGMT_IP}} + netmask 255.255.252.0 + +# compute1 VXLAN (tunnel/overlay) bridge config +auto br-vxlan +iface br-vxlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.30 + address 
{{host_info[inventory_hostname].VXLAN_IP}} + netmask 255.255.252.0 + +# OpenStack Networking VLAN bridge +auto br-vlan +iface br-vlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }} + address {{host_info[inventory_hostname].VLAN_IP}} + netmask 255.255.255.0 + gateway 192.168.122.1 + offload-sg off + # Create veth pair, don't bomb if already exists + pre-up ip link add br-vlan-veth type veth peer name eth12 || true + # Set both ends UP + pre-up ip link set br-vlan-veth up + pre-up ip link set eth12 up + # Delete veth pair on DOWN + post-down ip link del br-vlan-veth || true + bridge_ports br-vlan-veth + +# Add an additional address to br-vlan +iface br-vlan inet static + # Flat network default gateway + # -- This needs to exist somewhere for network reachability + # -- from the router namespace for floating IP paths. + # -- Putting this here is primarily for tempest to work. + address {{host_info[inventory_hostname].VLAN_IP_SECOND}} + netmask 255.255.252.0 + dns-nameserver 8.8.8.8 8.8.4.4 + +# compute1 Storage bridge +auto br-storage +iface br-storage inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.20 + address {{host_info[inventory_hostname].STORAGE_IP}} + netmask 255.255.252.0 diff --git a/prototypes/xci/template/controller.interface.j2 b/prototypes/xci/template/controller.interface.j2 new file mode 100644 index 000000000..fbaa8b8dd --- /dev/null +++ b/prototypes/xci/template/controller.interface.j2 @@ -0,0 +1,71 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). 
+ +# The loopback network interface +auto lo +iface lo inet loopback + +# Physical interface +auto {{ interface }} +iface {{ interface }} inet manual + +# Container/Host management VLAN interface +auto {{ interface }}.10 +iface {{ interface }}.10 inet manual + vlan-raw-device {{ interface }} + +# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface +auto {{ interface }}.30 +iface {{ interface }}.30 inet manual + vlan-raw-device {{ interface }} + +# Storage network VLAN interface (optional) +auto {{ interface }}.20 +iface {{ interface }}.20 inet manual + vlan-raw-device {{ interface }} + +# Container/Host management bridge +auto br-mgmt +iface br-mgmt inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.10 + address {{host_info[inventory_hostname].MGMT_IP}} + netmask 255.255.252.0 + +# OpenStack Networking VXLAN (tunnel/overlay) bridge +# +# Only the COMPUTE and NETWORK nodes must have an IP address +# on this bridge. When used by infrastructure nodes, the +# IP addresses are assigned to containers which use this +# bridge. 
+# +auto br-vxlan +iface br-vxlan inet manual + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.30 + +# OpenStack Networking VLAN bridge +auto br-vlan +iface br-vlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }} + address {{host_info[inventory_hostname].VLAN_IP}} + netmask 255.255.255.0 + gateway 192.168.122.1 + dns-nameserver 8.8.8.8 8.8.4.4 + +# compute1 Storage bridge +auto br-storage +iface br-storage inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.20 + address {{host_info[inventory_hostname].STORAGE_IP}} + netmask 255.255.252.0 diff --git a/prototypes/xci/template/opnfv.interface.j2 b/prototypes/xci/template/opnfv.interface.j2 new file mode 100644 index 000000000..fbaa8b8dd --- /dev/null +++ b/prototypes/xci/template/opnfv.interface.j2 @@ -0,0 +1,71 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). 
+ +# The loopback network interface +auto lo +iface lo inet loopback + +# Physical interface +auto {{ interface }} +iface {{ interface }} inet manual + +# Container/Host management VLAN interface +auto {{ interface }}.10 +iface {{ interface }}.10 inet manual + vlan-raw-device {{ interface }} + +# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface +auto {{ interface }}.30 +iface {{ interface }}.30 inet manual + vlan-raw-device {{ interface }} + +# Storage network VLAN interface (optional) +auto {{ interface }}.20 +iface {{ interface }}.20 inet manual + vlan-raw-device {{ interface }} + +# Container/Host management bridge +auto br-mgmt +iface br-mgmt inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.10 + address {{host_info[inventory_hostname].MGMT_IP}} + netmask 255.255.252.0 + +# OpenStack Networking VXLAN (tunnel/overlay) bridge +# +# Only the COMPUTE and NETWORK nodes must have an IP address +# on this bridge. When used by infrastructure nodes, the +# IP addresses are assigned to containers which use this +# bridge. 
+# +auto br-vxlan +iface br-vxlan inet manual + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.30 + +# OpenStack Networking VLAN bridge +auto br-vlan +iface br-vlan inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }} + address {{host_info[inventory_hostname].VLAN_IP}} + netmask 255.255.255.0 + gateway 192.168.122.1 + dns-nameserver 8.8.8.8 8.8.4.4 + +# compute1 Storage bridge +auto br-storage +iface br-storage inet static + bridge_stp off + bridge_waitport 0 + bridge_fd 0 + bridge_ports {{ interface }}.20 + address {{host_info[inventory_hostname].STORAGE_IP}} + netmask 255.255.252.0 diff --git a/prototypes/xci/var/Debian.yml b/prototypes/xci/var/Debian.yml new file mode 100644 index 000000000..d13d08097 --- /dev/null +++ b/prototypes/xci/var/Debian.yml @@ -0,0 +1,11 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# this is the interface the VM nodes are connected to libvirt network "default" +interface: "ens3" diff --git a/prototypes/xci/var/RedHat.yml b/prototypes/xci/var/RedHat.yml new file mode 100644 index 000000000..6d03e0f32 --- /dev/null +++ b/prototypes/xci/var/RedHat.yml @@ -0,0 +1,10 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# this is placeholder and left blank intentionally to complete later on diff --git a/prototypes/xci/var/Suse.yml b/prototypes/xci/var/Suse.yml new file mode 100644 index 000000000..6d03e0f32 --- /dev/null +++ b/prototypes/xci/var/Suse.yml @@ -0,0 +1,10 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# this is placeholder and left blank intentionally to complete later on diff --git a/prototypes/xci/var/opnfv.yml b/prototypes/xci/var/opnfv.yml new file mode 100644 index 000000000..dd3761bd1 --- /dev/null +++ b/prototypes/xci/var/opnfv.yml @@ -0,0 +1,24 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +OPNFV_RELENG_GIT_URL: "{{ lookup('env','OPNFV_RELENG_GIT_URL') }}" +OPNFV_RELENG_PATH: "{{ lookup('env','OPNFV_RELENG_PATH') }}" +OPNFV_RELENG_VERSION: "{{ lookup('env','OPNFV_RELENG_VERSION') }}" +OPENSTACK_BIFROST_GIT_URL: "{{ lookup('env','OPENSTACK_BIFROST_GIT_URL') }}" +OPENSTACK_BIFROST_PATH: "{{ lookup('env','OPENSTACK_BIFROST_PATH') }}" +OPENSTACK_BIFROST_VERSION: "{{ lookup('env','OPENSTACK_BIFROST_VERSION') }}" +OPENSTACK_OSA_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}" +OPENSTACK_OSA_PATH: "{{ lookup('env','OPENSTACK_OSA_PATH') }}" +OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}" +OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}" +XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}" +XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}" +XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}" +LOG_PATH: "{{ lookup('env','LOG_PATH') }}" +OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}" diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh new file mode 100755 index 000000000..da5bb26cf --- /dev/null +++ b/prototypes/xci/xci-deploy.sh @@ -0,0 +1,200 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +#------------------------------------------------------------------------------- +# This script must run as root +#------------------------------------------------------------------------------- +if [[ $(whoami) != "root" ]]; then + echo "Error: This script must be run as root!" 
+ exit 1 +fi + +#------------------------------------------------------------------------------- +# Set environment variables +#------------------------------------------------------------------------------- +# The order of sourcing the variable files is significant so please do not +# change it or things might stop working. +# - user-vars: variables that can be configured or overridden by user. +# - pinned-versions: versions to checkout. These can be overridden if you want to +# use different/more recent versions of the tools but you might end up using +# something that is not verified by OPNFV XCI. +# - flavor-vars: settings for VM nodes for the chosen flavor. +# - env-vars: variables for the xci itself and you should not need to change or +# override any of them. +#------------------------------------------------------------------------------- +# find where are we +XCI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +# source user vars +source $XCI_PATH/config/user-vars +# source pinned versions +source $XCI_PATH/config/pinned-versions +# source flavor configuration +source "$XCI_PATH/config/${XCI_FLAVOR}-vars" +# source xci configuration +source $XCI_PATH/config/env-vars + +#------------------------------------------------------------------------------- +# Log info to console +#------------------------------------------------------------------------------- +echo "Info: Starting XCI Deployment" +echo "Info: Deployment parameters" +echo "-------------------------------------------------------------------------" +echo "xci flavor: $XCI_FLAVOR" +echo "opnfv/releng version: $OPNFV_RELENG_VERSION" +echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION" +echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION" +echo "-------------------------------------------------------------------------" + +#------------------------------------------------------------------------------- +# Install ansible on localhost 
+#------------------------------------------------------------------------------- +pip install ansible==$XCI_ANSIBLE_PIP_VERSION + +# TODO: The xci playbooks can be put into a playbook which will be done later. + +#------------------------------------------------------------------------------- +# Start provisioning VM nodes +#------------------------------------------------------------------------------- +# This playbook +# - removes directories that were created by the previous xci run +# - clones opnfv/releng and openstack/bifrost repositories +# - combines opnfv/releng and openstack/bifrost scripts/playbooks +# - destroys VMs, removes ironic db, leases, logs +# - creates and provisions VMs for the chosen flavor +#------------------------------------------------------------------------------- +echo "Info: Starting provisining VM nodes using openstack/bifrost" +echo "-------------------------------------------------------------------------" +cd $XCI_PATH/playbooks +ansible-playbook $ANSIBLE_VERBOSITY -i inventory provision-vm-nodes.yml +echo "-----------------------------------------------------------------------" +echo "Info: VM nodes are provisioned!" 

#-------------------------------------------------------------------------------
# Deploy OpenStack on the target hosts via openstack-ansible.
#
# Required environment variables (set by the calling xci configuration):
#   XCI_PATH           - path to the xci checkout (local playbooks)
#   OPNFV_RELENG_PATH  - path to the opnfv/releng checkout
#   XCI_FLAVOR         - deployment flavor (aio, mini, noha, ha, ...)
#   OPNFV_HOST_IP      - IP of the opnfv deployment host
#   OPENSTACK_OSA_PATH - openstack-ansible checkout on the deployment host
#   LOG_PATH           - directory for per-phase log files
#   ANSIBLE_VERBOSITY  - optional ansible verbosity flag (e.g. "-vvvv"); may be
#                        empty, which is why it is intentionally left unquoted
#                        wherever it is used below.
#-------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# Configure localhost
#-------------------------------------------------------------------------------
# This playbook
# - removes directories that were created by the previous xci run
# - clones opnfv/releng repository
# - creates log directory
# - copies flavor files such as playbook, inventory, and var file
#-------------------------------------------------------------------------------
echo "Info: Configuring localhost for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd "$XCI_PATH/playbooks"
ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-localhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured localhost host for openstack-ansible"

#-------------------------------------------------------------------------------
# Configure openstack-ansible deployment host, opnfv
#-------------------------------------------------------------------------------
# This playbook
# - removes directories that were created by the previous xci run
# - clones opnfv/releng and openstack/openstack-ansible repositories
# - configures network
# - generates/prepares ssh keys
# - bootstraps ansible
# - copies flavor files to be used by openstack-ansible
#-------------------------------------------------------------------------------
echo "Info: Configuring opnfv deployment host for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd "$OPNFV_RELENG_PATH/prototypes/xci/playbooks"
ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-opnfvhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured opnfv deployment host for openstack-ansible"

#-------------------------------------------------------------------------------
# Skip the rest if the flavor is aio since the target host for aio is opnfv
#-------------------------------------------------------------------------------
if [[ "$XCI_FLAVOR" == "aio" ]]; then
    echo "xci: aio has been installed"
    exit 0
fi

#-------------------------------------------------------------------------------
# Configure target hosts for openstack-ansible
#-------------------------------------------------------------------------------
# This playbook
# - adds public keys to target hosts
# - configures network
# - configures nfs
#-------------------------------------------------------------------------------
echo "Info: Configuring target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd "$OPNFV_RELENG_PATH/prototypes/xci/playbooks"
ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-targethosts.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured target hosts"

#-------------------------------------------------------------------------------
# Set up target hosts for openstack-ansible
#-------------------------------------------------------------------------------
# This is openstack-ansible playbook. Check upstream documentation for details.
#-------------------------------------------------------------------------------
echo "Info: Setting up target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
    $OPENSTACK_OSA_PATH/playbooks/setup-hosts.yml" | \
    tee "$LOG_PATH/setup-hosts.log"
echo "-----------------------------------------------------------------------"
# ansible-playbook exits 0 even when tasks fail on some hosts, so scan the
# recap in the captured log for failed/unreachable counts instead.
if grep -qE 'failed=1|unreachable=1' "$LOG_PATH/setup-hosts.log"; then
    echo "Error: OpenStack node setup failed!"
    exit 1
fi
echo "Info: Set up target hosts for openstack-ansible successfully"

#-------------------------------------------------------------------------------
# Set up infrastructure
#-------------------------------------------------------------------------------
# This is openstack-ansible playbook. Check upstream documentation for details.
#-------------------------------------------------------------------------------
echo "Info: Setting up infrastructure"
echo "-----------------------------------------------------------------------"
echo "xci: running ansible playbook setup-infrastructure.yml"
sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
    $OPENSTACK_OSA_PATH/playbooks/setup-infrastructure.yml" | \
    tee "$LOG_PATH/setup-infrastructure.log"
echo "-----------------------------------------------------------------------"
# check the log to see if we have any error
if grep -qE 'failed=1|unreachable=1' "$LOG_PATH/setup-infrastructure.log"; then
    echo "Error: OpenStack infrastructure setup failed!"
    exit 1
fi
echo "Info: Set up infrastructure for openstack-ansible successfully"

#-------------------------------------------------------------------------------
# Verify database cluster
#-------------------------------------------------------------------------------
echo "Info: Verifying database cluster"
echo "-----------------------------------------------------------------------"
# NOTE: the inner quotes are escaped so the mysql command survives all three
# quoting layers (local sh -c -> remote ssh shell -> ansible shell module)
# and reaches mysql intact as: show status like "%wsrep_cluster_%";
sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP ansible -i $OPENSTACK_OSA_PATH/playbooks/inventory/ \
    galera_container -m shell \
    -a \"mysql -h localhost -e 'show status like \\\"%wsrep_cluster_%\\\";'\"" \
    | tee "$LOG_PATH/galera.log"
echo "-----------------------------------------------------------------------"
# ansible prints "FAILED" for hosts where the shell command failed
if grep -q 'FAILED' "$LOG_PATH/galera.log"; then
    echo "Error: Database cluster verification failed!"
    exit 1
fi
echo "Info: Database cluster verification successful!"

#-------------------------------------------------------------------------------
# Install OpenStack
#-------------------------------------------------------------------------------
# This is openstack-ansible playbook. Check upstream documentation for details.
#-------------------------------------------------------------------------------
echo "Info: Installing OpenStack on target hosts"
echo "-----------------------------------------------------------------------"
sudo -E /bin/sh -c "ssh root@$OPNFV_HOST_IP openstack-ansible \
    $OPENSTACK_OSA_PATH/playbooks/setup-openstack.yml" | \
    tee "$LOG_PATH/opnfv-setup-openstack.log"
echo "-----------------------------------------------------------------------"
# check the log to see if we have any error
if grep -qE 'failed=1|unreachable=1' "$LOG_PATH/opnfv-setup-openstack.log"; then
    echo "Error: OpenStack installation failed!"
    exit 1
fi
echo "Info: OpenStack installation is successfully completed!"