-rw-r--r--  .gitignore | 4
-rw-r--r--  INFO | 27
-rw-r--r--  INFO.yaml | 24
-rwxr-xr-x  check | 223
-rwxr-xr-x  ci/build-auto.sh | 222
-rwxr-xr-x  ci/deploy-onap-fuel.sh | 238
-rwxr-xr-x  ci/deploy-onap-kubespray.sh | 339
-rwxr-xr-x  ci/deploy-onap.sh | 376
-rw-r--r--  ci/deploy-opnfv-apex-centos.sh | 209
-rw-r--r--  ci/deploy-opnfv-compass-ubuntu.sh | 201
-rw-r--r--  ci/deploy-opnfv-daisy-centos.sh | 179
-rw-r--r--  ci/deploy-opnfv-fuel-ubuntu.sh | 199
-rwxr-xr-x  ci/plot-results.sh | 101
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/index.rst | 18
-rw-r--r--  docs/release/configguide/Auto-featureconfig.rst | 196
-rw-r--r--  docs/release/configguide/auto-OPFNV-fuel.jpg | bin 189899 -> 0 bytes
-rw-r--r--  docs/release/configguide/auto-OPFNV-fuel.png | bin 0 -> 41457 bytes
-rw-r--r--  docs/release/configguide/auto-OS-config4ONAP.png | bin 0 -> 134668 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-ONAP-B.png | bin 0 -> 50086 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-generic.jpg | bin 154476 -> 0 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-generic.png | bin 0 -> 41926 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-initial.jpg | bin 118641 -> 0 bytes
-rw-r--r--  docs/release/configguide/auto-installTarget-initial.png | bin 0 -> 35994 bytes
-rw-r--r--  docs/release/configguide/auto-repo-folders.jpg | bin 162411 -> 0 bytes
-rw-r--r--  docs/release/configguide/auto-repo-folders.png | bin 0 -> 36136 bytes
-rw-r--r--  docs/release/configguide/index.rst | 1
-rw-r--r--  docs/release/release-notes/Auto-release-notes.rst | 329
-rw-r--r--  docs/release/release-notes/ONAP-toplevel-beijing.png | bin 0 -> 383760 bytes
-rw-r--r--  docs/release/release-notes/auto-proj-openstacksummit1805.png | bin 0 -> 10928 bytes
-rw-r--r--  docs/release/release-notes/auto-proj-parameters.png | bin 0 -> 32716 bytes
-rw-r--r--  docs/release/release-notes/auto-proj-tests.png | bin 0 -> 33348 bytes
-rw-r--r--  docs/release/release-notes/auto-project-activities.png | bin 55670 -> 25995 bytes
-rw-r--r--  docs/release/release-notes/index.rst | 1
-rw-r--r--  docs/release/userguide/UC01-feature.userguide.rst | 7
-rw-r--r--  docs/release/userguide/UC02-feature.userguide.rst | 49
-rw-r--r--  docs/release/userguide/UC03-feature.userguide.rst | 16
-rw-r--r--  docs/release/userguide/index.rst | 1
-rw-r--r--  docs/requirements.txt | 2
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilItfCloud.py | 30
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilMgTestDef.py | 141
-rw-r--r--  lib/auto/testcase/resiliency/clouds.yaml | 20
-rw-r--r--  pylintrc | 561
-rw-r--r--  requirements.txt | 2
-rw-r--r--  setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py | 923
-rw-r--r--  setup/VIMs/OpenStack/clouds.yaml | 99
-rw-r--r--  setup/onap_on_openstack/onap_os_builder.py | 4
-rw-r--r--  tox.ini | 17
-rw-r--r--  yamllintrc | 25
50 files changed, 4526 insertions, 262 deletions
diff --git a/.gitignore b/.gitignore
index acb5d9d..f6b7eea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,7 @@
/lib/auto.egg-info
/build
/dist
+/docs_output
+/opnfvdocs
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
deleted file mode 100644
index 10020fd..0000000
--- a/INFO
+++ /dev/null
@@ -1,27 +0,0 @@
-Project: ONAP-Automated OPNFV (Auto)
-Project Creation Date: August 15, 2017
-Project Category:
-Lifecycle State: Incubation
-Primary Contact: tina.tsou@arm.com
-Project Lead: tina.tsou@arm.com
-Jira Project Name: ONAP-Automated OPNFV
-Jira Project Prefix: AUTO
-Mailing list tag: [auto]
-IRC: Server:freenode.net Channel:#opnfv-auto
-Repository: auto
-
-Committers:
-Tina Tsou (tina.tsou@arm.com)
-Harry Huang (huangxiangyu5@huawei.com)
-Song Zhu (song.zhu@arm.com)
-Prasad Gorja (prasad.gorja@nxp.com)
-Liang Ou (oul.gd@chinatelecom.cn)
-Lei Chen (chenlei@caict.ac.cn)
-Xiaoyu Wang (wxy_cttl@126.com)
-Xu Lu (luxu_hd@163.com)
-Eric Maye (eric.dmaye@wipro.com)
-Chen Zhang (zhangchen.bri@chinatelecom.cn)
-Mohankumar Navaneethan (mnavaneethan@mvista.com)
-Gerard Damm (gerard.damm@wipro.com)
-
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html
diff --git a/INFO.yaml b/INFO.yaml
index aee9a7b..69e5b01 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -38,22 +38,14 @@ committers:
email: 'huangxiangyu5@huawei.com'
company: 'huawei.com'
id: 'huangxiangyu'
- - name: 'Madhukesh Sambashivaiah'
- email: 'madhukeshs@gmail.com'
- company: 'gmail.com'
- id: 'madhukeshs'
- - name: 'Song Zhu'
- email: 'song.zhu@arm.com'
- company: 'arm.com'
- id: 'mail22song'
- - name: 'Liang Ou'
- email: 'oul.gd@chinatelecom.cn'
- company: 'chinatelecom.cn'
- id: 'ouliang1'
- - name: 'Gerard Damm'
- email: 'gerard.damm@wipro.com'
- company: 'Wipro'
- id: 'gerard_damm'
+ - name: 'Paul Vaduva'
+ email: 'paul.vaduva@enea.com'
+ company: 'enea.com'
+ id: 'pvaduva'
+ - name: 'Martin Klozik'
+ email: 'martin.klozik@tieto.com'
+ company: 'tieto.com'
+ id: 'mklozik'
tsc:
# yamllint disable rule:line-length
approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html'
diff --git a/check b/check
new file mode 100755
index 0000000..0428fa6
--- /dev/null
+++ b/check
@@ -0,0 +1,223 @@
+#!/bin/bash
+
+# Copyright 2017-2018 Intel Corporation, Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto project python & yaml formatting checker
+# This script is based on the file ./check from OPNFV vswitchperf
+# project.
+
+#
+# Configuration
+#
+PYLINT="pylint"
+PYLINT_RC='pylintrc'
+PYTHON_FILE_REGEX="\.py$"
+YAMLLINT="yamllint"
+YAMLLINT_RC='yamllintrc'
+YAML_FILE_REGEX="\.yaml$"
+FILE_LIST="/tmp/auto_check_list.txt"
+
+CHECK_PYTHON=0
+CHECK_YAML=0
+
+#
+# Support Functions
+#
+# print usage if requested
+function usage() {
+ cat <<EOM
+Usage: $0 [TARGET]...
+
+Performs a code check for the defined TARGETs. A target can be a file or a
+directory. If a directory is specified, it will be searched recursively
+for all python and yaml files.
+If TARGET is not specified, then all python and yaml files from the current
+AUTO repository will be checked.
+
+
+ -h, --help Script usage
+ -b, --black Suppress colours. Output will be black&white.
+ -m, --modified Script will check python and yaml files which have
+ been modified within the current repository.
+
+Examples:
+ ./check
+
+ Check all python and yaml files in current AUTO repository
+
+ ./check INFO.yaml
+
+ Check just one file.
+
+ ./check -m
+
+ Check all modified files in current AUTO repository
+
+ ./check lib/auto/testcase lib/auto/util
+
+ Check all python and yaml files in given directories
+
+EOM
+}
+
+# get list of files to be checked
+function get_file_list() {
+ # store file regex and shift params to get list of original ./check options
+ TMP_FILE_REGEX=$1
+ shift
+
+ rm $FILE_LIST &> /dev/null
+ if [ "x$1" == "x-m" -o "x$1" == "x--modified" ] ; then
+ # check of modified files requested
+ git status --porcelain | cut -b4- | egrep -i "${TMP_FILE_REGEX}" | sort > $FILE_LIST
+ elif [ "x$*" == "x" ] ; then
+ # list is empty, check all python files
+ git ls-tree --name-only -r HEAD | egrep -i "${TMP_FILE_REGEX}" | sort > $FILE_LIST
+ else
+ for item in $* ; do
+ if [ -d $item ] ; then
+ git ls-tree --name-only -r HEAD $item | egrep -i "${TMP_FILE_REGEX}" | sort >> $FILE_LIST
+ elif [ -f $item ] ; then
+ echo $item | egrep -i "${TMP_FILE_REGEX}" >> $FILE_LIST
+ else
+ echo "$item doesn't exist, thus check was aborted"
+ exit 1
+ fi
+ done
+ fi
+}
+
+function check_lint_binary() {
+ # check if lint binary is available
+ if ! which $1 &>/dev/null ; then
+ echo "$1 is not available, thus check can't be executed"
+ return 1
+ fi
+ return 0
+}
+
+
+function check_python() {
+ echo "Execution of pylint checks:"
+
+ if ! check_lint_binary $PYLINT ; then
+ CHECK_PYTHON=1
+ return
+ fi
+
+ # check if there is anything to check
+ if [ -s $FILE_LIST ] ; then
+ for pyfile in `cat $FILE_LIST | sort` ; do
+ # get base name
+ pyfile_basename="'"`basename $pyfile .py`"'"
+ # run pylint and extract final rating
+ output=`$PYLINT --rcfile $PYLINT_RC $pyfile 2>/dev/null`
+ rating=`echo -e $output | tail -n3 | grep rated | sed -e 's/^.*rated at \(-\?[0-9.]*\).*$/\1/'`
+ # evaluate and display acquired rating
+ if [ "x$rating" == "x" ] ; then
+ # rating is not available for files without python statements
+ printf " %-70s %-6s\n" $pyfile "NA"
+ elif [ "$rating" == "10" ] ; then
+ printf " %-70s ${GREEN}%-6s${BLACK}\n" $pyfile "OK"
+ else
+ CHECK_PYTHON=1
+ echo -e "$output" | awk '/^\*+ Module|^[A-Z]\:/'
+ printf " %-70s ${RED}%-6s${BLACK}\n" $pyfile $rating
+ fi
+ done
+ else
+ echo " Nothing to check."
+ fi
+}
+
+function check_yaml() {
+ echo "Execution of yaml checks:"
+
+ if ! check_lint_binary $YAMLLINT ; then
+ CHECK_YAML=1
+ return
+ fi
+
+ # check if there is anything to check
+ if [ -s $FILE_LIST ] ; then
+ for yamlfile in `cat $FILE_LIST | sort` ; do
+ output=`$YAMLLINT -c $YAMLLINT_RC $yamlfile 2>/dev/null`
+ if [ $? -eq 0 ] ; then
+ printf " %-70s ${GREEN}%-6s${BLACK}\n" $yamlfile "OK"
+ else
+ CHECK_YAML=1
+ echo "$output"
+ printf " %-70s ${RED}%-6s${BLACK}\n" $yamlfile "FAILED"
+ fi
+ done
+ else
+ echo " Nothing to check."
+ fi
+}
+
+#
+# Main
+#
+# check if help is requested
+if [ "x$1" == "x-h" -o "x$1" == "x--help" ] ; then
+ usage
+ exit 0
+fi
+
+# set colours
+if [ "x$1" == "x-b" -o "x$1" == "x--black" ] ; then
+ shift
+ RED=""
+ GREEN=""
+ BLACK=""
+else
+ RED="\e[31m"
+ GREEN="\e[32m"
+ BLACK="\e[0m"
+fi
+
+# check if we were run within auto directory
+if [ ! -x ./check 2> /dev/null ] ; then
+ echo "`basename $0` must be run from auto root directory"
+ exit 1
+fi
+
+# run python checks
+get_file_list $PYTHON_FILE_REGEX $*
+check_python
+
+echo
+
+# run yaml checks
+get_file_list $YAML_FILE_REGEX $*
+check_yaml
+
+# clean up
+rm $FILE_LIST &> /dev/null
+
+# return success or failure based on pylint and yamllint checks
+# NOTE: As of now, failure of pylint checks is not propagated into exit code.
+# This will be turned on again after the rating of existing python
+# files will be improved.
+# if [ $CHECK_PYTHON -eq 0 -a $CHECK_YAML -eq 0 ] ; then
+if [ $CHECK_YAML -eq 0 ] ; then
+ exit 0
+else
+ exit 1
+fi
+
+#
+# The End
+#
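
The pylint rating used by check_python above is parsed out of pylint's summary
line with sed. A minimal sketch of that extraction, assuming a typical pylint
summary line (the sample text below is hypothetical, not taken from the patch):

    # hypothetical pylint summary line
    line="Your code has been rated at 9.50/10 (previous run: 9.20/10, +0.30)"
    echo "$line" | sed -e 's/^.*rated at \(-\?[0-9.]*\).*$/\1/'   # prints 9.50
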
diff --git a/ci/build-auto.sh b/ci/build-auto.sh
new file mode 100755
index 0000000..00b67b1
--- /dev/null
+++ b/ci/build-auto.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+#
+# Copyright 2015-2018 Intel Corporation., Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# CI helper script for execution of AUTO project jenkins jobs.
+# This script is based on the file ci/build-vsperf.sh from OPNFV vswitchperf
+# project.
+
+# Usage:
+# build-auto.sh job_type
+#
+# Parameters:
+# job_type - is one of "verify", "merge" or "daily"
+#
+# Example:
+# ./ci/build-auto.sh verify
+
+#
+# exit codes
+#
+EXIT=0
+EXIT_UNKNOWN_JOB_TYPE=1
+EXIT_LINT_FAILED=2
+EXIT_FUEL_FAILED=10
+
+#
+# configuration
+#
+AUTOENV_DIR="$HOME/autoenv"
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+LOG_DIR=$HOME/auto_ci_daily_logs
+WORKSPACE=${WORKSPACE:-$PWD}
+
+# POD and SCENARIO details used during OPNFV deployment performed by daily job
+NODE_NAME=${NODE_NAME:-"ericsson-virtual1"}
+POD_LAB=$(echo $NODE_NAME | cut -d '-' -f1)
+POD_NAME=$(echo $NODE_NAME | cut -d '-' -f2)
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-onap-noha"}
+
+#
+# functions
+#
+# execute pylint and yamllint to check code quality
+function execute_auto_lint_check() {
+ if ! ./check -b ; then
+ EXIT=$EXIT_LINT_FAILED
+ fi
+}
+
+# check and install required packages
+function dependencies_check() {
+ . /etc/os-release
+ if [ $ID == "ubuntu" ] ; then
+ echo "Dependencies check"
+ echo "=================="
+ # install system packages
+ for PACKAGE in "virtualenv" "pylint" "yamllint" "gnuplot" ; do
+ if dpkg -s $PACKAGE &> /dev/null ; then
+ printf " %-70s %-6s\n" $PACKAGE "OK"
+ else
+ printf " %-70s %-6s\n" $PACKAGE "missing"
+ sudo apt-get install -y $PACKAGE
+ fi
+ done
+ echo
+ fi
+}
+
+# create virtualenv if needed and enable it
+function virtualenv_prepare() {
+ if [ ! -e $AUTOENV_DIR ] ; then
+ echo "Create AUTO environment"
+ echo "======================="
+ virtualenv "$AUTOENV_DIR"
+ echo
+ fi
+
+ # activate and update virtualenv
+ echo "Update AUTO environment"
+ echo "======================="
+ source "$AUTOENV_DIR"/bin/activate
+ pip install -r ./requirements.txt
+ echo
+}
+
+#
+# main
+#
+echo
+
+# enter workspace dir
+cd $WORKSPACE
+
+# check if required packages are installed
+dependencies_check
+
+# execute job based on passed parameter
+case $1 in
+ "verify")
+ echo "==============="
+ echo "AUTO verify job"
+ echo "==============="
+
+ virtualenv_prepare
+ execute_auto_lint_check
+ #execute_auto_doc_check
+
+ # Everything went well, so report SUCCESS to Jenkins
+ exit $EXIT
+ ;;
+ "merge")
+ echo "=============="
+ echo "AUTO merge job"
+ echo "=============="
+
+ virtualenv_prepare
+ execute_auto_lint_check
+ #execute_auto_doc_check
+
+ # propagate result to the Jenkins job
+ exit $EXIT
+ ;;
+ "daily")
+ echo "=============="
+ echo "AUTO daily job"
+ echo "=============="
+ echo
+ echo "Deployment details:"
+ echo " LAB: $POD_LAB"
+ echo " POD: $POD_NAME"
+ echo " Scenario: $DEPLOY_SCENARIO"
+ echo " WORKSPACE: $WORKSPACE"
+ echo
+
+ # create log dir if needed
+ if [ ! -e $LOG_DIR ] ; then
+ echo "Create AUTO LOG DIRECTORY"
+ echo "========================="
+ echo "mkdir $LOG_DIR"
+ mkdir $LOG_DIR
+ echo
+ fi
+
+ echo "Installation of OPNFV and ONAP"
+ echo "=============================="
+ # clone fuel and execute installation of ONAP scenario to install
+ # ONAP on top of OPNFV deployment
+ [ -e fuel ] && rm -rf fuel
+ git clone https://gerrit.opnfv.org/gerrit/fuel
+ cd fuel
+ # Fuel master branch is currently broken; thus use stable/gambia
+ # branch with recent master version of ONAP scenario
+ git checkout stable/gambia
+ git checkout origin/master mcp/config/states/onap \
+ mcp/config/scenario/os-nosdn-onap-ha.yaml \
+ mcp/config/scenario/os-nosdn-onap-noha.yaml
+ # use larger disk size for virtual nodes
+ sed -i -re 's/(qemu-img resize.*)100G/\1400G/' mcp/scripts/lib_jump_deploy.sh
+
+ LOG_FILE="$LOG_DIR/deploy_${TIMESTAMP}.log"
+ echo "ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\
+ tee $LOG_FILE"
+ DEPLOY_START=$(date +%Y%m%d_%H%M%S)
+ ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\
+ tee $LOG_FILE
+
+ # report failure if fuel failed to install OPNFV or ONAP
+ [ $? -ne 0 ] && exit $EXIT_FUEL_FAILED
+
+ # process report
+ DEPLOY_END=$(date +%Y%m%d_%H%M%S)
+ REPORT_FILE="$LOG_DIR/deploy_report_${TIMESTAMP}.txt"
+ CSV_SUMMARY="$LOG_DIR/deploy_summary_${TIMESTAMP}.csv"
+ MARKER="ONAP INSTALLATION REPORT"
+ # cut report from installation log file
+ sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" \
+ $LOG_FILE > $REPORT_FILE
+ PODS_TOTAL=$(grep "PODs Total" $REPORT_FILE | sed -e 's/[^0-9]//g')
+ PODS_FAILED=$(grep "PODs Failed" $REPORT_FILE | sed -e 's/[^0-9]//g')
+ TC_SUM=$(grep "tests total" $REPORT_FILE | tail -n1 |\
+ sed -e 's/[^0-9,]//g')
+
+ echo "Start Time,End Time,Total PODs,Failed PODs,Total Tests,Passed"\
+ "Tests,Failed Tests" >> $CSV_SUMMARY
+ echo "$DEPLOY_START,$DEPLOY_END,$PODS_TOTAL,$PODS_FAILED,$TC_SUM"\
+ >> $CSV_SUMMARY
+
+ # plot graphs from result summaries and print txt versions if possible
+ cd $WORKSPACE
+ ci/plot-results.sh
+ for GRAPH in $(ls -1 graph*txt 2> /dev/null) ; do
+ cat $GRAPH
+ done
+
+ # propagate result to the Jenkins job
+ exit $EXIT
+ ;;
+ *)
+ echo
+ echo "ERRROR: Unknown job type \"$1\""
+ echo
+ exit $EXIT_UNKNOWN_JOB_TYPE
+ ;;
+esac
+
+exit $EXIT_UNKNOWN_JOB_TYPE
+
+#
+# end
+#
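
The daily job above cuts the "ONAP INSTALLATION REPORT" block out of the
installation log with sed and then greps the POD counters from it. A minimal
sketch of the same extraction, assuming a sample log file; the file name and
counter values are hypothetical:

    MARKER="ONAP INSTALLATION REPORT"
    # copy everything between the report header and its end marker
    sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" deploy.log > report.txt
    PODS_TOTAL=$(grep "PODs Total" report.txt | sed -e 's/[^0-9]//g')    # e.g. 245
    PODS_FAILED=$(grep "PODs Failed" report.txt | sed -e 's/[^0-9]//g')  # e.g. 1
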
diff --git a/ci/deploy-onap-fuel.sh b/ci/deploy-onap-fuel.sh
new file mode 100755
index 0000000..c120e9c
--- /dev/null
+++ b/ci/deploy-onap-fuel.sh
@@ -0,0 +1,238 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP on top of OPNFV Fuel/MCP installation
+# In the future both OOM and heat install methods should be supported.
+# At the beginning OOM will be used for simplification.
+
+# TODO:
+# Configure ONAP to be able to control underlying OpenStack
+
+# Configuration to be passed to ci/deploy-onap.sh
+export SSH_USER="ubuntu"
+export SSH_IDENTITY="/root/.ssh/onap_key"
+
+# detect hypervisor details to be used as default values if needed
+OS_HYPER_CMD="openstack hypervisor list --long"
+echo -e "\nOpenStack Hepervisor list\n"
+$OS_HYPER_CMD
+
+DEFAULT_CMP_COUNT=$($OS_HYPER_CMD -f value -c "ID" | wc -l)
+DEFAULT_CMP_MIN_MEM=$($OS_HYPER_CMD -f value -c "Memory MB" | sort | head -n1)
+DEFAULT_CMP_MIN_CPUS=$($OS_HYPER_CMD -f value -c "vCPUs" | sort | head -n1)
+
+# Use default values if compute configuration was not set by FUEL installer
+AUTO_INSTALL_DIR=${AUTO_INSTALL_DIR:-"."}
+AUTO_IMAGE_DIR="${AUTO_INSTALL_DIR}/images"
+CMP_COUNT=${CMP_COUNT:-$DEFAULT_CMP_COUNT} # number of compute nodes
+CMP_MIN_MEM=${CMP_MIN_MEM:-$DEFAULT_CMP_MIN_MEM} # MB RAM of the weakest compute node
+CMP_MIN_CPUS=${CMP_MIN_CPUS:-$DEFAULT_CMP_MIN_CPUS} # CPU count of the weakest compute node
+# size of storage for instances
+CMP_STORAGE_TOTAL=${CMP_STORAGE_TOTAL:-$((80*$CMP_COUNT))}
+VM_COUNT=${VM_COUNT:-6} # number of VMs available for k8s cluster
+
+#
+# Functions
+#
+# function minimum accepts two numbers and prints smaller one
+function minimum(){
+ echo $(($1<$2?$1:$2))
+}
+
+# function remove_openstack_setup removes the OS configuration performed by this
+# script, so previously created configuration and deployed VMs will be
+# removed before a new ONAP deployment is started.
+function remove_openstack_setup(){
+ # flavor is created 1st but removed last, so...
+ if ( ! openstack flavor list | grep 'onap.large' &> /dev/null ) ; then
+ #...no flavor means nothing to be removed
+ return
+ fi
+ echo -e "\nRemoving ONAP specific OpenStack configuration"
+ for a in $(openstack server list --name onap_vm -f value -c ID) ; do
+ openstack server delete $a
+ done
+ RULES=$(openstack security group rule list onap_security_group -f value -c ID)
+ for a in $RULES; do
+ openstack security group rule delete $a
+ done
+ openstack security group delete onap_security_group
+ for a in $(openstack floating ip list -f value -c ID) ; do
+ openstack floating ip delete $a
+ done
+ PORTS=$(openstack port list --network onap_private_network -f value -c ID)
+ for a in $PORTS ; do
+ openstack router remove port onap_router $a
+ done
+ PORTS=$(openstack port list --network onap_private_network -f value -c ID)
+ for a in $PORTS ; do
+ openstack port delete $a
+ done
+ openstack router delete onap_router
+ openstack subnet delete onap_private_subnet
+ openstack network delete onap_private_network
+ openstack image delete xenial
+ rm -rf $AUTO_IMAGE_DIR
+ openstack keypair delete onap_key
+ rm $SSH_IDENTITY
+ openstack flavor delete onap.large
+ echo
+}
+
+#
+# Script Main
+#
+
+# remove OpenStack configuration if it exists
+remove_openstack_setup
+
+echo -e "\nOpenStack configuration\n"
+
+# Calculate VM resources, so that flavor can be created
+echo "Configuration of compute node:"
+echo "Number of computes: CMP_COUNT=$CMP_COUNT"
+echo "Minimal RAM: CMP_MIN_MEM=$CMP_MIN_MEM"
+echo "Minimal CPUs count: CMP_MIN_CPUS=$CMP_MIN_CPUS"
+echo "Storage for instances: CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL"
+echo "Number of VMs: VM_COUNT=$VM_COUNT"
+# Calculate VM parameters; there will be up to 1 VM per Compute node
+# to maximize resources available for VMs
+PER=85 # % of compute resources will be consumed by VMs
+VM_DISK_MAX=100 # GB - max VM disk size
+VM_MEM_MAX=81920 # MB - max VM RAM size
+VM_CPUS_MAX=56 # max count of VM CPUs
+VM_MEM=$(minimum $(($CMP_MIN_MEM*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_MEM_MAX)
+VM_CPUS=$(minimum $(($CMP_MIN_CPUS*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_CPUS_MAX)
+VM_DISK=$(minimum $(($CMP_STORAGE_TOTAL*$PER/100/$VM_COUNT)) $VM_DISK_MAX)
+
+echo -e "\nFlavor configuration:"
+echo "CPUs : $VM_CPUS"
+echo "RAM [MB] : $VM_MEM"
+echo "DISK [GB] : $VM_DISK"
+
+# Create onap flavor
+openstack flavor create --ram $VM_MEM --vcpus $VM_CPUS --disk $VM_DISK \
+ onap.large
+
+# Generate a keypair and store private key
+openstack keypair create onap_key > $SSH_IDENTITY
+chmod 600 $SSH_IDENTITY
+
+# Download and import VM image(s)
+mkdir $AUTO_IMAGE_DIR
+wget -P $AUTO_IMAGE_DIR https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+openstack image create --disk-format qcow2 --container-format bare --public \
+ --file $AUTO_IMAGE_DIR/xenial-server-cloudimg-amd64-disk1.img xenial
+
+# Modify quotas (add 10% to required VM resources)
+openstack quota set --ram $(($VM_MEM*$VM_COUNT*110/100)) admin
+openstack quota set --cores $(($VM_CPUS*$VM_COUNT*110/100)) admin
+
+# Configure networking with DNS for access to the internet
+openstack network create onap_private_network --provider-network-type vxlan
+openstack subnet create onap_private_subnet --network onap_private_network \
+ --subnet-range 192.168.33.0/24 --ip-version 4 --dhcp --dns-nameserver "8.8.8.8"
+openstack router create onap_router
+openstack router add subnet onap_router onap_private_subnet
+openstack router set onap_router --external-gateway floating_net
+
+# Allow selected ports and protocols
+openstack security group create onap_security_group
+openstack security group rule create --protocol icmp onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 22:22 onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 8080:8080 onap_security_group # rancher
+openstack security group rule create --proto tcp \
+ --dst-port 8078:8078 onap_security_group # horizon
+openstack security group rule create --proto tcp \
+ --dst-port 8879:8879 onap_security_group # helm
+openstack security group rule create --proto tcp \
+ --dst-port 80:80 onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 443:443 onap_security_group
+
+# Allow communication between k8s cluster nodes
+PUBLIC_NET=`openstack subnet list --name floating_subnet -f value -c Subnet`
+openstack security group rule create --remote-ip $PUBLIC_NET --proto tcp \
+ --dst-port 1:65535 onap_security_group
+openstack security group rule create --remote-ip $PUBLIC_NET --proto udp \
+ --dst-port 1:65535 onap_security_group
+
+# Get list of hypervisors and their zone
+HOST_ZONE=$(openstack host list -f value | grep compute | head -n1 | cut -d' ' -f3)
+HOST_NAME=($(openstack host list -f value | grep compute | cut -d' ' -f1))
+HOST_COUNT=$(echo ${HOST_NAME[@]} | wc -w)
+# Create VMs and assign floating IPs to them
+VM_ITER=1
+HOST_ITER=0
+while [ $VM_ITER -le $VM_COUNT ] ; do
+ openstack floating ip create floating_net
+ VM_NAME[$VM_ITER]="onap_vm${VM_ITER}"
+ VM_IP[$VM_ITER]=$(openstack floating ip list -c "Floating IP Address" \
+ -c "Port" -f value | grep None | cut -f1 -d " " | head -n1)
+ # dispatch new VMs among compute nodes in round robin fashion
+ openstack server create --flavor onap.large --image xenial \
+ --nic net-id=onap_private_network --security-group onap_security_group \
+ --key-name onap_key ${VM_NAME[$VM_ITER]} \
+ --availability-zone ${HOST_ZONE}:${HOST_NAME[$HOST_ITER]}
+ sleep 10 # wait for VM init before floating IP can be assigned
+ openstack server add floating ip ${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}
+ echo "Waiting for ${VM_NAME[$VM_ITER]} to start up for 1m at $(date)"
+ sleep 1m
+ VM_ITER=$(($VM_ITER+1))
+ HOST_ITER=$(($HOST_ITER+1))
+ [ $HOST_ITER -ge $HOST_COUNT ] && HOST_ITER=0
+done
+
+openstack server list -c ID -c Name -c Status -c Networks -c Host --long
+
+# check that SSH to all VMs is working
+SSH_OPTIONS="-i $SSH_IDENTITY -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+COUNTER=1
+while [ $COUNTER -le 10 ] ; do
+ VM_UP=0
+ VM_ITER=1
+ while [ $VM_ITER -le $VM_COUNT ] ; do
+ if ssh $SSH_OPTIONS -l $SSH_USER ${VM_IP[$VM_ITER]} exit &>/dev/null ; then
+ VM_UP=$(($VM_UP+1))
+ echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: up"
+ else
+ echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: down"
+ fi
+ VM_ITER=$(($VM_ITER+1))
+ done
+ COUNTER=$(($COUNTER+1))
+ if [ $VM_UP -eq $VM_COUNT ] ; then
+ break
+ fi
+ echo "Waiting for VMs to be accessible via ssh for 2m at $(date)"
+ sleep 2m
+done
+
+openstack server list -c ID -c Name -c Status -c Networks -c Host --long
+
+if [ $VM_UP -ne $VM_COUNT ] ; then
+ echo "Only $VM_UP from $VM_COUNT VMs are accessible via ssh. Installation will be terminated."
+ exit 1
+fi
+
+# Start ONAP installation
+DATE_START=$(date)
+echo -e "\nONAP Installation Started at $DATE_START\n"
+$AUTO_INSTALL_DIR/ci/deploy-onap.sh ${VM_IP[@]}
+echo -e "\nONAP Installation Started at $DATE_START"
+echo -e "ONAP Installation Finished at $(date)\n"
diff --git a/ci/deploy-onap-kubespray.sh b/ci/deploy-onap-kubespray.sh
new file mode 100755
index 0000000..a797388
--- /dev/null
+++ b/ci/deploy-onap-kubespray.sh
@@ -0,0 +1,339 @@
+#!/bin/bash
+#
+# Copyright 2018-2019 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+
+#
+# Configuration
+#
+export LC_ALL=C
+export LANG=C
+
+MASTER=$1
+SERVERS=$*
+shift
+SLAVES=$*
+
+ONAP_BRANCH=${ONAP_BRANCH:-'casablanca'}
+KUBESPRAY_COMMIT="bbfd2dc2bd088efc63747d903edd41fe692531d8"
+NAMESPACE='onap'
+SSH_USER=${SSH_USER:-"opnfv"}
+SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# use identity file from the environment SSH_IDENTITY
+if [ -n "$SSH_IDENTITY" ] ; then
+ SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS"
+ ANSIBLE_IDENTITY="--private-key=$SSH_IDENTITY"
+fi
+
+KUBESPRAY_OPTIONS='-e "kubelet_max_pods=250"'
+
+TMP_POD_LIST='/tmp/onap_pod_list.txt'
+
+case "$ONAP_BRANCH" in
+ "beijing")
+ HELM_VERSION=2.8.2
+ ;;
+ "casablanca")
+ HELM_VERSION=2.9.1
+ ;;
+ *)
+ HELM_VERSION=2.9.1
+ ;;
+esac
+
+ONAP_MINIMAL="aai dmaap portal robot sdc sdnc so vid"
+# by default install a minimal ONAP installation
+# empty list of ONAP_COMPONENT means full ONAP installation
+ONAP_COMPONENT=${ONAP_COMPONENT:-$ONAP_MINIMAL}
+
+#
+# Functions
+#
+function usage() {
+ echo "usage"
+ cat <<EOL
+Usage:
+ $0 <MASTER> [ <SLAVE1> <SLAVE2> ... ]
+
+ where <MASTER> and <SLAVEx> are IP addresses of servers to be used
+ for ONAP installation.
+
+ Script behavior is affected by following environment variables:
+
+ ONAP_COMPONENT - a list of ONAP components to be installed, empty list
+ will trigger a full ONAP installation
+ VALUE: "$ONAP_COMPONENT"
+
+ ONAP_BRANCH - version of ONAP to be installed (OOM branch version)
+ VALUE: "$ONAP_BRANCH"
+
+ NAMESPACE - name of ONAP namespace in kubernetes cluster
+ VALUE: "$NAMESPACE"
+
+ SSH_USER - user name to be used to access <MASTER> and <SLAVEx>
+ servers
+ VALUE: "$SSH_USER"
+
+ SSH_IDENTITY - (optional) ssh identity file to be used to access
+ <MASTER> and <SLAVEx> servers as a SSH_USER
+ VALUE: "$SSH_IDENTITY"
+
+NOTE: Following must be assured for <MASTER> and <SLAVEx> servers before
+ $0 execution:
+ 1) SSH_USER must be able to access servers via ssh without a password
+ 2) SSH_USER must have a password-less sudo access
+EOL
+}
+
+# Check if the server IPs of the kubernetes nodes are configured on the given server.
+# If that is not the case, then the kubespray inventory file must be updated.
+function check_server_ips() {
+ for SERVER_IP in $(grep 'ip=' $1 | sed -re 's/^.*ip=([0-9\.]+).*$/\1/') ; do
+ IP_OK="false"
+ for IP in $(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip a | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g'") ; do
+ if [ "$IP" == "$SERVER_IP" ] ; then
+ IP_OK="true"
+ fi
+ done
+ # access IP (e.g. OpenStack floating IP) is not a server local address, so update the inventory
+ if [ $IP_OK == "false" ] ; then
+ # get server default GW dev
+ DEV=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip route ls" | grep ^default | sed -re 's/^.*dev (.*)$/\1/')
+ LOCAL_IP=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip -f inet addr show $DEV" | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g')
+ if [ "$LOCAL_IP" == "" ] ; then
+ echo "Can't read local IP for server with IP $SERVER_IP"
+ exit 1
+ fi
+ sed -i'' -e "s/ip=$SERVER_IP/ip=$LOCAL_IP access_ip=$SERVER_IP/" $1
+ fi
+ done
+}
+
+# sanity check
+if [ "$SERVERS" == "" ] ; then
+ usage
+ exit 1
+fi
+
+#
+# Installation
+#
+
+# detect CPU architecture to download correct helm binary
+CPU_ARCH=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "uname -p")
+case "$CPU_ARCH" in
+ "x86_64")
+ ARCH="amd64"
+ ;;
+ "aarch64")
+ ARCH="arm64"
+ ;;
+ *)
+ echo "Unsupported CPU architecture '$CPU_ARCH' was detected."
+ exit 1
+esac
+
+# print configuration
+cat << EOL
+list of configuration options:
+ SERVERS="$SERVERS"
+ ONAP_COMPONENT="$ONAP_COMPONENT"
+ ONAP_BRANCH="$ONAP_BRANCH"
+ NAMESPACE="$NAMESPACE"
+ SSH_USER="$SSH_USER"
+ SSH_IDENTITY="$SSH_IDENTITY"
+ ARCH="$ARCH"
+
+EOL
+
+# install K8S cluster by kubespray
+sudo apt-get -y update
+sudo apt-get -y install git ansible python-jinja2 python3-pip libffi-dev libssl-dev
+git clone https://github.com/kubernetes-incubator/kubespray.git
+cd kubespray
+git checkout $KUBESPRAY_COMMIT
+pip3 install -r requirements.txt
+export CONFIG_FILE=inventory/auto_hosts.ini
+rm $CONFIG_FILE
+python3 contrib/inventory_builder/inventory.py $SERVERS
+check_server_ips $CONFIG_FILE
+cat $CONFIG_FILE
+if ( ! ansible-playbook -i $CONFIG_FILE $KUBESPRAY_OPTIONS -b -u $SSH_USER $ANSIBLE_IDENTITY cluster.yml ) ; then
+ echo "Kubespray installation has failed at $(date)"
+ exit 1
+fi
+
+# use standalone K8S master if there are enough VMs available for the K8S cluster
+SERVERS_COUNT=$(echo $SERVERS | wc -w)
+if [ $SERVERS_COUNT -gt 2 ] ; then
+ K8S_NODES=$SLAVES
+else
+ K8S_NODES=$SERVERS
+fi
+
+echo "INSTALLATION TOPOLOGY:"
+echo "Kubernetes Master: $MASTER"
+echo "Kubernetes Nodes: $K8S_NODES"
+echo
+echo "CONFIGURING NFS ON SLAVES"
+echo "$SLAVES"
+
+for SLAVE in $SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS &
+ sudo su
+ apt-get install nfs-common -y
+ mkdir /dockerdata-nfs
+ chmod 777 /dockerdata-nfs
+ echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab
+ mount -a
+ mount | grep dockerdata-nfs
+CONFIGURENFS
+done
+wait
+
+echo "DEPLOYING OOM ON MASTER"
+echo "$MASTER"
+
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY
+sudo su
+echo "create namespace '$NAMESPACE'"
+cat <<EOF | kubectl create -f -
+{
+ "kind": "Namespace",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "$NAMESPACE",
+ "labels": {
+ "name": "$NAMESPACE"
+ }
+ }
+}
+EOF
+kubectl get namespaces --show-labels
+kubectl -n kube-system create sa tiller
+kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
+rm -rf oom
+echo "pulling new oom"
+git clone -b $ONAP_BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+
+# Enable selected ONAP components
+if [ -n "$ONAP_COMPONENT" ] ; then
+ # disable all components and enable only selected in next loop
+ sed -i '/^.*:$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml
+ echo -n "Enable following ONAP components:"
+ for COMPONENT in $ONAP_COMPONENT; do
+ echo -n " \$COMPONENT"
+ sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *false/enabled: true/' onap/values.yaml
+ done
+ echo
+else
+ echo "All ONAP components will be installed"
+fi
+
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz
+mv linux-${ARCH}/helm /usr/local/bin/helm
+helm init --upgrade --service-account tiller
+# run helm server on the background and detached from current shell
+nohup helm serve 0<&- &>/dev/null &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 5m
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+if ( ! helm install local/onap -n dev --namespace $NAMESPACE) ; then
+ echo "ONAP installation has failed at \$(date)"
+ exit 1
+fi
+
+cd ../../
+
+echo "Waiting for ONAP pods to be up \$(date)"
+echo "Ignore failure of sdnc-ansible-server, see SDNC-443"
+function get_onap_pods() {
+ kubectl get pods --namespace $NAMESPACE > $TMP_POD_LIST
+ return \$(cat $TMP_POD_LIST | wc -l)
+}
+FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs
+ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running
+WAIT_PERIOD=60 # wait period in seconds
+MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds
+MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD))
+COUNTER=0
+get_onap_pods
+ALL_PODS=\$?
+PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do
+ # print header every 20th line
+ if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then
+ printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs"
+ fi
+ COUNTER=\$((\$COUNTER+1))
+ printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS
+ sleep \$WAIT_PERIOD
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ ALL_PODS_LIMIT=0
+ fi
+ get_onap_pods
+ ALL_PODS=\$?
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+done
+
+get_onap_pods
+cp $TMP_POD_LIST ~/onap_all_pods.txt
+echo
+echo "========================"
+echo "ONAP INSTALLATION REPORT"
+echo "========================"
+echo
+echo "List of Failed PODs"
+echo "-------------------"
+grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt
+echo
+echo "Summary:"
+echo "--------"
+echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
+cd oom/kubernetes/robot
+./ete-k8s.sh $NAMESPACE health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
+OOMDEPLOY
+
+echo "Finished install, ruturned from Master at $(date)"
+exit 0
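
check_server_ips above rewrites the kubespray inventory whenever a server is
reached via an address (e.g. an OpenStack floating IP) that is not configured
on the server itself. A minimal sketch of that rewrite on a single inventory
line; the file name and the addresses are hypothetical:

    echo "node1 ansible_host=10.10.180.21 ip=10.10.180.21" > hosts.ini
    # replace the local ip= entry and keep the public address as access_ip=
    sed -i'' -e "s/ip=10.10.180.21/ip=192.168.0.5 access_ip=10.10.180.21/" hosts.ini
    cat hosts.ini   # node1 ansible_host=10.10.180.21 ip=192.168.0.5 access_ip=10.10.180.21
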
diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh
new file mode 100755
index 0000000..c34eb56
--- /dev/null
+++ b/ci/deploy-onap.sh
@@ -0,0 +1,376 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+# Usage:
+# onap-deploy.sh <MASTER> <SLAVE1> <SLAVE2>
+#
+# where <MASTER> and <SLAVE_IPx> are IP addresses of servers to be used
+# for ONAP installation.
+#
+# NOTE: Following must be assured for all MASTER and SLAVE servers before
+# onap-deploy.sh execution:
+# 1) ssh access without a password
+# 2) an user account with password-less sudo access must be
+# available - default user is "opnfv"
+
+#
+# Configuration
+#
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+RANCHER_CLI_VER=0.6.11
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+MASTER=$1
+SERVERS=$*
+shift
+SLAVES=$*
+
+BRANCH='beijing'
+ENVIRON='onap'
+
+SSH_USER=${SSH_USER:-"opnfv"}
+SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# by default install the full ONAP installation
+ONAP_COMPONENT_DISABLE=${ONAP_COMPONENT_DISABLE:-""}
+# example of minimal ONAP installation
+#ONAP_COMPONENT_DISABLE="clamp cli consul dcaegen2 esr log msb multicloud nbi oof policy uui vfc vnfsdk"
+
+# use identity file from the environment SSH_IDENTITY
+if [ -n "$SSH_IDENTITY" ] ; then
+ SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS"
+fi
+
+#
+# Installation
+#
+
+# use standalone K8S master if there are enough VMs available for the K8S cluster
+SERVERS_COUNT=$(echo $SERVERS | wc -w)
+if [ $SERVERS_COUNT -gt 2 ] ; then
+ RANCHER_SLAVES=$SLAVES
+else
+ RANCHER_SLAVES=$SERVERS
+fi
+
+echo "INSTALLATION TOPOLOGY:"
+echo "Rancher Master: $MASTER"
+echo "Rancher Slaves: $RANCHER_SLAVES"
+echo
+echo "INSTALLING DOCKER ON ALL MACHINES"
+echo "$SERVERS"
+
+for MACHINE in $SERVERS;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<DOCKERINSTALL &
+ sudo -i
+ sysctl -w vm.max_map_count=262144
+ apt-get update -y
+ curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+
+ mkdir -p /etc/systemd/system/docker.service.d/
+ echo "[Service]
+ ExecStart=
+ ExecStart=/usr/bin/dockerd -H fd:// \
+ --insecure-registry=nexus3.onap.org:10001"\
+ > /etc/systemd/system/docker.service.d/docker.conf
+
+ systemctl daemon-reload
+ systemctl restart docker
+ apt-mark hold docker-ce
+
+ for SERVER in $SERVERS;
+ do
+ echo "\$SERVER $ENVIRON\$(echo \$SERVER | cut -d. -f 4 )" >> /etc/hosts
+ done
+
+ hostname $ENVIRON\$(echo $MACHINE | cut -d. -f 4 )
+
+ echo "DOCKER INSTALLED ON $MACHINE"
+DOCKERINSTALL
+done
+wait
+
+echo "INSTALLING RANCHER ON MASTER"
+echo "$MASTER"
+
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<RANCHERINSTALL
+sudo -i
+echo "INSTALL STARTS"
+apt-get install -y jq make htop
+echo "Waiting for 30 seconds at \$(date)"
+sleep 30
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+docker run -d --restart=unless-stopped -p 8080:8080\
+ --name rancher_server rancher/server:v$RANCHER_VERSION
+curl -LO https://storage.googleapis.com/kubernetes-release/\
+release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+mv linux-amd64/helm /usr/local/bin/helm
+
+echo "Installing nfs server"
+# changed from nfs_share to dockerdata-nfs
+apt-get install nfs-kernel-server -y
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports
+service nfs-kernel-server restart
+
+echo "Waiting 10 minutes for Rancher to setup at \$(date)"
+sleep 10m
+echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER"
+wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\
+/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+cp rancher-v${RANCHER_CLI_VER}-rc2/rancher .
+
+API_RESPONSE=\`curl -s 'http://127.0.0.1:8080/v2-beta/apikey'\
+ -d '{"type":"apikey","accountId":"1a1","name":"autoinstall",\
+ "description":"autoinstall","created":null,"kind":null,\
+ "removeTime":null,"removed":null,"uuid":null}'\`
+# Extract and store token
+echo "API_RESPONSE: \${API_RESPONSE}"
+KEY_PUBLIC=\`echo \${API_RESPONSE} | jq -r .publicValue\`
+KEY_SECRET=\`echo \${API_RESPONSE} | jq -r .secretValue\`
+echo "publicValue: \$KEY_PUBLIC secretValue: \$KEY_SECRET"
+
+export RANCHER_URL=http://${MASTER}:8080
+export RANCHER_ACCESS_KEY=\$KEY_PUBLIC
+export RANCHER_SECRET_KEY=\$KEY_SECRET
+
+./rancher env ls
+echo "Creating kubernetes environment named ${ENVIRON}"
+./rancher env create -t kubernetes $ENVIRON > kube_env_id.json
+PROJECT_ID=\$(<kube_env_id.json)
+echo "env id: \$PROJECT_ID"
+
+echo "Waiting for ${ENVIRON} creation - 1 min at \$(date)"
+sleep 1m
+
+export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID
+echo "you should see an additional kubernetes environment"
+./rancher env ls
+
+REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\
+ -H 'Accept: application/json'\
+ -H 'ContentType: application/json'\
+ -d '{"name":"$MASTER"}'\
+ "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\`
+echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE"
+echo "Waiting for the server to finish url configuration - 1 min at \$(date)"
+sleep 1m
+# see registrationUrl in
+REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\`
+echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS"
+REGISTRATION_URL=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].registrationUrl\`
+REGISTRATION_DOCKER=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].image\`
+REGISTRATION_TOKEN=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].token\`
+echo "Registering host for image: \$REGISTRATION_DOCKER\
+ url: \$REGISTRATION_URL registrationToken: \$REGISTRATION_TOKEN"
+HOST_REG_COMMAND=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].command\`
+
+# base64 encode the kubectl token from the auth pair
+# generate this after the host is registered
+KUBECTL_TOKEN=\$(echo -n 'Basic '\$(echo\
+ -n "\$RANCHER_ACCESS_KEY:\$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+echo "KUBECTL_TOKEN base64 encoded: \${KUBECTL_TOKEN}"
+
+# add kubectl config - NOTE: the following spacing has to be "exact"
+# or kubectl will not connect - with a localhost:8080 error
+echo 'apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$MASTER:8080/r/projects/'\$PROJECT_ID'/kubernetes:6443"
+ name: "${ENVIRON}"
+contexts:
+- context:
+ cluster: "${ENVIRON}"
+ user: "${ENVIRON}"
+ name: "${ENVIRON}"
+current-context: "${ENVIRON}"
+users:
+- name: "${ENVIRON}"
+ user:
+ token: "'\${KUBECTL_TOKEN}'" ' > ~/.kube/config
+
+echo "docker run --rm --privileged\
+ -v /var/run/docker.sock:/var/run/docker.sock\
+ -v /var/lib/rancher:/var/lib/rancher\
+ \$REGISTRATION_DOCKER\
+ \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\
+ > /tmp/rancher_register_host
+chown $SSH_USER /tmp/rancher_register_host
+
+RANCHERINSTALL
+
+echo "REGISTER TOKEN"
+HOSTREGTOKEN=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" cat /tmp/rancher_register_host)
+echo "$HOSTREGTOKEN"
+
+echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'"
+echo "$RANCHER_SLAVES"
+
+for MACHINE in $RANCHER_SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<REGISTERHOST &
+ sudo -i
+ $HOSTREGTOKEN
+ sleep 5
+ echo "Host $MACHINE waiting for host registration 5 min at \$(date)"
+ sleep 5m
+REGISTERHOST
+done
+wait
+
+echo "CONFIGURING NFS ON SLAVES"
+echo "$SLAVES"
+
+for SLAVE in $SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS &
+ sudo -i
+ apt-get install nfs-common -y
+ mkdir /dockerdata-nfs
+ chmod 777 /dockerdata-nfs
+ echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab
+ mount -a
+ mount | grep dockerdata-nfs
+CONFIGURENFS
+done
+wait
+
+echo "DEPLOYING OOM ON RANCHER WITH MASTER"
+echo "$MASTER"
+TMP_POD_LIST='/tmp/onap_pod_list.txt'
+
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY
+sudo -i
+rm -rf oom
+echo "pulling new oom"
+git clone -b $BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+
+# Disable ONAP components
+if [ -n "$ONAP_COMPONENT_DISABLE" ] ; then
+ echo -n "Disable following ONAP components:"
+ for COMPONENT in $ONAP_COMPONENT_DISABLE; do
+ echo -n " \$COMPONENT"
+ sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml
+ done
+ echo
+fi
+
+helm init --upgrade
+# run helm server on the background and detached from current shell
+nohup helm serve 0<&- &>/dev/null &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 5m
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+if ( ! helm install local/onap -n dev --namespace $ENVIRON) ; then
+ echo "ONAP installation has failed at \$(date)"
+ exit 1
+fi
+
+cd ../../
+
+echo "Waiting for ONAP pods to be up \$(date)"
+echo "Ignore failure of sdnc-ansible-server, see SDNC-443"
+function get_onap_pods() {
+ kubectl get pods --namespace $ENVIRON > $TMP_POD_LIST
+ return \$(cat $TMP_POD_LIST | wc -l)
+}
+FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs
+ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running
+WAIT_PERIOD=60 # wait period in seconds
+MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds
+MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD))
+COUNTER=0
+get_onap_pods
+ALL_PODS=\$?
+PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do
+ # print header every 20th line
+ if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then
+ printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs"
+ fi
+ COUNTER=\$((\$COUNTER+1))
+ printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS
+ sleep \$WAIT_PERIOD
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ ALL_PODS_LIMIT=0
+ fi
+ get_onap_pods
+ ALL_PODS=\$?
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+done
+
+get_onap_pods
+cp $TMP_POD_LIST ~/onap_all_pods.txt
+echo
+echo "========================"
+echo "ONAP INSTALLATION REPORT"
+echo "========================"
+echo
+echo "List of Failed PODs"
+echo "-------------------"
+grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt
+echo
+echo "Summary:"
+echo "--------"
+echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
+cd oom/kubernetes/robot
+./ete-k8s.sh $ENVIRON health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
+OOMDEPLOY
+
+echo "Finished install, ruturned from Master at $(date)"
+exit 0
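
The kubectl token generated above is the Rancher API key pair encoded twice:
"ACCESS_KEY:SECRET_KEY" is base64-encoded, prefixed with "Basic ", and the
whole string is base64-encoded again. A minimal sketch of that encoding with
placeholder key values (both keys below are hypothetical):

    RANCHER_ACCESS_KEY="ABCDEF0123"
    RANCHER_SECRET_KEY="secretValueExample"
    # inner encoding of the key pair, then outer encoding of the "Basic ..." string
    KUBECTL_TOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
    echo "$KUBECTL_TOKEN"
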
diff --git a/ci/deploy-opnfv-apex-centos.sh b/ci/deploy-opnfv-apex-centos.sh
new file mode 100644
index 0000000..a3a0433
--- /dev/null
+++ b/ci/deploy-opnfv-apex-centos.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##################################################################################
+## installation of OpenStack via OPNFV Apex/TripleO, on CentOS, virtual deployment
+##################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/virtual.html
+
+echo "*** begin AUTO install: OPNFV Apex/TripleO"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7 or more)"
+cat /etc/*release
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# 3 additional pre-installation preparations, lifted from OPNFV/storperf (they are post-installation there):
+# https://wiki.opnfv.org/display/storperf/LaaS+Setup+For+Development#LaaSSetupForDevelopment-InstallOPNFVApex
+# (may or may not be needed, to enable a first-time Apex installation on a blank server)
+
+# 1) Install Docker
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install -y docker-ce
+sudo systemctl start docker
+
+# 2) Install docker-compose
+sudo curl -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+
+# 3) Install Python
+sudo yum install -y python-virtualenv
+sudo yum groupinstall -y "Development Tools"
+sudo yum install -y openssl-devel
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+
+# download Apex packages
+echo "*** downloading packages:"
+sudo yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm
+sudo yum -y install epel-release
+# note: EPEL = Extra Packages for Enterprise Linux
+sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo
+
+# install three required RPMs (RedHat/RPM Package Managers); this takes several minutes
+sudo yum -y install http://artifacts.opnfv.org/apex/fraser/opnfv-apex-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-undercloud-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-python34-6.2.noarch.rpm
+
+# clean-up old Apex versions if any
+## precautionary opnfv-clean doesn't work... (even though packages are installed at this point)
+opnfv-clean
+
+# Manage DNS references
+# probably not needed on an already configured server: already has DNS references
+# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "*** printout of /etc/resolv.conf :"
+cat /etc/resolv.conf
+
+# prepare installation directory
+mkdir -p /opt/opnfv-TripleO-apex
+cd /opt/opnfv-TripleO-apex
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# 2 YAML files from /etc/opnfv-apex/ are needed for virtual deploys:
+# 1) network_settings.yaml : may need to update NIC names, to match the NIC names on the deployment server
+# 2) standard scenario file (os-nosdn-nofeature-noha.yaml, etc.), or customized deploy_settings.yaml
+
+# make a local copy of YAML files (not necessary: could deploy from /etc/opnfv-apex); local copies are just for clarity
+# 1) network settings
+cp /etc/opnfv-apex/network_settings.yaml .
+# 2) deploy settings
+# copy one of the 40+ pre-defined scenarios (one of the YAML files)
+# for extra customization, git clone Apex repo, and copy and customize the generic deploy_settings.yaml
+# git clone https://git.opnfv.org/apex
+# cp ./apex/config/deploy/deploy_settings.yaml .
+cp /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml ./deploy_settings.yaml
+# cp /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml ./deploy_settings.yaml
+
+# Note: content of os-nosdn-nofeature-noha.yaml
+# ---
+# global_params:
+# ha_enabled: false
+#
+# deploy_options:
+# sdn_controller: false
+# tacker: true
+# congress: true
+# sfc: false
+# vpn: false
+
+
+# modify NIC names in network settings YAML file, specific to your environment (e.g. replace em1 with ens4f0 in LaaS)
+# Note: actually, this should not matter for a virtual environment
+sed -i 's/em1/ens4f0/' network_settings.yaml
+
+# launch deploy (works if openvswitch module is installed, which may not be the case the first time around)
+echo "*** deploying OPNFV by TripleO/Apex:"
+# --debug for detailed debug info
+# -v: Enable virtual deployment
+# note: needs at least 10G RAM for controllers
+sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml
+# without --debug:
+# sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+
+# with specific sizing:
+# sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml --virtual-compute-ram 32 --virtual-cpus 16 --virtual-computes 4
+
+
+# verify that the openvswitch module is listed:
+lsmod | grep openvswitch
+grep openvswitch < /proc/modules
+
+##{
+## workaround: do 2 successive installations... not exactly optimal...
+## clean up, as now opnfv-clean should work
+#opnfv-clean
+## second deploy try, should succeed (whether first one failed or succeeded)
+#sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+##}
+
+
+
+# verifications: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/verification.html
+
+# {
+# if error after deploy.sh: "libvirt.libvirtError: Storage pool not found: no storage pool with matching name 'default'"
+
+# This usually happens if for some reason you are missing a default pool in libvirt:
+# $ virsh pool-list |grep default
+# You can recreate it manually:
+# $ virsh pool-define-as default dir --target /var/lib/libvirt/images/
+# $ virsh pool-autostart default
+# $ virsh pool-start default
+# }
+
+# {
+# if error after deploy.sh: iptc.ip4tc.IPTCError
+# check Apex jira ticket #521 https://jira.opnfv.org/browse/APEX-521
+# }
+
+# OpenvSwitch should not be missing, as it is a requirement from the RPM package:
+# https://github.com/opnfv/apex/blob/stable/fraser/build/rpm_specs/opnfv-apex-common.spec#L15
+
+
+
+# install python 3 on CentOS
+echo "*** begin install python 3.6 (3.4 should be already installed by default)"
+
+sudo yum -y install python36
+# install pip and setup tools
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+sudo /usr/bin/python3.6 get-pip.py --no-warn-script-location
+
+
+
+echo "*** end AUTO install: OPNFV Apex/TripleO"
+
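The verification comments above mention the "Storage pool not found" failure
that can follow opnfv-deploy. A minimal sketch that turns the commented virsh
commands into a runnable check, assuming the default libvirt image path used
in those comments:

    # recreate the libvirt "default" storage pool only if it is missing
    virsh pool-list | grep -q default || {
        virsh pool-define-as default dir --target /var/lib/libvirt/images/
        virsh pool-autostart default
        virsh pool-start default
    }
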
diff --git a/ci/deploy-opnfv-compass-ubuntu.sh b/ci/deploy-opnfv-compass-ubuntu.sh
new file mode 100644
index 0000000..efccf78
--- /dev/null
+++ b/ci/deploy-opnfv-compass-ubuntu.sh
@@ -0,0 +1,201 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+#################################################################################
+## installation of OpenStack via OPNFV Compass4nfv, on Ubuntu, virtual deployment
+#################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Compass4nfv"
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-compass
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# preemptively install the latest pip and clear the $PATH cache
+# with apt-get (see apt-get -h and man apt-get for details)
+apt-get -y update
+apt-get -y upgrade
+apt-get -y install python-pip
+pip install --upgrade pip
+hash -r
+apt-get -y install python3-openstackclient
+apt-get -y autoremove
+
+## note: apt is more recent than apt-get (apt was formally introduced with Ubuntu 16.04)
+## APT: Advanced Packaging Tool; apt is the higher-level front-end, while apt-get exposes more low-level options;
+# apt -y update # Refreshes repository index
+# apt -y full-upgrade # Upgrades packages with auto-handling of dependencies
+# apt -y install python-pip
+# pip install --upgrade pip
+# hash -r
+# apt -y install python3-openstackclient
+# apt -y autoremove
+
+
+# 2 options: (option 1 is preferable)
+# 1) remain in master branch, use build.sh (which builds a tar ball), then launch deploy.sh
+# 2) download a tar ball and launch deploy.sh in a branch matching the tar ball release (e.g. fraser 6.2)
+
+
+##############
+# OPTION 1: build.sh + deploy.sh in master branch
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+echo "*** begin download Compass4nfv repository"
+git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+cd compass4nfv
+
+# launch build script
+echo "*** begin Compass4nfv build:"
+./build.sh |& tee log1-Build.txt
+
+# edit in deploy.sh specific to OPTION 1
+# set path to ISO file (tar ball), as built by build.sh previously
+# absolute path to tar ball file URL (MUST be absolute path)
+sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/compass4nfv/work/building/compass.tar.gz' deploy.sh
+
+# END OPTION 1
+##############
+
+
+##############
+# OPTION 2: tar ball + deploy.sh in matching releases/branches
+
+# download tarball of a certain release/version
+#echo "*** begin download Compass4nfv tar ball"
+#wget http://artifacts.opnfv.org/compass4nfv/fraser/opnfv-6.2.tar.gz
+# note: list of tar ball (ISO) files from Compass4NFV in https://artifacts.opnfv.org/compass4nfv.html
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+#echo "*** begin download Compass4nfv repository"
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+#cd compass4nfv
+# note: list of compass4nfv branch names in https://gerrit.opnfv.org/gerrit/#/admin/projects/compass4nfv,branches
+# checkout to branch (or tag) matching the tarball release
+#git checkout stable/fraser
+
+# edit in deploy.sh specific to OPTION 2
+# set path to ISO file (tar ball), as downloaded previously
+# absolute path to tar ball file URL (MUST be absolute path)
+# sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/opnfv-6.2.tar.gz' deploy.sh
+
+# END OPTION 2
+##############
+
+
+# edit remaining deploy.sh entries as needed
+
+# set operating system version: Ubuntu Xenial Xerus
+sed -i '/#export OS_VERSION=xenial\/centos7/a export OS_VERSION=xenial' deploy.sh
+
+# set path to OPNFV scenario / DHA (Deployment Hardware Adapter) YAML file
+# here, os-nosdn-nofeature-noha scenario
+sed -i '/#export DHA=/a export DHA=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/os-nosdn-nofeature-noha.yml' deploy.sh
+
+# set path to network YAML file
+sed -i '/#export NETWORK=/a export NETWORK=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/network.yml' deploy.sh
+
+# append parameters for virtual machines (for virtual deployments); e.g., 2 nodes for NOHA scenario, 5 for HA, etc.
+# note: this may not be needed in a future release of Compass4nfv
+
+# VIRT_NUMBER – the number of nodes for virtual deployment.
+# VIRT_CPUS – the number of CPUs allocated per virtual machine.
+# VIRT_MEM – the memory size (MB) allocated per virtual machine.
+# VIRT_DISK – the disk size allocated per virtual machine.
+
+# if OPTION 1 (master): OPENSTACK_VERSION is queens, so add the VIRT_NUMBER line after the queens match
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_CPUS=4' deploy.sh
+sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh
+
+# if OPTION 2 (stable/fraser): OPENSTACK_VERSION is pike, so add the VIRT_NUMBER line after the pike match
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_CPUS=4' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_NUMBER=5' deploy.sh
+
+
+# launch deploy script
+echo "*** begin Compass4nfv deploy:"
+./deploy.sh |& tee log2-Deploy.txt
+
+
+
+
+# To access OpenStack Horizon GUI in Virtual deployment
+# source: https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass
+
+# confirm IP@ of the current server (jump server, such as 10.10.100.xyz on LaaS: 10.10.100.42 for hpe32, etc.)
+external_nic=$(ip route | grep '^default' | awk '{print $5}')
+echo "external_nic: $external_nic"
+ip addr show "$external_nic"
+
+# Config IPtables rules: pick an unused port number, e.g. 50000+machine number, 50032 for hpe32 at 10.10.100.42
+# 192.16.1.222:443 is the OpenStack Horizon GUI after a Compass installation
+# syntax: iptables -t nat -A PREROUTING -d $EX_IP -p tcp --dport $PORT -j DNAT --to 192.16.1.222:443
+# (note: this could be automated: retrieve the IP address, then pick a port number; see the sketch below)
+
+# example: hpe15
+# iptables -t nat -A PREROUTING -d 10.10.100.25 -p tcp --dport 50015 -j DNAT --to 192.16.1.222:443
+# example: hpe33
+# iptables -t nat -A PREROUTING -d 10.10.100.43 -p tcp --dport 50033 -j DNAT --to 192.16.1.222:443
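+
+# illustrative sketch of automating the rule above (assumes LaaS-style addressing where the machine
+# number is the last octet of the jump server IP minus 10, as in the hpe15/hpe33 examples):
+# EX_IP=$(ip -4 addr show "$external_nic" | awk '/inet /{sub(/\/.*$/,"",$2); print $2; exit}')
+# PORT=$((50000 + $(echo "$EX_IP" | cut -d'.' -f4) - 10))
+# iptables -t nat -A PREROUTING -d "$EX_IP" -p tcp --dport "$PORT" -j DNAT --to 192.16.1.222:443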
+
+# display IPtables NAT rules
+iptables -t nat -L
+
+# Enter https://$EX_IP:$PORT in your browser to visit the OpenStack Horizon dashboard
+# examples: https://10.10.100.25:50015 , https://10.10.100.43:50033
+# The default user is "admin"
+# to get the Horizon password for "admin":
+sudo docker cp compass-tasks:/opt/openrc ./
+sudo cat openrc | grep OS_PASSWORD
+source ./openrc
+
+# for OpenStack CLI (generic content from openrc)
+export OS_ENDPOINT_TYPE=publicURL
+export OS_INTERFACE=publicURL
+export OS_USERNAME=admin
+export OS_PROJECT_NAME=admin
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://192.16.1.222:5000/v3
+export OS_NO_CACHE=1
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_REGION_NAME=RegionOne
+
+# For openstackclient
+export OS_IDENTITY_API_VERSION=3
+export OS_AUTH_VERSION=3
+
+
+
+echo "*** end AUTO install: OPNFV Compass4nfv"
+
diff --git a/ci/deploy-opnfv-daisy-centos.sh b/ci/deploy-opnfv-daisy-centos.sh
new file mode 100644
index 0000000..664ba55
--- /dev/null
+++ b/ci/deploy-opnfv-daisy-centos.sh
@@ -0,0 +1,179 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+
+###############################################################################
+## installation of OpenStack via OPNFV Daisy4nfv, on CentOS, virtual deployment
+###############################################################################
+# reference manual: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/index.html#daisy-installation
+# page for virtual deployment: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Daisy4nfv"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7.2 or more)"
+cat /etc/*release
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# download tools: git, kvm, libvirt, python-yaml
+sudo yum -y install git
+sudo yum -y install kvm
+sudo yum -y install libvirt
+sudo yum info libvirt
+sudo yum info qemu-kvm
+sudo yum -y install python-yaml
+
+
+# make sure SELinux is enforced (Security-Enhanced Linux)
+sudo setenforce 1
+echo "getenforce: $(getenforce)"
+
+# Restart the libvirtd daemon:
+sudo service libvirtd restart
+# Verify that the kvm module is loaded; you should see kvm_amd or kvm_intel depending on the hardware:
+lsmod | grep kvm
+# Note: to test, issue a virsh command to ensure local root connectivity:
+# sudo virsh sysinfo
+
+
+
+# update everything (note: 'yum upgrade' is riskier than 'yum update', because it also removes obsolete packages)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+# prepare Daisy installation directory
+export INSTALLDIR=/opt/opnfv-daisy
+mkdir $INSTALLDIR
+cd $INSTALLDIR
+
+# oslo-config, needed in daisy/deploy/get_conf.py
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+python get-pip.py --no-warn-script-location
+pip install --upgrade oslo-config
+
+
+# retrieve Daisy4nfv repository
+git clone https://gerrit.opnfv.org/gerrit/daisy
+cd daisy
+
+
+
+# OPTION 1: master repo and latest bin file: May 17th 2018
+# Download latest bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+curl http://artifacts.opnfv.org/daisy/opnfv-2018-05-17_14-00-32.bin -o opnfv.bin
+# make opnfv.bin executable
+chmod 777 opnfv.bin
+
+# OPTION 2: stable release: Fraser 6.0 (so, checkout to stable Fraser release opnfv-6.0)
+# Download matching bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+#git checkout opnfv.6.0 # as per Daisy4nfv instructions, but does not work
+#git checkout stable/fraser
+#curl http://artifacts.opnfv.org/daisy/fraser/opnfv-6.0.iso -o opnfv.bin
+# make opnfv.bin executable
+#chmod 777 opnfv.bin
+
+
+
+# The deploy.yaml file is the inventory template of deployment nodes:
+# error from doc: "./deploy/conf/vm_environment/zte-virtual1/deploy.yml"
+# correct path: "./deploy/config/vm_environment/zte-virtual1/deploy.yml"
+# You can write your own name/roles reference into it:
+# name – Host name for deployment node after installation.
+# roles – Components deployed.
+# note: ./templates/virtual_environment/ contains xml files, for networks and VMs
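+
+# illustrative sketch of the name/roles structure in deploy.yml (role names below are placeholders;
+# the default zte-virtual1/zte-virtual2 files copied below are the authoritative reference):
+# hosts:
+#   - name: node1        # host name for the deployment node after installation
+#     roles:             # components deployed on this node
+#       - <role-name>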
+
+
+# prepare config dir for Auto lab in daisy dir, and copy deploy and network YAML files from default files (virtual1 or virtual2)
+export AUTO_DAISY_LAB_CONFIG1=labs/auto_daisy_lab/virtual1/daisy/config
+export DAISY_DEFAULT_ENV1=deploy/config/vm_environment/zte-virtual1
+mkdir -p $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/deploy.yml $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/network.yml $AUTO_DAISY_LAB_CONFIG1
+
+export AUTO_DAISY_LAB_CONFIG2=labs/auto_daisy_lab/virtual2/daisy/config
+export DAISY_DEFAULT_ENV2=deploy/config/vm_environment/zte-virtual2
+mkdir -p $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/deploy.yml $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/network.yml $AUTO_DAISY_LAB_CONFIG2
+
+# Note:
+# - zte-virtual1 config files deploy OpenStack with five nodes (3 LB nodes and 2 compute nodes).
+# - zte-virtual2 config files deploy an all-in-one OpenStack.
+
+# run deploy script, scenario os-nosdn-nofeature-ha, multinode OpenStack
+sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual1 -s os-nosdn-nofeature-ha
+
+# run deploy script, scenario os-nosdn-nofeature-noha, all-in-one OpenStack
+# sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual2 -s os-nosdn-nofeature-noha
+
+
+# Notes about deploy.sh:
+# The value after -L should be an absolute path which points to the directory which includes $AUTO_DAISY_LAB_CONFIG directory.
+# The value after -p parameter (virtual1 or virtual2) should match the one selected for $AUTO_DAISY_LAB_CONFIG.
+# The value after -l parameter (e.g. auto_daisy_lab) should match the lab name selected for $AUTO_DAISY_LAB_CONFIG, after labs/ .
+# Scenario (-s parameter): "os-nosdn-nofeature-ha" is used for deploying multinode OpenStack (virtual1)
+# Scenario (-s parameter): "os-nosdn-nofeature-noha" is used for deploying all-in-one OpenStack (virtual2)
+
+# more details on deploy.sh OPTIONS:
+# -B PXE Bridge for booting Daisy Master, optional
+# -D Dry-run, does not perform deployment, will be deleted later
+# -L Securelab repo absolute path, optional
+# -l LAB name, necessary
+# -p POD name, necessary
+# -r Remote workspace in target server, optional
+# -w Workdir for temporary usage, optional
+# -h Print this message and exit
+# -s Deployment scenario
+# -S Skip recreate Daisy VM during deployment
+
+# When deployed successfully, the OpenStack floating IP is 10.20.11.11, the login account is "admin" and the password is "keystone"
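+
+# illustrative environment variables for OpenStack CLI access to this deployment
+# (the auth URL port/path is an assumption, based on a standard Keystone v3 endpoint):
+# export OS_AUTH_URL=http://10.20.11.11:5000/v3
+# export OS_USERNAME=admin
+# export OS_PASSWORD=keystone
+# export OS_PROJECT_NAME=admin
+# export OS_USER_DOMAIN_NAME=Default
+# export OS_PROJECT_DOMAIN_NAME=Default
+# export OS_IDENTITY_API_VERSION=3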
diff --git a/ci/deploy-opnfv-fuel-ubuntu.sh b/ci/deploy-opnfv-fuel-ubuntu.sh
new file mode 100644
index 0000000..db276b2
--- /dev/null
+++ b/ci/deploy-opnfv-fuel-ubuntu.sh
@@ -0,0 +1,199 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##############################################################################
+## installation of OpenStack via OPNFV Fuel/MCP, on Ubuntu, virtual deployment
+##############################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/installation.instruction.html#opnfv-software-installation-and-deployment
+
+# Steps:
+# step 1: download Fuel/MCP repository and run deploy script
+# (this example: x86, virtual deploy, os-nosdn-nofeature-noha scenario)
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+# step 3: add more resources to OpenStack instance (vCPUs, RAM)
+# step 4: download Auto repository
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+
+echo "*** begin AUTO install: OPNFV Fuel/MCP"
+
+
+# step 1: download Fuel/MCP repository and run deploy script
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-fuel
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# get Fuel repository
+git clone https://git.opnfv.org/fuel
+# cd into the new fuel repository, which contains directories: mcp, ci, etc.
+# note: this is for x86_64 architectures; for aarch64 architectures, git clone https://git.opnfv.org/armband and cd armband instead
+cd fuel
+
+# edit NOHA scenario YAML file with more resources for compute nodes: 32 vCPUs, 192G RAM
+{ printf " cmp01:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n";\
+ printf " cmp02:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n"; } >> mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+# provide more storage space to VMs: 350G per compute node (default is 100G)
+sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+# launch OPNFV Fuel/MCP deploy script
+ci/deploy.sh -l local -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+
+
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+
+# install python 3 on Ubuntu
+echo "*** begin install python 3"
+sudo apt-get -y update
+sudo apt-get -y install python3
+# maybe clean-up packages
+# sudo apt -y autoremove
+# specific install of a python version, e.g. 3.6
+# sudo apt-get install python3.6
+
+# http://docs.python-guide.org/en/latest/starting/install3/linux/
+# sudo apt-get install software-properties-common
+# sudo add-apt-repository ppa:deadsnakes/ppa
+# sudo apt-get update
+# sudo apt-get install python3.6
+echo "python2 --version: $(python2 --version)"
+echo "python3 --version: $(python3 --version)"
+echo "which python: $(which python)"
+
+# install pip3 for python3; /usr/local/bin/pip3 vs. /usr/bin/pip3; solve with "hash -r"
+echo "*** begin install pip3 for python3"
+apt -y install python3-pip
+hash -r
+pip3 install --upgrade pip
+hash -r
+
+echo "\$PATH: $PATH"
+echo "which pip: $(which pip)"
+echo "which pip3: $(which pip3)"
+
+# install OpenStack SDK Python client
+echo "*** begin install OpenStack SDK Python client"
+pip3 install openstacksdk
+pip3 install --upgrade openstacksdk
+
+# install OpenStack CLI
+echo "*** begin install OpenStack CLI"
+pip3 install python-openstackclient
+pip3 install --upgrade python-openstackclient
+
+pip3 install --upgrade python-keystoneclient
+pip3 install --upgrade python-neutronclient
+pip3 install --upgrade python-novaclient
+pip3 install --upgrade python-glanceclient
+pip3 install --upgrade python-cinderclient
+
+# install OpenStack Heat (may not be installed by default), may be useful for VNF installation
+#apt install python3-heatclient
+echo "*** begin install OpenStack Heat"
+pip3 install --upgrade python-heatclient
+
+# package verification printouts
+echo "*** begin package verification printouts"
+pip3 list
+pip3 show openstacksdk
+pip3 check
+
+
+
+# step 3: add more resources to OpenStack instance
+
+# now that OpenStack CLI is installed, finish Fuel/MCP installation:
+# take extra resources indicated in os-nosdn-nofeature-noha.yaml into account as quotas in the OpenStack instance
+# (e.g. 2 compute nodes with 32 vCPUs and 192G RAM each => 64 cores and 384G=393,216M RAM)
+# set the required environment variables (hard-coded here, since they are always the same for a Fuel/MCP deployment; this could be parameterized later)
+
+export OS_AUTH_URL=http://10.16.0.107:5000/v3
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+export OS_USERNAME="admin"
+export OS_PASSWORD="opnfv_secret"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
+
+# at this point, openstack CLI commands should work
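+# quick sanity check (illustrative): list the Keystone endpoints to confirm authentication works
+# openstack endpoint list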
+echo "*** finish install OPNFV Fuel/MCP"
+openstack quota set --cores 64 admin
+openstack quota set --ram 393216 admin
+
+
+
+# step 4: download Auto repository
+
+# install OPNFV Auto
+# prepare install directory
+echo "*** begin install OPNFV Auto"
+mkdir -p /opt/opnfv-Auto
+cd /opt/opnfv-Auto
+# get Auto repository from Gerrit
+git clone https://gerrit.opnfv.org/gerrit/auto
+# cd into the new auto repository, which contains directories: lib, setup, ci, etc.
+cd auto
+
+
+
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+# download images used by script, unless downloading images from URL works from the script
+echo "*** begin download images"
+cd setup/VIMs/OpenStack
+mkdir images
+cd images
+#CirrOS
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
+# Ubuntu 16.04 LTS (Xenial Xerus)
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img
+# Ubuntu 14.04.5 LTS (Trusty Tahr)
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img
+
+# launch script to populate the OpenStack instance
+echo "*** begin populate OpenStack instance with ONAP objects"
+cd ..
+python3 auto_script_config_openstack_for_onap.py
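+# the same script can also tear these objects down again (per the Auto config guide, via the -del/--delete option); illustrative:
+# python3 auto_script_config_openstack_for_onap.py --delete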
+
+echo "*** end AUTO install: OPNFV Fuel/MCP"
+
diff --git a/ci/plot-results.sh b/ci/plot-results.sh
new file mode 100755
index 0000000..22ab1d6
--- /dev/null
+++ b/ci/plot-results.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright 2017-2018 Intel Corporation., Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for graphical representation of AUTO result summaries
+#
+# Usage:
+#   ./plot-results.sh [directory]
+#
+# where:
+#   "directory" is an optional directory name where the summaries of the auto
+#   installation reports are stored
+#   Default value: "$HOME/auto_ci_daily_logs"
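+#
+# Example (hypothetical path):
+#   ./plot-results.sh /tmp/auto_ci_daily_logs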
+
+NUMBER_OF_RESULTS=50 # max number of recent results to be compared in graph
+DIR="${1:-$HOME/auto_ci_daily_logs}"   # optional first argument overrides the default logs directory
+
+function clean_data() {
+ rm -rf summary.csv
+ rm -rf graph*plot
+ rm -rf graph*txt
+ rm -rf graph*png
+}
+
+function prepare_data() {
+ FIRST=1
+ CSV_LIST=$(ls -1 ${DIR}/deploy_summary*csv | tail -n ${NUMBER_OF_RESULTS})
+ for result_file in $CSV_LIST ; do
+        tmp_dir=$(dirname "$result_file")
+        TIMESTAMP=$(basename "$tmp_dir" | cut -d'_' -f2-)
+ if [ $FIRST -eq 1 ] ; then
+ head -n1 $result_file > summary.csv
+ FIRST=0
+ fi
+ tail -n+2 ${result_file} >> summary.csv
+ done
+}
+
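+# Note on summary.csv columns (inferred from the plots below; illustrative):
+# column 1 holds a %Y%m%d_%H%M%S timestamp, columns 3-4 hold POD counts (plotted in graph_pods),
+# and columns 5-6 hold health test case counts (plotted in graph_tcs); row 1 is the CSV header.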
+function plot_data() {
+ echo "Created graphs:"
+ for TYPE in png txt; do
+ for GRAPH in "graph_pods" "graph_tcs" ; do
+ OUTPUT="$GRAPH.plot"
+ GRAPH_NAME="${GRAPH}.${TYPE}"
+ cat > $OUTPUT <<- EOM
+set datafile separator ","
+set xdata time
+set timefmt "%Y%m%d_%H%M%S"
+set format x "%m-%d"
+set xlabel "date"
+set format y "%8.0f"
+EOM
+ if [ "$TYPE" == "png" ] ; then
+ echo 'set term png size 1024,768' >> $OUTPUT
+ else
+ echo 'set term dumb 100,30' >> $OUTPUT
+ fi
+
+ if [ "$GRAPH" == "graph_pods" ] ; then
+ echo 'set ylabel "PODs"' >> $OUTPUT
+ echo 'set yrange [0:]' >> $OUTPUT
+ echo "set title \"ONAP K8S PODs\"" >> $OUTPUT
+ COL1=3
+ COL2=4
+ else
+ echo 'set ylabel "testcases"' >> $OUTPUT
+ echo 'set yrange [0:]' >> $OUTPUT
+ echo "set title \"ONAP Health TestCases\"" >> $OUTPUT
+ COL1=5
+ COL2=6
+ fi
+
+ iter=0
+ echo "set output \"$GRAPH_NAME\"" >> $OUTPUT
+ echo -n "plot " >> $OUTPUT
+ echo $"'summary.csv' using 1:$COL1 with linespoints title columnheader($COL1) \\" >> $OUTPUT
+ echo $", 'summary.csv' using 1:$COL2 with linespoints title columnheader($COL2) \\" >> $OUTPUT
+ gnuplot $OUTPUT
+ echo -e "\t$GRAPH_NAME"
+ done
+ done
+}
+
+#
+# Main body
+#
+clean_data
+prepare_data
+plot_data
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..3c4453e
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import *
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..ba6ee9d
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: AUTO
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..9e0614b
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,18 @@
+.. _auto:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV Auto (ONAP-Automated OPNFV)
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/configguide/index
+ release/userguide/index
+ release/release-notes/index
diff --git a/docs/release/configguide/Auto-featureconfig.rst b/docs/release/configguide/Auto-featureconfig.rst
index 4e9705f..15126a8 100644
--- a/docs/release/configguide/Auto-featureconfig.rst
+++ b/docs/release/configguide/Auto-featureconfig.rst
@@ -14,57 +14,100 @@ and provides guidelines on how to perform configurations and additional installa
Goal
====
-The goal of `Auto <http://docs.opnfv.org/en/latest/release/release-notes.html>`_ installation and configuration is to prepare
-an environment where the `Auto use cases <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
-can be assessed, i.e. where the corresponding test cases can be executed and their results can be collected.
+The goal of :ref:`Auto <auto-releasenotes>` installation and configuration is to prepare an environment
+where the :ref:`Auto use cases <auto-userguide>` can be assessed, i.e. where the corresponding test cases
+can be executed and their results can be collected for analysis.
+See the :ref:`Auto Release Notes <auto-releasenotes>` for a discussion of the test results analysis loop.
An instance of ONAP needs to be present, as well as a number of deployed VNFs, in the scope of the use cases.
+Simulated traffic needs to be generated, and then test cases can be executed. There are multiple parameters to
+the Auto environment, and the same set of test cases will be executed on each environment, so as to be able to
+evaluate the influence of each environment parameter.
The initial Auto use cases cover:
-* Edge Cloud (increased autonomy and automation for managing Edge VNFs)
-* Resilience Improvements through ONAP (reduced recovery time for VNFs and end-to-end services in case of failure or suboptimal performance)
-* Enterprise vCPE (automation, cost optimization, and performance assurance of enterprise connectivity to Data Centers and the Internet)
+* **Edge Cloud** (increased autonomy and automation for managing Edge VNFs)
+* **Resilience Improvements through ONAP** (reduced recovery time for VNFs and end-to-end services in case of failure
+ or suboptimal performance)
+* **Enterprise vCPE** (automation, cost optimization, and performance assurance of enterprise connectivity to Data Centers
+ and the Internet)
-The general idea of Auto is to install an OPNFV environment (comprising at least one Cloud Manager),
+The general idea of the Auto feature configuration is to install an OPNFV environment (comprising at least one Cloud Manager),
an ONAP instance, ONAP-deployed VNFs as required by use cases, possibly additional cloud managers not
already installed during the OPNFV environment setup, traffic generators, and the Auto-specific software
-for the use cases (which can include test frameworks such as `Robot <http://robotframework.org/>`_ or `Functest <http://docs.opnfv.org/en/latest/submodules/functest/docs/release/release-notes/index.html#functest-releasenotes>`_).
+for the use cases (which can include test frameworks such as `Robot <http://robotframework.org/>`_ or
+:doc:`Functest <functest:release/release-notes>`).
+
The ONAP instance needs to be configured with policies and closed-loop controls (also as required by use cases),
-and the test framework controls the execution and result collection of all the test cases.
+and the test framework controls the execution and result collection of all the test cases. Then, test case execution
+results can be analyzed, so as to fine-tune policies and closed-loop controls, and to compare environment parameters.
-The following diagram illustrates two execution environments, for x86 architectures and for Arm architectures.
+The following diagram illustrates execution environments, for x86 architectures and for Arm architectures,
+and other environment parameters (see the Release Notes for a more detailed discussion on the parameters).
The installation process depends on the underlying architecture, since certain components may require a
specific binary-compatible version for a given x86 or Arm architecture. The preferred variant of ONAP is one
that runs on Kubernetes, while all VNF types are of interest to Auto: VM-based or containerized (on any cloud
-manager), for x86 or for Arm. The initial VM-based VNFs will cover OpenStack, and in future versions,
-additional cloud managers will be considered. The configuration of ONAP and of test cases should not depend
-on the architecture.
+manager), for x86 or for Arm. In fact, even PNFs could be considered, to support the evaluation of hybrid PNF/VNF
+transition deployments (ONAP is also able to manage legacy PNFs).
-.. image:: auto-installTarget-generic.jpg
+The initial VM-based VNFs will cover OpenStack, and in future Auto releases, additional cloud managers will be considered.
+The configuration of ONAP and of test cases should not depend on the underlying architecture and infrastructure.
+.. image:: auto-installTarget-generic.png
-For each component, various installer tools will be selected (based on simplicity and performance), and
-may change from one Auto release to the next. For example, the most natural installer for ONAP should be
-OOM (ONAP Operations Manager).
+
+For each component, various installer tools will be considered (as environment parameters), so as to enable comparison,
+as well as ready-to-use setups for Auto end-users. For example, the most natural installer for ONAP would be
+OOM (ONAP Operations Manager). For the OPNFV infrastructure, supported installer projects will be used: Fuel/MCP,
+Compass4NFV, Apex/TripleO, Daisy4NFV. Note that JOID was last supported in OPNFV Fraser 6.2, and is not supported
+anymore as of Gambia 7.0.
The initial version of Auto will focus on OpenStack VM-based VNFs, onboarded and deployed via ONAP API
-(not by ONAP GUI, for the purpose of automation). ONAP is installed on Kubernetes. Two servers from LaaS
-are used: one to support an OpenStack instance as provided by the OPNFV installation via Fuel/MCP, and
-the other to support ONAP with Kubernetes and Docker. Therefore, the VNF execution environment is the
-server with the OpenStack instance.
+(not by ONAP GUI, for the purpose of automation). ONAP is installed on Kubernetes. Two or more servers from LaaS
+are used: one or more to support an OpenStack instance as provided by the OPNFV installation via Fuel/MCP or other
+OPNFV installers (Compass4NFV, Apex/TripleO, Daisy4NFV), and the other(s) to support ONAP with Kubernetes
+and Docker. Therefore, the VNF execution environment is composed of the server(s) with the OpenStack instance(s).
+Initial tests will also include ONAP instances installed on bare-metal servers (i.e. not directly on an OPNFV
+infrastructure; the ONAP/OPNFV integration can start at the VNF environment level; but ultimately, ONAP should
+be installed within an OPNFV infrastructure, for full integration).
+
+.. image:: auto-installTarget-initial.png
+
+ONAP/K8S has several variants. The initial variant considered by Auto is the basic one recommended by ONAP,
+which relies on the Rancher installer and on OpenStack VMs providing VMs for the Rancher master and for the
+Kubernetes cluster workers, as illustrated below for ONAP-Beijing release:
-.. image:: auto-installTarget-initial.jpg
+.. image:: auto-installTarget-ONAP-B.png
-Jenkins will be used for Continuous Integration in OPNFV releases, to ensure that the latest master
-branch of Auto is always working.
+The OpenStack instance running VNFs may need to be configured as per ONAP expectations, for example creating
+instances of ONAP projects/tenants, users, security groups, networks (private, public), connected to the
+Internet by a Router, and making sure expected VM images and flavors are present. A script (using OpenStack
+SDK, or OpenStack CLI, or even OpenStack Heat templates) would populate the OpenStack instance, as illustrated below:
+
+.. image:: auto-OS-config4ONAP.png
+
+That script can also delete these created objects, so it can be used in tear-down procedures as well
+(use -del or --delete option). It is located in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ ,
+under the setup/VIMs/OpenStack directory:
+
+* auto_script_config_openstack_for_onap.py
+
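+For example, the script could be invoked as follows (an illustrative sketch, run from the
+setup/VIMs/OpenStack directory; the --delete form corresponds to the tear-down usage mentioned above):
+
+.. code-block:: console
+
+    python3 auto_script_config_openstack_for_onap.py             # populate the OpenStack instance
+    python3 auto_script_config_openstack_for_onap.py --delete    # tear down the created objects
+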
+
+Jenkins (or more precisely JJB: Jenkins Job Builder) will be used for Continuous Integration in OPNFV releases,
+to ensure that the latest master branch of Auto is always working. The first 3 tasks in the pipeline would be:
+install OpenStack instance via an OPNFV installer (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV), configure
+the OpenStack instance for ONAP, install ONAP (using the OpenStack instance network IDs in the ONAP YAML file).
Moreover, Auto will offer an API, which can be imported as a module, and can be accessed for example
by a web application. The following diagram shows the planned structure for the Auto Git repository,
supporting this module, as well as the installation scripts, test case software, utilities, and documentation.
-.. image:: auto-repo-folders.jpg
+.. image:: auto-repo-folders.png
@@ -73,15 +116,26 @@ Pre-configuration activities
The following resources will be required for the initial version of Auto:
-* two LaaS (OPNFV Lab-as-a-Service) pods, with their associated network information. Later, other types of target pods will be supported.
-* the `Auto Git repository <https://git.opnfv.org/auto/tree/>`_ (clone from `Gerrit Auto <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_)
+* at least two LaaS (OPNFV Lab-as-a-Service) pods (or equivalent in another lab), with their associated network
+ information. Later, other types of target pods will be supported, such as clusters (physical bare-metal or virtual).
+ The pods can be either x86 or Arm CPU architectures. An effort is currently ongoing (ONAP Integration team, and Auto team),
+ to ensure Arm binaries are available for all ONAP components in the official ONAP Docker registry.
+* the `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
+ (clone from `Gerrit Auto <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_)
Hardware configuration
======================
-<TBC>
+ONAP needs relatively large servers (at least 512G RAM, 1TB storage, 80-100 CPU threads). Initial deployment
+attempts on single servers did not complete. Current attempts use 3-server clusters, on bare-metal.
+
+For initial VNF deployment environments, virtual deployments by OPNFV installers on a single server should suffice.
+Later, if many large VNFs are deployed for the Auto test cases, and if heavy traffic is generated, more servers
+might be necessary. Also, if many environment parameters are considered, full executions of all test cases
+on all environment configurations could take a long time, so parallel executions of independent test case batches
+on multiple sets of servers and clusters might be considered.
@@ -91,25 +145,26 @@ Feature configuration
Environment installation
^^^^^^^^^^^^^^^^^^^^^^^^
-Current Auto work in progress is captured in the `Auto Lab Deployment wiki page <https://wiki.opnfv.org/display/AUTO/Auto+Lab+Deployment>`_.
+Current Auto work in progress is captured in the
+`Auto Lab Deployment wiki page <https://wiki.opnfv.org/display/AUTO/Auto+Lab+Deployment>`_.
OPNFV with OpenStack
~~~~~~~~~~~~~~~~~~~~
-The Auto installation uses the Fuel/MCP installer for the OPNFV environment (see the
+The first Auto installation used the Fuel/MCP installer for the OPNFV environment (see the
`OPNFV download page <https://www.opnfv.org/software/downloads>`_).
-The following figure summarizes the two installation cases: virtual or baremetal.
+The following figure summarizes the two installation cases for Fuel: virtual or bare-metal.
This OPNFV installer starts with installing a Salt Master, which then configures
subnets and bridges, and install VMs (e.g., for controllers and compute nodes)
and an OpenStack instance with predefined credentials.
-.. image:: auto-OPFNV-fuel.jpg
+.. image:: auto-OPFNV-fuel.png
-The Auto version of OPNFV installation configures additional resources for the OpenStack virtual pod,
-as compared to the default installation. Examples of manual steps are as follows:
+The Auto version of OPNFV installation configures additional resources for the OpenStack virtual pod
+(more virtual CPUs and more RAM), as compared to the default installation. Examples of manual steps are as follows:
.. code-block:: console
@@ -127,20 +182,47 @@ These lines can be added to configure more resources:
gtw01:
ram: 2048
+ cmp01:
- + vcpus: 16
- + ram: 65536
- + disk: 40
+ + vcpus: 32
+ + ram: 196608
+ cmp02:
- + vcpus: 16
- + ram: 65536
- + disk: 40
+ + vcpus: 32
+ + ram: 196608
-The final step deploys OpenStack (duration: approximately between 30 and 45 minutes).
+The final steps deploy OpenStack (duration: approximately between 30 and 45 minutes).
.. code-block:: console
- 6. ci/deploy.sh -l UNH-LaaS -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+ # The following change will provide more space to VMs. Default is 100G per cmp0x. This gives 350G each and 700G total.
+ 6. sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+ # Then deploy OpenStack. It should take between 30 and 45 minutes:
+ 7. ci/deploy.sh -l UNH-LaaS -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+ # Lastly, to get access to the extra RAM and vCPUs, adjust the quotas (done on the controller at 172.16.10.36):
+ 8. openstack quota set --cores 64 admin
+ 9. openstack quota set --ram 393216 admin
+
+
+Note:
+
+* with Linux Kernel 4.4, the installation of OPNFV is not working properly (seems to be a known bug of 4.4, as it works correctly with 4.13):
+ neither qemu-nbd nor kpartx are able to correctly create a mapping to /dev/nbd0p1 partition in order to resize it to 3G (see Fuel repository,
+ file `mcp/scripts/lib.sh <https://git.opnfv.org/fuel/tree/mcp/scripts/lib.sh>`_ , function mount_image).
+* it is not a big deal in case of x86, because it is still possible to update the image and complete the installation even with the
+ original partition size.
+* however, in the case of ARM, the OPNFV installation will fail, because there isn't enough space to install all required packages into
+ the cloud image.
+
+Using the above as starting point, Auto-specific scripts have been developed, for each of the 4 OPNFV installers Fuel/MCP,
+Compass4NFV, Apex/TripleO, Daisy4NFV. Instructions for virtual deployments from each of these installers have been used, and
+sometimes expanded and clarified (missing details or steps from the instructions).
+They can be found in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ , under the ci directory:
+
+* deploy-opnfv-fuel-ubuntu.sh
+* deploy-opnfv-compass-ubuntu.sh
+* deploy-opnfv-apex-centos.sh
+* deploy-opnfv-daisy-centos.sh
@@ -151,16 +233,15 @@ An ONAP installation on OpenStack has also been investigated, but we focus here
the ONAP on Kubernetes version.
The initial focus is on x86 architectures. The ONAP DCAE component for a while was not operational
-on Kubernetes, and had to be installed separately on OpenStack. So the ONAP instance was a hybrid,
-with all components except DCAE running on Kubernetes, and DCAE running separately on OpenStack.
+on Kubernetes (with ONAP Amsterdam), and had to be installed separately on OpenStack. So the ONAP
+instance was a hybrid, with all components except DCAE running on Kubernetes, and DCAE running
+separately on OpenStack. Starting with ONAP Beijing, DCAE also runs on Kubernetes.
For Arm architectures, specialized Docker images are being developed to provide Arm architecture
-binary compatibility.
-
-The goal for the first release of Auto is to use an ONAP instance where DCAE also runs on Kubernetes,
-for both architectures.
+binary compatibility. See the :ref:`Auto Release Notes <auto-releasenotes>`
+for more details on the availability status of these Arm images in the ONAP Docker registry.
-The ONAP reference for this installation is detailed `here <https://wiki.onap.org/display/DW/ONAP+on+Kubernetes>`_.
+The ONAP reference for this installation is detailed `here <http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_user_guide.html>`_.
Examples of manual steps for the deploy procedure are as follows:
@@ -177,6 +258,12 @@ Examples of manual steps for the deploy procedure are as follows:
9 cd ../oneclick
10 ./createAll.bash -n onap
+Several automation efforts to integrate the ONAP installation in Auto CI are in progress.
+One effort involves using a 3-server cluster at OPNFV Pharos LaaS (Lab-as-a-Service).
+The script is available in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ , under the ci directory:
+
+* deploy-onap.sh
+
ONAP configuration
@@ -207,14 +294,17 @@ Traffic Generator configuration
Test Case software installation and execution control
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-<TBC>
+<TBC; mention the management of multiple environments (characterized by their parameters), execution of all test cases
+in each environment, only a subset in official OPNFV CI/CD Jenkins due to size and time limits; then posting and analysis
+of results; failures lead to bug-fixing, successes lead to analysis for comparisons and fine-tuning>
Installation health-check
=========================
-<TBC; the Auto installation will self-check, but indicate here manual steps to double-check that the installation was successful>
+<TBC; the Auto installation will self-check, but indicate here manual steps to double-check that the
+installation was successful>
@@ -230,8 +320,8 @@ Auto Wiki pages:
OPNFV documentation on Auto:
-* `Auto release notes <http://docs.opnfv.org/en/latest/release/release-notes.html>`_
-* `Auto use case user guides <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
+* :ref:`Auto Release Notes <auto-releasenotes>`
+* :ref:`Auto use case user guides <auto-userguide>`
Git&Gerrit Auto repositories:
diff --git a/docs/release/configguide/auto-OPFNV-fuel.jpg b/docs/release/configguide/auto-OPFNV-fuel.jpg
deleted file mode 100644
index 706d997..0000000
--- a/docs/release/configguide/auto-OPFNV-fuel.jpg
+++ /dev/null
Binary files differ
diff --git a/docs/release/configguide/auto-OPFNV-fuel.png b/docs/release/configguide/auto-OPFNV-fuel.png
new file mode 100644
index 0000000..3100d40
--- /dev/null
+++ b/docs/release/configguide/auto-OPFNV-fuel.png
Binary files differ
diff --git a/docs/release/configguide/auto-OS-config4ONAP.png b/docs/release/configguide/auto-OS-config4ONAP.png
new file mode 100644
index 0000000..ecde147
--- /dev/null
+++ b/docs/release/configguide/auto-OS-config4ONAP.png
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-ONAP-B.png b/docs/release/configguide/auto-installTarget-ONAP-B.png
new file mode 100644
index 0000000..dc069fe
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-ONAP-B.png
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-generic.jpg b/docs/release/configguide/auto-installTarget-generic.jpg
deleted file mode 100644
index 3f94871..0000000
--- a/docs/release/configguide/auto-installTarget-generic.jpg
+++ /dev/null
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-generic.png b/docs/release/configguide/auto-installTarget-generic.png
new file mode 100644
index 0000000..6740933
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-generic.png
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-initial.jpg b/docs/release/configguide/auto-installTarget-initial.jpg
deleted file mode 100644
index edc6509..0000000
--- a/docs/release/configguide/auto-installTarget-initial.jpg
+++ /dev/null
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-initial.png b/docs/release/configguide/auto-installTarget-initial.png
new file mode 100644
index 0000000..465b468
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-initial.png
Binary files differ
diff --git a/docs/release/configguide/auto-repo-folders.jpg b/docs/release/configguide/auto-repo-folders.jpg
deleted file mode 100644
index ee88866..0000000
--- a/docs/release/configguide/auto-repo-folders.jpg
+++ /dev/null
Binary files differ
diff --git a/docs/release/configguide/auto-repo-folders.png b/docs/release/configguide/auto-repo-folders.png
new file mode 100644
index 0000000..1c9d6a4
--- /dev/null
+++ b/docs/release/configguide/auto-repo-folders.png
Binary files differ
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
index ba1a3da..07b7ab6 100644
--- a/docs/release/configguide/index.rst
+++ b/docs/release/configguide/index.rst
@@ -10,7 +10,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide
*****************************************************
.. toctree::
- :numbered:
:maxdepth: 3
Auto-featureconfig.rst
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
index eab68cc..ed6524d 100644
--- a/docs/release/release-notes/Auto-release-notes.rst
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -7,31 +7,63 @@
Auto Release Notes
==================
-This document provides the release notes for Fraser release of Auto.
+This document provides the release notes for the Gambia 7.0 release of Auto.
Important notes for this release
================================
-Initial release (project inception: July 2017).
+The initial release for Auto was in Fraser 6.0 (project inception: July 2017).
Summary
=======
-OPNFV is a SDNFV system integration project for open-source components, which so far have been mostly limited to the NFVI+VIM as generally described by ETSI.
+Overview
+^^^^^^^^
+
+OPNFV is an SDNFV system integration project for open-source components, which so far have been mostly limited to
+the NFVI+VIM as generally described by `ETSI <https://www.etsi.org/technologies-clusters/technologies/nfv>`_.
In particular, OPNFV has yet to integrate higher-level automation features for VNFs and end-to-end Services.
-Auto ("ONAP-Automated OPNFV") will focus on ONAP component integration and verification with OPNFV reference platforms/scenarios, through primarily a post-install process in order to avoid impact to OPNFV installer projects. As much as possible, this will use a generic installation/integration process (not specific to any OPNFV installer's technology).
+As an OPNFV project, Auto (*ONAP-Automated OPNFV*) will focus on ONAP component integration and verification with
+OPNFV reference platforms/scenarios, through primarily a post-install process, in order to avoid impact to OPNFV
+installer projects (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV). As much as possible, this will use a generic
+installation/integration process (not specific to any OPNFV installer's technology).
+
+* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers
+ robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of
+ Software-Defined Networks (SDNs). The current release of ONAP is B (Beijing).
+
+Auto aims at validating the business value of ONAP in general, but especially within an OPNFV infrastructure
+(integration of ONAP and OPNFV). Business value is measured in terms of improved service quality (performance,
+reliability, ...) and OPEX reduction (VNF management simplification, power consumption reduction, ...), as
+demonstrated by use cases.
+
+Auto also validates the availability of multi-architecture software (binary images and containers) for ONAP and OPNFV:
+CPUs (x86, ARM) and Clouds (MultiVIM).
-* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of Software-Defined Networks (SDNs).
+In other words, Auto is a turnkey approach to automatically deploy an integrated open-source virtual network
+based on OPNFV (as infrastructure) and ONAP (as end-to-end service manager), that demonstrates business value
+to end-users (IT/Telco service providers, enterprises).
-While all of ONAP is in scope, as it proceeds, the project will focus on specific aspects of this integration and verification in each release. Some example topics and work items include:
+
+While all of ONAP is in scope, as it proceeds, the Auto project will focus on specific aspects of this integration
+and verification in each release. Some example topics and work items include:
* How ONAP meets VNFM standards, and interacts with VNFs from different vendors
-* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to interact with SDN-C
-* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control functions such as Policy Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI and VIM features, as prioritized by the project with technical community and EUAG input. Examples include:
+* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the
+ upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to
+ interact with SDN-C. For interaction with multiple cloud infrastructures, the MultiVIM ONAP component will be used.
+* How ONAP leverages OPNFV installers (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV) to provide a cloud
+ instance (starting with OpenStack) on which to install the tool ONAP
+* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control
+ functions such as Policy Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI
+ and VIM features, as prioritized by the project with OPNFV technical community and
+ EUAG (`End User Advisory Group <https://www.opnfv.org/end-users/end-user-advisory-group>`_) input.
+
+ Examples:
* Abstraction of networking tech/features e.g. through NetReady/Gluon
* Blueprint-based VNF deployment (HOT, TOSCA, YANG)
@@ -39,7 +71,8 @@ While all of ONAP is in scope, as it proceeds, the project will focus on specifi
* Policy (through DCAE)
* Telemetry (through VES/DCAE)
-Initial areas of focus for Auto (in orange dotted lines; this scope can be expanded for future releases). It is understood that:
+Initial areas of focus for Auto (in orange dotted lines; this scope can be expanded for future releases).
+It is understood that:
* ONAP scope extends beyond the lines drawn below
* ONAP architecture does not necessarily align with the ETSI NFV inspired diagrams this is based upon
@@ -47,56 +80,179 @@ Initial areas of focus for Auto (in orange dotted lines; this scope can be expan
.. image:: auto-proj-rn01.png
-Testability:
+The current ONAP architecture overview can be found `here <https://onap.readthedocs.io/en/latest/guides/onap-developer/architecture/onap-architecture.html>`_.
+
+For reference, the ONAP-Beijing architecture diagram is replicated here:
+
+.. image:: ONAP-toplevel-beijing.png
+
+
+Within OPNFV, Auto leverages tools and collaborates with other projects:
+
+* use clouds/VIMs as installed in OPNFV infrastructure (e.g. OpenStack as installed by Fuel/MCP, Compass4NFV, etc.)
+* include VNFs developed by OPNFV data plane groups (e.g., accelerated by VPP (Vector Packet Processing) with DPDK support, ...)
+* validate ONAP+VNFs+VIMs on two major CPU architectures: x86 (CISC), Arm (RISC); collaborate with OPNFV/Armband
+* work with other related groups in OPNFV:
+
+ * FuncTest for software verification (CI/CD, Pass/Fail)
+ * Yardstick for metric management (quantitative measurements)
+ * VES (VNF Event Stream) and Barometer for VNF monitoring (feed to ONAP/DCAE)
+ * Edge Cloud as use case
+
+* leverage OPNFV tools and infrastructure:
+
+ * Pharos as LaaS: transient pods (3-week bookings) and permanent Arm pod (6 servers)
+ * `WorksOnArm <http://worksonarm.com/cluster>`_ (`GitHub link <http://github.com/worksonarm/cluster>`_)
+ * possibly other labs from the community (Huawei pod-12, 6 servers, x86)
+ * JJB/Jenkins for CI/CD (and follow OPNFV scenario convention)
+ * Gerrit/Git for code and documents reviewing and archiving (similar to ONAP: Linux Foundation umbrella)
+ * follow OPNFV releases (Releng group)
+
+
+Testability
+^^^^^^^^^^^
-* Tests will be developed for use cases within the project scope.
+* Tests (test cases) will be developed for use cases within the project scope.
* In future releases, tests will be added to Functest runs for supporting scenarios.
-Auto’s goals include the standup and tests for integrated ONAP-Cloud platforms (“Cloud” here being OPNFV “scenarios” or other cloud environments). Thus, the artifacts would be tools to deploy ONAP (leveraging OOM whenever possible (starting with Beijing release of ONAP), and a preference for the containerized version of ONAP), to integrate it with clouds, to onboard and deploy test VNFs, to configure policies and closed-loop controls, and to run use-case defined tests against that integrated environment. OPNFV scenarios would be a possible component in the above.
+Auto’s goals include the standup and tests for integrated ONAP-Cloud platforms (“Cloud” here being OPNFV “scenarios”
+or other cloud environments). Thus, the artifacts would be tools to deploy ONAP (leveraging OOM whenever possible,
+starting with the Beijing release of ONAP, with a preference for the containerized version of ONAP), to integrate it with
+clouds, to onboard and deploy test VNFs, to configure policies and closed-loop controls, and to run use-case defined
+tests against that integrated environment. OPNFV scenarios would be a possible component in the above.
+
+Installing Auto components and running a battery of tests will be automated, with some or all of the tests being
+integrated in OPNFV CI/CD (depending on the execution length and resource consumption).
+
+Combining all potential parameters, a full set of Auto test case executions can result in thousands of individual results.
+The analysis of these results can be performed by humans, or even by ML/AI (Machine Learning, Artificial Intelligence).
+Test results will be used to fine-tune policies and closed-loop controls configured in ONAP, for increased ONAP business
+value (i.e., determine policies and controls which optimize business metrics such as OPEX).
+
+More precisely, the following list shows parameters that could be applied to an Auto full run of test cases:
+
+* Auto test cases for given use cases
+* OPNFV installer {Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV}
+* OPNFV availability scenario {HA, noHA}
+* environment where ONAP runs {bare metal servers, VMs from clouds (OpenStack, AWS, GCP, Azure, ...), containers}
+* ONAP installation type {bare metal, VM, or container, ...} and options {MultiVIM single|distributed, ...}
+* VNF types {vFW, vCPE, vAAA, vDHCP, vDNS, vHSS, ...} and VNF-based services {vIMS, vEPC, ...}
+* cloud where VNFs run {OpenStack, AWS, GCP, Azure, ...}
+* VNF host type {VM, container}
+* CPU architectures {x86/AMD64, ARM/aarch64} for ONAP software and for VNF software; not really important for Auto software;
+* pod size and technology (RAM, storage, CPU cores/threads, NICs)
+* traffic types and amounts/volumes; traffic generators (although that should not really matter);
+* ONAP configuration {especially policies and closed-loop controls; monitoring types for DCAE: VES, ...}
+* versions of every component {Linux OS (Ubuntu, CentOS), OPNFV release, clouds, ONAP, VNFs, ...}
+
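+As a simple illustration of how quickly these parameters multiply, the sketch below computes the Cartesian
+product of a few of the dimensions listed above (example values only, not the full parameter set):
+
+.. code-block:: python
+
+    # Hypothetical illustration: a handful of parameter dimensions already yield hundreds of
+    # combinations; the complete Auto parameter list yields thousands of individual executions.
+    from itertools import product
+
+    installers   = ['Fuel/MCP', 'Compass4NFV', 'Apex/TripleO', 'Daisy4NFV']
+    ha_scenarios = ['HA', 'noHA']
+    onap_hosts   = ['bare metal', 'VM', 'container']
+    vnf_types    = ['vFW', 'vCPE', 'vDNS']
+    cpu_archs    = ['x86/AMD64', 'ARM/aarch64']
+
+    matrix = list(product(installers, ha_scenarios, onap_hosts, vnf_types, cpu_archs))
+    print(len(matrix))  # 4 * 2 * 3 * 3 * 2 = 144, before test cases, VNF clouds and versions
+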
+The diagram below shows Auto parameters:
+
+.. image:: auto-proj-parameters.png
+
+
+The next figure is an illustration of the Auto analysis loop (design, configuration, execution, result analysis)
+based on test cases covering as many parameters as possible:
+
+.. image:: auto-proj-tests.png
+
+
+Auto currently defines three use cases: Edge Cloud (UC1), Resiliency Improvements (UC2), and Enterprise vCPE (UC3). These use cases aim to show:
+
+* increased autonomy of Edge Cloud management (automation, catalog-based deployment). This use case relates to the
+ `OPNFV Edge Cloud <https://wiki.opnfv.org/display/PROJ/Edge+cloud>`_ initiative.
+* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control),
+ including end-to-end composite services of which a Cloud Manager may not be aware (VMs or containers could be
+ recovered by a Cloud Manager, but not necessarily an end-to-end service built on top of VMs or containers).
+* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with
+ SLAs and HA, as well as scaling).
+
+The use cases define test cases, which initially will be independent, but which might eventually be integrated to `FuncTest <https://wiki.opnfv.org/display/functest/Opnfv+Functional+Testing>`_.
+
+Additional use cases can be added in the future, such as vIMS (example: project `Clearwater <http://www.projectclearwater.org/>`_)
+or residential vHGW (virtual Home Gateways). The motivation for vHGW is to reduce overall power consumption: even in idle mode,
+physical HGWs in residential premises consume a lot of energy. Virtualizing that service to the Service Provider edge data center
+would make it possible to minimize that consumption.
+
-Auto currently defines three use cases: Edge Cloud, Resiliency Improvements, and Enterprise vCPE. These use cases aim to show:
+Lab environment
+^^^^^^^^^^^^^^^
-* increased autonomy of Edge Cloud management (automation, catalog-based deployment)
-* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control), including end-to-end composite services of which a Cloud Manager may not be aware
-* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with SLAs and HA as well as scaling).
+Target architectures for all Auto use cases and test cases include x86 and Arm. Power consumption analysis will be
+performed, leveraging Functest tools (based on RedFish/IPMI/ILO).
-The use cases define test cases, which initially will be independent, but which might eventually be integrated to FuncTest.
+Initially, an ONAP-Amsterdam instance (without DCAE) was installed over Kubernetes on bare metal on a single-server
+x86 pod at UNH IOL.
-Additional use cases can be added in the future, such as vIMS (example: project Clearwater).
+A transition is in progress, to leverage OPNFV LaaS (Lab-as-a-Service) pods (`Pharos <https://labs.opnfv.org/>`_).
+These pods can be booked for 3 weeks only (with an extension for a maximum of 2 weeks), so they are not a permanent resource.
-Target architectures include x86 and Arm.
+For ONAP-Beijing, a repeatable automated installation procedure is being developed, using 3 Pharos servers (x86 for now).
+Also, a more permanent ONAP installation is in progress at a Huawei lab (pod-12, consisting of 6 x86 servers,
+1 as jump server, the other 5 with this example allocation: 3 for ONAP components, and 2 for an OPNFV infrastructure:
+OpenStack installed by Compass4NFV).
-An ONAP instance (without DCAE) has been installed over Kubernetes on bare metal on an x86 pod of 6 servers at UNH IOL. A transition is in progress, to leverage OPNFV LaaS (Lab-as-a-Service) pods (`Pharos <https://labs.opnfv.org/>`_).
-ONAP-based onboarding and deployment of VNFs is in progress (ONAP pre-loading of VNFs must still done outside of ONAP: for VM-based VNFs, need to prepare OpenStack stacks (using Heat templates), then make an instance snapshot which serves as the binary image of the VNF).
+ONAP-based onboarding and deployment of VNFs is in progress (ONAP-Amsterdam pre-loading of VNFs must still be done
+outside of ONAP: for VM-based VNFs, users need to prepare OpenStack stacks (using Heat templates), then make an
+instance snapshot which serves as the binary image of the VNF).
+
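+For illustration only, the sketch below shows how that manual pre-loading flow could look with the OpenStack SDK
+(cloud name, stack name, Heat template file, server name and image name are hypothetical placeholders, not values
+from the Auto repository):
+
+.. code-block:: python
+
+    # Hypothetical sketch: instantiate a VM-based VNF from a Heat template, then snapshot the
+    # resulting instance so that the snapshot can serve as the binary image of the VNF.
+    import openstack
+
+    conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+
+    # create the stack from a Heat template (cloud-layer helper of the OpenStack SDK)
+    conn.create_stack('vnf-test-stack', template_file='vnf_test_stack.yaml', wait=True)
+
+    # find the server created by the stack (name assumed here), then snapshot it
+    server = conn.compute.find_server('vnf-test-server')
+    conn.compute.create_server_image(server, 'vnf-test-image')
+
+    # the snapshot may still be uploading at this point
+    image = conn.image.find_image('vnf-test-image')
+    print('snapshot image:', image.id if image else 'not visible yet')
+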
+A script to prepare an OpenStack instance for ONAP (creation of a public and a private network, with a router,
+pre-loading of images and flavors, creation of a security group and an ONAP user) has been developed. It leverages
+the OpenStack SDK. It has a delete option, so it can be invoked to delete these objects, for example in a tear-down procedure.
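+
+The general shape of that preparation logic is sketched below (simplified and hypothetical: resource names, CIDR,
+flavor size, image file and credentials are placeholders, and only a subset of the objects handled by the actual
+script is shown):
+
+.. code-block:: python
+
+    # Hypothetical, simplified sketch of an "OpenStack for ONAP" preparation script with a delete option.
+    import openstack
+
+    def configure_openstack_for_onap(conn, delete=False):
+        if delete:
+            # tear-down: detach and remove the objects created below (simplified, no error handling)
+            router = conn.network.find_router('onap-router')
+            subnet = conn.network.find_subnet('onap-private-subnet')
+            if router and subnet:
+                conn.network.remove_interface_from_router(router, subnet_id=subnet.id)
+            if router:
+                conn.network.delete_router(router)
+            network = conn.network.find_network('onap-private-net')
+            if network:
+                conn.network.delete_network(network)
+            return
+
+        # private network, subnet and router
+        net = conn.network.create_network(name='onap-private-net')
+        subnet = conn.network.create_subnet(network_id=net.id, ip_version=4,
+                                            cidr='10.0.10.0/24', name='onap-private-subnet')
+        router = conn.network.create_router(name='onap-router')
+        conn.network.add_interface_to_router(router, subnet_id=subnet.id)
+
+        # security group allowing SSH, an example flavor, an example image, and an ONAP user
+        sg = conn.network.create_security_group(name='onap-sg')
+        conn.network.create_security_group_rule(security_group_id=sg.id, direction='ingress',
+                                                protocol='tcp', port_range_min=22, port_range_max=22)
+        conn.compute.create_flavor(name='onap.large', ram=8192, vcpus=4, disk=80)
+        conn.create_image('ubuntu-16.04', filename='xenial-server-cloudimg-amd64-disk1.img', wait=True)
+        conn.identity.create_user(name='onap-user', password='onap-secret')
+
+    conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+    configure_openstack_for_onap(conn, delete=False)
+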
Integration with Arm servers has started (exploring binary compatibility):
-* Openstack is currently installed on a 6-server pod of Arm servers
+* The Auto project has a specific 6-server pod of Arm servers, which is currently loaned to the ONAP integration team
+  to build ONAP images
* A set of 14 additional Arm servers was deployed at UNH, for increased capacity
-* Arm-compatible Docker images are in the process of being developed
+* ONAP Docker registry: ONAP-specific images for ARM are being built, with the purpose of populating ONAP nexus2
+  (Maven2 artifacts) and nexus3 (Docker containers) repositories at the Linux Foundation. Docker images are
+  multi-architecture, and the manifest list of an image may reference one or more architecture-specific images
+  (for example two: x86/AMD64 and ARM/aarch64). One of the ONAP-Casablanca architectural requirements is to be
+  CPU-architecture independent. There are almost 150 Docker containers in a complete ONAP instance. Currently,
+  more disk space is being added to the ARM nodes (via Nova configuration, and/or additional physical storage).
+
+
+Test case design and implementation for the three use cases has started.
+
+OPNFV CI/CD integration with JJB (Jenkins Job Builder) has started: see the Auto plan description
+`here <https://wiki.opnfv.org/display/AUTO/CI+for+Auto>`_. The permanent resource for that is the 6-server Arm
+pod hosted at UNH. The CI directory from the Auto repository is `here <https://git.opnfv.org/auto/tree/ci>`_.
-Test case implementation for the three use cases has started.
Finally, the following figure illustrates Auto in terms of project activities:
.. image:: auto-project-activities.png
+Note: a demo was delivered at the OpenStack Summit in Vancouver on May 21st 2018, illustrating the deployment of
+a WordPress application (WordPress is a platform for websites and blogs) on a multi-architecture cloud (mix
+of x86 and Arm servers).
+This shows how service providers and enterprises can diversify their data centers with servers of different architectures,
+and select architectures best suited to each use case (mapping application components to architectures: DBs,
+interactive servers, number-crunching modules, ...).
+This prefigures how other examples such as ONAP, VIMs, and VNFs could also be deployed on heterogeneous multi-architecture
+environments (open infrastructure), orchestrated by Kubernetes. The Auto installation scripts covering all the parameters
+described above could expand on that approach.
+
+.. image:: auto-proj-openstacksummit1805.png
+
+
+
Release Data
============
+--------------------------------------+--------------------------------------+
-| **Project** | Fraser/auto/auto@opnfv |
+| **Project** | Auto |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/commit-ID** | |
+| **Repo/commit-ID** | auto/opnfv-7.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser 6.0 |
+| **Release designation** | Gambia 7.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2018-04-20 |
+| **Release date** | 2018-11-02 |
| | |
+--------------------------------------+--------------------------------------+
| **Purpose of the delivery** | Official OPNFV release |
@@ -122,43 +278,91 @@ Reason for version
Feature additions
~~~~~~~~~~~~~~~~~
-Initial release, with use case descriptions, release plan, and in-progress test cases and ONAP installations.
+Initial release 6.0:
+
+* Fraser release plan
+* use case descriptions
+* test case descriptions
+* in-progress test case development
+* lab: OPNFV and ONAP (Amsterdam) installations
+
+Point release 6.1:
+
+* added Gambia release plan
+* started integration with CI/CD (JJB) on permanent Arm pod
+* Arm demo at OpenStack Summit
+* initial script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.13
+* initial attempts to install ONAP Beijing
+* alignment with OPNFV Edge Cloud
+* initial contacts with Functest
+
+Point release 6.2:
+
+* initial scripts for OPNFV CI/CD, registration of Jenkins slave on `Arm pod <https://build.opnfv.org/ci/view/auto/>`_
+* updated script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.14
+
+Point release 7.0:
+
+* progress on Docker registry of ONAP's Arm images
+* progress on ONAP installation script for 3-server cluster of UNH servers
+* CI scripts for OPNFV installers: Fuel/MCP (x86), Compass, Apex/TripleO (must run twice)
+* initial CI script for Daisy4NFV (work in progress)
+* JOID script (supported only until release 6.2, not in Gambia 7.0)
+* completed script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.17
+* use of an additional lab resource for Auto development: 6-server x86 pod (huawei-pod12)
-**JIRA TICKETS:**
+
+
+
+
+**JIRA TICKETS for this release:**
+
+
+`JIRA Auto Gambia 7.0.0 Done <https://jira.opnfv.org/issues/?filter=12403>`_
+
+Manual selection of significant JIRA tickets for this version's highlights:
+
+--------------------------------------+--------------------------------------+
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| AUTO-1, UC1 definition | Define Auto-UC-01 Service Provider's |
-| | Management of Edge Cloud |
+| AUTO-37 | Get DCAE running onto Pharos |
+| | deployment |
+--------------------------------------+--------------------------------------+
-| AUTO-2, UC2 definition | Define Auto-UC-02 Resilience |
-| | Improvements through ONAP |
+| AUTO-42 | Use Compass4NFV to create an |
+| | OpenStack instance on a UNH pod |
+--------------------------------------+--------------------------------------+
-| AUTO-7, UC3 definition | Define Auto-UC-03 Enterprise vCPE |
-| | |
+| AUTO-43 | String together scripts for Fuel, |
+| | Tool installation, ONAP preparation |
+--------------------------------------+--------------------------------------+
-| AUTO-4, UC2 test case definition | Develop test cases for Auto-UC-02 |
-| | Resilience Improvements through ONAP |
+| AUTO-44 | Build ONAP components for arm64 |
+| | platform |
+--------------------------------------+--------------------------------------+
-| AUTO-8, UC3 test case definition | Develop test cases for Auto-UC-03 |
-| | Enterprise vCPE |
+| AUTO-45 | CI: Jenkins definition of verify and |
+| | merge jobs |
+--------------------------------------+--------------------------------------+
-| (UC1 test case definition is done, | |
-| but no associated JIRA ticket) | |
+| AUTO-46 | Use Apex to create an OpenStack |
+| | instance on a UNH pod |
+--------------------------------------+--------------------------------------+
-| AUTO-5, install ONAP | Getting ONAP running onto Pharos |
-| | deployment (without DCAE) |
+| AUTO-47 | Install ONAP with Kubernetes on LaaS |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-48 | Create documentation for ONAP |
+| | deployment with Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-49 | Automate ONAP deployment with |
+| | Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-51 | huawei-pod12: Prepare IDF and PDF |
+| | files |
+--------------------------------------+--------------------------------------+
-| AUTO-31, UC1 test case progress | auto-edge-pif-001 Basic OpenStack |
-| | environment check |
+| AUTO-52 | Deploy a running ONAP instance on |
+| | huawei-pod12 |
+--------------------------------------+--------------------------------------+
-| AUTO-13, UC2 test case progress | Develop test script for vif-001: |
-| | Data Management |
+| AUTO-54 | Use Daisy4nfv to create an OpenStack |
+| | instance on a UNH pod |
+--------------------------------------+--------------------------------------+
-| AUTO-20, UC3 test case progress | Onboarding of VNFs via SDC GUI |
+| | |
| | |
+--------------------------------------+--------------------------------------+
@@ -187,18 +391,19 @@ Deliverables
Software deliverables
^^^^^^^^^^^^^^^^^^^^^
-Initial release: in-progress install scripts and test case implementations.
+7.0 release: in-progress Docker ARM images, install scripts, CI scripts, and test case implementations.
Documentation deliverables
^^^^^^^^^^^^^^^^^^^^^^^^^^
-Initial versions of:
+Updated versions of:
-* Release notes (this document)
-* User guide `OPNFV User and Configuration Guide <http://docs.opnfv.org/en/latest/release/userguide.introduction.html>`_
-* Configuration Guide (same landing page as User Guide)
+* Release Notes (this document)
+* User Guide
+* Configuration Guide
+
+(see links in the References section)
@@ -208,9 +413,6 @@ Known Limitations, Issues and Workarounds
System Limitations
^^^^^^^^^^^^^^^^^^
-* ONAP still to be validated for Arm servers
-* DCAE still to be validated for Kubernetes
-
Known issues
@@ -260,8 +462,8 @@ None at this point.
References
==========
-For more information on the OPNFV Fraser release, please see:
-http://opnfv.org/fraser
+For more information on the OPNFV Gambia release, please see:
+http://opnfv.org/gambia
Auto Wiki pages:
@@ -271,9 +473,9 @@ Auto Wiki pages:
OPNFV documentation on Auto:
-* `Auto release notes <http://docs.opnfv.org/en/latest/release/release-notes.html>`_
-* `Auto use case user guides <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
-* `Auto configuration guide <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/configguide/index.html#auto-configguide>`_
+* :ref:`Auto release notes <auto-releasenotes>`
+* :ref:`Auto use case user guides <auto-userguide>`
+* :ref:`Auto configuration guide <auto-configguide>`
Git&Gerrit Auto repositories:
@@ -282,4 +484,7 @@ Git&Gerrit Auto repositories:
* `Gerrit for Auto project <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_
+Demo at OpenStack summit May 2018 (Vancouver, BC, Canada):
+
+* YouTube video (10min 52s): `Integration testing on an OpenStack public cloud <https://youtu.be/BJ05YuusNYw>`_
diff --git a/docs/release/release-notes/ONAP-toplevel-beijing.png b/docs/release/release-notes/ONAP-toplevel-beijing.png
new file mode 100644
index 0000000..62a9d47
--- /dev/null
+++ b/docs/release/release-notes/ONAP-toplevel-beijing.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-openstacksummit1805.png b/docs/release/release-notes/auto-proj-openstacksummit1805.png
new file mode 100644
index 0000000..339365a
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-openstacksummit1805.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-parameters.png b/docs/release/release-notes/auto-proj-parameters.png
new file mode 100644
index 0000000..a0cbe2e
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-parameters.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-tests.png b/docs/release/release-notes/auto-proj-tests.png
new file mode 100644
index 0000000..6b3be10
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-tests.png
Binary files differ
diff --git a/docs/release/release-notes/auto-project-activities.png b/docs/release/release-notes/auto-project-activities.png
index a946372..d25ac2a 100644
--- a/docs/release/release-notes/auto-project-activities.png
+++ b/docs/release/release-notes/auto-project-activities.png
Binary files differ
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 264f21c..4c879f7 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -9,7 +9,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Release Notes
===============================================
.. toctree::
- :numbered:
:maxdepth: 3
Auto-release-notes.rst
diff --git a/docs/release/userguide/UC01-feature.userguide.rst b/docs/release/userguide/UC01-feature.userguide.rst
index ea02bad..5b5edb8 100644
--- a/docs/release/userguide/UC01-feature.userguide.rst
+++ b/docs/release/userguide/UC01-feature.userguide.rst
@@ -17,9 +17,12 @@ Description
This use case aims at showcasing the benefits of using ONAP for autonomous Edge Cloud management.
-A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies and closed-loop controls, which take care of most lifecycle events (start, stop, scale up/down/in/out, recovery/migration for HA) as well as their monitoring and SLA management.
+A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies and closed-loop
+controls, which take care of most lifecycle events (start, stop, scale up/down/in/out, recovery/migration for HA) as
+well as their monitoring and SLA management.
-Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks to the onboarding process, and then can be deployed and handled by multiple controllers in a systematic way.
+Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks to the onboarding
+process, and then can be deployed and handled by multiple controllers in a systematic way.
This results in management efficiency (lower control/automation overhead) and high degree of autonomy.
diff --git a/docs/release/userguide/UC02-feature.userguide.rst b/docs/release/userguide/UC02-feature.userguide.rst
index 3ed5781..9746914 100644
--- a/docs/release/userguide/UC02-feature.userguide.rst
+++ b/docs/release/userguide/UC02-feature.userguide.rst
@@ -15,15 +15,21 @@ specifically for Use Case 2: Resiliency Improvements Through ONAP.
Description
===========
-This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring and management. It:
+This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring
+and management. It:
* simulates an underlying problem (failure, stress, or any adverse condition in the network that can impact VNFs)
* tracks a VNF
* measures the amount of time it takes for ONAP to restore the VNF functionality.
-The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs is obtained by leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware of the VNF or the corresponding end-to-end Service, but only of underlying resources such as VMs and servers).
+The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs
+is obtained by leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware
+of the VNF or the corresponding end-to-end Service, but only of underlying resources such as VMs and servers).
-Also, a problem, or challenge, may not necessarily be a failure (which could also be recovered by other layers): it could be an issue leading to suboptimal performance, without failure. A VNF management layer as provided by ONAP may detect such non-failure problems, and provide a recovery solution which no other layer could provide in a given deployment.
+Also, a problem, or challenge, may not necessarily be a failure (which could also be recovered by other layers):
+it could be an issue leading to suboptimal performance, without failure. A VNF management layer as provided by
+ONAP may detect such non-failure problems, and provide a recovery solution which no other layer could provide
+in a given deployment.
Preconditions:
@@ -33,9 +39,13 @@ Preconditions:
#. ONAP has been deployed onto a cloud and is interfaced (i.e. provisioned for API access) to the Edge cloud
#. Components of ONAP have been deployed on the Edge cloud as necessary for specific test objectives
-In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed), the deployment of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well), the configuration of ONAP for monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test scripts which simulate a problem and measures recovery time.
+In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed),
+the deployment of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well),
+the configuration of ONAP for monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test
+scripts which simulate a problem and measure recovery time.
-Different types of problems can be simulated, hence the identification of multiple test cases corresponding to this use case, as illustrated in this diagram:
+Different types of problems can be simulated, hence the identification of multiple test cases corresponding
+to this use case, as illustrated in this diagram:
.. image:: auto-UC02-testcases.jpg
@@ -68,7 +78,9 @@ Test execution high-level description
The following two MSCs (Message Sequence Charts) show the actors and high-level interactions.
-The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already been installed): onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO), and ONAP configuration (policy framework, closed-loops in CLAMP, activation of DCAE).
+The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already
+been installed): onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO),
+and ONAP configuration (policy framework, closed-loops in CLAMP, activation of DCAE).
.. image:: auto-UC02-preparation.jpg
@@ -94,7 +106,9 @@ The high-level design of classes identifies several entities, described as follo
* ``Test Definition`` : gathers all the information necessary to run a certain test case
* ``Metric Definition`` : describes a certain metric that may be measured for a Test Case, in addition to Recovery Time
* ``Challenge Definition`` : describes the challenge (problem, failure, stress, ...) simulated by the test case
-* ``Recipient`` : entity that can receive commands and send responses, and that is queried by the Test Definition or Challenge Definition (a recipient would be typically a management service, with interfaces (CLI or API) for clients to query)
+* ``Recipient`` : entity that can receive commands and send responses, and that is queried by the Test Definition
+ or Challenge Definition (a recipient would be typically a management service, with interfaces (CLI or API) for
+ clients to query)
* ``Resources`` : with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
@@ -119,7 +133,9 @@ This next diagram shows the Python classes and attributes, as implemented by thi
Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV files, for easier post-analysis.
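+
+A minimal sketch of the pickle-based persistence described above (file name and content are illustrative,
+not the exact ones used by the module):
+
+.. code-block:: python
+
+    # Illustrative sketch: definition data serialized with pickle, then read back for a test session.
+    import pickle
+
+    test_definitions = [{'ID': 5, 'name': 'VM failure recovery'}]   # placeholder definition data
+
+    with open('TestDefinitions.bin', 'wb') as f:    # write the serialization file
+        pickle.dump(test_definitions, f)
+
+    with open('TestDefinitions.bin', 'rb') as f:    # read it back at the start of a test session
+        print(pickle.load(f))
+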
-The module design is straightforward: functions and classes for managing data, for interfacing with recipients, for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details of a Test Definition, starting the execution).
+The module design is straightforward: functions and classes for managing data, for interfacing with recipients,
+for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details of
+a Test Definition, starting the execution).
.. image:: auto-UC02-module1.jpg
@@ -134,18 +150,27 @@ In future releases of Auto, testing environments such as Robot, FuncTest and Yar
Also, anonymized test results could be collected from users willing to share them, and aggregates could be
maintained as benchmarks.
-As further illustration, the next figure shows cardinalities of class instances: one Test Definition per Test Case, multiple Test Executions per Test Definition, zero or one Recovery Time Metric Value per Test Execution (zero if the test failed for any reason, including if ONAP failed to recover the challenge), etc.
+As further illustration, the next figure shows cardinalities of class instances: one Test Definition per Test Case,
+multiple Test Executions per Test Definition, zero or one Recovery Time Metric Value per Test Execution (zero if
+the test failed for any reason, including if ONAP failed to recover the challenge), etc.
.. image:: auto-UC02-cardinalities.png
-In this particular implementation, both Test Definition and Challenge Definition classes have a generic execution method (e.g., ``run_test_code()`` for Test Definition) which can invoke a particular script, by way of an ID (which can be configured, and serves as a script selector for each Test Definition instance). The overall test execution logic between classes is show in the next figure.
+In this particular implementation, both Test Definition and Challenge Definition classes have a generic execution method
+(e.g., ``run_test_code()`` for Test Definition) which can invoke a particular script, by way of an ID (which can be
+configured, and serves as a script selector for each Test Definition instance). The overall test execution logic
+between classes is shown in the next figure.
.. image:: auto-UC02-logic.png
-The execution of a test case starts with invoking the generic method from Test Definition, which then creates Execution instances, invokes Challenge Definition methods, performs the Recovery time calculation, performs script-specific actions, and writes results to the CSV files.
+The execution of a test case starts with invoking the generic method from Test Definition, which then creates Execution
+instances, invokes Challenge Definition methods, performs the Recovery time calculation, performs script-specific
+actions, and writes results to the CSV files.
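+
+A schematic sketch of that flow is shown below (simplified; ``run_start_challenge_code()`` and
+``run_stop_challenge_code()`` are actual method names of the module, the rest is hypothetical):
+
+.. code-block:: python
+
+    # Schematic sketch of one test execution: start the challenge, wait for the VNF to be restored,
+    # compute the recovery time, then record the result; not the module's actual implementation.
+    import csv
+    from datetime import datetime
+
+    def run_one_execution(test_def, challenge_def, monitor_vnf):
+        challenge_def.run_start_challenge_code()    # e.g. suspend the VM hosting the VNF
+        challenge_start_time = datetime.now()
+        monitor_vnf()                               # returns once the VNF is observed as restored
+        restoration_detection_time = datetime.now()
+        recovery_time = (restoration_detection_time - challenge_start_time).total_seconds()
+        challenge_def.run_stop_challenge_code()     # clean up the challenge if still needed
+        with open('TestExecutions.csv', 'a', newline='') as f:
+            csv.writer(f).writerow([test_def.ID, challenge_start_time.isoformat(), recovery_time])
+        return recovery_time
+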
-Finally, the following diagram show a mapping between these class instances and the initial test case design. It corresponds to the test case which simulates a VM failure, and shows how the OpenStack SDK API is invoked (with a connection object) by the Challenge Definition methods, to suspend and resume a VM.
+Finally, the following diagram shows a mapping between these class instances and the initial test case design. It
+corresponds to the test case which simulates a VM failure, and shows how the OpenStack SDK API is invoked (with
+a connection object) by the Challenge Definition methods, to suspend and resume a VM.
.. image:: auto-UC02-TC-mapping.png
diff --git a/docs/release/userguide/UC03-feature.userguide.rst b/docs/release/userguide/UC03-feature.userguide.rst
index cf96981..2c0d9e7 100644
--- a/docs/release/userguide/UC03-feature.userguide.rst
+++ b/docs/release/userguide/UC03-feature.userguide.rst
@@ -18,9 +18,14 @@ Description
This Use Case shows how ONAP can help ensure that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
Other vCPE examples: vAAA, vDHCP, vDNS, vGW, vBNG, vRouter, ...
-ONAP operations include a verification process for VNF onboarding (i.e., inclusion in the ONAP catalog), with multiple Roles (Designer, Tester, Governor, Operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products), and eventually as end-to-end Services).
+ONAP operations include a verification process for VNF onboarding (i.e., inclusion in the ONAP catalog), with multiple
+Roles (Designer, Tester, Governor, Operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products),
+and eventually as end-to-end Services).
-This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only chosen from such an approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP provides a NBI (currently HTTP-based) in addition to a standard GUI portal, thus enabling a programmatic deployment of VNFs, still conforming to ONAP processes.
+This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only chosen from such an
+approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP
+provides an NBI (currently HTTP-based) in addition to a standard GUI portal, thus enabling a programmatic deployment of
+VNFs, still conforming to ONAP processes.
Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which can perform the following functions:
@@ -32,9 +37,12 @@ DCAE executes directives coming from policies described in the Policy Framework,
ONAP can perform the provisioning side of a BSS Order Management application handling vCPE orders.
-Additional processing can be added to ONAP (internally as configured policies and closed-loop controls, or externally as separate systems): Path Computation Element and Load Balancing, and even telemetry-based Network Artificial Intelligence.
+Additional processing can be added to ONAP (internally as configured policies and closed-loop controls, or externally as
+separate systems): Path Computation Element and Load Balancing, and even telemetry-based Network Artificial Intelligence.
-Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times, as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity, and a Small Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
+Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times,
+as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity, and a Small
+Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index dd308dc..099622c 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -15,7 +15,6 @@ OPNFV Auto (ONAP-Automated OPNFV) User Guide
.. by the installer project.
.. toctree::
- :numbered:
:maxdepth: 3
UC01-feature.userguide.rst
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/lib/auto/testcase/resiliency/AutoResilItfCloud.py b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
index 302a662..7feb518 100644
--- a/lib/auto/testcase/resiliency/AutoResilItfCloud.py
+++ b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
@@ -159,7 +159,8 @@ def gdtest_openstack():
# Method 1 (preferred) : assume there is a clouds.yaml file in PATH, starting path search with local directory
#conn = openstack.connect(cloud='armopenstack', region_name='RegionOne')
#conn = openstack.connect(cloud='hpe16openstackEuphrates', region_name='RegionOne')
- conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+ #conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
# if getting error: AttributeError: module 'openstack' has no attribute 'connect', check that openstack is installed for this python version
@@ -208,8 +209,8 @@ def gdtest_openstack():
openstack_list_projects(conn)
openstack_list_domains(conn)
- # VM: hpe16-Auto-UC2-gdtest-compute1
- gds_ID = '715c677a-7914-4ca8-8c6d-75bf29eeb940'
+ # VM test: create a test VM in the OpenStack instance, enter its ID here
+ gds_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0'
gds = conn.compute.get_server(gds_ID)
print('\ngds.name=',gds.name)
print('gds.status=',gds.status)
@@ -229,27 +230,8 @@ def gdtest_openstack():
- #VM: test3
- gds_ID = 'd3ceffc3-5967-4f18-b8b5-b1b2bd7ab76d'
- gds = conn.compute.get_server(gds_ID)
- print('\ngds.name=',gds.name)
- print('gds.status=',gds.status)
- print('suspending...')
- conn.compute.suspend_server(gds_ID) # NOT synchronous: returns before suspension action is completed
- wait_seconds = 10
- print(' waiting',wait_seconds,'seconds...')
- time.sleep(wait_seconds)
- gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
- print('gds.status=',gds.status)
- print('resuming...')
- conn.compute.resume_server(gds_ID)
- print(' waiting',wait_seconds,'seconds...')
- time.sleep(wait_seconds)
- gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
- print('gds.status=',gds.status)
-
- #Volume: hpe16-Auto-UC2-gdtest-volume1
- gdv_ID = '5a6c1dbd-5097-4a9b-8f79-6f03cde18bf6'
+ #Volume test: volume attached to test VM; get its ID and enter it here
+ gdv_ID = 'd0206ff2-507c-444a-9871-b5b7ea704994'
gdv = conn.block_storage.get_volume(gdv_ID)
# no API for stopping/restarting a volume... only delete. ONAP would have to completely migrate a VNF depending on this volume
print('\ngdv.name=',gdv.name)
diff --git a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
index 7e0b50d..edf899a 100644
--- a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
+++ b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
@@ -54,7 +54,9 @@ import sys
from enum import Enum
from datetime import datetime, timedelta
import AutoResilGlobal
-#import openstack
+import openstack
+import time
+
# Constants with definition file names
FILE_PHYSICAL_RESOURCES = "ResourcesPhysical.bin"
@@ -319,8 +321,9 @@ class TestDefinition(AutoBaseObject):
self.test_code_list.append(self.test_code010)
- def run_test_code(self):
- """Run currently selected test code. Common code runs here, specific code is invoked through test_code_list and test_code_ID."""
+ def run_test_code(self, *test_code_args, **test_code_kwargs):
+ """Run currently selected test code. Common code runs here, specific code is invoked through test_code_list and test_code_ID.
+ Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code."""
try:
# here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
# specific monitoring of VNF, trigger stop code from challenge def
@@ -355,7 +358,8 @@ class TestDefinition(AutoBaseObject):
# call specific test definition code, via table of functions; this code should monitor a VNF and return when restoration is observed
test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1
- self.test_code_list[test_code_index]() # invoke corresponding method, via index; could check for return code
+ # invoke corresponding method, via index; could check for return code
+ self.test_code_list[test_code_index](*test_code_args, **test_code_kwargs)
# memorize restoration detection time and compute recovery time
test_exec.restoration_detection_time = datetime.now()
@@ -382,23 +386,23 @@ class TestDefinition(AutoBaseObject):
# library of test codes, probably 1 per test case, so test_case_ID would be the same as test_code_ID
- def test_code001(self):
+ def test_code001(self, *test_code_args, **test_code_kwargs):
"""Test case code number 001."""
print("This is test_code001 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code002(self):
+ def test_code002(self, *test_code_args, **test_code_kwargs):
"""Test case code number 002."""
print("This is test_code002 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code003(self):
+ def test_code003(self, *test_code_args, **test_code_kwargs):
"""Test case code number 003."""
print("This is test_code003 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code004(self):
+ def test_code004(self, *test_code_args, **test_code_kwargs):
"""Test case code number 004."""
print("This is test_code004 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code005(self):
+ def test_code005(self, *test_code_args, **test_code_kwargs):
"""Test case code number 005."""
print("This is test_code005 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
@@ -407,23 +411,46 @@ class TestDefinition(AutoBaseObject):
# return when VNF is recovered
# may provision for failure to recover (max time to wait; return code: recovery OK boolean)
- def test_code006(self):
+ # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0
+ # VM is created arbitrarily, not yet with ONAP
+ # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file
+ # VM resume done in Horizon (to simulate an ONAP-based recovery)
+ # retrieved status values: {'ACTIVE', 'SUSPENDED'}
+ # loop: wait 2 seconds, check status, stop loop when status is ACTIVE
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+ test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack
+ test_VM = conn.compute.get_server(test_VM_ID)
+ print(' test_VM.name=',test_VM.name)
+ print(' test_VM.status=',test_VM.status)
+ test_VM_current_status = test_VM.status
+ wait_seconds = 2
+ nb_seconds_waited = 0
+ while test_VM_current_status != 'ACTIVE':
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ test_VM = conn.compute.get_server(test_VM_ID) # need to get VM object ID, for an updated status attribute
+ test_VM_current_status = test_VM.status
+ nb_seconds_waited = nb_seconds_waited + wait_seconds
+ print(' nb_seconds_waited=',nb_seconds_waited)
+
+
+ def test_code006(self, *test_code_args, **test_code_kwargs):
"""Test case code number 006."""
print("This is test_code006 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code007(self):
+ def test_code007(self, *test_code_args, **test_code_kwargs):
"""Test case code number 007."""
print("This is test_code007 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code008(self):
+ def test_code008(self, *test_code_args, **test_code_kwargs):
"""Test case code number 008."""
print("This is test_code008 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code009(self):
+ def test_code009(self, *test_code_args, **test_code_kwargs):
"""Test case code number 009."""
print("This is test_code009 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
- def test_code010(self):
+ def test_code010(self, *test_code_args, **test_code_kwargs):
"""Test case code number 010."""
print("This is test_code010 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
@@ -622,20 +649,25 @@ class ChallengeDefinition(AutoBaseObject):
self.stop_challenge_code_list.append(self.stop_challenge_code010)
- def run_start_challenge_code(self):
- """Run currently selected challenge code, start portion."""
+ def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs):
+ """Run currently selected challenge code, start portion.
+ Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code."""
+
try:
code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
- self.start_challenge_code_list[code_index]() # invoke corresponding start method, via index
+ # invoke corresponding start method, via index
+ self.start_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)
except Exception as e:
print(type(e), e)
sys.exit()
- def run_stop_challenge_code(self):
- """Run currently selected challenge code, stop portion."""
+ def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):
+ """Run currently selected challenge code, stop portion.
+ Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code."""
try:
code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
- self.stop_challenge_code_list[code_index]() # invoke corresponding stop method, via index
+ # invoke corresponding stop method, via index
+ self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)
except Exception as e:
print(type(e), e)
sys.exit()
@@ -643,35 +675,35 @@ class ChallengeDefinition(AutoBaseObject):
# library of challenge codes
- def start_challenge_code001(self):
+ def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 001."""
print("This is start_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code001(self):
+ def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 001."""
print("This is stop_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code002(self):
+ def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 002."""
print("This is start_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code002(self):
+ def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 002."""
print("This is stop_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code003(self):
+ def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 003."""
print("This is start_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code003(self):
+ def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 003."""
print("This is stop_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code004(self):
+ def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 004."""
print("This is start_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code004(self):
+ def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 004."""
print("This is stop_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code005(self):
+ def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 005."""
print("This is start_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
# challenge #5, related to test case #5, i.e. test def #5
@@ -682,8 +714,23 @@ class ChallengeDefinition(AutoBaseObject):
# conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name
# conn.compute.suspend_server(this server id)
-
- def stop_challenge_code005(self):
+ # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0
+ # VM is created arbitrarily, not yet with ONAP
+ # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file
+ # VM resume done in Horizon (to simulate an ONAP-based recovery)
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+ test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack
+ test_VM = conn.compute.get_server(test_VM_ID)
+ print(' test_VM.name=',test_VM.name)
+ print(' test_VM.status=',test_VM.status)
+ print(' suspending...')
+ conn.compute.suspend_server(test_VM_ID)
+ # wait a bit before continuing: ensure VM is actually suspended
+ wait_seconds = 10
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+
+ def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 005."""
print("This is stop_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
# challenge #5, related to test case #5, i.e. test def #5
@@ -694,39 +741,49 @@ class ChallengeDefinition(AutoBaseObject):
# conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name
# conn.compute.conn.compute.resume_server(this server id)
+ # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0
+ # this resume would be the normal challenge stop, but not in the case of this test
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+ test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack
+ test_VM = conn.compute.get_server(test_VM_ID)
+ print(' test_VM.name=',test_VM.name)
+ print(' test_VM.status=',test_VM.status)
+        print('  resuming...')
+ conn.compute.resume_server(test_VM_ID)
+
- def start_challenge_code006(self):
+ def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 006."""
print("This is start_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code006(self):
+ def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 006."""
print("This is stop_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code007(self):
+ def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 007."""
print("This is start_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code007(self):
+ def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 007."""
print("This is stop_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code008(self):
+ def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 008."""
print("This is start_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code008(self):
+ def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 008."""
print("This is stop_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code009(self):
+ def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 009."""
print("This is start_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code009(self):
+ def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 009."""
print("This is stop_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
- def start_challenge_code010(self):
+ def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):
"""Start Challenge code number 010."""
print("This is start_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
- def stop_challenge_code010(self):
+ def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):
"""Stop Challenge code number 010."""
print("This is stop_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
@@ -797,7 +854,7 @@ def init_challenge_definitions():
# in CLI:
# $ nova suspend NAME
# $ nova resume NAME
- # but better use openstack SDK
+ # but better use OpenStack SDK
chall_def_startChallengeAPICommandSent = []
chall_def_stopChallengeAPICommandSent = []
diff --git a/lib/auto/testcase/resiliency/clouds.yaml b/lib/auto/testcase/resiliency/clouds.yaml
index e6ec824..7bfd717 100644
--- a/lib/auto/testcase/resiliency/clouds.yaml
+++ b/lib/auto/testcase/resiliency/clouds.yaml
@@ -32,14 +32,16 @@ clouds:
password: opnfv_secret
region_name: RegionOne
- # Openstack instance on LaaS hpe16, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
+ # Openstack instance on generic LaaS hpe, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
# keystone endpoints (openstack endpoint list --service keystone)
# admin: http://172.16.10.36:35357/v3
# internal: http://172.16.10.36:5000/v3
# public: http://10.16.0.107:5000/v3
# Horizon: https://10.16.0.107:8078, but need SSH port forwarding through 10.10.100.26 to be reached from outside
# "If you are using Identity v3 you need to specify the user and the project domain name"
- hpe16openstackFraser:
+
+ # generic cloud name, for a UNH IOL hpe server, for OPNFV Fraser, OpenStack installed by Fuel/MCP
+ unh-hpe-openstack-fraser:
auth:
auth_url: http://10.16.0.107:5000/v3
project_name: admin
@@ -48,6 +50,7 @@ clouds:
user_domain_name: Default
project_domain_name: Default
region_name: RegionOne
+ identity_api_version: 3
# ubuntu@ctl01:~$ openstack project show admin
# +-------------+----------------------------------+
@@ -82,19 +85,6 @@ clouds:
# | name | heat_user_domain |
# +-------------+---------------------------------------------+
-# admin user (from Horizon on hpe16):
-# Domain ID default
-# Domain Name Default
-# User Name admin
-# Description None
-# ID df0ea50cfcff4bbfbfdfefccdb018834
-# Email root@localhost
-# Enabled Yes
-# Primary Project ID 04fcfe7aa83f4df79ae39ca748aa8637
-# Primary Project Name admin
-
-
-
# export OS_AUTH_URL=http://10.16.0.107:5000/v3
# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..c213b80
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,561 @@
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=print-statement,
+ parameter-unpacking,
+ unpacking-in-except,
+ old-raise-syntax,
+ backtick,
+ long-suffix,
+ old-ne-operator,
+ old-octal-literal,
+ import-star-module-level,
+ non-ascii-bytes-literal,
+ invalid-unicode-literal,
+ raw-checker-failed,
+ bad-inline-option,
+ locally-disabled,
+ locally-enabled,
+ file-ignored,
+ suppressed-message,
+ useless-suppression,
+ deprecated-pragma,
+ apply-builtin,
+ basestring-builtin,
+ buffer-builtin,
+ cmp-builtin,
+ coerce-builtin,
+ execfile-builtin,
+ file-builtin,
+ long-builtin,
+ raw_input-builtin,
+ reduce-builtin,
+ standarderror-builtin,
+ unicode-builtin,
+ xrange-builtin,
+ coerce-method,
+ delslice-method,
+ getslice-method,
+ setslice-method,
+ no-absolute-import,
+ old-division,
+ dict-iter-method,
+ dict-view-method,
+ next-method-called,
+ metaclass-assignment,
+ indexing-exception,
+ raising-string,
+ reload-builtin,
+ oct-method,
+ hex-method,
+ nonzero-method,
+ cmp-method,
+ input-builtin,
+ round-builtin,
+ intern-builtin,
+ unichr-builtin,
+ map-builtin-not-iterating,
+ zip-builtin-not-iterating,
+ range-builtin-not-iterating,
+ filter-builtin-not-iterating,
+ using-cmp-argument,
+ eq-without-hash,
+ div-method,
+ idiv-method,
+ rdiv-method,
+ exception-message-attribute,
+ invalid-str-codec,
+ sys-max-int,
+ bad-python3-import,
+ deprecated-string-function,
+ deprecated-str-translate-call,
+ deprecated-itertools-function,
+ deprecated-types-field,
+ next-method-defined,
+ dict-items-not-iterating,
+ dict-keys-not-iterating,
+ dict-values-not-iterating,
+ deprecated-operator-function,
+ deprecated-urllib-function,
+ xreadlines-attribute,
+ deprecated-sys-function,
+ exception-escape,
+ comprehension-escape
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a score of at most 10 (10 is the
+# highest score). You have access to the variables 'error', 'warning',
+# 'refactor', 'convention' and 'statement', which respectively contain the
+# number of messages in each category and the total number of statements
+# analyzed. This is used by the global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=optparse.Values,sys.exit
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=160
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+ dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[BASIC]
+
+# Naming style matching correct argument names
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style
+#argument-rgx=
+
+# Naming style matching correct attribute names
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata
+
+# Naming style matching correct class attribute names
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style
+#class-attribute-rgx=
+
+# Naming style matching correct class names
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-style
+#class-rgx=
+
+# Naming style matching correct constant names
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,
+ j,
+ k,
+ e,
+ ex,
+ Run,
+ _
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style
+#inlinevar-rgx=
+
+# Naming style matching correct method names
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style
+#method-rgx=
+
+# Naming style matching correct module names
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style
+#module-rgx=
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style
+#variable-rgx=
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=optparse,tkinter.tix
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/requirements.txt b/requirements.txt
index 8ef8db6..1035c76 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,5 @@ python-glanceclient>=2.8.0
python-neutronclient>=6.3.0
python-novaclient>=9.0.0
python-heatclient>=1.6.1
+pylint==1.9.2
+yamllint==1.11.1
diff --git a/setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py b/setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py
new file mode 100644
index 0000000..e4b94f5
--- /dev/null
+++ b/setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py
@@ -0,0 +1,923 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+#docstring
+"""This script configures an OpenStack instance to make it ready to interface with an ONAP instance, for example to host VM-based VNFs deployed by ONAP. It can also remove the created objects, when used in a clean-up procedure.
+Use -h option to see usage (-del to delete objects, -deb to print debug information).
+Requirements: python 3, OpenStack SDK (0.14 or greater), clouds.yaml file, .img files are downloaded
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+######################################################################
+# This script configures an OpenStack instance (e.g. from an OPNFV installer like FUEL/MCP, Compass4nfv, ...)
+# to make it ready to interface with an ONAP instance, for example to host VM-based VNFs deployed by ONAP.
+# After running this script, the created OpenStack object names/IDs can be used for example to populate
+# YAML&ENV files used by ONAP (installation of ONAP itself, VNF descriptor files, etc.).
+
+
+######################################################################
+# Overview of the steps:
+#
+# 1) create an ONAP project/tenant (a tenant is a project; project is a more generic term than tenant)
+# (optional, probably not needed: create a new group, which can be associated to a project, and contains users)
+# 2) create an ONAP user within the ONAP project, so as not to use the "admin" user for ONAP
+# (associate user to group if applicable; credentials: name/pwd or name/APIkey, or token)
+# 3) create an ONAP security group, to allow ICMP traffic (for pings) and TCP port 22 (for SSH),
+# rather than changing default security group(s)
+# (optional, probably not needed: create a new region; default region RegionOne is OK)
+# 4) create a public network for ONAP VNFs, with subnet and CIDR block
+# (so components have access to the Internet, via router and gateway, on unnamed ports, dynamic IP@ allocation)
+# 5) create a private and an OAM network for ONAP VNFs or other ONAP components,
+# with their respective subnet and CIDR block
+# (ONAP VNFs will be deployed in this private and/or OAM network(s), usually with named ports
+# and static IP@ as per VNF configuration file)
+# 6) create an OpenStack router, with interfaces to the public, private and OAM networks,
+# and a reference to an external network (gateway) provided by the OpenStack instance installation
+# 7) create VM flavors as needed: m1.medium, etc.
+# 8) download VM images, as needed for ONAP-deployed VNFs: e.g. Ubuntu 14.04, 16.04, ...
+
+
+######################################################################
+# Assumptions:
+# - python3 is installed
+# - OpenStack SDK is installed for python3
+# - there is a clouds.yaml file (describing the OpenStack instance, especially the Auth URL and admin credentials)
+# - .img files (Ubuntu Trusty Tahr, Xenial Xerus, CirrOS, ...) are downloaded and stored in IMAGES_DIR
+# - the script connects to OpenStack as a user with admin rights
+
+# typical commands to install OpenStack SDK Python client:
+# apt install python3-pip
+# pip3 install --upgrade pip
+# hash -r
+# pip3 list
+# pip3 install openstacksdk
+# pip3 install --upgrade openstacksdk
+# pip3 show openstacksdk
+# pip3 check
+
+
+######################################################################
+# useful URLs
+# Identity API: https://docs.openstack.org/openstacksdk/latest/user/proxies/identity_v3.html
+# (User, Project, Group, region, Role, ...)
+# Network API: https://docs.openstack.org/openstacksdk/latest/user/proxies/network.html
+# (Network, Subnet, Port, Router, Floating IP, AZ, Flavor, ...)
+
+
+######################################################################
+# script parameters
+ONAP_USER_NAME = 'ONAP_user'
+ONAP_USER_PASSWORD = 'auto_topsecret'
+ONAP_USER_DESC = 'OpenStack User created for ONAP'
+
+ONAP_TENANT_NAME = 'ONAP_tenant'
+# note: "project" is a more generic concept than "tenant"; a tenant is type of project; quotas are per project;
+ONAP_TENANT_DESC = 'OpenStack Project/Tenant created for ONAP'
+
+ONAP_SECU_GRP_NAME = 'ONAP_security_group'
+ONAP_SECU_GRP_DESC = 'Security Group created for ONAP'
+
+ONAP_PUBLIC_NET_NAME = 'ONAP_public_net'
+ONAP_PUBLIC_SUBNET_NAME = 'ONAP_public_subnet'
+ONAP_PUBLIC_SUBNET_CIDR = '192.168.99.0/24'
+# note: some arbitrary CIDR, but typically in a private (IANA-reserved) address range
+ONAP_PUBLIC_NET_DESC = 'Public network created for ONAP, for unnamed ports, dynamic IP@, access to the Internet (e.g., Nexus repo) via Gateway'
+
+ONAP_PRIVATE_NET_NAME = 'ONAP_private_net'
+ONAP_PRIVATE_SUBNET_NAME = 'ONAP_private_subnet'
+ONAP_PRIVATE_SUBNET_CIDR = '10.0.0.0/16'
+# note: CIDR should match ONAP installation; Private and OAM may be the same network
+ONAP_PRIVATE_NET_DESC = 'Private network created for ONAP, for named ports, static IP@, inter-component communication'
+
+ONAP_OAM_NET_NAME = 'ONAP_OAM_net'
+ONAP_OAM_SUBNET_NAME = 'ONAP_OAM_subnet'
+ONAP_OAM_SUBNET_CIDR = '10.99.0.0/16'
+# note: CIDR should match ONAP installation; Private and OAM may be the same network
+ONAP_OAM_NET_DESC = 'OAM network created for ONAP, for named ports, static IP@, inter-component communication'
+
+ONAP_ROUTER_NAME = 'ONAP_router'
+ONAP_ROUTER_DESC = 'Router created for ONAP'
+
+# OpenStack instance external network (gateway) name to be used as router's gateway
+EXTERNAL_NETWORK_NAME = 'floating_net'
+
+# keypair that can be used to SSH into created servers (VNF VMs)
+ONAP_KEYPAIR_NAME = 'ONAP_keypair'
+
+# OpenStack cloud name and region name, which should be the same as in the clouds.yaml file used by this script
+OPENSTACK_CLOUD_NAME = 'unh-hpe-openstack-fraser'
+OPENSTACK_REGION_NAME = 'RegionOne'
+# note: OpenStack domain is: Default
+
+
+######################################################################
+# constants which could be parameters
+DNS_SERVER_IP = '8.8.8.8'
+# IP addresses of free public DNS service from Google:
+# - IPv4: 8.8.8.8 and 8.8.4.4
+# - IPv6: 2001:4860:4860::8888 and 2001:4860:4860::8844
+
+######################################################################
+# global variables
+DEBUG_VAR = False
+
+######################################################################
+# import statements
+import openstack
+import argparse
+import sys, traceback
+
+######################################################################
+def print_debug(*args):
+ if DEBUG_VAR:
+ for arg in args:
+ print ('***',arg)
+
+######################################################################
+def delete_all_ONAP():
+ """Delete all ONAP-specific OpenStack objects (normally not needed, but may be useful during tests, and for clean-up)."""
+ print('\nOPNFV Auto, script to delete ONAP objects in an OpenStack instance')
+
+ try:
+ # connect to OpenStack instance using Connection object from OpenStack SDK
+ print('Opening connection...')
+ conn = openstack.connect(
+ identity_api_version = 3, # must indicate Identity version (until fixed); can also be in clouds.yaml
+ cloud = OPENSTACK_CLOUD_NAME,
+ region_name = OPENSTACK_REGION_NAME)
+
+
+ # delete router; must delete router before networks (and must delete VMs before routers)
+ print('Deleting ONAP router...')
+ onap_router = conn.network.find_router(ONAP_ROUTER_NAME)
+ print_debug('onap_router:',onap_router)
+ if onap_router != None:
+
+ # delete router interfaces before deleting router
+ router_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+ if router_network != None:
+ if router_network.subnet_ids != None:
+ print_debug('router_network.subnet_ids:',router_network.subnet_ids)
+ for subnet_id in router_network.subnet_ids:
+ print(' Deleting interface to',ONAP_PUBLIC_NET_NAME,'...')
+ conn.network.remove_interface_from_router(onap_router, subnet_id)
+
+ router_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+ if router_network != None:
+ if router_network.subnet_ids != None:
+ print_debug('router_network.subnet_ids:',router_network.subnet_ids)
+ for subnet_id in router_network.subnet_ids:
+ print(' Deleting interface to',ONAP_PRIVATE_NET_NAME,'...')
+ conn.network.remove_interface_from_router(onap_router, subnet_id)
+
+ router_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+ if router_network != None:
+ if router_network.subnet_ids != None:
+ print_debug('router_network.subnet_ids:',router_network.subnet_ids)
+ for subnet_id in router_network.subnet_ids:
+ print(' Deleting interface to',ONAP_OAM_NET_NAME,'...')
+ conn.network.remove_interface_from_router(onap_router, subnet_id)
+
+ # and finally delete ONAP router
+ conn.network.delete_router(onap_router.id)
+
+ else:
+ print('No ONAP router found...')
+
+ # TODO@@@ verify if there are ports on networks (e.g., from VMs); if yes, can't delete network
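+        # A possible check for that TODO (a sketch, commented out, not validated here): a network
+        # that still holds ports should be skipped rather than deleted, e.g.
+        #   if any(conn.network.ports(network_id=private_network.id)):
+        #       print('Ports still attached to', ONAP_PRIVATE_NET_NAME, '- not deleting it')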
+
+ # delete private network (which should also delete associated subnet if any)
+ print('Deleting ONAP private network...')
+ private_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+ print_debug('private_network:',private_network)
+ if private_network != None:
+ conn.network.delete_network(private_network.id)
+ else:
+ print('No ONAP private network found...')
+
+ # delete OAM network (which should also delete associated subnet if any)
+ print('Deleting ONAP OAM network...')
+ oam_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+ print_debug('oam_network:',oam_network)
+ if oam_network != None:
+ conn.network.delete_network(oam_network.id)
+ else:
+ print('No ONAP OAM network found...')
+
+ # delete public network (which should also delete associated subnet if any)
+ print('Deleting ONAP public network...')
+ public_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+ print_debug('public_network:',public_network)
+ if public_network != None:
+ conn.network.delete_network(public_network.id)
+ else:
+ print('No ONAP public network found...')
+
+ # TODO@@@ verify if security group is in use (e.g., by a VM), otherwise can't delete it
+
+ # delete security group
+ print('Deleting ONAP security group...')
+ onap_security_group = conn.network.find_security_group(ONAP_SECU_GRP_NAME)
+ print_debug('onap_security_group:',onap_security_group)
+ if onap_security_group != None:
+ conn.network.delete_security_group(onap_security_group.id)
+ else:
+ print('No ONAP security group found...')
+
+ # delete user
+ print('Deleting ONAP user...')
+ onap_user = conn.identity.find_user(ONAP_USER_NAME)
+ print_debug('onap_user:',onap_user)
+ if onap_user != None:
+ conn.identity.delete_user(onap_user.id)
+ else:
+ print('No ONAP user found...')
+
+ # delete project/tenant
+ print('Deleting ONAP project...')
+ onap_project = conn.identity.find_project(ONAP_TENANT_NAME)
+ print_debug('onap_project:',onap_project)
+ if onap_project != None:
+ conn.identity.delete_project(onap_project.id)
+ else:
+ print('No ONAP project found...')
+
+ # delete keypair
+ print('Deleting ONAP keypair...')
+ onap_keypair = conn.compute.find_keypair(ONAP_KEYPAIR_NAME)
+ print_debug('onap_keypair:',onap_keypair)
+ if onap_keypair != None:
+ conn.compute.delete_keypair(onap_keypair.id)
+ else:
+ print('No ONAP keypair found...')
+
+ # no need to delete images and flavors
+
+
+ except Exception as e:
+ print('*** Exception:',type(e), e)
+ exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
+ print('*** traceback.print_tb():')
+ traceback.print_tb(exceptionTraceback)
+ print('*** traceback.print_exception():')
+ traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback)
+ print('[Script terminated]\n')
+
+ print('OPNFV Auto, end of deletion script\n')
+
+
+######################################################################
+def configure_all_ONAP():
+ """Configure all ONAP-specific OpenStack objects."""
+ print('\nOPNFV Auto, script to configure an OpenStack instance for ONAP')
+
+ try:
+ # connect to OpenStack instance using Connection object from OpenStack SDK
+ print('Opening connection...')
+ conn = openstack.connect(
+ identity_api_version = 3, # must indicate Identity version (until fixed); can also be in clouds.yaml
+ cloud = OPENSTACK_CLOUD_NAME,
+ region_name = OPENSTACK_REGION_NAME)
+
+
+ print('Creating ONAP project/tenant...')
+ onap_project = conn.identity.find_project(ONAP_TENANT_NAME)
+ if onap_project != None:
+ print('ONAP project/tenant already exists')
+ else:
+ onap_project = conn.identity.create_project(
+ name = ONAP_TENANT_NAME,
+ description = ONAP_TENANT_DESC,
+ is_enabled = True)
+ # domain: leave default
+ # project quotas (max #vCPUs, #instances, etc.): as conn.network.<*quota*>, using project id for quota id
+ # https://docs.openstack.org/openstacksdk/latest/user/proxies/network.html#quota-operations
+ # https://docs.openstack.org/openstacksdk/latest/user/resources/network/v2/quota.html#openstack.network.v2.quota.Quota
+ # conn.network.update_quota(project_id = onap_project.id)
+ # SDK for quotas supports floating_ips, networks, ports, etc. but not vCPUs or instances
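+            # A possible quota bump for the note above (a sketch, commented out; the attribute names
+            # networks/subnets/ports/routers/floating_ips are Neutron quota fields, and the values
+            # are arbitrary placeholders to adapt to the target instance):
+            # conn.network.update_quota(onap_project.id,
+            #                           networks=64, subnets=128, ports=512,
+            #                           routers=16, floating_ips=32)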
+ print_debug('onap_project:',onap_project)
+
+
+ print('Creating ONAP user...')
+ onap_user = conn.identity.find_user(ONAP_USER_NAME)
+ if onap_user != None:
+ print('ONAP user already exists')
+ else:
+ onap_user = conn.identity.create_user(
+ name = ONAP_USER_NAME,
+ description = ONAP_USER_DESC,
+ default_project_id = onap_project.id,
+ password = ONAP_USER_PASSWORD,
+ is_enabled = True)
+ # domain: leave default
+ # default_project_id: primary project
+ print_debug('onap_user:',onap_user)
+
+ # TODO@@@ assign Member role to ONAP user in ONAP project
+ # membership_role = conn.identity.find_role('Member')
+ # onap_project.assign_role_to_user(conn, onap_user, membership_role) # no project membership method yet in connection proxy
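+        # A possible approach for that TODO (a sketch, commented out; assumes this SDK version
+        # exposes assign_project_role_to_user() on the identity proxy, and that the role is
+        # named 'Member' - some installations use '_member_' instead):
+        # member_role = conn.identity.find_role('Member')
+        # if member_role is not None:
+        #     conn.identity.assign_project_role_to_user(onap_project, onap_user, member_role)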
+
+ # TODO@@@ maybe logout and log back in as ONAP user
+
+ # make sure security group allows ICMP (for ping) and SSH (TCP port 22) traffic; also IPv4/v6 traffic ingress and egress
+ # create new onap_security_group (or maybe just "default" security group ? tests returned multiple "default" security groups)
+ # security group examples: check http://git.openstack.org/cgit/openstack/openstacksdk/tree/examples/network/security_group_rules.py
+ # if rule already exists, OpenStack returns an error, so just try (no harm); try each separately
+ # (SecurityGroup is a Resource)
+ print('Creating ONAP security group...')
+ onap_security_group = conn.network.find_security_group(ONAP_SECU_GRP_NAME)
+ if onap_security_group != None:
+ print('ONAP security group already exists')
+ else:
+ onap_security_group = conn.network.create_security_group(
+ #project_id = onap_project.id,
+ description = ONAP_SECU_GRP_DESC,
+ name = ONAP_SECU_GRP_NAME)
+ print_debug('onap_security_group:',onap_security_group)
+
+ try:
+ description_text = 'enable ICMP ingress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'ICMP',
+ direction = 'ingress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable ICMP egress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'ICMP',
+ direction = 'egress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable SSH (TCP port 22) ingress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'TCP',
+ direction = 'ingress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = '22',
+ port_range_max = '22')
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable SSH (TCP port 22) egress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'TCP',
+ direction = 'egress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = '22',
+ port_range_max = '22')
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable IP traffic ingress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'ingress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable IP traffic ingress IPv6'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'ingress',
+ ethertype = 'IPv6',
+ remote_ip_prefix = '::/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ # IPv4 IP egress rule should already exist by default
+ try:
+ description_text = 'enable IP traffic egress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'egress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ # IPv6 IP egress rule should already exist by default
+ try:
+ description_text = 'enable IP traffic egress IPv6'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'egress',
+ ethertype = 'IPv6',
+ remote_ip_prefix = '::/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+
+ # public network
+ print('Creating ONAP public network...')
+ public_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+ public_subnet = None
+ if public_network != None:
+ print('ONAP public network already exists')
+ else:
+ public_network = conn.network.create_network(
+ name = ONAP_PUBLIC_NET_NAME,
+ description = ONAP_PUBLIC_NET_DESC,
+ #project_id = onap_project.id,
+ is_admin_state_up = True,
+ is_shared = True)
+ # subnet_ids = []: not needed, subnet refers to network_id
+ print_debug('public_network: before subnet',public_network)
+
+ print(' Creating subnetwork for ONAP public network...')
+ public_subnet = conn.network.create_subnet(
+ name = ONAP_PUBLIC_SUBNET_NAME,
+ #project_id = onap_project.id,
+ network_id = public_network.id,
+ cidr = ONAP_PUBLIC_SUBNET_CIDR,
+ ip_version = 4,
+ is_dhcp_enabled = True,
+ dns_nameservers = [DNS_SERVER_IP]) # list of DNS IP@
+ print_debug('public_subnet:',public_subnet)
+ print_debug('public_network: after subnet',public_network)
+
+
+ # private network
+ print('Creating ONAP private network...')
+ private_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+ private_subnet = None
+ if private_network != None:
+ print('ONAP private network already exists')
+ else:
+ private_network = conn.network.create_network(
+ name = ONAP_PRIVATE_NET_NAME,
+ description = ONAP_PRIVATE_NET_DESC,
+ #project_id = onap_project.id,
+ is_admin_state_up = True,
+ is_shared = True)
+ print_debug('private_network: before subnet',private_network)
+
+ print(' Creating subnetwork for ONAP private network...')
+ private_subnet = conn.network.create_subnet(
+ name = ONAP_PRIVATE_SUBNET_NAME,
+ #project_id = onap_project.id,
+ network_id = private_network.id,
+ cidr = ONAP_PRIVATE_SUBNET_CIDR,
+ ip_version = 4,
+ is_dhcp_enabled = True,
+ dns_nameservers = [DNS_SERVER_IP]) # list of DNS IP@; maybe not needed for private network
+ print_debug('private_subnet:',private_subnet)
+ print_debug('private_network: after subnet',private_network)
+
+
+ # OAM network
+ print('Creating ONAP OAM network...')
+ oam_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+ oam_subnet = None
+ if oam_network != None:
+ print('ONAP OAM network already exists')
+ else:
+ oam_network = conn.network.create_network(
+ name = ONAP_OAM_NET_NAME,
+ description = ONAP_OAM_NET_DESC,
+ #project_id = onap_project.id,
+ is_admin_state_up = True,
+ is_shared = True)
+ print_debug('oam_network: before subnet',oam_network)
+
+ print(' Creating subnetwork for ONAP OAM network...')
+ oam_subnet = conn.network.create_subnet(
+ name = ONAP_OAM_SUBNET_NAME,
+ #project_id = onap_project.id,
+ network_id = oam_network.id,
+ cidr = ONAP_OAM_SUBNET_CIDR,
+ ip_version = 4,
+ is_dhcp_enabled = True,
+ dns_nameservers = [DNS_SERVER_IP]) # list of DNS IP@; maybe not needed for OAM network
+ print_debug('oam_subnet:',oam_subnet)
+ print_debug('oam_network: after subnet',oam_network)
+
+
+ # router
+ print('Creating ONAP router...')
+ onap_router = conn.network.find_router(ONAP_ROUTER_NAME)
+ if onap_router != None:
+ print('ONAP router already exists')
+ else:
+
+ # build dictionary for external network (gateway)
+ external_network = conn.network.find_network(EXTERNAL_NETWORK_NAME)
+ print_debug('external_network:',external_network)
+ external_subnet_ID_list = external_network.subnet_ids
+ print_debug('external_subnet_ID_list:',external_subnet_ID_list)
+ # build external_fixed_ips: list of dictionaries, each with 'subnet_id' key (and may have 'ip_address' key as well)
+ onap_gateway_external_subnets = []
+ for ext_subn_id in external_subnet_ID_list: # there should be only one subnet ID in the list, but go through each item, just in case
+ onap_gateway_external_subnets.append({'subnet_id':ext_subn_id})
+ print_debug('onap_gateway_external_subnets:',onap_gateway_external_subnets)
+ network_dict_body = {
+ 'network_id': external_network.id,
+ 'enable_snat': True, # True should be the default, so there should be no need to set it
+ 'external_fixed_ips': onap_gateway_external_subnets
+ }
+ print_debug('network_dict_body:',network_dict_body)
+
+ onap_router = conn.network.create_router(
+ name = ONAP_ROUTER_NAME,
+ description = ONAP_ROUTER_DESC,
+ #project_id = onap_project.id,
+                external_gateway_info = network_dict_body, # linking GW to router at creation time (alternatively, could use add_gateway_to_router)
+ is_admin_state_up = True)
+ print_debug('onap_router: after creation',onap_router)
+
+ # add interfaces to ONAP networks: Public, Private, and OAM
+ # syntax: add_interface_to_router(router, subnet_id=None, port_id=None)
+ print('Adding interface to ONAP router for ONAP public network...')
+ conn.network.add_interface_to_router(onap_router, subnet_id = public_subnet.id)
+ print('Adding interface to ONAP router for ONAP private network...')
+ conn.network.add_interface_to_router(onap_router, subnet_id = private_subnet.id)
+ print('Adding interface to ONAP router for ONAP OAM network...')
+ conn.network.add_interface_to_router(onap_router, subnet_id = oam_subnet.id)
+ print_debug('onap_router: after adding interfaces',onap_router)
+
+
+ # also create 5 flavors, from tiny to xlarge (hard-coded, no need for parameters)
+ # (Flavor is a Resource)
+ print('Creating flavors...')
+ print('Creating m1.tiny Flavor...')
+ tiny_flavor = conn.compute.find_flavor("m1.tiny")
+ if tiny_flavor != None:
+ print('m1.tiny Flavor already exists')
+ else:
+ tiny_flavor = conn.compute.create_flavor(
+ name = 'm1.tiny',
+ vcpus = 1,
+ disk = 1,
+ ram = 512,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('tiny_flavor: ',tiny_flavor)
+
+ print('Creating m1.small Flavor...')
+ small_flavor = conn.compute.find_flavor("m1.small")
+ if small_flavor != None:
+ print('m1.small Flavor already exists')
+ else:
+ small_flavor = conn.compute.create_flavor(
+ name = 'm1.small',
+ vcpus = 1,
+ disk = 20,
+ ram = 2048,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('small_flavor: ',small_flavor)
+
+ print('Creating m1.medium Flavor...')
+ medium_flavor = conn.compute.find_flavor("m1.medium")
+ if medium_flavor != None:
+ print('m1.medium Flavor already exists')
+ else:
+ medium_flavor = conn.compute.create_flavor(
+ name = 'm1.medium',
+ vcpus = 2,
+ disk = 40,
+ ram = 4096,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('medium_flavor: ',medium_flavor)
+
+ print('Creating m1.large Flavor...')
+ large_flavor = conn.compute.find_flavor("m1.large")
+ if large_flavor != None:
+ print('m1.large Flavor already exists')
+ else:
+ large_flavor = conn.compute.create_flavor(
+ name = 'm1.large',
+ vcpus = 4,
+ disk = 80,
+ ram = 8192,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('large_flavor: ',large_flavor)
+
+ print('Creating m1.xlarge Flavor...')
+ xlarge_flavor = conn.compute.find_flavor("m1.xlarge")
+ if xlarge_flavor != None:
+ print('m1.xlarge Flavor already exists')
+ else:
+ xlarge_flavor = conn.compute.create_flavor(
+ name = 'm1.xlarge',
+ vcpus = 8,
+ disk = 160,
+ ram = 16384,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('xlarge_flavor: ',xlarge_flavor)
+
+
+ # create images: Ubuntu 16.04, 14.04, CirrOS, ...
+ # store them in images/ directory
+ # 64-bit QCOW2 image for cirros-0.4.0-x86_64-disk.img
+ # description: CirrOS minimal Linux distribution
+ # http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+ # user/password: cirros/gocubsgo
+
+ # 64-bit QCOW2 image for Ubuntu 16.04 is xenial-server-cloudimg-amd64-disk1.img
+ # description: Ubuntu Server 16.04 LTS (Xenial Xerus)
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ # user: ubuntu
+
+ # 64-bit QCOW2 image for Ubuntu 14.04 is trusty-server-cloudimg-amd64-disk1.img
+ # description: Ubuntu Server 14.04 LTS (Trusty Tahr)
+ # http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ # user: ubuntu
+
+ # do not use compute proxy for images; there is an image proxy (v1, and v2);
+ # use shade layer, directly with Connection object: Connection.create_image()
+ # conn.get_image() returns a Python Munch object (subclass of Dictionary)
+ # However, URL download not supported yet; download image separately, place it in the directory
+ # https://docs.openstack.org/openstacksdk/latest/user/connection.html#openstack.connection.Connection.create_image
+ # image proxy: conn.image.upload_image()
+ # Image class:
+ # https://docs.openstack.org/openstacksdk/latest/user/resources/image/v2/image.html#openstack.image.v2.image.Image
+ # URL should be supported by image proxy
+
+ # TODO@@@ try image v2 proxy, if it supports URLs;
+ # maybe load only images for current CPU (i.e. only x86 images for x86, only Arm images for Arm)
+    # TODO@@@ list image names/URLs in a dictionary, and load them in a loop
+
+ # Pattern: prepare an attribute dictionary, then call conn.image.upload_image()
+ # image_attributes_dict = {}
+ # image_attributes_dict['name']='cirros-0.4.0-aarch64-disk.img'
+ # image_attributes_dict['url']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img'
+ # conn.image.upload_image(disk_format='qcow2',**image_attributes_dict)
+
+ # With a dictionary of names/URLs :
+ # image_ref_dict = {}
+ # image_ref_dict['cirros-0.4.0-x86_64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img'
+ # image_ref_dict['cirros-0.4.0-arm-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img'
+ # image_ref_dict['cirros-0.4.0-aarch64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img'
+ # etc.
+ # for image_name in image_ref_dict:
+ # image_attributes_dict['name'] = image_name
+ # image_attributes_dict['url'] = image_ref_dict[image_name]
+ # conn.image.upload_image(disk_format='qcow2',**image_attributes_dict)
+
+
+ # Create and populate image dictionary
+ image_ref_dict = {}
+
+ # Ubuntu 16.04 LTS (Xenial Xerus) images
+ image_ref_dict['xenial-server-cloudimg-amd64-disk1.img']='https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img'
+ image_ref_dict['xenial-server-cloudimg-arm64-disk1.img']='https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img'
+
+ # Ubuntu 14.04.5 LTS (Trusty Tahr) images
+ image_ref_dict['trusty-server-cloudimg-amd64-disk1.img']='http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img'
+ image_ref_dict['trusty-server-cloudimg-arm64-disk1.img']='http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img'
+
+ # CirrOS images
+ image_ref_dict['cirros-0.4.0-x86_64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img'
+ image_ref_dict['cirros-0.4.0-arm-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img'
+ image_ref_dict['cirros-0.4.0-aarch64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img'
+
+
+ # if URL-based upload using image proxy works, it will replace the section below which assumes image files
+ # are in a subdirectory, and uses Connection.create_image() instead of Connection.image.upload_image()
+ IMAGES_DIR = 'images/'
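+
+        # A possible pre-download step (a sketch, commented out; assumes the host running this
+        # script has direct Internet access, and uses urllib.request from the standard library):
+        # import urllib.request
+        # for image_file_name, image_url in image_ref_dict.items():
+        #     urllib.request.urlretrieve(image_url, IMAGES_DIR + image_file_name)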
+
+ IMAGE_NAME = 'CirrOS_0.4.0_minimal_Linux_distribution x86'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'cirros-0.4.0-x86_64-disk.img')
+
+ IMAGE_NAME = 'CirrOS_0.4.0_minimal_Linux_distribution ARM'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'cirros-0.4.0-arm-disk.img')
+
+ IMAGE_NAME = 'CirrOS_0.4.0_minimal_Linux_distribution AARCH64'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'cirros-0.4.0-aarch64-disk.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_16.04_LTS_Xenial_Xerus x86'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'xenial-server-cloudimg-amd64-disk1.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_16.04_LTS_Xenial_Xerus ARM64'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'xenial-server-cloudimg-arm64-disk1.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_14.04_LTS_Trusty_Tahr x86'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'trusty-server-cloudimg-amd64-disk1.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_14.04_LTS_Trusty_Tahr ARM64'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'trusty-server-cloudimg-arm64-disk1.img')
+ # End section with Connection.create_image()
+
+
+ # create a keypair, if needed e.g. for VNF VMs; maybe to SSH for testing
+ # (Keypair is a Resource)
+ print('Creating ONAP keypair...')
+ onap_keypair = conn.compute.find_keypair(ONAP_KEYPAIR_NAME)
+ if onap_keypair != None:
+ print('ONAP keypair already exists')
+ else:
+ onap_keypair = conn.compute.create_keypair(name=ONAP_KEYPAIR_NAME)
+ print(' ONAP keypair fingerprint:')
+ print(onap_keypair.fingerprint)
+ print(' ONAP keypair public key:')
+ print(onap_keypair.public_key)
+ print(' \nONAP keypair private key: (save it in a file now: it cannot be retrieved later)')
+ print(onap_keypair.private_key)
+ print_debug('onap_keypair:',onap_keypair)
+
+
+ print('\nSUMMARY:')
+ # Grab live objects (don't reuse earlier references), in case the script is used on an already configured instance
+ # This way, the summary is still displayed even if the script execution did not create anything
+ # Also, this double-checks that displayed information is accurate, freshly retrieved from the OpenStack instance
+
+ public_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+ if public_network != None:
+ print('ONAP public network ID:',public_network.id)
+ for fetched_subnet_ID in public_network.subnet_ids:
+ fetched_subnet = conn.network.get_subnet(fetched_subnet_ID)
+ if fetched_subnet != None:
+ print(' ONAP public network subnet ID:',fetched_subnet.id)
+ print(' ONAP public network subnet CIDR:',fetched_subnet.cidr)
+ else:
+ print('no ONAP public network')
+
+ private_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+ if private_network != None:
+ print('ONAP private network ID:',private_network.id)
+ for fetched_subnet_ID in private_network.subnet_ids:
+ fetched_subnet = conn.network.get_subnet(fetched_subnet_ID)
+ if fetched_subnet != None:
+ print(' ONAP private network subnet ID:',fetched_subnet.id)
+ print(' ONAP private network subnet CIDR:',fetched_subnet.cidr)
+ else:
+ print('no ONAP private network')
+
+ oam_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+ if oam_network != None:
+ print('ONAP OAM network ID:',oam_network.id)
+ for fetched_subnet_ID in oam_network.subnet_ids:
+ fetched_subnet = conn.network.get_subnet(fetched_subnet_ID)
+ if fetched_subnet != None:
+ print(' ONAP OAM network subnet ID:',fetched_subnet.id)
+ print(' ONAP OAM network subnet CIDR:',fetched_subnet.cidr)
+ else:
+ print('no ONAP OAM network')
+ print('END SUMMARY\n')
+
+
+ except Exception as e:
+ print('*** Exception:',type(e), e)
+ exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
+ print('*** traceback.print_tb():')
+ traceback.print_tb(exceptionTraceback)
+ print('*** traceback.print_exception():')
+ traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback)
+ print('[Script terminated]\n')
+
+
+ print('OPNFV Auto, end of configuration script\n')
+
+
+
+######################################################################
+def main():
+
+ # configure argument parser: 2 optional arguments
+ # "-del" or "--delete" option to delete ONAP configuration in OpenStack
+    # (if no "-del" or "--delete", then configure OpenStack for ONAP)
+ # "-deb" or "--debug" option to display debug information
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-deb', '--debug',
+ help = 'display debug information during execution',
+ action = 'store_true')
+ parser.add_argument('-del', '--delete',
+ help = 'delete ONAP configuration',
+ action = 'store_true')
+
+ # parse arguments, modify global variable if need be, and use corresponding script (create objects, or delete objects)
+ args = parser.parse_args()
+ if args.debug:
+ global DEBUG_VAR
+ DEBUG_VAR = True
+ if args.delete:
+ delete_all_ONAP()
+ else:
+ configure_all_ONAP()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/setup/VIMs/OpenStack/clouds.yaml b/setup/VIMs/OpenStack/clouds.yaml
new file mode 100644
index 0000000..7bfd717
--- /dev/null
+++ b/setup/VIMs/OpenStack/clouds.yaml
@@ -0,0 +1,99 @@
+clouds:
+
+ # Openstack instance on Arm pod, controller IP@ 172.16.10.10
+ # Horizon: https://10.10.50.103/project/
+ # Identity API according to Horizon dashboard: https://10.10.50.103:5000/v2.0
+ # other potential auth_url: http://172.16.10.10:35357/v3
+ # (OS_AUTH_URL=http://controller:35357/v3)
+ # 2 project names: admin, service (project = tenant)
+ # project ID: 122caf64b3df4818bf2ce5ba793226b2
+ # EC2 URL: https://10.10.50.103:8773/services/Cloud
+ # EC2 access key: bcf3c69a7d1c405e9757f87f26faf19f
+ # 10.10.50.0/8: floating IP@
+ # 10.10.10.0/8: fixed IP@
+ armopenstack:
+ auth:
+ auth_url: https://10.10.50.103:5000/v2.0
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Euphrates, controller IP@ (mgt: 172.16.10.101; public: 10.16.0.101)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.101:35357/v2.0
+ # internal: http://172.16.10.101:5000/v2.0
+ # public: http://10.16.0.101:5000/v2.0 : works on LaaS hpe16, from hpe16
+ hpe16openstackEuphrates:
+ auth:
+ auth_url: http://10.16.0.101:5000/v2.0
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ region_name: RegionOne
+
+ # Openstack instance on generic LaaS hpe, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.36:35357/v3
+ # internal: http://172.16.10.36:5000/v3
+ # public: http://10.16.0.107:5000/v3
+  # Horizon: https://10.16.0.107:8078, but needs SSH port forwarding through 10.10.100.26 to be reached from outside
+ # "If you are using Identity v3 you need to specify the user and the project domain name"
+
+ # generic cloud name, for a UNH IOL hpe server, for OPNFV Fraser, OpenStack installed by Fuel/MCP
+ unh-hpe-openstack-fraser:
+ auth:
+ auth_url: http://10.16.0.107:5000/v3
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ user_domain_name: Default
+ project_domain_name: Default
+ region_name: RegionOne
+ identity_api_version: 3
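+
+# A minimal usage sketch from Python (openstacksdk), assuming this file is placed where the SDK
+# looks for it (e.g. the working directory or ~/.config/openstack/):
+#   import openstack
+#   conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')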
+
+# ubuntu@ctl01:~$ openstack project show admin
+# +-------------+----------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------+
+# | description | OpenStack Admin tenant |
+# | domain_id | default |
+# | enabled | True |
+# | id | 04fcfe7aa83f4df79ae39ca748aa8637 |
+# | is_domain | False |
+# | name | admin |
+# | parent_id | default |
+# +-------------+----------------------------------+
+
+# (openstack) domain show default
+# +-------------+----------------------------------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------------------------------+
+# | description | Domain created automatically to support V2.0 operations. |
+# | enabled | True |
+# | id | default |
+# | name | Default |
+# +-------------+----------------------------------------------------------+
+
+# (openstack) domain show heat_user_domain
+# +-------------+---------------------------------------------+
+# | Field | Value |
+# +-------------+---------------------------------------------+
+# | description | Contains users and projects created by heat |
+# | enabled | True |
+# | id | d9c29adac0fe4816922d783b257879d6 |
+# | name | heat_user_domain |
+# +-------------+---------------------------------------------+
+
+
+# export OS_AUTH_URL=http://10.16.0.107:5000/v3
+# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
+# export OS_PROJECT_NAME="admin"
+# export OS_USER_DOMAIN_NAME="Default"
+# export OS_USERNAME="admin"
+# export OS_PASSWORD="opnfv_secret"
+# export OS_REGION_NAME="RegionOne"
+# export OS_INTERFACE=public
+# export OS_IDENTITY_API_VERSION=3
+
+
diff --git a/setup/onap_on_openstack/onap_os_builder.py b/setup/onap_on_openstack/onap_os_builder.py
index b6c5608..b85d301 100644
--- a/setup/onap_on_openstack/onap_os_builder.py
+++ b/setup/onap_on_openstack/onap_os_builder.py
@@ -121,9 +121,9 @@ class ONAP_os_builder(object):
dcae_prikey_data = util.read_file(prikey_path).strip('\n')
dcae_pubkey_data = util.read_file(pubkey_path).strip('\n')
user_config.update({'dcae_public_key':
- literal_unicode(dcae_prikey_data)})
- user_config.update({'dcae_private_key':
literal_unicode(dcae_pubkey_data)})
+ user_config.update({'dcae_private_key':
+ literal_unicode(dcae_prikey_data)})
public_net_id = os_lib.get_network_id(
self.neutron_client,
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..69aa189
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
diff --git a/yamllintrc b/yamllintrc
new file mode 100644
index 0000000..a4f3d02
--- /dev/null
+++ b/yamllintrc
@@ -0,0 +1,25 @@
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+extends: relaxed
+
+rules:
+ empty-lines:
+ max-start: 1
+ max-end: 1
+ colons:
+ max-spaces-after: 1
+ max-spaces-before: 1
+ line-length:
+ max: 160