aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore11
-rw-r--r--.gitreview5
-rw-r--r--INFO24
-rw-r--r--INFO.yaml52
-rw-r--r--README.md39
-rwxr-xr-xcheck223
-rwxr-xr-xci/build-auto.sh222
-rwxr-xr-xci/deploy-onap-fuel.sh238
-rwxr-xr-xci/deploy-onap-kubespray.sh339
-rwxr-xr-xci/deploy-onap.sh376
-rw-r--r--ci/deploy-opnfv-apex-centos.sh209
-rw-r--r--ci/deploy-opnfv-compass-ubuntu.sh201
-rw-r--r--ci/deploy-opnfv-daisy-centos.sh179
-rw-r--r--ci/deploy-opnfv-fuel-ubuntu.sh199
-rwxr-xr-xci/plot-results.sh101
-rw-r--r--docs/conf.py1
-rw-r--r--docs/conf.yaml3
-rw-r--r--docs/index.rst18
-rw-r--r--docs/release/configguide/Auto-featureconfig.rst331
-rw-r--r--docs/release/configguide/Auto-postinstall.rst28
-rw-r--r--docs/release/configguide/auto-OPFNV-fuel.pngbin0 -> 41457 bytes
-rw-r--r--docs/release/configguide/auto-OS-config4ONAP.pngbin0 -> 134668 bytes
-rw-r--r--docs/release/configguide/auto-installTarget-ONAP-B.pngbin0 -> 50086 bytes
-rw-r--r--docs/release/configguide/auto-installTarget-generic.pngbin0 -> 41926 bytes
-rw-r--r--docs/release/configguide/auto-installTarget-initial.pngbin0 -> 35994 bytes
-rw-r--r--docs/release/configguide/auto-repo-folders.pngbin0 -> 36136 bytes
-rw-r--r--docs/release/configguide/index.rst16
-rw-r--r--docs/release/release-notes/Auto-release-notes.rst490
-rw-r--r--docs/release/release-notes/ONAP-toplevel-beijing.pngbin0 -> 383760 bytes
-rw-r--r--docs/release/release-notes/auto-proj-openstacksummit1805.pngbin0 -> 10928 bytes
-rw-r--r--docs/release/release-notes/auto-proj-parameters.pngbin0 -> 32716 bytes
-rw-r--r--docs/release/release-notes/auto-proj-rn01.pngbin0 -> 115670 bytes
-rw-r--r--docs/release/release-notes/auto-proj-tests.pngbin0 -> 33348 bytes
-rw-r--r--docs/release/release-notes/auto-project-activities.pngbin0 -> 25995 bytes
-rw-r--r--docs/release/release-notes/index.rst14
-rw-r--r--docs/release/userguide/UC01-feature.userguide.rst78
-rw-r--r--docs/release/userguide/UC02-feature.userguide.rst176
-rw-r--r--docs/release/userguide/UC03-feature.userguide.rst112
-rw-r--r--docs/release/userguide/auto-UC02-TC-mapping.pngbin0 -> 48301 bytes
-rw-r--r--docs/release/userguide/auto-UC02-cardinalities.pngbin0 -> 36684 bytes
-rw-r--r--docs/release/userguide/auto-UC02-control-loop-flow.pngbin0 -> 74976 bytes
-rw-r--r--docs/release/userguide/auto-UC02-data1.jpgbin0 -> 51570 bytes
-rw-r--r--docs/release/userguide/auto-UC02-data2.jpgbin0 -> 217832 bytes
-rw-r--r--docs/release/userguide/auto-UC02-data3.jpgbin0 -> 274235 bytes
-rw-r--r--docs/release/userguide/auto-UC02-logic.pngbin0 -> 39141 bytes
-rw-r--r--docs/release/userguide/auto-UC02-module1.jpgbin0 -> 156059 bytes
-rw-r--r--docs/release/userguide/auto-UC02-module2.jpgbin0 -> 43610 bytes
-rw-r--r--docs/release/userguide/auto-UC02-pattern.jpgbin0 -> 296889 bytes
-rw-r--r--docs/release/userguide/auto-UC02-preparation.jpgbin0 -> 297095 bytes
-rw-r--r--docs/release/userguide/auto-UC02-testcases.jpgbin0 -> 219582 bytes
-rw-r--r--docs/release/userguide/auto-UC03-TC-archit.pngbin0 -> 47579 bytes
-rw-r--r--docs/release/userguide/auto-UC03-TestCases.pngbin0 -> 20920 bytes
-rw-r--r--docs/release/userguide/index.rst26
-rw-r--r--docs/requirements.txt2
-rw-r--r--lib/auto/__init__.py0
-rw-r--r--lib/auto/testcase/EdgeCloud/AutoOSPlatCheck.py164
-rw-r--r--lib/auto/testcase/EdgeCloud/AutoOSPlatTest.py80
-rw-r--r--lib/auto/testcase/EdgeCloud/__init__.py0
-rw-r--r--lib/auto/testcase/resiliency/AutoResilGlobal.py51
-rw-r--r--lib/auto/testcase/resiliency/AutoResilItfCloud.py279
-rw-r--r--lib/auto/testcase/resiliency/AutoResilItfOS.py43
-rw-r--r--lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py42
-rw-r--r--lib/auto/testcase/resiliency/AutoResilMain.py187
-rw-r--r--lib/auto/testcase/resiliency/AutoResilMgTestDef.py1854
-rw-r--r--lib/auto/testcase/resiliency/AutoResilRunTest.py59
-rw-r--r--lib/auto/testcase/resiliency/clouds.yaml99
-rw-r--r--lib/auto/testcase/vnf/vbng/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env35
-rw-r--r--lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml288
-rw-r--r--lib/auto/testcase/vnf/vbrgemu/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env28
-rw-r--r--lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml253
-rw-r--r--lib/auto/testcase/vnf/vgmux/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env35
-rw-r--r--lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml281
-rw-r--r--lib/auto/testcase/vnf/vgw/MANIFEST.json17
-rw-r--r--lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env32
-rw-r--r--lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml261
-rw-r--r--lib/auto/util/__init__.py0
-rw-r--r--lib/auto/util/openstack_lib.py332
-rw-r--r--lib/auto/util/util.py86
-rw-r--r--lib/auto/util/yaml_type.py12
-rwxr-xr-xprepare.sh24
-rw-r--r--pylintrc561
-rw-r--r--requirements.txt10
-rw-r--r--setup.py29
-rw-r--r--setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py923
-rw-r--r--setup/VIMs/OpenStack/clouds.yaml99
-rw-r--r--setup/onap_on_openstack/__init__.py0
-rw-r--r--setup/onap_on_openstack/config.yml64
-rw-r--r--setup/onap_on_openstack/launch_onap.py39
-rw-r--r--setup/onap_on_openstack/onap_os_builder.py151
-rw-r--r--tox.ini17
-rw-r--r--vcpe_spinup.sh99
-rw-r--r--vfw_spinup.sh53
-rw-r--r--vpn_subscribe.sh220
-rw-r--r--vpn_unsubscribe.sh220
-rw-r--r--yamllintrc25
98 files changed, 10792 insertions, 24 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f6b7eea
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+*.swp
+*.pyc
+/venv
+/work
+/lib/auto.egg-info
+/build
+/dist
+/docs_output
+/opnfvdocs
+.tox
+docs/_build/*
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..35df001
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,5 @@
+[gerrit]
+host=gerrit.opnfv.org
+port=29418
+project=auto.git
+defaultbranch=master
diff --git a/INFO b/INFO
deleted file mode 100644
index c9a2d74..0000000
--- a/INFO
+++ /dev/null
@@ -1,24 +0,0 @@
-Project: ONAP-Automated OPNFV (Auto)
-Project Creation Date: August 15, 2017
-Project Category:
-Lifecycle State: Incubation
-Primary Contact: tina.tsou@arm.com
-Project Lead: tina.tsou@arm.com
-Jira Project Name: ONAP-Automated OPNFV
-Jira Project Prefix: AUTO
-Mailing list tag: [auto]
-IRC: Server:freenode.net Channel:#opnfv-auto
-Repository: auto
-
-Committers:
-tina.tsou@arm.com
-huangxiangyu5@huawei.com
-song.zhu@arm.com
-prasad.gorja@nxp.com
-oul.gd@chinatelecom.cn
-chenlei@caict.ac.cn
-wxy_cttl@126.com
-luxu_hd@163.com
-msambashivaiah@mvista.com
-
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..69e5b01
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,52 @@
+---
+project: 'ONAP-Automated OPNFV (Auto)'
+project_creation_date: 'August 15, 2017'
+project_category: ''
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_auto_ptl
+ name: 'Tina Tsou'
+ email: 'tina.tsou@arm.com'
+ id: 'tinatsou'
+ company: 'arm.com'
+ timezone: 'Unknown'
+primary_contact: *opnfv_auto_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/AUTO'
+ key: 'AUTO'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[auto]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-auto'
+meetings:
+ - type: 'zoom+irc'
+ agenda: 'https://wiki.opnfv.org/display/AUTO/Auto+Project+Meetings'
+ url: 'https://zoom.us/j/2362828999'
+ server: 'freenode.net'
+ channel: '#opnfv-auto'
+ repeats: 'weekly, mondays'
+ time: '14:00 UTC'
+repositories:
+ - 'auto'
+committers:
+ - <<: *opnfv_auto_ptl
+ - name: 'Harry Huang'
+ email: 'huangxiangyu5@huawei.com'
+ company: 'huawei.com'
+ id: 'huangxiangyu'
+ - name: 'Paul Vaduva'
+ email: 'paul.vaduva@enea.com'
+ company: 'enea.com'
+ id: 'pvaduva'
+ - name: 'Martin Klozik'
+ email: 'martin.klozik@tieto.com'
+ company: 'tieto.com'
+ id: 'mklozik'
+tsc:
+ # yamllint disable rule:line-length
+  approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html'
+ # yamllint enable rule:line-length
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..21e6bc9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,39 @@
+Auto
+====
+
+#### Recent Changes ####
+- Add util modules for common use in project
+- Add scripts to setup ONAP (Currently only on OpenStack)
+
+
+#### Current Code Structure ####
+
+ ├── auto # Auto modules
+ │   ├── __init__.py
+ │   └── util # util modules
+ │   ├── __init__.py
+ │   ├── openstack_lib.py
+ │   ├── util.py
+ │   └── yaml_type.py
+ ├── prepare.sh # prepare virtual env, install Auto modules
+ ├── requirements.txt
+ ├── setup # scripts to setup ONAP
+ │   └── onap_on_openstack # set ONAP on OpenStack using heat
+ │   ├── config.yml
+ │   ├── __init__.py
+ │   ├── launch_onap.py
+ │   └── onap_os_builder.py
+ └── setup.py # setup Auto modules
+
+#### Setup ONAP ####
+A working ONAP environment is required before other test activity aiming for ONAP can be carried out.
+
+**Usage**:
+
+1. run command:
+
+ bash prepare.sh
+2. configure setup/onap_on_openstack/config.yml
+3. under setup/onap_on_openstack/ run command:
+
+ python launch_onap.py -c config.yml
diff --git a/check b/check
new file mode 100755
index 0000000..0428fa6
--- /dev/null
+++ b/check
@@ -0,0 +1,223 @@
+#!/bin/bash
+
+# Copyright 2017-2018 Intel Corporation, Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto project python & yaml formatting checker
+# This script is based on the file ./check from OPNFV vswitchperf
+# project.
+
+#
+# Configuration
+#
+PYLINT="pylint"
+PYLINT_RC='pylintrc'
+PYTHON_FILE_REGEX="\.py$"
+YAMLLINT="yamllint"
+YAMLLINT_RC='yamllintrc'
+YAML_FILE_REGEX="\.yaml$"
+FILE_LIST="/tmp/auto_check_list.txt"
+
+CHECK_PYTHON=0
+CHECK_YAML=0
+
+#
+# Support Functions
+#
+# print usage if requested
+function usage() {
+ cat <<EOM
+Usage: $0 [TARGET]...
+
+Performs code check for defined TARGETs. Target can be file or directory.
+In case that directory is specified, then it will be searched recursively
+for all python and yaml files.
+If TARGET is not specified, then all python and yaml files from current AUTO
+repository will be checked.
+
+
+ -h, --help Script usage
+ -b, --black Suppress colours. Output will be black&white.
+ -m, --modified Script will check python and yaml files, which have
+ been modified within current repository.
+
+Examples:
+ ./check
+
+ Check all python and yaml files in current AUTO repository
+
+ ./check INFO.yaml
+
+ Check just one file.
+
+ ./check -m
+
+ Check all modified files in current AUTO repository
+
+ ./check lib/auto/testcase lib/auto/util
+
+ Check all python and yaml files in given directories
+
+EOM
+}
+
+# get list of files to be checked
+function get_file_list() {
+ # store file regex and shift params to get list of original ./check options
+ TMP_FILE_REGEX=$1
+ shift
+
+ rm $FILE_LIST &> /dev/null
+ if [ "x$1" == "x-m" -o "x$1" == "x--modified" ] ; then
+ # check of modified files requested
+ git status --porcelain | cut -b4- | egrep -i "${TMP_FILE_REGEX}" | sort > $FILE_LIST
+ elif [ "x$*" == "x" ] ; then
+ # list is empty, check all python files
+ git ls-tree --name-only -r HEAD | egrep -i "${TMP_FILE_REGEX}" | sort > $FILE_LIST
+ else
+ for item in $* ; do
+ if [ -d $item ] ; then
+ git ls-tree --name-only -r HEAD $item | egrep -i "${TMP_FILE_REGEX}" | sort >> $FILE_LIST
+ elif [ -f $item ] ; then
+ echo $item | egrep -i "${TMP_FILE_REGEX}" >> $FILE_LIST
+ else
+ echo "$item doesn't exist, thus check was aborted"
+ exit 1
+ fi
+ done
+ fi
+}
+
+function check_lint_binary() {
+ # check if lint binary is available
+ if ! which $1 &>/dev/null ; then
+ echo "$1 is not available, thus check can't be executed"
+ return 1
+ fi
+ return 0
+}
+
+
+function check_python() {
+ echo "Execution of pylint checks:"
+
+ if ! check_lint_binary $PYLINT ; then
+ CHECK_PYTHON=1
+ return
+ fi
+
+ # check if there is anything to check
+ if [ -s $FILE_LIST ] ; then
+ for pyfile in `cat $FILE_LIST | sort` ; do
+ # get base name
+ pyfile_basename="'"`basename $pyfile .py`"'"
+ # run pylint and extract final rating
+ output=`$PYLINT --rcfile $PYLINT_RC $pyfile 2>/dev/null`
+ rating=`echo -e $output | tail -n3 | grep rated | sed -e 's/^.*rated at \(-\?[0-9.]*\).*$/\1/'`
+ # evaluate and display aquired rating
+ if [ "x$rating" == "x" ] ; then
+ # rating is not available for files without python statements
+ printf " %-70s %-6s\n" $pyfile "NA"
+ elif [ "$rating" == "10" ] ; then
+ printf " %-70s ${GREEN}%-6s${BLACK}\n" $pyfile "OK"
+ else
+ CHECK_PYTHON=1
+ echo -e "$output" | awk '/^\*+ Module|^[A-Z]\:/'
+ printf " %-70s ${RED}%-6s${BLACK}\n" $pyfile $rating
+ fi
+ done
+ else
+ echo " Nothing to check."
+ fi
+}
+
+function check_yaml() {
+ echo "Execution of yaml checks:"
+
+ if ! check_lint_binary $YAMLLINT ; then
+ CHECK_YAML=1
+ return
+ fi
+
+ # check if there is anything to check
+ if [ -s $FILE_LIST ] ; then
+ for yamlfile in `cat $FILE_LIST | sort` ; do
+ output=`$YAMLLINT -c $YAMLLINT_RC $yamlfile 2>/dev/null`
+ if [ $? -eq 0 ] ; then
+ printf " %-70s ${GREEN}%-6s${BLACK}\n" $yamlfile "OK"
+ else
+ CHECK_YAML=1
+ echo "$output"
+ printf " %-70s ${RED}%-6s${BLACK}\n" $yamlfile "FAILED"
+ fi
+ done
+ else
+ echo " Nothing to check."
+ fi
+}
+
+#
+# Main
+#
+# check if help is requested
+if [ "x$1" == "x-h" -o "x$1" == "x--help" ] ; then
+ usage
+ exit 0
+fi
+
+# set colours
+if [ "x$1" == "x-b" -o "x$1" == "x--black" ] ; then
+ shift
+ RED=""
+ GREEN=""
+ BLACK=""
+else
+ RED="\e[31m"
+ GREEN="\e[32m"
+ BLACK="\e[0m"
+fi
+
+# check if we were run within auto directory
+if [ ! -x ./check 2> /dev/null ] ; then
+ echo "`basename $0` must be run from auto root directory"
+ exit 1
+fi
+
+# run python checks
+get_file_list $PYTHON_FILE_REGEX $*
+check_python
+
+echo
+
+# run yaml checks
+get_file_list $YAML_FILE_REGEX $*
+check_yaml
+
+# clean up
+rm $FILE_LIST &> /dev/null
+
+# return success or failure based on pylint and yamllint checks
+# NOTE: As of now, failure of pylint checks is not propagated into exit code.
+# This will be turned on again after the rating of existing python
+# files will be improved.
+# if [ $CHECK_PYTHON -eq 0 -a $CHECK_YAML -eq 0 ] ; then
+if [ $CHECK_YAML -eq 0 ] ; then
+ exit 0
+else
+ exit 1
+fi
+
+#
+# The End
+#
diff --git a/ci/build-auto.sh b/ci/build-auto.sh
new file mode 100755
index 0000000..00b67b1
--- /dev/null
+++ b/ci/build-auto.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+#
+# Copyright 2015-2018 Intel Corporation., Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# CI helper script for execution of AUTO project jenkins jobs.
+# This script is based on the file ci/build-vsperf.sh from OPNFV vswitchperf
+# project.
+
+# Usage:
+# build-auto.sh job_type
+#
+# Parameters:
+# job_type - is one of "verify", "merge" or "daily"
+#
+# Example:
+# ./ci/build-auto.sh verify
+
+#
+# exit codes
+#
+EXIT=0
+EXIT_UNKNOWN_JOB_TYPE=1
+EXIT_LINT_FAILED=2
+EXIT_FUEL_FAILED=10
+
+#
+# configuration
+#
+AUTOENV_DIR="$HOME/autoenv"
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+LOG_DIR=$HOME/auto_ci_daily_logs
+WORKSPACE=${WORKSPACE:-$PWD}
+
+# POD and SCENARIO details used during OPNFV deployment performed by daily job
+NODE_NAME=${NODE_NAME:-"ericsson-virtual1"}
+POD_LAB=$(echo $NODE_NAME | cut -d '-' -f1)
+POD_NAME=$(echo $NODE_NAME | cut -d '-' -f2)
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-onap-noha"}
+
+#
+# functions
+#
+# execute pylint and yamllint to check code quality
+function execute_auto_lint_check() {
+ if ! ./check -b ; then
+ EXIT=$EXIT_LINT_FAILED
+ fi
+}
+
+# check and install required packages
+function dependencies_check() {
+ . /etc/os-release
+ if [ $ID == "ubuntu" ] ; then
+ echo "Dependencies check"
+ echo "=================="
+ # install system packages
+ for PACKAGE in "virtualenv" "pylint" "yamllint" "gnuplot" ; do
+ if dpkg -s $PACKAGE &> /dev/null ; then
+ printf " %-70s %-6s\n" $PACKAGE "OK"
+ else
+ printf " %-70s %-6s\n" $PACKAGE "missing"
+ sudo apt-get install -y $PACKAGE
+ fi
+ done
+ echo
+ fi
+}
+
+# create virtualenv if needed and enable it
+function virtualenv_prepare() {
+ if [ ! -e $AUTOENV_DIR ] ; then
+ echo "Create AUTO environment"
+ echo "======================="
+ virtualenv "$AUTOENV_DIR"
+ echo
+ fi
+
+ # activate and update virtualenv
+ echo "Update AUTO environment"
+ echo "======================="
+ source "$AUTOENV_DIR"/bin/activate
+ pip install -r ./requirements.txt
+ echo
+}
+
+#
+# main
+#
+echo
+
+# enter workspace dir
+cd $WORKSPACE
+
+# check if required packages are installed
+dependencies_check
+
+# execute job based on passed parameter
+case $1 in
+ "verify")
+ echo "==============="
+ echo "AUTO verify job"
+ echo "==============="
+
+ virtualenv_prepare
+ execute_auto_lint_check
+ #execute_auto_doc_check
+
+ # Everything went well, so report SUCCESS to Jenkins
+ exit $EXIT
+ ;;
+ "merge")
+ echo "=============="
+ echo "AUTO merge job"
+ echo "=============="
+
+ virtualenv_prepare
+ execute_auto_lint_check
+ #execute_auto_doc_check
+
+ # propagate result to the Jenkins job
+ exit $EXIT
+ ;;
+ "daily")
+ echo "=============="
+ echo "AUTO daily job"
+ echo "=============="
+ echo
+ echo "Deployment details:"
+ echo " LAB: $POD_LAB"
+ echo " POD: $POD_NAME"
+ echo " Scenario: $DEPLOY_SCENARIO"
+ echo " WORKSPACE: $WORKSPACE"
+ echo
+
+ # create log dir if needed
+ if [ ! -e $LOG_DIR ] ; then
+ echo "Create AUTO LOG DIRECTORY"
+ echo "========================="
+ echo "mkdir $LOG_DIR"
+ mkdir $LOG_DIR
+ echo
+ fi
+
+ echo "Installation of OPNFV and ONAP"
+ echo "=============================="
+ # clone fuel and execute installation of ONAP scenario to install
+ # ONAP on top of OPNFV deployment
+ [ -e fuel ] && rm -rf fuel
+ git clone https://gerrit.opnfv.org/gerrit/fuel
+ cd fuel
+ # Fuel master branch is currently broken; thus use stable/gambia
+ # branch with recent master version of ONAP scenario
+ git checkout stable/gambia
+ git checkout origin/master mcp/config/states/onap \
+ mcp/config/scenario/os-nosdn-onap-ha.yaml \
+ mcp/config/scenario/os-nosdn-onap-noha.yaml
+ # use larger disk size for virtual nodes
+ sed -i -re 's/(qemu-img resize.*)100G/\1400G/' mcp/scripts/lib_jump_deploy.sh
+
+ LOG_FILE="$LOG_DIR/deploy_${TIMESTAMP}.log"
+ echo "ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\
+ tee $LOG_FILE"
+ DEPLOY_START=$(date +%Y%m%d_%H%M%S)
+ ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\
+ tee $LOG_FILE
+
+ # report failure if fuel failed to install OPNFV or ONAP
+ [ $? -ne 0 ] && exit $EXIT_FUEL_FAILED
+
+ # process report
+ DEPLOY_END=$(date +%Y%m%d_%H%M%S)
+ REPORT_FILE="$LOG_DIR/deploy_report_${TIMESTAMP}.txt"
+ CSV_SUMMARY="$LOG_DIR/deploy_summary_${TIMESTAMP}.csv"
+ MARKER="ONAP INSTALLATION REPORT"
+ # cut report from installation log file
+ sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" \
+ $LOG_FILE > $REPORT_FILE
+ PODS_TOTAL=$(grep "PODs Total" $REPORT_FILE | sed -e 's/[^0-9]//g')
+ PODS_FAILED=$(grep "PODs Failed" $REPORT_FILE | sed -e 's/[^0-9]//g')
+ TC_SUM=$(grep "tests total" $REPORT_FILE | tail -n1 |\
+ sed -e 's/[^0-9,]//g')
+
+ echo "Start Time,End Time,Total PODs,Failed PODs,Total Tests,Passed"\
+ "Tests,Failed Tests" >> $CSV_SUMMARY
+ echo "$DEPLOY_START,$DEPLOY_END,$PODS_TOTAL,$PODS_FAILED,$TC_SUM"\
+ >> $CSV_SUMMARY
+
+ # plot graphs from result summaries and print txt versions if possible
+ cd $WORKSPACE
+ ci/plot-results.sh
+ for GRAPH in $(ls -1 graph*txt 2> /dev/null) ; do
+ cat $GRAPH
+ done
+
+ # propagate result to the Jenkins job
+ exit $EXIT
+ ;;
+ *)
+ echo
+        echo "ERROR: Unknown job type \"$1\""
+ echo
+ exit $EXIT_UNKNOWN_JOB_TYPE
+ ;;
+esac
+
+exit $EXIT_UNKNOWN_JOB_TYPE
+
+#
+# end
+#
diff --git a/ci/deploy-onap-fuel.sh b/ci/deploy-onap-fuel.sh
new file mode 100755
index 0000000..c120e9c
--- /dev/null
+++ b/ci/deploy-onap-fuel.sh
@@ -0,0 +1,238 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP on top of OPNFV Fuel/MCP installation
+# In the future both OOM and heat install methods should be supported.
+# At the beginning OOM will be used for simplification.
+
+# TODO:
+# Configure ONAP to be able to control underlying OpenStack
+
+# Configuration to be passed to ci/deploy-onap.sh
+export SSH_USER="ubuntu"
+export SSH_IDENTITY="/root/.ssh/onap_key"
+
+# detect hypervisor details to be used as default values if needed
+OS_HYPER_CMD="openstack hypervisor list --long"
+echo -e "\nOpenStack Hypervisor list\n"
+$OS_HYPER_CMD
+
+DEFAULT_CMP_COUNT=$($OS_HYPER_CMD -f value -c "ID" | wc -l)
+DEFAULT_CMP_MIN_MEM=$($OS_HYPER_CMD -f value -c "Memory MB" | sort | head -n1)
+DEFAULT_CMP_MIN_CPUS=$($OS_HYPER_CMD -f value -c "vCPUs" | sort | head -n1)
+
+# Use default values if compute configuration was not set by FUEL installer
+AUTO_INSTALL_DIR=${AUTO_INSTALL_DIR:-"."}
+AUTO_IMAGE_DIR="${AUTO_INSTALL_DIR}/images"
+CMP_COUNT=${CMP_COUNT:-$DEFAULT_CMP_COUNT} # number of compute nodes
+CMP_MIN_MEM=${CMP_MIN_MEM:-$DEFAULT_CMP_MIN_MEM} # MB RAM of the weakest compute node
+CMP_MIN_CPUS=${CMP_MIN_CPUS:-$DEFAULT_CMP_MIN_CPUS} # CPU count of the weakest compute node
+# size of storage for instances
+CMP_STORAGE_TOTAL=${CMP_STORAGE_TOTAL:-$((80*$CMP_COUNT))}
+VM_COUNT=${VM_COUNT:-6} # number of VMs available for k8s cluster
+
+#
+# Functions
+#
+# function minimum accepts two numbers and prints smaller one
+function minimum(){
+ echo $(($1<$2?$1:$2))
+}
+
+# function remove_openstack_setup removes OS configuration performed by this
+# script; So previously created configuration and deployed VMs will be
+# removed before new ONAP deployment will be started.
+function remove_openstack_setup(){
+ # flavor is created 1st but removed last, so...
+ if ( ! openstack flavor list | grep 'onap.large' &> /dev/null ) ; then
+ #...no flavor means nothing to be removed
+ return
+ fi
+ echo -e "\nRemoving ONAP specific OpenStack configuration"
+ for a in $(openstack server list --name onap_vm -f value -c ID) ; do
+ openstack server delete $a
+ done
+ RULES=$(openstack security group rule list onap_security_group -f value -c ID)
+ for a in $RULES; do
+ openstack security group rule delete $a
+ done
+ openstack security group delete onap_security_group
+ for a in $(openstack floating ip list -f value -c ID) ; do
+ openstack floating ip delete $a
+ done
+ PORTS=$(openstack port list --network onap_private_network -f value -c ID)
+ for a in $PORTS ; do
+ openstack router remove port onap_router $a
+ done
+ PORTS=$(openstack port list --network onap_private_network -f value -c ID)
+ for a in $PORTS ; do
+ openstack port delete $a
+ done
+ openstack router delete onap_router
+ openstack subnet delete onap_private_subnet
+ openstack network delete onap_private_network
+ openstack image delete xenial
+ rm -rf $AUTO_IMAGE_DIR
+ openstack keypair delete onap_key
+ rm $SSH_IDENTITY
+ openstack flavor delete onap.large
+ echo
+}
+
+#
+# Script Main
+#
+
+# remove OpenStack configuration if it exists
+remove_openstack_setup
+
+echo -e "\nOpenStack configuration\n"
+
+# Calculate VM resources, so that flavor can be created
+echo "Configuration of compute node:"
+echo "Number of computes: CMP_COUNT=$CMP_COUNT"
+echo "Minimal RAM: CMP_MIN_MEM=$CMP_MIN_MEM"
+echo "Minimal CPUs count: CMP_MIN_CPUS=$CMP_MIN_CPUS"
+echo "Storage for instances: CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL"
+echo "Number of VMs: VM_COUNT=$VM_COUNT"
+# Calculate VM parameters; there will be up to 1 VM per Compute node
+# to maximize resources available for VMs
+PER=85 # % of compute resources will be consumed by VMs
+VM_DISK_MAX=100 # GB - max VM disk size
+VM_MEM_MAX=81920 # MB - max VM RAM size
+VM_CPUS_MAX=56 # max count of VM CPUs
+VM_MEM=$(minimum $(($CMP_MIN_MEM*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_MEM_MAX)
+VM_CPUS=$(minimum $(($CMP_MIN_CPUS*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_CPUS_MAX)
+VM_DISK=$(minimum $(($CMP_STORAGE_TOTAL*$PER/100/$VM_COUNT)) $VM_DISK_MAX)
+
+echo -e "\nFlavor configuration:"
+echo "CPUs : $VM_CPUS"
+echo "RAM [MB] : $VM_MEM"
+echo "DISK [GB] : $VM_DISK"
+
+# Create onap flavor
+openstack flavor create --ram $VM_MEM --vcpus $VM_CPUS --disk $VM_DISK \
+ onap.large
+
+# Generate a keypair and store private key
+openstack keypair create onap_key > $SSH_IDENTITY
+chmod 600 $SSH_IDENTITY
+
+# Download and import VM image(s)
+mkdir $AUTO_IMAGE_DIR
+wget -P $AUTO_IMAGE_DIR https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+openstack image create --disk-format qcow2 --container-format bare --public \
+ --file $AUTO_IMAGE_DIR/xenial-server-cloudimg-amd64-disk1.img xenial
+
+# Modify quotas (add 10% to required VM resources)
+openstack quota set --ram $(($VM_MEM*$VM_COUNT*110/100)) admin
+openstack quota set --cores $(($VM_CPUS*$VM_COUNT*110/100)) admin
+
+# Configure networking with DNS for access to the internet
+openstack network create onap_private_network --provider-network-type vxlan
+openstack subnet create onap_private_subnet --network onap_private_network \
+ --subnet-range 192.168.33.0/24 --ip-version 4 --dhcp --dns-nameserver "8.8.8.8"
+openstack router create onap_router
+openstack router add subnet onap_router onap_private_subnet
+openstack router set onap_router --external-gateway floating_net
+
+# Allow selected ports and protocols
+openstack security group create onap_security_group
+openstack security group rule create --protocol icmp onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 22:22 onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 8080:8080 onap_security_group # rancher
+openstack security group rule create --proto tcp \
+ --dst-port 8078:8078 onap_security_group # horizon
+openstack security group rule create --proto tcp \
+ --dst-port 8879:8879 onap_security_group # helm
+openstack security group rule create --proto tcp \
+ --dst-port 80:80 onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 443:443 onap_security_group
+
+# Allow communication between k8s cluster nodes
+PUBLIC_NET=`openstack subnet list --name floating_subnet -f value -c Subnet`
+openstack security group rule create --remote-ip $PUBLIC_NET --proto tcp \
+ --dst-port 1:65535 onap_security_group
+openstack security group rule create --remote-ip $PUBLIC_NET --proto udp \
+ --dst-port 1:65535 onap_security_group
+
+# Get list of hypervisors and their zone
+HOST_ZONE=$(openstack host list -f value | grep compute | head -n1 | cut -d' ' -f3)
+HOST_NAME=($(openstack host list -f value | grep compute | cut -d' ' -f1))
+HOST_COUNT=$(echo ${HOST_NAME[@]} | wc -w)
+# Create VMs and assign floating IPs to them
+VM_ITER=1
+HOST_ITER=0
+while [ $VM_ITER -le $VM_COUNT ] ; do
+ openstack floating ip create floating_net
+ VM_NAME[$VM_ITER]="onap_vm${VM_ITER}"
+ VM_IP[$VM_ITER]=$(openstack floating ip list -c "Floating IP Address" \
+ -c "Port" -f value | grep None | cut -f1 -d " " | head -n1)
+ # dispatch new VMs among compute nodes in round robin fashion
+ openstack server create --flavor onap.large --image xenial \
+ --nic net-id=onap_private_network --security-group onap_security_group \
+ --key-name onap_key ${VM_NAME[$VM_ITER]} \
+ --availability-zone ${HOST_ZONE}:${HOST_NAME[$HOST_ITER]}
+ sleep 10 # wait for VM init before floating IP can be assigned
+ openstack server add floating ip ${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}
+ echo "Waiting for ${VM_NAME[$VM_ITER]} to start up for 1m at $(date)"
+ sleep 1m
+ VM_ITER=$(($VM_ITER+1))
+ HOST_ITER=$(($HOST_ITER+1))
+ [ $HOST_ITER -ge $HOST_COUNT ] && HOST_ITER=0
+done
+
+openstack server list -c ID -c Name -c Status -c Networks -c Host --long
+
+# check that SSH to all VMs is working
+SSH_OPTIONS="-i $SSH_IDENTITY -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+COUNTER=1
+while [ $COUNTER -le 10 ] ; do
+ VM_UP=0
+ VM_ITER=1
+ while [ $VM_ITER -le $VM_COUNT ] ; do
+ if ssh $SSH_OPTIONS -l $SSH_USER ${VM_IP[$VM_ITER]} exit &>/dev/null ; then
+ VM_UP=$(($VM_UP+1))
+ echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: up"
+ else
+ echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: down"
+ fi
+ VM_ITER=$(($VM_ITER+1))
+ done
+ COUNTER=$(($COUNTER+1))
+ if [ $VM_UP -eq $VM_COUNT ] ; then
+ break
+ fi
+ echo "Waiting for VMs to be accessible via ssh for 2m at $(date)"
+ sleep 2m
+done
+
+openstack server list -c ID -c Name -c Status -c Networks -c Host --long
+
+if [ $VM_UP -ne $VM_COUNT ] ; then
+ echo "Only $VM_UP from $VM_COUNT VMs are accessible via ssh. Installation will be terminated."
+ exit 1
+fi
+
+# Start ONAP installation
+DATE_START=$(date)
+echo -e "\nONAP Installation Started at $DATE_START\n"
+$AUTO_INSTALL_DIR/ci/deploy-onap.sh ${VM_IP[@]}
+echo -e "\nONAP Installation Started at $DATE_START"
+echo -e "ONAP Installation Finished at $(date)\n"
diff --git a/ci/deploy-onap-kubespray.sh b/ci/deploy-onap-kubespray.sh
new file mode 100755
index 0000000..a797388
--- /dev/null
+++ b/ci/deploy-onap-kubespray.sh
@@ -0,0 +1,339 @@
+#!/bin/bash
+#
+# Copyright 2018-2019 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+
+#
+# Configuration
+#
+# force locale-independent output of all tools used below
+export LC_ALL=C
+export LANG=C
+
+# 1st argument is the K8S master; full argument list is the whole cluster;
+# arguments after the 1st are the slave/worker nodes
+MASTER=$1
+SERVERS=$*
+shift
+SLAVES=$*
+
+# ONAP OOM branch to deploy; overridable from the environment
+ONAP_BRANCH=${ONAP_BRANCH:-'casablanca'}
+# pinned kubespray revision, to keep deployments reproducible
+KUBESPRAY_COMMIT="bbfd2dc2bd088efc63747d903edd41fe692531d8"
+NAMESPACE='onap'
+SSH_USER=${SSH_USER:-"opnfv"}
+SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# use identity file from the environment SSH_IDENTITY
+if [ -n "$SSH_IDENTITY" ] ; then
+ SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS"
+ ANSIBLE_IDENTITY="--private-key=$SSH_IDENTITY"
+fi
+
+# NOTE(review): this variable is expanded unquoted on the ansible-playbook
+# command line, so the embedded double quotes are passed literally to
+# ansible -- confirm ansible accepts this form of -e argument
+KUBESPRAY_OPTIONS='-e "kubelet_max_pods=250"'
+
+TMP_POD_LIST='/tmp/onap_pod_list.txt'
+
+# helm version must match the requirements of the selected ONAP branch
+case "$ONAP_BRANCH" in
+ "beijing")
+ HELM_VERSION=2.8.2
+ ;;
+ "casablanca")
+ HELM_VERSION=2.9.1
+ ;;
+ *)
+ HELM_VERSION=2.9.1
+ ;;
+esac
+
+ONAP_MINIMAL="aai dmaap portal robot sdc sdnc so vid"
+# by default a minimal ONAP installation is performed;
+# empty list of ONAP_COMPONENT means full ONAP installation
+ONAP_COMPONENT=${ONAP_COMPONENT:-$ONAP_MINIMAL}
+
+#
+# Functions
+#
+# Print usage help, including the environment variables honoured by the
+# script and the preconditions required on the target servers.
+function usage() {
+ echo "usage"
+ cat <<EOL
+Usage:
+ $0 <MASTER> [ <SLAVE1> <SLAVE2> ... ]
+
+ where <MASTER> and <SLAVEx> are IP addresses of servers to be used
+ for ONAP installation.
+
+ Script behavior is affected by following environment variables:
+
+ ONAP_COMPONENT - a list of ONAP components to be installed, empty list
+ will trigger a full ONAP installation
+ VALUE: "$ONAP_COMPONENT"
+
+ ONAP_BRANCH - version of ONAP to be installed (OOM branch version)
+ VALUE: "$ONAP_BRANCH"
+
+ NAMESPACE - name of ONAP namespace in kubernetes cluster
+ VALUE: "$NAMESPACE"
+
+ SSH_USER - user name to be used to access <MASTER> and <SLAVEx>
+ servers
+ VALUE: "$SSH_USER"
+
+ SSH_IDENTITY - (optional) ssh identity file to be used to access
+ <MASTER> and <SLAVEx> servers as a SSH_USER
+ VALUE: "$SSH_IDENTITY"
+
+NOTE: Following must be assured for <MASTER> and <SLAVEx> servers before
+ $0 execution:
+ 1) SSH_USER must be able to access servers via ssh without a password
+ 2) SSH_USER must have a password-less sudo access
+EOL
+}
+
+# Check if server IPs of kubernetes nodes are configured at given server.
+# If it is not the case, then kubespray inventory file must be updated.
+# $1 - path to the kubespray inventory file to be checked and updated
+function check_server_ips() {
+ # iterate over all "ip=..." entries found in the inventory file
+ for SERVER_IP in $(grep 'ip=' $1 | sed -re 's/^.*ip=([0-9\.]+).*$/\1/') ; do
+ IP_OK="false"
+ # compare the inventory IP with every address configured on the server
+ for IP in $(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip a | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g'") ; do
+ if [ "$IP" == "$SERVER_IP" ] ; then
+ IP_OK="true"
+ fi
+ done
+ # access IP (e.g. OpenStack floating IP) is not server local address, so update inventory
+ if [ $IP_OK == "false" ] ; then
+ # get server default GW dev
+ DEV=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip route ls" | grep ^default | sed -re 's/^.*dev (.*)$/\1/')
+ LOCAL_IP=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip -f inet addr show $DEV" | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g')
+ if [ "$LOCAL_IP" == "" ] ; then
+ echo "Can't read local IP for server with IP $SERVER_IP"
+ exit 1
+ fi
+ # record the local address as "ip=" and keep the public one as "access_ip="
+ sed -i'' -e "s/ip=$SERVER_IP/ip=$LOCAL_IP access_ip=$SERVER_IP/" $1
+ fi
+ done
+}
+
+# sanity check - at least one server IP is required
+if [ "$SERVERS" == "" ] ; then
+ usage
+ exit 1
+fi
+
+#
+# Installation
+#
+
+# detect CPU architecture to download correct helm binary
+CPU_ARCH=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "uname -p")
+case "$CPU_ARCH" in
+ "x86_64")
+ ARCH="amd64"
+ ;;
+ "aarch64")
+ ARCH="arm64"
+ ;;
+ *)
+ echo "Unsupported CPU architecture '$CPU_ARCH' was detected."
+ exit 1
+esac
+
+# print configuration
+cat << EOL
+list of configuration options:
+ SERVERS="$SERVERS"
+ ONAP_COMPONENT="$ONAP_COMPONENT"
+ ONAP_BRANCH="$ONAP_BRANCH"
+ NAMESPACE="$NAMESPACE"
+ SSH_USER="$SSH_USER"
+ SSH_IDENTITY="$SSH_IDENTITY"
+ ARCH="$ARCH"
+
+EOL
+
+# install K8S cluster by kubespray (pinned to KUBESPRAY_COMMIT above)
+sudo apt-get -y update
+sudo apt-get -y install git ansible python-jinja2 python3-pip libffi-dev libssl-dev
+git clone https://github.com/kubernetes-incubator/kubespray.git
+# abort if the clone failed - otherwise all following commands would run
+# in the wrong directory
+cd kubespray || exit 1
+git checkout $KUBESPRAY_COMMIT
+pip3 install -r requirements.txt
+export CONFIG_FILE=inventory/auto_hosts.ini
+# -f: do not complain when the inventory does not exist yet (first run)
+rm -f $CONFIG_FILE
+# generate inventory from the server list and fix up ip/access_ip entries
+python3 contrib/inventory_builder/inventory.py $SERVERS
+check_server_ips $CONFIG_FILE
+cat $CONFIG_FILE
+if ( ! ansible-playbook -i $CONFIG_FILE $KUBESPRAY_OPTIONS -b -u $SSH_USER $ANSIBLE_IDENTITY cluster.yml ) ; then
+ echo "Kubespray installation has failed at $(date)"
+ exit 1
+fi
+
+# use standalone K8S master if there are enough VMs available for the K8S cluster
+# (with 2 or fewer servers the master is also used as a worker node)
+SERVERS_COUNT=$(echo $SERVERS | wc -w)
+if [ $SERVERS_COUNT -gt 2 ] ; then
+ K8S_NODES=$SLAVES
+else
+ K8S_NODES=$SERVERS
+fi
+
+echo "INSTALLATION TOPOLOGY:"
+echo "Kubernetes Master: $MASTER"
+echo "Kubernetes Nodes: $K8S_NODES"
+echo
+echo "CONFIGURING NFS ON SLAVES"
+echo "$SLAVES"
+
+# mount the master's /dockerdata-nfs export on every slave; all slaves are
+# configured in parallel (background ssh jobs, joined by "wait" below)
+for SLAVE in $SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS &
+ sudo su
+ apt-get install nfs-common -y
+ mkdir /dockerdata-nfs
+ chmod 777 /dockerdata-nfs
+ echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab
+ mount -a
+ mount | grep dockerdata-nfs
+CONFIGURENFS
+done
+wait
+
+echo "DEPLOYING OOM ON MASTER"
+echo "$MASTER"
+
+# Remote deployment script executed on the master via a heredoc.
+# Quoting convention: unescaped variables (e.g. NAMESPACE, ONAP_BRANCH,
+# HELM_VERSION) are expanded locally before the script is sent; variables
+# and command substitutions escaped with a backslash are evaluated remotely.
+# Phases: create namespace -> tiller RBAC -> clone OOM -> pre-pull images ->
+# select components -> install helm + charts -> wait for pods -> report.
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY
+sudo su
+echo "create namespace '$NAMESPACE'"
+cat <<EOF | kubectl create -f -
+{
+ "kind": "Namespace",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "$NAMESPACE",
+ "labels": {
+ "name": "$NAMESPACE"
+ }
+ }
+}
+EOF
+kubectl get namespaces --show-labels
+kubectl -n kube-system create sa tiller
+kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
+rm -rf oom
+echo "pulling new oom"
+git clone -b $ONAP_BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+
+# Enable selected ONAP components
+if [ -n "$ONAP_COMPONENT" ] ; then
+ # disable all components and enable only selected in next loop
+ sed -i '/^.*:$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml
+ echo -n "Enable following ONAP components:"
+ for COMPONENT in $ONAP_COMPONENT; do
+ echo -n " \$COMPONENT"
+ sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *false/enabled: true/' onap/values.yaml
+ done
+ echo
+else
+ echo "All ONAP components will be installed"
+fi
+
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz
+mv linux-${ARCH}/helm /usr/local/bin/helm
+helm init --upgrade --service-account tiller
+# run helm server on the background and detached from current shell
+nohup helm serve 0<&- &>/dev/null &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 5m
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+if ( ! helm install local/onap -n dev --namespace $NAMESPACE) ; then
+ echo "ONAP installation has failed at \$(date)"
+ exit 1
+fi
+
+cd ../../
+
+echo "Waiting for ONAP pods to be up \$(date)"
+echo "Ignore failure of sdnc-ansible-server, see SDNC-443"
+function get_onap_pods() {
+ kubectl get pods --namespace $NAMESPACE > $TMP_POD_LIST
+ return \$(cat $TMP_POD_LIST | wc -l)
+}
+FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs
+ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running
+WAIT_PERIOD=60 # wait period in seconds
+MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds
+MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD))
+COUNTER=0
+get_onap_pods
+ALL_PODS=\$?
+PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do
+ # print header every 20th line
+ if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then
+ printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs"
+ fi
+ COUNTER=\$((\$COUNTER+1))
+ printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS
+ sleep \$WAIT_PERIOD
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ ALL_PODS_LIMIT=0
+ fi
+ get_onap_pods
+ ALL_PODS=\$?
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+done
+
+get_onap_pods
+cp $TMP_POD_LIST ~/onap_all_pods.txt
+echo
+echo "========================"
+echo "ONAP INSTALLATION REPORT"
+echo "========================"
+echo
+echo "List of Failed PODs"
+echo "-------------------"
+grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt
+echo
+echo "Summary:"
+echo "--------"
+echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
+cd oom/kubernetes/robot
+./ete-k8s.sh $NAMESPACE health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
+OOMDEPLOY
+
+# fix typo in the final user-visible status message ("ruturned" -> "returned")
+echo "Finished install, returned from Master at $(date)"
+exit 0
diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh
new file mode 100755
index 0000000..c34eb56
--- /dev/null
+++ b/ci/deploy-onap.sh
@@ -0,0 +1,376 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+# Usage:
+# onap-deploy.sh <MASTER> <SLAVE1> <SLAVE2>
+#
+# where <MASTER> and <SLAVE_IPx> are IP addresses of servers to be used
+# for ONAP installation.
+#
+# NOTE: Following must be assured for all MASTER and SLAVE servers before
+# onap-deploy.sh execution:
+# 1) ssh access without a password
+# 2) an user account with password-less sudo access must be
+# available - default user is "opnfv"
+
+#
+# Configuration
+#
+# component versions used by the Rancher-based deployment
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+RANCHER_CLI_VER=0.6.11
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+# 1st argument is the master; full argument list is the whole cluster;
+# arguments after the 1st are the slave nodes
+MASTER=$1
+SERVERS=$*
+shift
+SLAVES=$*
+
+BRANCH='beijing'
+ENVIRON='onap'
+
+SSH_USER=${SSH_USER:-"opnfv"}
+SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# by default a full ONAP installation is performed
+ONAP_COMPONENT_DISABLE=${ONAP_COMPONENT_DISABLE:-""}
+# example of minimal ONAP installation
+#ONAP_COMPONENT_DISABLE="clamp cli consul dcaegen2 esr log msb multicloud nbi oof policy uui vfc vnfsdk"
+
+# use identity file from the environment SSH_IDENTITY
+if [ -n "$SSH_IDENTITY" ] ; then
+ SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS"
+fi
+
+#
+# Installation
+#
+
+# use standalone K8S master if there are enough VMs available for the K8S cluster
+# (with 2 or fewer servers the master is also registered as a Rancher slave)
+SERVERS_COUNT=$(echo $SERVERS | wc -w)
+if [ $SERVERS_COUNT -gt 2 ] ; then
+ RANCHER_SLAVES=$SLAVES
+else
+ RANCHER_SLAVES=$SERVERS
+fi
+
+echo "INSTALLATION TOPOLOGY:"
+echo "Rancher Master: $MASTER"
+echo "Rancher Slaves: $RANCHER_SLAVES"
+echo
+echo "INSTALLING DOCKER ON ALL MACHINES"
+echo "$SERVERS"
+
+# Install docker on every server in parallel (background ssh jobs joined by
+# "wait"); each host also gets /etc/hosts entries and a hostname derived
+# from the last octet of its IP address (cut -d. -f 4).
+# Heredoc quoting: unescaped variables expand locally, backslash-escaped
+# ones expand remotely.
+for MACHINE in $SERVERS;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<DOCKERINSTALL &
+ sudo -i
+ sysctl -w vm.max_map_count=262144
+ apt-get update -y
+ curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+
+ mkdir -p /etc/systemd/system/docker.service.d/
+ echo "[Service]
+ ExecStart=
+ ExecStart=/usr/bin/dockerd -H fd:// \
+ --insecure-registry=nexus3.onap.org:10001"\
+ > /etc/systemd/system/docker.service.d/docker.conf
+
+ systemctl daemon-reload
+ systemctl restart docker
+ apt-mark hold docker-ce
+
+ for SERVER in $SERVERS;
+ do
+ echo "\$SERVER $ENVIRON\$(echo \$SERVER | cut -d. -f 4 )" >> /etc/hosts
+ done
+
+ hostname $ENVIRON\$(echo $MACHINE | cut -d. -f 4 )
+
+ echo "DOCKER INSTALLED ON $MACHINE"
+DOCKERINSTALL
+done
+wait
+
+echo "INSTALLING RANCHER ON MASTER"
+echo "$MASTER"
+
+# Remote installation of the Rancher server on the master via a heredoc.
+# Quoting convention: unescaped variables (RANCHER_VERSION, MASTER, ENVIRON,
+# ...) expand locally before the script is sent; backslash-escaped ones
+# expand remotely. Phases: install rancher server + kubectl + helm ->
+# export /dockerdata-nfs via NFS -> create API keys and a kubernetes
+# environment -> obtain host registration token -> write ~/.kube/config ->
+# store the registration docker command in /tmp/rancher_register_host
+# for later use by the slaves.
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<RANCHERINSTALL
+sudo -i
+echo "INSTALL STARTS"
+apt-get install -y jq make htop
+echo "Waiting for 30 seconds at \$(date)"
+sleep 30
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+docker run -d --restart=unless-stopped -p 8080:8080\
+ --name rancher_server rancher/server:v$RANCHER_VERSION
+curl -LO https://storage.googleapis.com/kubernetes-release/\
+release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+mv linux-amd64/helm /usr/local/bin/helm
+
+echo "Installing nfs server"
+# changed from nfs_share to dockerdata-nfs
+apt-get install nfs-kernel-server -y
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports
+service nfs-kernel-server restart
+
+echo "Waiting 10 minutes for Rancher to setup at \$(date)"
+sleep 10m
+echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER"
+wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\
+/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+cp rancher-v${RANCHER_CLI_VER}-rc2/rancher .
+
+API_RESPONSE=\`curl -s 'http://127.0.0.1:8080/v2-beta/apikey'\
+ -d '{"type":"apikey","accountId":"1a1","name":"autoinstall",\
+ "description":"autoinstall","created":null,"kind":null,\
+ "removeTime":null,"removed":null,"uuid":null}'\`
+# Extract and store token
+echo "API_RESPONSE: \${API_RESPONSE}"
+KEY_PUBLIC=\`echo \${API_RESPONSE} | jq -r .publicValue\`
+KEY_SECRET=\`echo \${API_RESPONSE} | jq -r .secretValue\`
+echo "publicValue: \$KEY_PUBLIC secretValue: \$KEY_SECRET"
+
+export RANCHER_URL=http://${MASTER}:8080
+export RANCHER_ACCESS_KEY=\$KEY_PUBLIC
+export RANCHER_SECRET_KEY=\$KEY_SECRET
+
+./rancher env ls
+echo "Creating kubernetes environment named ${ENVIRON}"
+./rancher env create -t kubernetes $ENVIRON > kube_env_id.json
+PROJECT_ID=\$(<kube_env_id.json)
+echo "env id: \$PROJECT_ID"
+
+echo "Waiting for ${ENVIRON} creation - 1 min at \$(date)"
+sleep 1m
+
+export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID
+echo "you should see an additional kubernetes environment"
+./rancher env ls
+
+REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\
+ -H 'Accept: application/json'\
+ -H 'ContentType: application/json'\
+ -d '{"name":"$MASTER"}'\
+ "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\`
+echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE"
+echo "Waiting for the server to finish url configuration - 1 min at \$(date)"
+sleep 1m
+# see registrationUrl in
+REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\`
+echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS"
+REGISTRATION_URL=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].registrationUrl\`
+REGISTRATION_DOCKER=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].image\`
+REGISTRATION_TOKEN=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].token\`
+echo "Registering host for image: \$REGISTRATION_DOCKER\
+ url: \$REGISTRATION_URL registrationToken: \$REGISTRATION_TOKEN"
+HOST_REG_COMMAND=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].command\`
+
+# base64 encode the kubectl token from the auth pair
+# generate this after the host is registered
+KUBECTL_TOKEN=\$(echo -n 'Basic '\$(echo\
+ -n "\$RANCHER_ACCESS_KEY:\$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+echo "KUBECTL_TOKEN base64 encoded: \${KUBECTL_TOKEN}"
+
+# add kubectl config - NOTE: the following spacing has to be "exact"
+# or kubectl will not connect - with a localhost:8080 error
+echo 'apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$MASTER:8080/r/projects/'\$PROJECT_ID'/kubernetes:6443"
+ name: "${ENVIRON}"
+contexts:
+- context:
+ cluster: "${ENVIRON}"
+ user: "${ENVIRON}"
+ name: "${ENVIRON}"
+current-context: "${ENVIRON}"
+users:
+- name: "${ENVIRON}"
+ user:
+ token: "'\${KUBECTL_TOKEN}'" ' > ~/.kube/config
+
+echo "docker run --rm --privileged\
+ -v /var/run/docker.sock:/var/run/docker.sock\
+ -v /var/lib/rancher:/var/lib/rancher\
+ \$REGISTRATION_DOCKER\
+ \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\
+ > /tmp/rancher_register_host
+chown $SSH_USER /tmp/rancher_register_host
+
+RANCHERINSTALL
+
+echo "REGISTER TOKEN"
+# fetch the "docker run ..." registration command prepared on the master
+HOSTREGTOKEN=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" cat /tmp/rancher_register_host)
+echo "$HOSTREGTOKEN"
+
+echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'"
+echo "$RANCHER_SLAVES"
+
+# run the registration command on every slave in parallel (joined by "wait")
+for MACHINE in $RANCHER_SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<REGISTERHOST &
+ sudo -i
+ $HOSTREGTOKEN
+ sleep 5
+ echo "Host $MACHINE waiting for host registration 5 min at \$(date)"
+ sleep 5m
+REGISTERHOST
+done
+wait
+
+echo "CONFIGURING NFS ON SLAVES"
+echo "$SLAVES"
+
+# mount the master's /dockerdata-nfs export on every slave; all slaves are
+# configured in parallel (background ssh jobs, joined by "wait" below)
+for SLAVE in $SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS &
+ sudo -i
+ apt-get install nfs-common -y
+ mkdir /dockerdata-nfs
+ chmod 777 /dockerdata-nfs
+ echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab
+ mount -a
+ mount | grep dockerdata-nfs
+CONFIGURENFS
+done
+wait
+
+echo "DEPLOYING OOM ON RANCHER WITH MASTER"
+echo "$MASTER"
+TMP_POD_LIST='/tmp/onap_pod_list.txt'
+
+# Remote OOM deployment executed on the master via a heredoc.
+# Quoting convention: unescaped variables (BRANCH, ENVIRON, TMP_POD_LIST,
+# ONAP_COMPONENT_DISABLE) expand locally before the script is sent;
+# backslash-escaped ones expand remotely. Phases: clone OOM -> pre-pull
+# images -> disable unwanted components -> install helm charts -> wait
+# for pods -> print installation report and run robot health checks.
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY
+sudo -i
+rm -rf oom
+echo "pulling new oom"
+git clone -b $BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+
+# Disable ONAP components
+if [ -n "$ONAP_COMPONENT_DISABLE" ] ; then
+ echo -n "Disable following ONAP components:"
+ for COMPONENT in $ONAP_COMPONENT_DISABLE; do
+ echo -n " \$COMPONENT"
+ sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml
+ done
+ echo
+fi
+
+helm init --upgrade
+# run helm server on the background and detached from current shell
+nohup helm serve 0<&- &>/dev/null &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 5m
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+if ( ! helm install local/onap -n dev --namespace $ENVIRON) ; then
+ echo "ONAP installation has failed at \$(date)"
+ exit 1
+fi
+
+cd ../../
+
+echo "Waiting for ONAP pods to be up \$(date)"
+echo "Ignore failure of sdnc-ansible-server, see SDNC-443"
+function get_onap_pods() {
+ kubectl get pods --namespace $ENVIRON > $TMP_POD_LIST
+ return \$(cat $TMP_POD_LIST | wc -l)
+}
+FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs
+ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running
+WAIT_PERIOD=60 # wait period in seconds
+MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds
+MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD))
+COUNTER=0
+get_onap_pods
+ALL_PODS=\$?
+PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do
+ # print header every 20th line
+ if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then
+ printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs"
+ fi
+ COUNTER=\$((\$COUNTER+1))
+ printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS
+ sleep \$WAIT_PERIOD
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ ALL_PODS_LIMIT=0
+ fi
+ get_onap_pods
+ ALL_PODS=\$?
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+done
+
+get_onap_pods
+cp $TMP_POD_LIST ~/onap_all_pods.txt
+echo
+echo "========================"
+echo "ONAP INSTALLATION REPORT"
+echo "========================"
+echo
+echo "List of Failed PODs"
+echo "-------------------"
+grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt
+echo
+echo "Summary:"
+echo "--------"
+echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
+cd oom/kubernetes/robot
+./ete-k8s.sh $ENVIRON health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
+OOMDEPLOY
+
+# fix typo in the final user-visible status message ("ruturned" -> "returned")
+echo "Finished install, returned from Master at $(date)"
+exit 0
diff --git a/ci/deploy-opnfv-apex-centos.sh b/ci/deploy-opnfv-apex-centos.sh
new file mode 100644
index 0000000..a3a0433
--- /dev/null
+++ b/ci/deploy-opnfv-apex-centos.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##################################################################################
+## installation of OpenStack via OPNFV Apex/TripleO, on CentOS, virtual deployment
+##################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/virtual.html
+
+echo "*** begin AUTO install: OPNFV Apex/TripleO"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7 or more)"
+cat /etc/*release
+
+# Manage Nested Virtualization
+# NOTE(review): the rm/append below write to /etc/modprobe.d without sudo,
+# while the modprobe calls use sudo - the script appears to assume it is
+# already running as root; confirm the intended invocation
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+# reload kvm_intel so the new module options take effect
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# 3 additional pre-installation preparations, lifted from OPNFV/storperf (they are post-installation there):
+# https://wiki.opnfv.org/display/storperf/LaaS+Setup+For+Development#LaaSSetupForDevelopment-InstallOPNFVApex
+# (may of may not be needed, to enable first-time Apex installation on blank server)
+
+# 1) Install Docker
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install -y docker-ce
+sudo systemctl start docker
+
+# 2) Install docker-compose (pinned to 1.21.2)
+sudo curl -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+
+# 3) Install Python
+sudo yum install -y python-virtualenv
+sudo yum groupinstall -y "Development Tools"
+sudo yum install -y openssl-devel
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+
+# download Apex packages
+echo "*** downloading packages:"
+sudo yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm
+sudo yum -y install epel-release
+# note: EPEL = Extra Packages for Enterprise Linux
+sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo
+
+# install three required RPMs (RedHat/RPM Package Managers); this takes several minutes
+sudo yum -y install http://artifacts.opnfv.org/apex/fraser/opnfv-apex-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-undercloud-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-python34-6.2.noarch.rpm
+
+# clean-up old Apex versions if any
+## precautionary opnfv-clean doesn't work... (even though packages are installed at this point)
+# NOTE(review): unlike the surrounding commands, opnfv-clean is invoked
+# without sudo - verify whether this is why it "doesn't work"
+opnfv-clean
+
+# Manage DNS references
+# probably not needed on an already configured server: already has DNS references
+# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "*** printout of /etc/resolv.conf :"
+cat /etc/resolv.conf
+
+# prepare installation directory
+mkdir -p /opt/opnfv-TripleO-apex
+cd /opt/opnfv-TripleO-apex
+
+# make sure cp is not aliased or a function; same for mv and rm
+# (guarded with "2>/dev/null || true": bare "unalias NAME" prints an error
+# and returns non-zero when no such alias exists, which is the common case
+# in a non-interactive shell)
+unalias cp 2>/dev/null || true
+unset -f cp
+unalias mv 2>/dev/null || true
+unset -f mv
+unalias rm 2>/dev/null || true
+unset -f rm
+
+# 2 YAML files from /etc/opnfv-apex/ are needed for virtual deploys:
+# 1) network_settings.yaml : may need to update NIC names, to match the NIC names on the deployment server
+# 2) standard scenario file (os-nosdn-nofeature-noha.yaml, etc.), or customized deploy_settings.yaml
+
+# make a local copy of YAML files (not necessary: could deploy from /etc/opnfv-apex); local copies are just for clarity
+# 1) network settings
+cp /etc/opnfv-apex/network_settings.yaml .
+# 2) deploy settings
+# copy one of the 40+ pre-defined scenarios (one of the YAML files)
+# for extra customization, git clone Apex repo, and copy and customize the generic deploy_settings.yaml
+# git clone https://git.opnfv.org/apex
+# cp ./apex/config/deploy/deploy_settings.yaml .
+cp /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml ./deploy_settings.yaml
+# cp /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml ./deploy_settings.yaml
+
+# Note: content of os-nosdn-nofeature-noha.yaml
+# ---
+# global_params:
+# ha_enabled: false
+#
+# deploy_options:
+# sdn_controller: false
+# tacker: true
+# congress: true
+# sfc: false
+# vpn: false
+
+
+# modify NIC names in network settings YAML file, specific to your environment (e.g. replace em1 with ens4f0 in LaaS)
+# Note: actually, this should not matter for a virtual environment
+sed -i 's/em1/ens4f0/' network_settings.yaml
+
+# launch deploy (works if openvswitch module is installed, which may not be the case the first time around)
+echo "*** deploying OPNFV by TripleO/Apex:"
+# --debug for detailed debug info
+# -v: Enable virtual deployment
+# note: needs at least 10G RAM for controllers
+sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml
+# without --debug:
+# sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+
+# with specific sizing:
+# sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml --virtual-compute-ram 32 --virtual-cpus 16 --virtual-computes 4
+
+
+# verify that the openvswitch module is listed:
+lsmod | grep openvswitch
+grep openvswitch < /proc/modules
+
+##{
+## workaround: do 2 successive installations... not exactly optimal...
+## clean up, as now opnfv-clean should work
+#opnfv-clean
+## second deploy try, should succeed (whether first one failed or succeeded)
+#sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+##}
+
+
+
+# verifications: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/verification.html
+
+# {
+# if error after deploy.sh: "libvirt.libvirtError: Storage pool not found: no storage pool with matching name 'default'"
+
+# This usually happens if for some reason you are missing a default pool in libvirt:
+# $ virsh pool-list |grep default
+# You can recreate it manually:
+# $ virsh pool-define-as default dir --target /var/lib/libvirt/images/
+# $ virsh pool-autostart default
+# $ virsh pool-start default
+# }
+
+# {
+# if error after deploy.sh: iptc.ip4tc.IPTCError
+# check Apex jira ticket #521 https://jira.opnfv.org/browse/APEX-521
+# }
+
+# OpenvSwitch should not be missing, as it is a requirement from the RPM package:
+# https://github.com/opnfv/apex/blob/stable/fraser/build/rpm_specs/opnfv-apex-common.spec#L15
+
+
+
+# install python 3 on CentOS
+echo "*** begin install python 3.6 (3.4 should be already installed by default)"
+
+sudo yum -y install python36
+# install pip and setup tools
+# (hash -r: clear bash's cached command locations so the new pip is found)
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+sudo /usr/bin/python3.6 get-pip.py --no-warn-script-location
+
+
+
+echo "*** end AUTO install: OPNFV Apex/TripleO"
diff --git a/ci/deploy-opnfv-compass-ubuntu.sh b/ci/deploy-opnfv-compass-ubuntu.sh
new file mode 100644
index 0000000..efccf78
--- /dev/null
+++ b/ci/deploy-opnfv-compass-ubuntu.sh
@@ -0,0 +1,201 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+#################################################################################
+## installation of OpenStack via OPNFV Compass4nfv, on Ubuntu, virtual deployment
+#################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Compass4nfv"
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-compass
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# preemptively install latest pip and clear $PATH cache
+# with apt-get (see apt-get -h and man apt-get for details)
+apt-get -y update
+apt-get -y upgrade
+apt-get -y install python-pip
+pip install --upgrade pip
+hash -r
+apt-get -y install python3-openstackclient
+apt-get -y autoremove
+
+## note: apt is more recent than apt-get (apt was formally introduced with Ubuntu 16.04)
+## APT: Advanced Packaging Tool; apt is more high-level, apt-get has more features;
+# apt -y update # Refreshes repository index
+# apt -y full-upgrade # Upgrades packages with auto-handling of dependencies
+# apt -y install python-pip
+# pip install --upgrade pip
+# hash -r
+# apt -y install python3-openstackclient
+# apt -y autoremove
+
+
+# 2 options: (option 1 is preferable)
+# 1) remain in master branch, use build.sh (which builds a tar ball), then launch deploy.sh
+# 2) download a tar ball and launch deploy.sh in a branch matching the tar ball release (e.g. fraser 6.2)
+
+
+##############
+# OPTION 1: build.sh + deploy.sh in master branch
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+echo "*** begin download Compass4nfv repository"
+git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+cd compass4nfv
+
+# launch build script
+echo "*** begin Compass4nfv build:"
+./build.sh |& tee log1-Build.txt
+
+# edit in deploy.sh specific to OPTION 1
+# set path to ISO file (tar ball), as built by build.sh previously
+# absolute path to tar ball file URL (MUST be absolute path)
+sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/compass4nfv/work/building/compass.tar.gz' deploy.sh
+
+# END OPTION 1
+##############
+
+
+##############
+# OPTION 2: tar ball + deploy.sh in matching releases/branches
+
+# download tarball of a certain release/version
+#echo "*** begin download Compass4nfv tar ball"
+#wget http://artifacts.opnfv.org/compass4nfv/fraser/opnfv-6.2.tar.gz
+# note: list of tar ball (ISO) files from Compass4NFV in https://artifacts.opnfv.org/compass4nfv.html
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+#echo "*** begin download Compass4nfv repository"
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+#cd compass4nfv
+# note: list of compass4nfv branch names in https://gerrit.opnfv.org/gerrit/#/admin/projects/compass4nfv,branches
+# checkout to branch (or tag) matching the tarball release
+#git checkout stable/fraser
+
+# edit in deploy.sh specific to OPTION 2
+# set path to ISO file (tar ball), as downloaded previously
+# absolute path to tar ball file URL (MUST be absolute path)
+# sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/opnfv-6.2.tar.gz' deploy.sh
+
+# END OPTION 2
+##############
+
+
+# edit remaining deploy.sh entries as needed
+
+# set operating system version: Ubuntu Xenial Xerus
+sed -i '/#export OS_VERSION=xenial\/centos7/a export OS_VERSION=xenial' deploy.sh
+
+# set path to OPNFV scenario / DHA (Deployment Hardware Adapter) YAML file
+# here, os-nosdn-nofeature-noha scenario
+sed -i '/#export DHA=/a export DHA=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/os-nosdn-nofeature-noha.yml' deploy.sh
+
+# set path to network YAML file
+sed -i '/#export NETWORK=/a export NETWORK=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/network.yml' deploy.sh
+
+# append parameters for virtual machines (for virtual deployments); e.g., 2 nodes for NOHA scenario, 5 for HA, etc.
+# note: this may not be needed in a future release of Compass4nfv
+
+# VIRT_NUMBER – the number of nodes for virtual deployment.
+# VIRT_CPUS – the number of CPUs allocated per virtual machine.
+# VIRT_MEM – the memory size (MB) allocated per virtual machine.
+# VIRT_DISK – the disk size allocated per virtual machine.
+
+# if OPTION 1 (master): OPENSTACK_VERSION is queens, so add the VIRT_NUMBER line after the queens match
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_CPUS=4' deploy.sh
+sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh
+
+# if OPTION 2 (stable/fraser): OPENSTACK_VERSION is pike, so add the VIRT_NUMBER line after the pike match
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_CPUS=4' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_NUMBER=5' deploy.sh
+
+
+# launch deploy script
+echo "*** begin Compass4nfv deploy:"
+./deploy.sh |& tee log2-Deploy.txt
+
+
+
+
+# To access OpenStack Horizon GUI in Virtual deployment
+# source: https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass
+
+# confirm IP@ of the current server (jump server, such as 10.10.100.xyz on LaaS: 10.10.100.42 for hpe32, etc.)
+external_nic=$(ip route |grep '^default'|awk '{print $5F}')
+echo "external_nic: $external_nic"
+ip addr show "$external_nic"
+
+# Config IPtables rules: pick an unused port number, e.g. 50000+machine number, 50032 for hpe32 at 10.10.100.42
+# 192.16.1.222:443 is the OpenStack Horizon GUI after a Compass installation
+# syntax: iptables -t nat -A PREROUTING -d $EX_IP -p tcp --dport $PORT -j DNAT --to 192.16.1.222:443
+# (note: this could be automated: retrieve IP@, pick port number)
+
+# example: hpe15
+# iptables -t nat -A PREROUTING -d 10.10.100.25 -p tcp --dport 50015 -j DNAT --to 192.16.1.222:443
+# example: hpe33
+# iptables -t nat -A PREROUTING -d 10.10.100.43 -p tcp --dport 50033 -j DNAT --to 192.16.1.222:443
+
+# display IPtables NAT rules
+iptables -t nat -L
+
+# Enter https://$EX_IP:$PORT in your browser to visit the OpenStack Horizon dashboard
+# examples: https://10.10.100.25:50015 , https://10.10.100.43:50033
+# The default user is "admin"
+# to get the Horizon password for "admin":
+sudo docker cp compass-tasks:/opt/openrc ./
+sudo cat openrc | grep OS_PASSWORD
+source ./openrc
+
+# for OpenStack CLI (generic content from openrc)
+export OS_ENDPOINT_TYPE=publicURL
+export OS_INTERFACE=publicURL
+export OS_USERNAME=admin
+export OS_PROJECT_NAME=admin
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://192.16.1.222:5000/v3
+export OS_NO_CACHE=1
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_REGION_NAME=RegionOne
+
+# For openstackclient
+export OS_IDENTITY_API_VERSION=3
+export OS_AUTH_VERSION=3
+
+
+
+echo "*** end AUTO install: OPNFV Compass4nfv"
+
diff --git a/ci/deploy-opnfv-daisy-centos.sh b/ci/deploy-opnfv-daisy-centos.sh
new file mode 100644
index 0000000..664ba55
--- /dev/null
+++ b/ci/deploy-opnfv-daisy-centos.sh
@@ -0,0 +1,179 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+
+###############################################################################
+## installation of OpenStack via OPNFV Daisy4nfv, on CentOS, virtual deployment
+###############################################################################
+# reference manual: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/index.html#daisy-installation
+# page for virtual deployment: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Daisy4nfv"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7.2 or more)"
+cat /etc/*release
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# download tools: git, kvm, libvirt, python-yaml
+sudo yum -y install git
+sudo yum -y install kvm
+sudo yum -y install libvirt
+sudo yum info libvirt
+sudo yum info qemu-kvm
+sudo yum -y install python-yaml
+
+
+# make sure SELinux is enforced (Security-Enhanced Linux)
+sudo setenforce 1
+echo "getenforce: $(getenforce)"
+
+# Restart the libvirtd daemon:
+sudo service libvirtd restart
+# Verify if the kvm module is loaded, you should see amd or intel depending on the hardware:
+lsmod | grep kvm
+# Note: to test, issue a virsh command to ensure local root connectivity:
+# sudo virsh sysinfo
+
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+# prepare Daisy installation directory
+export INSTALLDIR=/opt/opnfv-daisy
+mkdir $INSTALLDIR
+cd $INSTALLDIR
+
+# oslo-config, needed in daisy/deploy/get_conf.py
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+python get-pip.py --no-warn-script-location
+pip install --upgrade oslo-config
+
+
+# retrieve Daisy4nfv repository
+git clone https://gerrit.opnfv.org/gerrit/daisy
+cd daisy
+
+
+
+# OPTION 1: master repo and latest bin file: May 17th 2018
+# Download latest bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+curl http://artifacts.opnfv.org/daisy/opnfv-2018-05-17_14-00-32.bin -o opnfv.bin
+# make opnfv.bin executable
+chmod 777 opnfv.bin
+
+# OPTION 2: stable release: Fraser 6.0 (so, checkout to stable Fraser release opnfv-6.0)
+# Download matching bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+#git checkout opnfv.6.0 # as per Daisy4nfv instructions, but does not work
+#git checkout stable/fraser
+#curl http://artifacts.opnfv.org/daisy/fraser/opnfv-6.0.iso -o opnfv.bin
+# make opnfv.bin executable
+#chmod 777 opnfv.bin
+
+
+
+# The deploy.yaml file is the inventory template of deployment nodes:
+# error from doc: "./deploy/conf/vm_environment/zte-virtual1/deploy.yml"
+# correct path: "./deploy/config/vm_environment/zte-virtual1/deploy.yml"
+# You can write your own name/roles reference into it:
+# name – Host name for deployment node after installation.
+# roles – Components deployed.
+# note: ./templates/virtual_environment/ contains xml files, for networks and VMs
+
+
+# prepare config dir for Auto lab in daisy dir, and copy deploy and network YAML files from default files (virtual1 or virtual2)
+export AUTO_DAISY_LAB_CONFIG1=labs/auto_daisy_lab/virtual1/daisy/config
+export DAISY_DEFAULT_ENV1=deploy/config/vm_environment/zte-virtual1
+mkdir -p $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/deploy.yml $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/network.yml $AUTO_DAISY_LAB_CONFIG1
+
+export AUTO_DAISY_LAB_CONFIG2=labs/auto_daisy_lab/virtual2/daisy/config
+export DAISY_DEFAULT_ENV2=deploy/config/vm_environment/zte-virtual2
+mkdir -p $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/deploy.yml $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/network.yml $AUTO_DAISY_LAB_CONFIG2
+
+# Note:
+# - zte-virtual1 config files deploy openstack with five nodes (3 LB nodes and 2 compute nodes).
+# - zte-virtual2 config files deploy an all-in-one openstack
+
+# run deploy script, scenario os-nosdn-nofeature-ha, multinode OpenStack
+sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual1 -s os-nosdn-nofeature-ha
+
+# run deploy script, scenario os-nosdn-nofeature-noha, all-in-one OpenStack
+# sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual2 -s os-nosdn-nofeature-noha
+
+
+# Notes about deploy.sh:
+# The value after -L should be an absolute path which points to the directory which includes $AUTO_DAISY_LAB_CONFIG directory.
+# The value after -p parameter (virtual1 or virtual2) should match the one selected for $AUTO_DAISY_LAB_CONFIG.
+# The value after -l parameter (e.g. auto_daisy_lab) should match the lab name selected for $AUTO_DAISY_LAB_CONFIG, after labs/ .
+# Scenario (-s parameter): "os-nosdn-nofeature-ha" is used for deploying multinode openstack (virtual1)
+# Scenario (-s parameter): "os-nosdn-nofeature-noha" is used for deploying all-in-one openstack (virtual2)
+
+# more details on deploy.sh OPTIONS:
+# -B PXE Bridge for booting Daisy Master, optional
+# -D Dry-run, does not perform deployment, will be deleted later
+# -L Securelab repo absolute path, optional
+# -l LAB name, necessary
+# -p POD name, necessary
+# -r Remote workspace in target server, optional
+# -w Workdir for temporary usage, optional
+# -h Print this message and exit
+# -s Deployment scenario
+# -S Skip recreate Daisy VM during deployment
+
+# When deployed successfully, the floating IP of openstack is 10.20.11.11, the login account is "admin" and the password is "keystone"
diff --git a/ci/deploy-opnfv-fuel-ubuntu.sh b/ci/deploy-opnfv-fuel-ubuntu.sh
new file mode 100644
index 0000000..db276b2
--- /dev/null
+++ b/ci/deploy-opnfv-fuel-ubuntu.sh
@@ -0,0 +1,199 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##############################################################################
+## installation of OpenStack via OPNFV Fuel/MCP, on Ubuntu, virtual deployment
+##############################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/installation.instruction.html#opnfv-software-installation-and-deployment
+
+# Steps:
+# step 1: download Fuel/MCP repository and run deploy script
+# (this example: x86, virtual deploy, os-nosdn-nofeature-noha scenario)
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+# step 3: add more resources to OpenStack instance (vCPUs, RAM)
+# step 4: download Auto repository
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+
+echo "*** begin AUTO install: OPNFV Fuel/MCP"
+
+
+# step 1: download Fuel/MCP repository and run deploy script
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-fuel
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# get Fuel repository
+git clone https://git.opnfv.org/fuel
+# cd in new fuel repository, which contains directories: mcp, ci, etc.
+# note: this is for x86_64 architectures; for aarch64 architectures, git clone https://git.opnfv.org/armband and cd armband instead
+cd fuel
+
+# edit NOHA scenario YAML file with more resources for compute nodes: 32 vCPUs, 192G RAM
+{ printf " cmp01:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n";\
+ printf " cmp02:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n"; } >> mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+# provide more storage space to VMs: 350G per compute node (default is 100G)
+sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+# launch OPNFV Fuel/MCP deploy script
+ci/deploy.sh -l local -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+
+
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+
+# install python 3 on Ubuntu
+echo "*** begin install python 3"
+sudo apt-get -y update
+sudo apt-get -y install python3
+# maybe clean-up packages
+# sudo apt -y autoremove
+# specific install of a python version, e.g. 3.6
+# sudo apt-get install python3.6
+
+# http://docs.python-guide.org/en/latest/starting/install3/linux/
+# sudo apt-get install software-properties-common
+# sudo add-apt-repository ppa:deadsnakes/ppa
+# sudo apt-get update
+# sudo apt-get install python3.6
+echo "python2 --version: $(python2 --version)"
+echo "python3 --version: $(python3 --version)"
+echo "which python: $(which python)"
+
+# install pip3 for python3; /usr/local/bin/pip3 vs. /usr/bin/pip3; solve with "hash -r"
+echo "*** begin install pip3 for python3"
+apt -y install python3-pip
+hash -r
+pip3 install --upgrade pip
+hash -r
+
+echo "\$PATH: $PATH"
+echo "which pip: $(which pip)"
+echo "which pip3: $(which pip3)"
+
+# install OpenStack SDK Python client
+echo "*** begin install OpenStack SDK Python client"
+pip3 install openstacksdk
+pip3 install --upgrade openstacksdk
+
+# install OpenStack CLI
+echo "*** begin install OpenStack CLI"
+pip3 install python-openstackclient
+pip3 install --upgrade python-openstackclient
+
+pip3 install --upgrade python-keystoneclient
+pip3 install --upgrade python-neutronclient
+pip3 install --upgrade python-novaclient
+pip3 install --upgrade python-glanceclient
+pip3 install --upgrade python-cinderclient
+
+# install OpenStack Heat (may not be installed by default), may be useful for VNF installation
+#apt install python3-heatclient
+echo "*** begin install OpenStack Heat"
+pip3 install --upgrade python-heatclient
+
+# package verification printouts
+echo "*** begin package verification printouts"
+pip3 list
+pip3 show openstacksdk
+pip3 check
+
+
+
+# step 3: add more resources to OpenStack instance
+
+# now that OpenStack CLI is installed, finish Fuel/MCP installation:
+# take extra resources indicated in os-nosdn-nofeature-noha.yaml into account as quotas in the OpenStack instance
+# (e.g. 2 compute nodes with 32 vCPUs and 192G RAM each => 64 cores and 384G=393,216M RAM)
+# enter environment variables hard-coded here, since always the same for Fuel/MCP; there could be better ways to do this :)
+
+export OS_AUTH_URL=http://10.16.0.107:5000/v3
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+export OS_USERNAME="admin"
+export OS_PASSWORD="opnfv_secret"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
+
+# at this point, openstack CLI commands should work
+echo "*** finish install OPNFV Fuel/MCP"
+openstack quota set --cores 64 admin
+openstack quota set --ram 393216 admin
+
+
+
+# step 4: download Auto repository
+
+# install OPNFV Auto
+# prepare install directory
+echo "*** begin install OPNFV Auto"
+mkdir -p /opt/opnfv-Auto
+cd /opt/opnfv-Auto
+# get Auto repository from Gerrit
+git clone https://gerrit.opnfv.org/gerrit/auto
+# cd in new auto repository, which contains directories: lib, setup, ci, etc.
+cd auto
+
+
+
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+# download images used by script, unless downloading images from URL works from the script
+echo "*** begin download images"
+cd setup/VIMs/OpenStack
+mkdir images
+cd images
+#CirrOS
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
+# Ubuntu 16.04 LTS (Xenial Xerus)
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img
+# Ubuntu 14.04.5 LTS (Trusty Tahr)
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img
+
+# launch script to populate the OpenStack instance
+echo "*** begin populate OpenStack instance with ONAP objects"
+cd ..
+python3 auto_script_config_openstack_for_onap.py
+
+echo "*** end AUTO install: OPNFV Fuel/MCP"
+
diff --git a/ci/plot-results.sh b/ci/plot-results.sh
new file mode 100755
index 0000000..22ab1d6
--- /dev/null
+++ b/ci/plot-results.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright 2017-2018 Intel Corporation., Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for graphical representation of AUTO result summaries
+#
+# Usage:
+#   ./plot-results.sh [directory]
+#
+# where:
+# "directory" is an optional directory name, where summary of auto
+# installation report is stored
+# Default value: "$HOME/auto_ci_daily_logs"
+
+NUMBER_OF_RESULTS=50 # max number of recent results to be compared in graph
+DIR="$HOME/auto_ci_daily_logs"
+
+function clean_data() {
+ rm -rf summary.csv
+ rm -rf graph*plot
+ rm -rf graph*txt
+ rm -rf graph*png
+}
+
+function prepare_data() {
+ FIRST=1
+ CSV_LIST=$(ls -1 ${DIR}/deploy_summary*csv | tail -n ${NUMBER_OF_RESULTS})
+ for result_file in $CSV_LIST ; do
+ tmp_dir=`dirname $result_file`
+ TIMESTAMP=`basename $tmp_dir | cut -d'_' -f2-`
+ if [ $FIRST -eq 1 ] ; then
+ head -n1 $result_file > summary.csv
+ FIRST=0
+ fi
+ tail -n+2 ${result_file} >> summary.csv
+ done
+}
+
+function plot_data() {
+ echo "Created graphs:"
+ for TYPE in png txt; do
+ for GRAPH in "graph_pods" "graph_tcs" ; do
+ OUTPUT="$GRAPH.plot"
+ GRAPH_NAME="${GRAPH}.${TYPE}"
+ cat > $OUTPUT <<- EOM
+set datafile separator ","
+set xdata time
+set timefmt "%Y%m%d_%H%M%S"
+set format x "%m-%d"
+set xlabel "date"
+set format y "%8.0f"
+EOM
+ if [ "$TYPE" == "png" ] ; then
+ echo 'set term png size 1024,768' >> $OUTPUT
+ else
+ echo 'set term dumb 100,30' >> $OUTPUT
+ fi
+
+ if [ "$GRAPH" == "graph_pods" ] ; then
+ echo 'set ylabel "PODs"' >> $OUTPUT
+ echo 'set yrange [0:]' >> $OUTPUT
+ echo "set title \"ONAP K8S PODs\"" >> $OUTPUT
+ COL1=3
+ COL2=4
+ else
+ echo 'set ylabel "testcases"' >> $OUTPUT
+ echo 'set yrange [0:]' >> $OUTPUT
+ echo "set title \"ONAP Health TestCases\"" >> $OUTPUT
+ COL1=5
+ COL2=6
+ fi
+
+ iter=0
+ echo "set output \"$GRAPH_NAME\"" >> $OUTPUT
+ echo -n "plot " >> $OUTPUT
+ echo $"'summary.csv' using 1:$COL1 with linespoints title columnheader($COL1) \\" >> $OUTPUT
+ echo $", 'summary.csv' using 1:$COL2 with linespoints title columnheader($COL2) \\" >> $OUTPUT
+ gnuplot $OUTPUT
+ echo -e "\t$GRAPH_NAME"
+ done
+ done
+}
+
+#
+# Main body
+#
+clean_data
+prepare_data
+plot_data
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..3c4453e
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import *
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..ba6ee9d
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: AUTO
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..9e0614b
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,18 @@
+.. _auto:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV Auto (ONAP-Automated OPNFV)
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/configguide/index
+ release/userguide/index
+ release/release-notes/index
diff --git a/docs/release/configguide/Auto-featureconfig.rst b/docs/release/configguide/Auto-featureconfig.rst
new file mode 100644
index 0000000..15126a8
--- /dev/null
+++ b/docs/release/configguide/Auto-featureconfig.rst
@@ -0,0 +1,331 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+Introduction
+============
+
+This document describes the software and hardware reference frameworks used by Auto,
+and provides guidelines on how to perform configurations and additional installations.
+
+
+Goal
+====
+
+The goal of :ref:`Auto <auto-releasenotes>`
+
+installation and configuration is to prepare an environment where the
+:ref:`Auto use cases <auto-userguide>`
+
+can be assessed, i.e. where the corresponding test cases can be executed and their results can be collected for analysis.
+See the :ref:`Auto Release Notes <auto-releasenotes>`
+
+for a discussion of the test results analysis loop.
+
+An instance of ONAP needs to be present, as well as a number of deployed VNFs, in the scope of the use cases.
+Simulated traffic needs to be generated, and then test cases can be executed. There are multiple parameters to
+the Auto environment, and the same set of test cases will be executed on each environment, so as to be able to
+evaluate the influence of each environment parameter.
+
+The initial Auto use cases cover:
+
+* **Edge Cloud** (increased autonomy and automation for managing Edge VNFs)
+* **Resilience Improvements through ONAP** (reduced recovery time for VNFs and end-to-end services in case of failure
+ or suboptimal performance)
+* **Enterprise vCPE** (automation, cost optimization, and performance assurance of enterprise connectivity to Data Centers
+ and the Internet)
+
+The general idea of the Auto feature configuration is to install an OPNFV environment (comprising at least one Cloud Manager),
+an ONAP instance, ONAP-deployed VNFs as required by use cases, possibly additional cloud managers not
+already installed during the OPNFV environment setup, traffic generators, and the Auto-specific software
+for the use cases (which can include test frameworks such as `Robot <http://robotframework.org/>`_ or :doc:`Functest <functest:release/release-notes>`).
+
+The ONAP instance needs to be configured with policies and closed-loop controls (also as required by use cases),
+and the test framework controls the execution and result collection of all the test cases. Then, test case execution
+results can be analyzed, so as to fine-tune policies and closed-loop controls, and to compare environment parameters.
+
+The following diagram illustrates execution environments, for x86 architectures and for Arm architectures,
+and other environment parameters (see the Release Notes for a more detailed discussion on the parameters).
+The installation process depends on the underlying architecture, since certain components may require a
+specific binary-compatible version for a given x86 or Arm architecture. The preferred variant of ONAP is one
+that runs on Kubernetes, while all VNF types are of interest to Auto: VM-based or containerized (on any cloud
+manager), for x86 or for Arm. In fact, even PNFs could be considered, to support the evaluation of hybrid PNF/VNF
+transition deployments (ONAP has the ability of also managing legacy PNFs).
+
+The initial VM-based VNFs will cover OpenStack, and in future Auto releases, additional cloud managers will be considered.
+The configuration of ONAP and of test cases should not depend on the underlying architecture and infrastructure.
+
+.. image:: auto-installTarget-generic.png
+
+
+For each component, various installer tools will be considered (as environment parameters), so as to enable comparison,
+as well as ready-to-use setups for Auto end-users. For example, the most natural installer for ONAP would be
+OOM (ONAP Operations Manager). For the OPNFV infrastructure, supported installer projects will be used: Fuel/MCP,
+Compass4NFV, Apex/TripleO, Daisy4NFV. Note that JOID was last supported in OPNFV Fraser 6.2, and is not supported
+anymore as of Gambia 7.0.
+
+The initial version of Auto will focus on OpenStack VM-based VNFs, onboarded and deployed via ONAP API
+(not by ONAP GUI, for the purpose of automation). ONAP is installed on Kubernetes. Two or more servers from LaaS
+are used: one or more to support an OpenStack instance as provided by the OPNFV installation via Fuel/MCP or other
+OPNFV installers (Compass4NFV, Apex/TripleO, Daisy4NFV), and the other(s) to support ONAP with Kubernetes
+and Docker. Therefore, the VNF execution environment is composed of the server(s) with the OpenStack instance(s).
+Initial tests will also include ONAP instances installed on bare-metal servers (i.e. not directly on an OPNFV
+infrastructure; the ONAP/OPNFV integration can start at the VNF environment level; but ultimately, ONAP should
+be installed within an OPNFV infrastructure, for full integration).
+
+.. image:: auto-installTarget-initial.png
+
+ONAP/K8S has several variants. The initial variant considered by Auto is the basic one recommended by ONAP,
+which relies on the Rancher installer and on OpenStack VMs providing VMs for the Rancher master and for the
+Kubernetes cluster workers, as illustrated below for ONAP-Beijing release:
+
+.. image:: auto-installTarget-ONAP-B.png
+
+
+The OpenStack instance running VNFs may need to be configured as per ONAP expectations, for example creating
+instances of ONAP projects/tenants, users, security groups, networks (private, public), connected to the
+Internet by a Router, and making sure expected VM images and flavors are present. A script (using OpenStack
+SDK, or OpenStack CLI, or even OpenStack Heat templates) would populate the OpenStack instance, as illustrated below:
+
+.. image:: auto-OS-config4ONAP.png
+
+That script can also delete these created objects, so it can be used in tear-down procedures as well
+(use -del or --delete option). It is located in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ ,
+under the setup/VIMs/OpenStack directory:
+
+* auto_script_config_openstack_for_onap.py
+
+
+Jenkins (or more precisely JJB: Jenkins Job Builder) will be used for Continuous Integration in OPNFV releases,
+to ensure that the latest master branch of Auto is always working. The first 3 tasks in the pipeline would be:
+install OpenStack instance via an OPNFV installer (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV), configure
+the OpenStack instance for ONAP, install ONAP (using the OpenStack instance network IDs in the ONAP YAML file).
+
+Moreover, Auto will offer an API, which can be imported as a module, and can be accessed for example
+by a web application. The following diagram shows the planned structure for the Auto Git repository,
+supporting this module, as well as the installation scripts, test case software, utilities, and documentation.
+
+.. image:: auto-repo-folders.png
+
+
+
+Pre-configuration activities
+============================
+
+The following resources will be required for the initial version of Auto:
+
+* at least two LaaS (OPNFV Lab-as-a-Service) pods (or equivalent in another lab), with their associated network
+ information. Later, other types of target pods will be supported, such as clusters (physical bare-metal or virtual).
+ The pods can be either x86 or Arm CPU architectures. An effort is currently ongoing (ONAP Integration team, and Auto team),
+ to ensure Arm binaries are available for all ONAP components in the official ONAP Docker registry.
+* the `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
+ (clone from `Gerrit Auto <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_)
+
+
+
+Hardware configuration
+======================
+
+ONAP needs relatively large servers (at least 512G RAM, 1TB storage, 80-100 CPU threads). Initial deployment
+attempts on single servers did not complete. Current attempts use 3-server clusters, on bare-metal.
+
+For initial VNF deployment environments, virtual deployments by OPNFV installers on a single server should suffice.
+Later, if many large VNFs are deployed for the Auto test cases, and if heavy traffic is generated, more servers
+might be necessary. Also, if many environment parameters are considered, full executions of all test cases
+on all environment configurations could take a long time, so parallel executions of independent test case batches
+on multiple sets of servers and clusters might be considered.
+
+
+
+Feature configuration
+=====================
+
+Environment installation
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Current Auto work in progress is captured in the
+`Auto Lab Deployment wiki page <https://wiki.opnfv.org/display/AUTO/Auto+Lab+Deployment>`_.
+
+
+OPNFV with OpenStack
+~~~~~~~~~~~~~~~~~~~~
+
+The first Auto installation used the Fuel/MCP installer for the OPNFV environment (see the
+`OPNFV download page <https://www.opnfv.org/software/downloads>`_).
+
+The following figure summarizes the two installation cases for Fuel: virtual or bare-metal.
+This OPNFV installer starts with installing a Salt Master, which then configures
+subnets and bridges, and install VMs (e.g., for controllers and compute nodes)
+and an OpenStack instance with predefined credentials.
+
+.. image:: auto-OPFNV-fuel.png
+
+
+The Auto version of OPNFV installation configures additional resources for the OpenStack virtual pod
+(more virtual CPUs and more RAM), as compared to the default installation. Examples of manual steps are as follows:
+
+.. code-block:: console
+
+ 1. mkdir /opt/fuel
+ 2. cd /opt/fuel
+ 3. git clone https://git.opnfv.org/fuel
+ 4. cd fuel
+ 5. vi /opt/fuel/fuel/mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+
+These lines can be added to configure more resources:
+
+.. code-block:: yaml
+
+ gtw01:
+ ram: 2048
+ + cmp01:
+ + vcpus: 32
+ + ram: 196608
+ + cmp02:
+ + vcpus: 32
+ + ram: 196608
+
+
+The final steps deploy OpenStack (duration: approximately between 30 and 45 minutes).
+
+.. code-block:: console
+
+ # The following change will provide more space to VMs. Default is 100G per cmp0x. This gives 350 each and 700 total.
+ 6. sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+ # Then deploy OpenStack. It should take between 30 and 45 minutes:
+ 7. ci/deploy.sh -l UNH-LaaS -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+ # Lastly, to get access to the extra RAM and vCPUs, adjust the quotas (done on the controller at 172.16.10.36):
+ 8. openstack quota set --cores 64 admin
+ 9. openstack quota set --ram 393216 admin
+
+
+Note:
+
+* with Linux Kernel 4.4, the installation of OPNFV is not working properly (seems to be a known bug of 4.4, as it works correctly with 4.13):
+ neither qemu-nbd nor kpartx are able to correctly create a mapping to /dev/nbd0p1 partition in order to resize it to 3G (see Fuel repository,
+ file `mcp/scripts/lib.sh <https://git.opnfv.org/fuel/tree/mcp/scripts/lib.sh>`_ , function mount_image).
+* it is not a big deal in case of x86, because it is still possible to update the image and complete the installation even with the
+ original partition size.
+* however, in the case of ARM, the OPNFV installation will fail, because there isn't enough space to install all required packages into
+ the cloud image.
+
+Using the above as starting point, Auto-specific scripts have been developed, for each of the 4 OPNFV installers Fuel/MCP,
+Compass4NFV, Apex/TripleO, Daisy4NFV. Instructions for virtual deployments from each of these installers have been used, and
+sometimes expanded and clarified (missing details or steps from the instructions).
+They can be found in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ , under the ci directory:
+
+* deploy-opnfv-fuel-ubuntu.sh
+* deploy-opnfv-compass-ubuntu.sh
+* deploy-opnfv-apex-centos.sh
+* deploy-opnfv-daisy-centos.sh
+
+
+
+ONAP on Kubernetes
+~~~~~~~~~~~~~~~~~~
+
+An ONAP installation on OpenStack has also been investigated, but we focus here on
+the ONAP on Kubernetes version.
+
+The initial focus is on x86 architectures. The ONAP DCAE component for a while was not operational
+on Kubernetes (with ONAP Amsterdam), and had to be installed separately on OpenStack. So the ONAP
+instance was a hybrid, with all components except DCAE running on Kubernetes, and DCAE running
+separately on OpenStack. Starting with ONAP Beijing, DCAE also runs on Kubernetes.
+
+For Arm architectures, specialized Docker images are being developed to provide Arm architecture
+binary compatibility. See the :ref:`Auto Release Notes <auto-releasenotes>`
+for more details on the availability status of these Arm images in the ONAP Docker registry.
+
+The ONAP reference for this installation is detailed `here <http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_user_guide.html>`_.
+
+Examples of manual steps for the deploy procedure are as follows:
+
+.. code-block:: console
+
+ 1 git clone https://gerrit.onap.org/r/oom
+ 2 cd oom
+ 3 git pull https://gerrit.onap.org/r/oom refs/changes/19/32019/6
+ 4 cd install/rancher
+ 5 ./oom_rancher_setup.sh -b master -s <your external ip> -e onap
+ 6 cd oom/kubernetes/config
+ 7 (modify onap-parameters.yaml for VIM connection (manual))
+ 8 ./createConfig.sh -n onap
+ 9 cd ../oneclick
+ 10 ./createAll.bash -n onap
+
+Several automation efforts to integrate the ONAP installation in Auto CI are in progress.
+One effort involves using a 3-server cluster at OPNFV Pharos LaaS (Lab-as-a-Service).
+The script is available in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ , under the ci directory:
+
+* deploy-onap.sh
+
+
+
+ONAP configuration
+^^^^^^^^^^^^^^^^^^
+
+This section describes the logical steps performed by the Auto scripts to prepare ONAP and VNFs.
+
+
+VNF deployment
+~~~~~~~~~~~~~~
+
+<TBC; pre-onboarding, onboarding, deployment>
+
+
+Policy and closed-loop control configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+<TBC>
+
+
+Traffic Generator configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+<TBC>
+
+
+
+Test Case software installation and execution control
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+<TBC; mention the management of multiple environments (characterized by their parameters), execution of all test cases
+in each environment, only a subset in official OPNFV CI/CD Jenkins due to size and time limits; then posting and analysis
+of results; failures lead to bug-fixing, successes lead to analysis for comparisons and fine-tuning>
+
+
+
+Installation health-check
+=========================
+
+<TBC; the Auto installation will self-check, but indicate here manual steps to double-check that the
+installation was successful>
+
+
+
+
+References
+==========
+
+Auto Wiki pages:
+
+* `Auto wiki main page <https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095>`_
+* `Auto Lab Deployment wiki page <https://wiki.opnfv.org/display/AUTO/Auto+Lab+Deployment>`_
+
+
+OPNFV documentation on Auto:
+
+* :ref:`Auto Release Notes <auto-releasenotes>`
+* :ref:`Auto use case user guides <auto-userguide>`
+
+
+Git&Gerrit Auto repositories:
+
+* `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
+* `Gerrit for Auto project <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_
+
diff --git a/docs/release/configguide/Auto-postinstall.rst b/docs/release/configguide/Auto-postinstall.rst
new file mode 100644
index 0000000..500a99d
--- /dev/null
+++ b/docs/release/configguide/Auto-postinstall.rst
@@ -0,0 +1,28 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+********************************
+Auto Post Installation Procedure
+********************************
+
+<TBC; normally, the installation is self-contained and there should be no need for post-installation manual steps;
+possibly input for CI toolchain and deployment pipeline in first section>
+
+
+Automated post installation activities
+======================================
+<TBC if needed>
+
+
+<Project> post configuration procedures
+=======================================
+<TBC if needed>
+
+
+Platform components validation
+==============================
+<TBC if needed>
+
diff --git a/docs/release/configguide/auto-OPFNV-fuel.png b/docs/release/configguide/auto-OPFNV-fuel.png
new file mode 100644
index 0000000..3100d40
--- /dev/null
+++ b/docs/release/configguide/auto-OPFNV-fuel.png
Binary files differ
diff --git a/docs/release/configguide/auto-OS-config4ONAP.png b/docs/release/configguide/auto-OS-config4ONAP.png
new file mode 100644
index 0000000..ecde147
--- /dev/null
+++ b/docs/release/configguide/auto-OS-config4ONAP.png
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-ONAP-B.png b/docs/release/configguide/auto-installTarget-ONAP-B.png
new file mode 100644
index 0000000..dc069fe
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-ONAP-B.png
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-generic.png b/docs/release/configguide/auto-installTarget-generic.png
new file mode 100644
index 0000000..6740933
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-generic.png
Binary files differ
diff --git a/docs/release/configguide/auto-installTarget-initial.png b/docs/release/configguide/auto-installTarget-initial.png
new file mode 100644
index 0000000..465b468
--- /dev/null
+++ b/docs/release/configguide/auto-installTarget-initial.png
Binary files differ
diff --git a/docs/release/configguide/auto-repo-folders.png b/docs/release/configguide/auto-repo-folders.png
new file mode 100644
index 0000000..1c9d6a4
--- /dev/null
+++ b/docs/release/configguide/auto-repo-folders.png
Binary files differ
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
new file mode 100644
index 0000000..07b7ab6
--- /dev/null
+++ b/docs/release/configguide/index.rst
@@ -0,0 +1,16 @@
+.. _auto-configguide:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*****************************************************
+OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide
+*****************************************************
+
+.. toctree::
+ :maxdepth: 3
+
+ Auto-featureconfig.rst
+ Auto-postinstall.rst
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
new file mode 100644
index 0000000..ed6524d
--- /dev/null
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -0,0 +1,490 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+Auto Release Notes
+==================
+
+This document provides the release notes for the Gambia 7.0 release of Auto.
+
+
+Important notes for this release
+================================
+
+The initial release for Auto was in Fraser 6.0 (project inception: July 2017).
+
+
+Summary
+=======
+
+Overview
+^^^^^^^^
+
+OPNFV is an SDNFV system integration project for open-source components, which so far have been mostly limited to
+the NFVI+VIM as generally described by `ETSI <https://www.etsi.org/technologies-clusters/technologies/nfv>`_.
+
+In particular, OPNFV has yet to integrate higher-level automation features for VNFs and end-to-end Services.
+
+As an OPNFV project, Auto (*ONAP-Automated OPNFV*) will focus on ONAP component integration and verification with
+OPNFV reference platforms/scenarios, through primarily a post-install process, in order to avoid impact to OPNFV
+installer projects (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV). As much as possible, this will use a generic
+installation/integration process (not specific to any OPNFV installer's technology).
+
+* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers
+ robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of
+ Software-Defined Networks (SDNs). The current release of ONAP is B (Beijing).
+
+Auto aims at validating the business value of ONAP in general, but especially within an OPNFV infrastructure
+(integration of ONAP and OPNFV). Business value is measured in terms of improved service quality (performance,
+reliability, ...) and OPEX reduction (VNF management simplification, power consumption reduction, ...), as
+demonstrated by use cases.
+
+Auto also validates multi-architecture software (binary images and containers) availability of ONAP and OPNFV:
+CPUs (x86, ARM) and Clouds (MultiVIM).
+
+In other words, Auto is a turnkey approach to automatically deploy an integrated open-source virtual network
+based on OPNFV (as infrastructure) and ONAP (as end-to-end service manager), that demonstrates business value
+to end-users (IT/Telco service providers, enterprises).
+
+
+While all of ONAP is in scope, as it proceeds, the Auto project will focus on specific aspects of this integration
+and verification in each release. Some example topics and work items include:
+
+* How ONAP meets VNFM standards, and interacts with VNFs from different vendors
+* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the
+ upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to
+ interact with SDN-C. For interaction with multiple cloud infrastructures, the MultiVIM ONAP component will be used.
+* How ONAP leverages OPNFV installers (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV) to provide a cloud
+ instance (starting with OpenStack) on which to install the tool ONAP
+* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control
+ functions such as Policy Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI
+ and VIM features, as prioritized by the project with OPNFV technical community and
+ EUAG (`End User Advisory Group <https://www.opnfv.org/end-users/end-user-advisory-group>`_) input.
+
+ Examples:
+
+ * Abstraction of networking tech/features e.g. through NetReady/Gluon
+ * Blueprint-based VNF deployment (HOT, TOSCA, YANG)
+ * Application level configuration and lifecycle through YANG (for any aspects depending upon OPNFV NFVI+VIM components)
+ * Policy (through DCAE)
+ * Telemetry (through VES/DCAE)
+
+Initial areas of focus for Auto (in orange dotted lines; this scope can be expanded for future releases).
+It is understood that:
+
+* ONAP scope extends beyond the lines drawn below
+* ONAP architecture does not necessarily align with the ETSI NFV inspired diagrams this is based upon
+
+.. image:: auto-proj-rn01.png
+
+
+The current ONAP architecture overview can be found `here <https://onap.readthedocs.io/en/latest/guides/onap-developer/architecture/onap-architecture.html>`_.
+
+For reference, the ONAP-Beijing architecture diagram is replicated here:
+
+.. image:: ONAP-toplevel-beijing.png
+
+
+Within OPNFV, Auto leverages tools and collaborates with other projects:
+
+* use clouds/VIMs as installed in OPNFV infrastructure (e.g. OpenStack as installed by Fuel/MCP, Compass4NFV, etc.)
+* include VNFs developed by OPNFV data plane groups (e.g., accelerated by VPP (Vector Packet Processing) with DPDK support, ...)
+* validate ONAP+VNFs+VIMs on two major CPU architectures: x86 (CISC), Arm (RISC); collaborate with OPNFV/Armband
+* work with other related groups in OPNFV:
+
+ * FuncTest for software verification (CI/CD, Pass/Fail)
+ * Yardstick for metric management (quantitative measurements)
+ * VES (VNF Event Stream) and Barometer for VNF monitoring (feed to ONAP/DCAE)
+ * Edge Cloud as use case
+
+* leverage OPNFV tools and infrastructure:
+
+ * Pharos as LaaS: transient pods (3-week bookings) and permanent Arm pod (6 servers)
+ * `WorksOnArm <http://worksonarm.com/cluster>`_ (`GitHub link <http://github.com/worksonarm/cluster>`_)
+ * possibly other labs from the community (Huawei pod-12, 6 servers, x86)
+ * JJB/Jenkins for CI/CD (and follow OPNFV scenario convention)
+ * Gerrit/Git for code and documents reviewing and archiving (similar to ONAP: Linux Foundation umbrella)
+ * follow OPNFV releases (Releng group)
+
+
+Testability
+^^^^^^^^^^^
+
+* Tests (test cases) will be developed for use cases within the project scope.
+* In future releases, tests will be added to Functest runs for supporting scenarios.
+
+Auto’s goals include the standup and tests for integrated ONAP-Cloud platforms (“Cloud” here being OPNFV “scenarios”
+or other cloud environments). Thus, the artifacts would be tools to deploy ONAP (leveraging OOM whenever possible,
+starting with Beijing release of ONAP, and a preference for the containerized version of ONAP), to integrate it with
+clouds, to onboard and deploy test VNFs, to configure policies and closed-loop controls, and to run use-case defined
+tests against that integrated environment. OPNFV scenarios would be a possible component in the above.
+
+Installing Auto components and running a battery of tests will be automated, with some or all of the tests being
+integrated in OPNFV CI/CD (depending on the execution length and resource consumption).
+
+Combining all potential parameters, a full set of Auto test case executions can result in thousands of individual results.
+The analysis of these results can be performed by humans, or even by ML/AI (Machine Learning, Artificial Intelligence).
+Test results will be used to fine-tune policies and closed-loop controls configured in ONAP, for increased ONAP business
+value (i.e., find/determine policies and controls which yield optimized ONAP business value metrics such as OPEX).
+
+More precisely, the following list shows parameters that could be applied to an Auto full run of test cases:
+
+* Auto test cases for given use cases
+* OPNFV installer {Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV}
+* OPNFV availability scenario {HA, noHA}
+* environment where ONAP runs {bare metal servers, VMs from clouds (OpenStack, AWS, GCP, Azure, ...), containers}
+* ONAP installation type {bare metal, VM, or container, ...} and options {MultiVIM single|distributed, ...}
+* VNF types {vFW, vCPE, vAAA, vDHCP, vDNS, vHSS, ...} and VNF-based services {vIMS, vEPC, ...}
+* cloud where VNFs run {OpenStack, AWS, GCP, Azure, ...}
+* VNF host type {VM, container}
+* CPU architectures {x86/AMD64, ARM/aarch64} for ONAP software and for VNF software; not really important for Auto software;
+* pod size and technology (RAM, storage, CPU cores/threads, NICs)
+* traffic types and amounts/volumes; traffic generators (although that should not really matter);
+* ONAP configuration {especially policies and closed-loop controls; monitoring types for DCAE: VES, ...}
+* versions of every component {Linux OS (Ubuntu, CentOS), OPNFV release, clouds, ONAP, VNFs, ...}
+
+The diagram below shows Auto parameters:
+
+.. image:: auto-proj-parameters.png
+
+
+The next figure is an illustration of the Auto analysis loop (design, configuration, execution, result analysis)
+based on test cases covering as many parameters as possible :
+
+.. image:: auto-proj-tests.png
+
+
+Auto currently defines three use cases: Edge Cloud (UC1), Resiliency Improvements (UC2), and Enterprise vCPE (UC3). These use cases aim to show:
+
+* increased autonomy of Edge Cloud management (automation, catalog-based deployment). This use case relates to the
+ `OPNFV Edge Cloud <https://wiki.opnfv.org/display/PROJ/Edge+cloud>`_ initiative.
+* increased resilience (i.e. fast VNF recovery in case of failure or problem, thanks to closed-loop control),
+ including end-to-end composite services of which a Cloud Manager may not be aware (VMs or containers could be
+ recovered by a Cloud Manager, but not necessarily an end-to-end service built on top of VMs or containers).
+* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with
+ SLAs and HA, as well as scaling).
+
+The use cases define test cases, which initially will be independent, but which might eventually be integrated to `FuncTest <https://wiki.opnfv.org/display/functest/Opnfv+Functional+Testing>`_.
+
+Additional use cases can be added in the future, such as vIMS (example: project `Clearwater <http://www.projectclearwater.org/>`_)
+or residential vHGW (virtual Home Gateways). The interest for vHGW is to reduce overall power consumption: even in idle mode,
+physical HGWs in residential premises consume a lot of energy. Virtualizing that service to the Service Provider edge data center
+would allow minimizing that consumption.
+
+
+Lab environment
+^^^^^^^^^^^^^^^
+
+Target architectures for all Auto use cases and test cases include x86 and Arm. Power consumption analysis will be
+performed, leveraging Functest tools (based on RedFish/IPMI/ILO).
+
+Initially, an ONAP-Amsterdam instance (without DCAE) had been installed over Kubernetes on bare metal on a single-server
+x86 pod at UNH IOL.
+
+A transition is in progress, to leverage OPNFV LaaS (Lab-as-a-Service) pods (`Pharos <https://labs.opnfv.org/>`_).
+These pods can be booked for 3 weeks only (with an extension for a maximum of 2 weeks), so they are not a permanent resource.
+
+For ONAP-Beijing, a repeatable automated installation procedure is being developed, using 3 Pharos servers (x86 for now).
+Also, a more permanent ONAP installation is in progress at a Huawei lab (pod-12, consisting of 6 x86 servers,
+1 as jump server, the other 5 with this example allocation: 3 for ONAP components, and 2 for an OPNFV infrastructure:
+OpenStack installed by Compass4NFV).
+
+ONAP-based onboarding and deployment of VNFs is in progress (ONAP-Amsterdam pre-loading of VNFs must still be done outside
+of ONAP: for VM-based VNFs, users need to prepare OpenStack stacks (using Heat templates), then make an instance snapshot
+which serves as the binary image of the VNF).
+
+A script to prepare an OpenStack instance for ONAP (creation of a public and a private network, with a router,
+pre-loading of images and flavors, creation of a security group and an ONAP user) has been developed. It leverages
+OpenStack SDK. It has a delete option, so it can be invoked to delete these objects for example in a tear-down procedure.
+
+Integration with Arm servers has started (exploring binary compatibility):
+
+* The Auto project has a specific 6-server pod of Arm servers, which is currently loaned to ONAP integration team,
+ to build ONAP images
+* A set of 14 additional Arm servers was deployed at UNH, for increased capacity
+* ONAP Docker registry: ONAP-specific images for ARM are being built, with the purpose of populating ONAP nexus2
+ (Maven2 artifacts) and nexus3 (Docker containers) repositories at Linux Foundation. Docker images are
+ multi-architecture, and the manifest of an image may contain 1 or more layers (for example 2 layers: x86/AMD64
+ and ARM/aarch64). One of ONAP-Casablanca architectural requirements is to be CPU-architecture independent.
+ There are almost 150 Docker containers in a complete ONAP instance. Currently, more disk space is being added
+ to the ARM nodes (configuration of Nova, and/or additional actual physical storage space).
+
+
+Test case design and implementation for the three use cases has started.
+
+OPNFV CI/CD integration with JJB (Jenkins Job Builder) has started: see the Auto plan description
+`here <https://wiki.opnfv.org/display/AUTO/CI+for+Auto>`_. The permanent resource for that is the 6-server Arm
+pod, hosted at UNH. The CI directory from the Auto repository is `here <https://git.opnfv.org/auto/tree/ci>`_
+
+
+Finally, the following figure illustrates Auto in terms of project activities:
+
+.. image:: auto-project-activities.png
+
+
+Note: a demo was delivered at the OpenStack Summit in Vancouver on May 21st 2018, to illustrate the deployment of
+a WordPress application (WordPress is a platform for websites and blogs) deployed on a multi-architecture cloud (mix
+of x86 and Arm servers).
+This shows how service providers and enterprises can diversify their data centers with servers of different architectures,
+and select architectures best suited to each use case (mapping application components to architectures: DBs,
+interactive servers, number-crunching modules, ...).
+This prefigures how other examples such as ONAP, VIMs, and VNFs could also be deployed on heterogeneous multi-architecture
+environments (open infrastructure), orchestrated by Kubernetes. The Auto installation scripts covering all the parameters
+described above could expand on that approach.
+
+.. image:: auto-proj-openstacksummit1805.png
+
+
+
+
+Release Data
+============
+
++--------------------------------------+--------------------------------------+
+| **Project** | Auto |
+| | |
++--------------------------------------+--------------------------------------+
+| **Repo/commit-ID** | auto/opnfv-7.0.0 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release designation** | Gambia 7.0 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release date** | 2018-11-02 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Purpose of the delivery** | Official OPNFV release |
+| | |
++--------------------------------------+--------------------------------------+
+
+Version change
+^^^^^^^^^^^^^^
+
+Module version changes
+~~~~~~~~~~~~~~~~~~~~~~
+- There have been no version changes.
+
+
+Document version changes
+~~~~~~~~~~~~~~~~~~~~~~~~
+- There have been no version changes.
+
+
+Reason for version
+^^^^^^^^^^^^^^^^^^
+
+Feature additions
+~~~~~~~~~~~~~~~~~
+
+Initial release 6.0:
+
+* Fraser release plan
+* use case descriptions
+* test case descriptions
+* in-progress test case development
+* lab: OPNFV and ONAP (Amsterdam) installations
+
+Point release 6.1:
+
+* added Gambia release plan
+* started integration with CI/CD (JJB) on permanent Arm pod
+* Arm demo at OpenStack Summit
+* initial script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.13
+* initial attempts to install ONAP Beijing
+* alignment with OPNFV Edge Cloud
+* initial contacts with Functest
+
+Point release 6.2:
+
+* initial scripts for OPNFV CI/CD, registration of Jenkins slave on `Arm pod <https://build.opnfv.org/ci/view/auto/>`_
+* updated script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.14
+
+Point release 7.0:
+
+* progress on Docker registry of ONAP's Arm images
+* progress on ONAP installation script for 3-server cluster of UNH servers
+* CI scripts for OPNFV installers: Fuel/MCP (x86), Compass, Apex/TripleO (must run twice)
+* initial CI script for Daisy4NFV (work in progress)
+* JOID script, but supported only until R6.2, not Gambia 7.0
+* completed script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.17
+* use of an additional lab resource for Auto development: 6-server x86 pod (huawei-pod12)
+
+
+
+
+
+**JIRA TICKETS for this release:**
+
+
+`JIRA Auto Gambia 7.0.0 Done <https://jira.opnfv.org/issues/?filter=12403>`_
+
+Manual selection of significant JIRA tickets for this version's highlights:
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-37 | Get DCAE running onto Pharos |
+| | deployment |
++--------------------------------------+--------------------------------------+
+| AUTO-42 | Use Compass4NFV to create an |
+| | OpenStack instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| AUTO-43 | String together scripts for Fuel, |
+| | Tool installation, ONAP preparation |
++--------------------------------------+--------------------------------------+
+| AUTO-44 | Build ONAP components for arm64 |
+| | platform |
++--------------------------------------+--------------------------------------+
+| AUTO-45 | CI: Jenkins definition of verify and |
+| | merge jobs |
++--------------------------------------+--------------------------------------+
+| AUTO-46 | Use Apex to create an OpenStack |
+| | instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| AUTO-47 | Install ONAP with Kubernetes on LaaS |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-48 | Create documentation for ONAP |
+| | deployment with Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-49 | Automate ONAP deployment with |
+| | Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-51 | huawei-pod12: Prepare IDF and PDF |
+| | files |
++--------------------------------------+--------------------------------------+
+| AUTO-52 | Deploy a running ONAP instance on |
+| | huawei-pod12 |
++--------------------------------------+--------------------------------------+
+| AUTO-54 | Use Daisy4nfv to create an OpenStack |
+| | instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+
+
+Bug corrections
+~~~~~~~~~~~~~~~
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+
+Deliverables
+============
+
+Software deliverables
+^^^^^^^^^^^^^^^^^^^^^
+
+7.0 release: in-progress Docker ARM images, install scripts, CI scripts, and test case implementations.
+
+
+Documentation deliverables
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Updated versions of:
+
+* Release Notes (this document)
+* User Guide
+* Configuration Guide
+
+(see links in References section)
+
+
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+System Limitations
+^^^^^^^^^^^^^^^^^^
+
+
+
+Known issues
+^^^^^^^^^^^^
+
+None at this point.
+
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+Workarounds
+^^^^^^^^^^^
+
+None at this point.
+
+
+
+Test Result
+===========
+
+None at this point.
+
+
+
++--------------------------------------+--------------------------------------+
+| **TEST-SUITE** | **Results:** |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+| | |
+| | |
++--------------------------------------+--------------------------------------+
+
+References
+==========
+
+For more information on the OPNFV Gambia release, please see:
+http://opnfv.org/gambia
+
+
+Auto Wiki pages:
+
+* `Auto wiki main page <https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095>`_
+
+
+OPNFV documentation on Auto:
+
+* :ref:`Auto release notes <auto-releasenotes>`
+* :ref:`Auto use case user guides <auto-userguide>`
+* :ref:`Auto configuration guide <auto-configguide>`
+
+
+Git&Gerrit Auto repositories:
+
+* `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
+* `Gerrit for Auto project <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_
+
+
+Demo at OpenStack summit May 2018 (Vancouver, BC, Canada):
+
+* YouTube video (10min 52s): `Integration testing on an OpenStack public cloud <https://youtu.be/BJ05YuusNYw>`_
+
diff --git a/docs/release/release-notes/ONAP-toplevel-beijing.png b/docs/release/release-notes/ONAP-toplevel-beijing.png
new file mode 100644
index 0000000..62a9d47
--- /dev/null
+++ b/docs/release/release-notes/ONAP-toplevel-beijing.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-openstacksummit1805.png b/docs/release/release-notes/auto-proj-openstacksummit1805.png
new file mode 100644
index 0000000..339365a
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-openstacksummit1805.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-parameters.png b/docs/release/release-notes/auto-proj-parameters.png
new file mode 100644
index 0000000..a0cbe2e
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-parameters.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-rn01.png b/docs/release/release-notes/auto-proj-rn01.png
new file mode 100644
index 0000000..65e4aa6
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-rn01.png
Binary files differ
diff --git a/docs/release/release-notes/auto-proj-tests.png b/docs/release/release-notes/auto-proj-tests.png
new file mode 100644
index 0000000..6b3be10
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-tests.png
Binary files differ
diff --git a/docs/release/release-notes/auto-project-activities.png b/docs/release/release-notes/auto-project-activities.png
new file mode 100644
index 0000000..d25ac2a
--- /dev/null
+++ b/docs/release/release-notes/auto-project-activities.png
Binary files differ
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
new file mode 100644
index 0000000..4c879f7
--- /dev/null
+++ b/docs/release/release-notes/index.rst
@@ -0,0 +1,14 @@
+.. _auto-releasenotes:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+===============================================
+OPNFV Auto (ONAP-Automated OPNFV) Release Notes
+===============================================
+
+.. toctree::
+ :maxdepth: 3
+
+ Auto-release-notes.rst
diff --git a/docs/release/userguide/UC01-feature.userguide.rst b/docs/release/userguide/UC01-feature.userguide.rst
new file mode 100644
index 0000000..5b5edb8
--- /dev/null
+++ b/docs/release/userguide/UC01-feature.userguide.rst
@@ -0,0 +1,78 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+======================================
+Auto User Guide: Use Case 1 Edge Cloud
+======================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 1: Edge Cloud.
+
+
+Description
+===========
+
+This use case aims at showcasing the benefits of using ONAP for autonomous Edge Cloud management.
+
+A high level of automation of VNF lifecycle event handling after launch is enabled by ONAP policies and closed-loop
+controls, which take care of most lifecycle events (start, stop, scale up/down/in/out, recovery/migration for HA) as
+well as their monitoring and SLA management.
+
+Multiple types of VNFs, for different execution environments, are first approved in the catalog thanks to the onboarding
+process, and then can be deployed and handled by multiple controllers in a systematic way.
+
+This results in management efficiency (lower control/automation overhead) and high degree of autonomy.
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. an Edge cloud has been deployed and is ready for operation
+#. ONAP has been deployed onto a Cloud, and is interfaced (i.e. provisioned for API access) to the Edge cloud
+
+
+
+Main Success Scenarios:
+
+* lifecycle management - start, stop, scale (dependent upon telemetry)
+
+* recovering from faults (detect, determine appropriate response, act); i.e. exercise closed-loop policy engine in ONAP
+
+ * verify mechanics of control plane interaction
+
+* collection of telemetry for machine learning
+
+
+Details on the test cases corresponding to this use case:
+
+* Environment check
+
+ * Basic environment check: Create test script to check basic VIM (OpenStack), ONAP, and VNF(s) are up and running
+
+* VNF lifecycle management
+
+ * VNF Instance Management: Validation of VNF Instance Management which includes VNF instantiation, VNF State Management and termination
+
+ * Tacker Monitoring Driver (VNFMonitorPing):
+
+ * Write Tacker Monitor driver to handle monitor_call and, based on return state value, create custom events
+ * If Ping to VNF fails, trigger below events
+
+ * Event 1 : Collect failure logs from VNF
+ * Event 2 : Soft restart/respawn the VNF
+
+ * Integrate with Telemetry
+
+ * Create TOSCA template policies to implement ceilometer data collection service
+ * Collect CPU utilization data, compare with threshold, and perform action accordingly (respawn, scale-in/scale-out)
+
+
+
+Test execution high-level description
+=====================================
+
+<TBC>
+
diff --git a/docs/release/userguide/UC02-feature.userguide.rst b/docs/release/userguide/UC02-feature.userguide.rst
new file mode 100644
index 0000000..9746914
--- /dev/null
+++ b/docs/release/userguide/UC02-feature.userguide.rst
@@ -0,0 +1,176 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+================================================================
+Auto User Guide: Use Case 2 Resiliency Improvements Through ONAP
+================================================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 2: Resiliency Improvements Through ONAP.
+
+
+Description
+===========
+
+This use case illustrates VNF failure recovery time reduction with ONAP, thanks to its automated monitoring
+and management. It:
+
+* simulates an underlying problem (failure, stress, or any adverse condition in the network that can impact VNFs)
+* tracks a VNF
+* measures the amount of time it takes for ONAP to restore the VNF functionality.
+
+The benefit for NFV edge service providers is to assess what degree of added VIM+NFVI platform resilience for VNFs
+is obtained by leveraging ONAP closed-loop control, vs. VIM+NFVI self-managed resilience (which may not be aware
+of the VNF or the corresponding end-to-end Service, but only of underlying resources such as VMs and servers).
+
+Also, a problem, or challenge, may not necessarily be a failure (which could also be recovered by other layers):
+it could be an issue leading to suboptimal performance, without failure. A VNF management layer as provided by
+ONAP may detect such non-failure problems, and provide a recovery solution which no other layer could provide
+in a given deployment.
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. Edge cloud has been deployed and is ready for operation
+#. ONAP has been deployed onto a cloud and is interfaced (i.e. provisioned for API access) to the Edge cloud
+#. Components of ONAP have been deployed on the Edge cloud as necessary for specific test objectives
+
+In future releases, Auto Use cases will also include the deployment of ONAP (if not already installed),
+the deployment of test VNFs (pre-existing VNFs in pre-existing ONAP can be used in the test as well),
+the configuration of ONAP for monitoring these VNFs (policies, CLAMP, DCAE), in addition to the test
+scripts which simulate a problem and measure recovery time.
+
+Different types of problems can be simulated, hence the identification of multiple test cases corresponding
+to this use case, as illustrated in this diagram:
+
+.. image:: auto-UC02-testcases.jpg
+
+Description of simulated problems/challenges, leading to various test cases:
+
+* Physical Infra Failure
+
+ * Migration upon host failure: Compute host power is interrupted, and affected workloads are migrated to other available hosts.
+ * Migration upon disk failure: Disk volumes are unmounted, and affected workloads are migrated to other available hosts.
+ * Migration upon link failure: Traffic on links is interrupted/corrupted, and affected workloads are migrated to other available hosts.
+ * Migration upon NIC failure: NIC ports are disabled by host commands, and affected workloads are migrated to other available hosts.
+
+* Virtual Infra Failure
+
+ * OpenStack compute host service fail: Core OpenStack service processes on compute hosts are terminated, and auto-restored, or affected workloads are migrated to other available hosts.
+ * SDNC service fail: Core SDNC service processes are terminated, and auto-restored.
+ * OVS fail: OVS bridges are disabled, and affected workloads are migrated to other available hosts.
+ * etc.
+
+* Security
+
+ * Host tampering: Host tampering is detected, the host is fenced, and affected workloads are migrated to other available hosts.
+ * Host intrusion: Host intrusion attempts are detected, an offending workload, device, or flow is identified and fenced, and as needed affected workloads are migrated to other available hosts.
+ * Network intrusion: Network intrusion attempts are detected, and an offending flow is identified and fenced.
+
+
+
+Test execution high-level description
+=====================================
+
+The following two MSCs (Message Sequence Charts) show the actors and high-level interactions.
+
+The first MSC shows the preparation activities (assuming the hardware, network, cloud, and ONAP have already
+been installed): onboarding and deployment of VNFs (via ONAP portal and modules in sequence: SDC, VID, SO),
+and ONAP configuration (policy framework, closed-loops in CLAMP, activation of DCAE).
+
+.. image:: auto-UC02-preparation.jpg
+
+
+The second MSC illustrates the pattern of all test cases for the Resiliency Improvements:
+
+* simulate the chosen problem (a.k.a. a "Challenge") for this test case, for example suspend a VM which may be used by a VNF
+* start tracking the target VNF of this test case
+* measure the ONAP-orchestrated VNF Recovery Time
+* then the test stops simulating the problem (for example: resume the VM that was suspended)
+
+In parallel, the MSC also shows the sequence of events happening in ONAP, thanks to its configuration to provide Service Assurance for the VNF.
+
+.. image:: auto-UC02-pattern.jpg
+
+
+Test design: data model, implementation modules
+===============================================
+
+The high-level design of classes identifies several entities, described as follows:
+
+* ``Test Case`` : as identified above, each is a special case of the overall use case (e.g., categorized by challenge type)
+* ``Test Definition`` : gathers all the information necessary to run a certain test case
+* ``Metric Definition`` : describes a certain metric that may be measured for a Test Case, in addition to Recovery Time
+* ``Challenge Definition`` : describe the challenge (problem, failure, stress, ...) simulated by the test case
+* ``Recipient`` : entity that can receive commands and send responses, and that is queried by the Test Definition
+ or Challenge Definition (a recipient would be typically a management service, with interfaces (CLI or API) for
+ clients to query)
+* ``Resources`` : with 3 types (VNF, cloud virtual resource such as a VM, physical resource such as a server)
+
+
+Three of these entities have execution-time corresponding classes:
+
+* ``Test Execution`` , which captures all the relevant data of the execution of a Test Definition
+* ``Challenge Execution`` , which captures all the relevant data of the execution of a Challenge Definition
+* ``Metric Value`` , which captures the quantitative measurement of a Metric Definition (with a timestamp)
+
+.. image:: auto-UC02-data1.jpg
+
+
+The following diagram illustrates an implementation-independent design of the attributes of these entities:
+
+.. image:: auto-UC02-data2.jpg
+
+
+This next diagram shows the Python classes and attributes, as implemented by this Use Case (for all test cases):
+
+.. image:: auto-UC02-data3.jpg
+
+
+Test definition data is stored in serialization files (Python pickles), while test execution data is stored in CSV files, for easier post-analysis.
+
+The module design is straightforward: functions and classes for managing data, for interfacing with recipients,
+for executing tests, and for interacting with the test user (choosing a Test Definition, showing the details of
+a Test Definition, starting the execution).
+
+.. image:: auto-UC02-module1.jpg
+
+
+This last diagram shows the test user menu functions, when used interactively:
+
+.. image:: auto-UC02-module2.jpg
+
+
+In future releases of Auto, testing environments such as Robot, FuncTest and Yardstick might be leveraged. Use Case code will then be invoked by API, not by a CLI interaction.
+
+Also, anonymized test results could be collected from users willing to share them, and aggregates could be
+maintained as benchmarks.
+
+As further illustration, the next figure shows cardinalities of class instances: one Test Definition per Test Case,
+multiple Test Executions per Test Definition, zero or one Recovery Time Metric Value per Test Execution (zero if
+the test failed for any reason, including if ONAP failed to recover the challenge), etc.
+
+.. image:: auto-UC02-cardinalities.png
+
+
+In this particular implementation, both Test Definition and Challenge Definition classes have a generic execution method
+(e.g., ``run_test_code()`` for Test Definition) which can invoke a particular script, by way of an ID (which can be
+configured, and serves as a script selector for each Test Definition instance). The overall test execution logic
+between classes is shown in the next figure.
+
+.. image:: auto-UC02-logic.png
+
+The execution of a test case starts with invoking the generic method from Test Definition, which then creates Execution
+instances, invokes Challenge Definition methods, performs the Recovery time calculation, performs script-specific
+actions, and writes results to the CSV files.
+
+Finally, the following diagram shows a mapping between these class instances and the initial test case design. It
+corresponds to the test case which simulates a VM failure, and shows how the OpenStack SDK API is invoked (with
+a connection object) by the Challenge Definition methods, to suspend and resume a VM.
+
+.. image:: auto-UC02-TC-mapping.png
+
diff --git a/docs/release/userguide/UC03-feature.userguide.rst b/docs/release/userguide/UC03-feature.userguide.rst
new file mode 100644
index 0000000..2c0d9e7
--- /dev/null
+++ b/docs/release/userguide/UC03-feature.userguide.rst
@@ -0,0 +1,112 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+
+===========================================
+Auto User Guide: Use Case 3 Enterprise vCPE
+===========================================
+
+This document provides the user guide for Fraser release of Auto,
+specifically for Use Case 3: Enterprise vCPE.
+
+
+Description
+===========
+
+This Use Case shows how ONAP can help ensure that virtual CPEs (including vFW: virtual firewalls) in Edge Cloud are enterprise-grade.
+Other vCPE examples: vAAA, vDHCP, vDNS, vGW, vBNG, vRouter, ...
+
+ONAP operations include a verification process for VNF onboarding (i.e., inclusion in the ONAP catalog), with multiple
+Roles (Designer, Tester, Governor, Operator), responsible for approving proposed VNFs (as VSPs (Vendor Software Products),
+and eventually as end-to-end Services).
+
+This process guarantees a minimum level of quality of onboarded VNFs. If all deployed vCPEs are only chosen from such an
+approved ONAP catalog, the resulting deployed end-to-end vCPE services will meet enterprise-grade requirements. ONAP
+provides a NBI (currently HTTP-based) in addition to a standard GUI portal, thus enabling a programmatic deployment of
+VNFs, still conforming to ONAP processes.
+
+Moreover, ONAP also comprises real-time monitoring (by the DCAE component), which can perform the following functions:
+
+* monitor VNF performance for SLAs
+* adjust allocated resources accordingly (elastic adjustment at VNF level: scaling out and in, possibly also scaling up and down)
+* ensure High Availability (restoration of failed or underperforming services)
+
+DCAE executes directives coming from policies described in the Policy Framework, and closed-loop controls described in the CLAMP component.
+
+ONAP can perform the provisioning side of a BSS Order Management application handling vCPE orders.
+
+Additional processing can be added to ONAP (internally as configured policies and closed-loop controls, or externally as
+separate systems): Path Computation Element and Load Balancing, and even telemetry-based Network Artificial Intelligence.
+
+Finally, this automated approach also reduces costs, since repetitive actions are designed once and executed multiple times,
+as vCPEs are instantiated and decommissioned (frequent events, given the variability of business activity, and a Small
+Business market similar to the Residential market: many contract updates resulting in many vCPE changes).
+
+NFV edge service providers need to provide site2site, site2dc (Data Center) and site2internet services to tenants both efficiently and safely, by deploying such qualified enterprise-grade vCPE.
+
+
+Preconditions:
+
+#. hardware environment in which Edge cloud may be deployed
+#. an Edge cloud has been deployed and is ready for operation
+#. enterprise edge devices, such as ThinCPE, have access to the Edge cloud with WAN interfaces
+#. ONAP components (MSO, SDN-C, APP-C and VNFM) have been deployed onto a cloud and are interfaced (i.e. provisioned for API access) to the Edge cloud
+
+
+Main Success Scenarios:
+
+* VNF spin-up
+
+ * vFW spin-up: MSO calls the VNFM to spin up a vFW instance from the catalog and then updates the active VNF list
+ * other vCPEs spin-up: MSO calls the VNFM to spin up a vCPE instance from the catalog and then updates the active VNF list
+
+* site2site
+
+ * L3VPN service subscribing: MSO calls the SDNC to create VXLAN tunnels to carry L2 traffic between client's ThinCPE and SP's vCPE, and enables vCPE to route between different sites.
+  * L3VPN service unsubscribing: MSO calls the SDNC to destroy tunnels and routes, thus disabling traffic between different sites.
+
+* site2dc (site to Data Center) by VPN
+* site2internet
+* scaling control (start with scaling out/in)
+
+See `ONAP description of vCPE use case <https://wiki.onap.org/display/DW/Use+Case+proposal%3A+Enterprise+vCPE>`_ for more details, including MSCs.
+
+
+Details on the test cases corresponding to this use case:
+
+* vCPE VNF deployment
+
+ * Spin up a vFW instance by calling NBI of the orchestrator.
+ * Following the vFW example and pattern, spin up other vCPE instances
+
+* vCPE VNF networking
+
+ * Subscribe/Unsubscribe to a VPN service: configure tenant/subscriber for vCPE, configure VPN service
+ * Subscribe/Unsubscribe to an Internet Access service: configure tenant/subscriber for vCPE, configure Internet Access service
+
+* vCPE VNF Scaling
+
+ * ONAP-based VNF Scale-out and Scale-in (using measurements arriving in DCAE, policies/CLAMP or external system performing LB function)
+ * later, possibly also scale-up and scale-down
+
+
+
+The following diagram shows these test cases:
+
+.. image:: auto-UC03-TestCases.png
+
+
+Illustration of test cases mapped to architecture, with possible external systems (BSS for Order Management, PCE+LB, Network AI):
+
+.. image:: auto-UC03-TC-archit.png
+
+
+
+
+Test execution high-level description
+=====================================
+
+<TBC>
+
diff --git a/docs/release/userguide/auto-UC02-TC-mapping.png b/docs/release/userguide/auto-UC02-TC-mapping.png
new file mode 100644
index 0000000..c2dd0db
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-TC-mapping.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-cardinalities.png b/docs/release/userguide/auto-UC02-cardinalities.png
new file mode 100644
index 0000000..10dd3b0
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-cardinalities.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-control-loop-flow.png b/docs/release/userguide/auto-UC02-control-loop-flow.png
new file mode 100644
index 0000000..b234ece
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-control-loop-flow.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data1.jpg b/docs/release/userguide/auto-UC02-data1.jpg
new file mode 100644
index 0000000..62526c5
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-data1.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data2.jpg b/docs/release/userguide/auto-UC02-data2.jpg
new file mode 100644
index 0000000..df73a94
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-data2.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-data3.jpg b/docs/release/userguide/auto-UC02-data3.jpg
new file mode 100644
index 0000000..3f84a20
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-data3.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-logic.png b/docs/release/userguide/auto-UC02-logic.png
new file mode 100644
index 0000000..90b41dd
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-logic.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-module1.jpg b/docs/release/userguide/auto-UC02-module1.jpg
new file mode 100644
index 0000000..184ab95
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-module1.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-module2.jpg b/docs/release/userguide/auto-UC02-module2.jpg
new file mode 100644
index 0000000..b95f42d
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-module2.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-pattern.jpg b/docs/release/userguide/auto-UC02-pattern.jpg
new file mode 100644
index 0000000..b2c9dee
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-pattern.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-preparation.jpg b/docs/release/userguide/auto-UC02-preparation.jpg
new file mode 100644
index 0000000..e2c0ba5
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-preparation.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC02-testcases.jpg b/docs/release/userguide/auto-UC02-testcases.jpg
new file mode 100644
index 0000000..ccb676f
--- /dev/null
+++ b/docs/release/userguide/auto-UC02-testcases.jpg
Binary files differ
diff --git a/docs/release/userguide/auto-UC03-TC-archit.png b/docs/release/userguide/auto-UC03-TC-archit.png
new file mode 100644
index 0000000..95d641b
--- /dev/null
+++ b/docs/release/userguide/auto-UC03-TC-archit.png
Binary files differ
diff --git a/docs/release/userguide/auto-UC03-TestCases.png b/docs/release/userguide/auto-UC03-TestCases.png
new file mode 100644
index 0000000..bb84a57
--- /dev/null
+++ b/docs/release/userguide/auto-UC03-TestCases.png
Binary files differ
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
new file mode 100644
index 0000000..099622c
--- /dev/null
+++ b/docs/release/userguide/index.rst
@@ -0,0 +1,26 @@
+.. _auto-userguide:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+============================================
+OPNFV Auto (ONAP-Automated OPNFV) User Guide
+============================================
+
+.. The feature user guide should provide an OPNFV user with enough information to
+.. use the features provided by the feature project in the supported scenarios.
+.. This guide should walk a user through the usage of the features once a scenario
+.. has been deployed and is active according to the installation guide provided
+.. by the installer project.
+
+.. toctree::
+ :maxdepth: 3
+
+ UC01-feature.userguide.rst
+ UC02-feature.userguide.rst
+ UC03-feature.userguide.rst
+
+.. The feature.userguide.rst files should contain the text for this document
+.. additional documents can be added to this directory and added in the right order
+.. to this file as a list below.
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/lib/auto/__init__.py b/lib/auto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/__init__.py
diff --git a/lib/auto/testcase/EdgeCloud/AutoOSPlatCheck.py b/lib/auto/testcase/EdgeCloud/AutoOSPlatCheck.py
new file mode 100644
index 0000000..5a19109
--- /dev/null
+++ b/lib/auto/testcase/EdgeCloud/AutoOSPlatCheck.py
@@ -0,0 +1,164 @@
+# !/usr/bin/python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import os
+import re
+
+import logging
+import json
+import requests
+
+logger = logging.getLogger(__name__)
+
+class OS_env_check:
+    """Edge Cloud Basic Env Function definition"""
+
+
+ def __init__(self):
+        """Variable Initialization"""
+ self.osver = "v2.0"
+ self.imagever = "v2"
+ self.keystone_ver = 'v3'
+ self.tacker_ver = 'v1.0'
+
+ def ping_os_endpoints(self):
+ "Simple ping check to OpenStack endpoint"
+
+ os_auth_url = os.environ.get('OS_AUTH_URL', None)
+ password = os.environ.get('OS_PASSWORD', None)
+ if os_auth_url is None:
+ logger.error("Source the OpenStack credentials first")
+            exit(1)
+ try:
+ if os_auth_url:
+                self.endpoint_ip = endpoint_ip = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', os_auth_url).group()
+ response = os.system("ping -c 1 " + endpoint_ip + ">/dev/null")
+ if response == 0:
+ return 0
+ else:
+ logger.error("Cannot talk to the OpenStack endpoint %s\n" % endpoint_ip)
+                    exit(1)
+ except Exception:
+ logger.exception('Errors when verifying connectivity to %s', endpoint_ip)
+ return False
+
+ def fetch_token(self):
+ "Fetch OS_AUTH_TOKEN from OpenStack Service"
+
+ #(e.g.)url = 'http://10.164.16.100:5000/identity/v3/auth/tokens'
+ url = 'http://'+self.endpoint_ip+':5000/'+self.keystone_ver+'/auth/tokens'
+ data = '{"auth":{"identity":{"methods":["password"],"password":{"user":' \
+ '{"domain":{"name":"default"},"name":"admin",' \
+ '"password":"admin"}}},"scope":{"project":' \
+ '{"domain":{"name":"default"},"name":"admin"}}}}'
+ headers = {"Accept": "application/json"}
+ try:
+ response = requests.post(url, headers=headers, data=data)
+ header_data = (response.headers)
+ token = header_data['X-Subject-Token']
+ response_body = response.content
+ except Exception:
+ logger.error(" Failure: Not able to send API request for creating token")
+ if (response.status_code == 201):
+ response_body = response.content.decode('utf-8')
+ res = json.loads(response_body)
+ admin_user_id= res['token']['user']['id']
+ return response.status_code,token
+
+ def check_os_running_services(self):
+ "Get active/running OpenStack Services"
+
+ url = 'http://' + self.endpoint_ip + ':5000/' + self.keystone_ver + '/auth/tokens'
+ data = '{"auth": {"identity": {"methods": ["password"],"password": \
+ {"user": {"domain": {"name": "default"},"name": "admin",\
+ "password": "admin"}}},\
+ "scope": {"project": {"domain": {"name": "default"},"name": "admin"}}}}'
+ headers = {"Accept": "application/json"}
+ response = requests.post(url, headers=headers, data=data)
+ service = []
+ url_ep = []
+ if (response.status_code == 201):
+ response_body = response.content.decode('utf-8')
+ res = json.loads(response_body)
+ catalogs = res['token']['catalog']
+ for x in catalogs:
+ services = x['name']
+ if x['name'] is not None:
+ service.append(x['name'])
+ endpoints = x['endpoints']
+ for y in endpoints:
+ url = y['url']
+ if y['url'] not in url_ep:
+ url_ep.append(url)
+ return response.status_code,service,url_ep
+
+ def check_nova_service(self, endpoints, token):
+ """ checks that a simple nova operation works """
+
+ try:
+ nova_url = endpoints.get('nova')
+ url = nova_url+ '/servers/detail'
+ headers = {"Content-Type": "application/json", "X-Auth-Token": token}
+ response = requests.get(url, headers=headers)
+ if (response.status_code == 200):
+ logger.info("Nova service is Active")
+ except Exception as error:
+ logger.error("Nova service is FAILED")
+ raise error
+ return response.status_code
+
+ def check_neutron_service (self, endpoints, token):
+ """ checks that a simple neutron operation works """
+
+ try:
+ neutron_url = endpoints.get('neutron')
+ url = neutron_url +self.osver+'/networks'
+ headers = {"Content-Type": "application/json", "X-Auth-Token": token}
+ response = requests.get(url, headers=headers)
+ if (response.status_code == 200):
+ logger.info("Neutron service is Active")
+ except Exception as error:
+ logger.error("Neutron service is FAILED")
+ raise error
+ return response.status_code
+
+ def check_glance_service(self, endpoints, token):
+ """ checks that a simple glance operation works """
+
+ try:
+ glance_url = endpoints.get('glance')
+ url = glance_url + '/' + self.imagever + '/images'
+ headers = {"Content-Type": "application/json", "X-Auth-Token": token}
+ response = requests.get(url, headers=headers)
+ if (response.status_code == 200):
+ logger.info("Glance:Image service is Active")
+ except Exception as error:
+ logger.error("Glance:Image service is FAILED")
+ raise error
+ return response.status_code
+
+ def check_tacker_service(self, endpoints, token):
+ """ checks that a simple tacker operation works """
+
+ try:
+ if 'tacker' in endpoints.keys():
+ logger.info("Tacker VNF Manager service is running")
+ else:
+ logger.error("No Tacker VNF Manager service is running")
+ return (0)
+ tacker_url = endpoints.get('tacker')
+ url = tacker_url + '/' + self.tacker_ver + '/vnf.json'
+ headers = {"Content-Type": "application/json", "X-Auth-Token": token}
+ response = requests.get(url, headers=headers)
+ if (response.status_code == 200):
+ logger.info("Tacker:VNF Manager has active VNFs")
+ except Exception as error:
+ logger.error("Tacker:No Active VNFs")
+ raise error
+ return response.status_code
diff --git a/lib/auto/testcase/EdgeCloud/AutoOSPlatTest.py b/lib/auto/testcase/EdgeCloud/AutoOSPlatTest.py
new file mode 100644
index 0000000..ef99ce5
--- /dev/null
+++ b/lib/auto/testcase/EdgeCloud/AutoOSPlatTest.py
@@ -0,0 +1,80 @@
+"""Script to Test the AUTO Edge Cloud OpenStack Services."""
+# !/usr/bin/python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# ###########################################################################
+# OPNFV AUTO Edge Cloud Script
+# **** Scripted by Mohankumar Navaneethan - mnavaneethan@mvista.com ******
+# ###########################################################################
+
+# Testcase 1 : Ping OpenStack Endpoints
+# Testcase 2 : Creation of Auth-Token
+# TestCase 3 : Check OpenStack Active Services
+# TestCase 4 : Check OpenStack Nova Service
+# TestCase 5 : Check OpenStack Neutron Service
+# TestCase 6 : Check OpenStack Glance Service
+# TestCase 7 : Check OpenStack Tacker Service.
+# ###########################################################################
+#
+import logging
+from AutoOSPlatCheck import OS_env_check
+
+
+class Env_check:
+ """Script to Test AUTO Edge Cloud OpenStack Services."""
+ logger = logging.getLogger(__name__)
+ Env_obj = OS_env_check()
+ print("################################################################")
+ print(" OPNFV AUTO Script ")
+ print("################################################################")
+ logger.info("Prerequisites OpenStack configuration for AUTO")
+ #########################################################################
+ logger.info("\t1. Ping OpenStack Endpoints")
+    if (Env_obj.ping_os_endpoints() == 0):
+ logger.info("\t\tPing to OpenStack Endpoint is successfull")
+ else:
+ logger.error("\t\tPing to OpenStack Endpoint is NOT successfull")
+
+ logger.info("\t2. Creation of Auth-Token")
+ response_code , token = Env_obj.fetch_token()
+ if (response_code == 201):
+ logger.info("\t\tCreation of Token is successfull")
+ else:
+ logger.error("\t\t : Creation of Token is NOT successfull")
+ logger.info("\t3. Check OpenStack Active Services")
+ status, services, endpoint = Env_obj.check_os_running_services()
+ endpoints = dict(zip(services, endpoint))
+ if (status == 201):
+ logger.info("\t\tCheck OpenStack Active Services is successfull")
+ else:
+ logger.error("\t\tCheck OpenStack Active Services is NOT successfull")
+
+ logger.info("\t4. Check OpenStack Nova Service")
+ if (Env_obj.check_nova_service(endpoints, token) == 200):
+ logger.info("\t\tNova service is responsive")
+ else:
+ logger.error("\t\tNova service is NOT responsive")
+
+ logger.info("\t5. Check OpenStack Neutron Service")
+ if (Env_obj.check_neutron_service(endpoints, token) == 200):
+ logger.info("\t\tNeutron service is responsive")
+ else:
+ logger.error("\t\tNeutron service is NOT responsive")
+
+ logger.info("\t6. Check OpenStack Glance Service")
+ if (Env_obj.check_glance_service(endpoints, token) == 200):
+ logger.info("\t\tGlance service is responsive")
+ else:
+ logger.error("\t\tGlance service is NOT responsive")
+
+ logger.info("\t7. Check OpenStack Tacker Service")
+    if (Env_obj.check_tacker_service(endpoints, token) == 200):
+ logger.info("\t\tTacker VNF Manager service is responsive")
+ else:
+ logger.error("\t\tTacker VNF Manager is NOT responsive")
diff --git a/lib/auto/testcase/EdgeCloud/__init__.py b/lib/auto/testcase/EdgeCloud/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/testcase/EdgeCloud/__init__.py
diff --git a/lib/auto/testcase/resiliency/AutoResilGlobal.py b/lib/auto/testcase/resiliency/AutoResilGlobal.py
new file mode 100644
index 0000000..1a59f4b
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilGlobal.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: global variables (list of definition data)
+
+#docstring
+"""This module contains global variable for OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+######################################################################
+# import statements
+
+
+# global variables
+test_case_list = None
+test_definition_list = None
+recipient_list = None
+challenge_definition_list = None
+metric_definition_list = None
+physical_resource_list = None
+cloud_virtual_resource_list = None
+VNF_Service_list = None
+
diff --git a/lib/auto/testcase/resiliency/AutoResilItfCloud.py b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
new file mode 100644
index 0000000..7feb518
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with cloud managers (OpenStack, Kubernetes, AWS, ...)
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+import time
+
+# for method 1 and 2
+import openstack
+
+#for method 3
+#from openstack import connection
+
+def openstack_list_servers(conn):
+ """List OpenStack servers."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/compute.html
+ if conn != None:
+ print("\nList Servers:")
+
+ try:
+ i=1
+ for server in conn.compute.servers():
+ print('Server',str(i))
+ print(' Name:',server.name)
+ print(' ID:',server.id)
+ print(' key:',server.key_name)
+ print(' status:',server.status)
+ print(' AZ:',server.availability_zone)
+ print('Details:\n',server)
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Servers\n")
+
+
+def openstack_list_networks(conn):
+ """List OpenStack networks."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/network.html
+ if conn != None:
+ print("\nList Networks:")
+
+ try:
+ i=1
+ for network in conn.network.networks():
+ print('Network',str(i),'\n',network,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Networks\n")
+
+
+def openstack_list_volumes(conn):
+ """List OpenStack volumes."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/proxies/block_storage.html
+ # note: The block_storage member will only be added if the service is detected.
+ if conn != None:
+ print("\nList Volumes:")
+
+ try:
+ i=1
+ for volume in conn.block_storage.volumes():
+ print('Volume',str(i))
+ print(' Name:',volume.name)
+ print(' ID:',volume.id)
+ print(' size:',volume.size)
+ print(' status:',volume.status)
+ print(' AZ:',volume.availability_zone)
+ print('Details:\n',volume)
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Volumes\n")
+
+
+def openstack_list_users(conn):
+ """List OpenStack users."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Users:")
+
+ try:
+ i=1
+ for user in conn.identity.users():
+ print('User',str(i),'\n',user,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Users\n")
+
+def openstack_list_projects(conn):
+ """List OpenStack projects."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Projects:")
+
+ try:
+ i=1
+ for project in conn.identity.projects():
+ print('Project',str(i),'\n',project,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Projects\n")
+
+
+def openstack_list_domains(conn):
+ """List OpenStack domains."""
+ # see https://docs.openstack.org/python-openstacksdk/latest/user/guides/identity.html
+ if conn != None:
+ print("\nList Domains:")
+
+ try:
+ i=1
+ for domain in conn.identity.domains():
+ print('Domain',str(i),'\n',domain,'\n')
+ i+=1
+ except Exception as e:
+ print("Exception:",type(e), e)
+ print("No Domains\n")
+
+
+
+
+
+
+
+def gdtest_openstack():
+
+ # Method 1 (preferred) : assume there is a clouds.yaml file in PATH, starting path search with local directory
+ #conn = openstack.connect(cloud='armopenstack', region_name='RegionOne')
+ #conn = openstack.connect(cloud='hpe16openstackEuphrates', region_name='RegionOne')
+ #conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+ # if getting error: AttributeError: module 'openstack' has no attribute 'connect', check that openstack is installed for this python version
+
+
+ # Method 2: pass arguments directly, all as strings
+ # see details at https://docs.openstack.org/python-openstacksdk/latest/user/connection.html
+ # conn = openstack.connect(
+ # auth_url='https://10.10.50.103:5000/v2.0',
+ # project_name='admin',
+ # username='admin',
+ # password='opnfv_secret',
+ # region_name='RegionOne',
+ # )
+ # conn = openstack.connect(
+ # auth_url='http://10.16.0.101:5000/v2.0',
+ # project_name='admin',
+ # username='admin',
+ # password='opnfv_secret',
+ # region_name='RegionOne',
+ # )
+ # if getting error: AttributeError: module 'openstack' has no attribute 'connect', check that openstack is installed for this python version
+
+
+ # Method 3: create Connection object directly
+ # auth_args = {
+ # #'auth_url': 'https://10.10.50.103:5000/v2.0', # Arm
+ # #'auth_url': 'http://10.16.0.101:5000/v2.0', # hpe16, Euphrates
+ # 'auth_url': 'http://10.16.0.107:5000/v3', # hpe16, Fraser
+ # 'project_name': 'admin',
+ # 'username': 'admin',
+ # 'password': 'opnfv_secret',
+ # 'region_name': 'RegionOne',
+ # 'domain': 'Default'}
+ # conn = connection.Connection(**auth_args)
+
+ #conn = connection.Connection(
+ #auth_url='http://10.16.0.107:5000/v3',
+ #project_name='admin',
+ #username='admin',
+ #password='opnfv_secret')
+
+
+ openstack_list_servers(conn)
+ openstack_list_networks(conn)
+ openstack_list_volumes(conn)
+ openstack_list_users(conn)
+ openstack_list_projects(conn)
+ openstack_list_domains(conn)
+
+ # VM test: create a test VM in the OpenStack instance, enter its ID here
+ gds_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0'
+ gds = conn.compute.get_server(gds_ID)
+ print('\ngds.name=',gds.name)
+ print('gds.status=',gds.status)
+ print('suspending...')
+ conn.compute.suspend_server(gds_ID) # NOT synchronous: returns before suspension action is completed
+ wait_seconds = 10
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
+ print('gds.status=',gds.status)
+ print('resuming...')
+ conn.compute.resume_server(gds_ID)
+ print(' waiting',wait_seconds,'seconds...')
+ time.sleep(wait_seconds)
+ gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
+ print('gds.status=',gds.status)
+
+
+
+ #Volume test: volume attached to test VM; get its ID and enter it here
+ gdv_ID = 'd0206ff2-507c-444a-9871-b5b7ea704994'
+ gdv = conn.block_storage.get_volume(gdv_ID)
+ # no API for stopping/restarting a volume... only delete. ONAP would have to completely migrate a VNF depending on this volume
+ print('\ngdv.name=',gdv.name)
+ print('gdv.status=',gdv.status)
+ #gdv_recreate = gdv
+ #print('deleting...')
+ #conn.block_storage.delete_volume(gdv_ID)
+ #conn.block_storage.delete_volume(gdv)
+ #print('recreating...')
+ #gdv = conn.block_storage.create_volume(<attributes saved in gdv_recreate>)
+
+
+ # get_server(server): Get a single Server
+ # Parameters: server – The value can be the ID of a server or a Server instance.
+ # conn.compute.get_server(server)
+
+ # suspend_server(server): Suspends a server and changes its status to SUSPENDED.
+ # Parameters: server – Either the ID of a server or a Server instance.
+ # conn.compute.suspend_server(server)
+
+ # resume_server(server): Resumes a suspended server and changes its status to ACTIVE.
+ # Parameters: server – Either the ID of a server or a Server instance.
+ # conn.compute.resume_server(server)
+
+
+def main():
+
+ print("\nTest Auto Cloud Interface")
+
+ gdtest_openstack()
+
+ print("\nCiao\n")
+
+if __name__ == "__main__":
+ main()
+
+
+# OpenStack HTTP API: https://developer.openstack.org/api-ref/compute/
+#{your_compute_service_url}/servers/{server_id}/action
+#GET
+#http://mycompute.pvt/compute/v2.1/servers/{server_id}/suspend
+#http://mycompute.pvt/compute/v2.1/servers/{server_id}/resume
+# but better use the python unified client
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilItfOS.py b/lib/auto/testcase/resiliency/AutoResilItfOS.py
new file mode 100644
index 0000000..5f792f6
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfOS.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with OS, or servers
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+
+def f1():
+ return 0
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py b/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py
new file mode 100644
index 0000000..494d0ab
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilItfVNFMNFVO.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: interfaces with VNF/NVF managers (focus on ONAP)
+# entities that manage VNFs and orchestrates services (VNF-M and NFV-O)
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+def f1():
+ return 0
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilMain.py b/lib/auto/testcase/resiliency/AutoResilMain.py
new file mode 100644
index 0000000..1d21f6a
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilMain.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: main program
+# data initialization
+# interactive CLI user menu:
+# 1) select a test definition to run
+# 2) view definition of selected test (pull all data from definition files)
+# 3) start test
+# 4) exit
+
+
+#docstring
+"""This is the main module for OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+from AutoResilMgTestDef import *
+
+# Constants
+PROJECT_NAME = "Auto"
+USE_CASE_NAME = "Resilience Improvements Through ONAP"
+
+
+
+######################################################################
+
+def show_menu(selected_test_def_ID):
+ """Show menu, with a different first line based on current Test Definition selection."""
+
+ if selected_test_def_ID>0 :
+ print("\nCurrently selected test Definition ID: ",selected_test_def_ID)
+ else:
+ print("\nCurrently selected test Definition ID: (none)")
+ print("1: select Test Definition ID")
+ print("2: view currently selected Test Definition details")
+ print("3: start an execution of currently selected Test Definition")
+ print("4: exit")
+
+
+def get_menu_choice():
+ """Get a user input (a menu entry number)."""
+ while True:
+ try:
+ user_choice = int(input(" Choice: "))
+ except ValueError:
+ print(" Invalid choice (must be an integer). Try again.")
+ continue
+ if user_choice < 1 or user_choice > 4:
+ print(" Invalid choice (must be between 1 and 4). Try again.")
+ continue
+ else:
+ return user_choice
+
+
+def get_test_def_ID():
+ """Get a user input (a test definition ID)."""
+ while True:
+ try:
+ user_test_def_ID = int(input(" Test Definition ID: "))
+ except ValueError:
+ print(" Invalid choice (must be an integer). Try again.")
+ continue
+ if user_test_def_ID <1:
+ print(" Invalid choice (must be a positive integer). Try again.")
+ continue
+
+ test_defs = read_list_bin(FILE_TEST_DEFINITIONS)
+ if (test_defs == None) or (test_defs==[]):
+ print("Problem with test definition file: empty")
+ sys.exit() # stop entire program, because test definition file MUST be correct
+
+ if index_already_there(user_test_def_ID, test_defs):
+ return user_test_def_ID
+ else:
+ print("Invalid choice (Test Definition ID",user_test_def_ID,"does not exist). Try again.")
+ continue
+
+
+
+######################################################################
+def main():
+
+ print("\nProject:\t", PROJECT_NAME)
+ print("Use Case:\t",USE_CASE_NAME)
+
+
+ # Run initializations, to refresh data and make sure files are here. Also, this loads the lists in memory.
+ # For now, initialization functions are self-contained and hard-coded:
+ # all definition data is initialized from the code, not from user interaction.
+ AutoResilGlobal.test_case_list = init_test_cases()
+ AutoResilGlobal.test_definition_list = init_test_definitions()
+ AutoResilGlobal.recipient_list = init_recipients()
+ AutoResilGlobal.challenge_definition_list = init_challenge_definitions()
+ AutoResilGlobal.metric_definition_list = init_metric_definitions()
+
+ AutoResilGlobal.physical_resource_list = init_physical_resources()
+ AutoResilGlobal.cloud_virtual_resource_list = init_cloud_virtual_resources()
+ AutoResilGlobal.VNF_Service_list = init_VNFs_Services()
+
+
+ # start with no test definition selected
+ selected_test_def_ID = -1
+
+ # interactive menu loop
+ while True:
+
+ show_menu(selected_test_def_ID)
+ user_choice = get_menu_choice()
+ #print("***user_choice:",user_choice) #debug
+
+ if user_choice == 1: # select Test Definition ID
+ selected_test_def_ID = get_test_def_ID()
+ selected_test_def = get_indexed_item_from_list(selected_test_def_ID, AutoResilGlobal.test_definition_list)
+ continue
+
+ if user_choice == 2: # view currently selected Test Definition details
+ if selected_test_def_ID > 0:
+ if selected_test_def == None:
+ print("Problem with test definition: empty")
+ sys.exit() # stop entire program, because test definition MUST be correct
+ else:
+ selected_test_def.printout_all(0)
+ continue
+ else:
+ print("No current selection of Test Definition. Try again.")
+ continue
+
+ if user_choice == 3: # start an execution of currently selected Test Definition
+ if selected_test_def_ID > 0:
+ if selected_test_def == None:
+ print("Problem with test definition: empty")
+ sys.exit() # stop entire program, because test definition MUST be correct
+ else:
+ test_def = get_indexed_item_from_list(selected_test_def_ID, AutoResilGlobal.test_definition_list)
+ if test_def != None:
+ test_def.run_test_code()
+
+ else:
+ print("No current selection of Test Definition. Try again.")
+ continue
+
+ if user_choice == 4: # exit
+ print("\nEnd of Main Program")
+ print("\nProject:\t", PROJECT_NAME)
+ print("Use Case:\t",USE_CASE_NAME)
+ print("\nBye!\n")
+ sys.exit()
+
+
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/lib/auto/testcase/resiliency/AutoResilMgTestDef.py b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
new file mode 100644
index 0000000..edf899a
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilMgTestDef.py
@@ -0,0 +1,1854 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: management of test definitions
+
+# Functions and classes to manage and initialize test data relative to:
+# physical resources
+# cloud resources
+# VNFs
+# recipients (OS, cloud/VNF managers)
+# challenge definitions
+# optional metrics
+# test definitions
+# Storage of definition data in binary files (pickle), and test data results in .CSV files
+
+
+#docstring
+"""This module contains functions and classes to manage OPNFV Auto Test Data for Use Case 2: Resilience Improvements Through ONAP.
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+
+######################################################################
+# import statements
+import pickle
+import csv
+import sys
+from enum import Enum
+from datetime import datetime, timedelta
+import AutoResilGlobal
+import openstack
+import time
+
+
# Constants with definition file names
# (each file stores one pickled list of definition objects, acting as a small
# persistent library; written/read via write_list_bin()/read_list_bin())
FILE_PHYSICAL_RESOURCES = "ResourcesPhysical.bin"
FILE_CLOUD_RESOURCES = "ResourcesCloud.bin"
FILE_VNFS_SERVICES = "ResourcesVNFServices.bin"
FILE_RECIPIENTS = "Recipients.bin"
FILE_TEST_CASES = "TestCases.bin"
FILE_METRIC_DEFINITIONS = "DefinitionsMetrics.bin"
FILE_CHALLENGE_DEFINITIONS = "DefinitionsChallenges.bin"
FILE_TEST_DEFINITIONS = "DefinitionsTests.bin"

# Other constants
INDENTATION_MULTIPLIER = 4  # number of spaces per indentation level in printout_all() methods
+
+
+######################################################################
+
def read_list_bin(file_name):
    """Generic function to extract a list from a binary (pickle) file.

    Returns the unpickled list. A missing file is treated as an empty
    library and returns [] (the original code fell through and returned
    None in that case, which crashed callers that iterate or append).
    Any other error aborts the program, because definition data MUST be
    readable when present.
    """
    extracted_list = []
    try:
        with open(file_name, "rb") as binary_file:
            extracted_list = pickle.load(binary_file)
    except FileNotFoundError:
        # not fatal: first use, before any definition file was written
        print("File not found: ",file_name)
    except Exception as e:
        print(type(e), e)
        sys.exit()
    return extracted_list
+
+
def write_list_bin(inserted_list, file_name):
    """Serialize a list into a binary (pickle) file, replacing any previous content.

    Aborts the whole program on any error, because the definition
    library MUST be writable for the tool to be usable.
    """
    try:
        output_file = open(file_name, "wb")
        try:
            pickle.dump(inserted_list, output_file)
        finally:
            output_file.close()
    except Exception as problem:
        print(type(problem), problem)
        sys.exit()
+
+
class AutoBaseObject:
    """Base class for Auto project objects, carrying the two attributes
    shared by every definition type: a numeric ID and a display name."""

    def __init__(self, param_ID, param_name):
        # unique (per list) identifier
        self.ID = param_ID
        # human-readable label
        self.name = param_name

    def __repr__(self):
        # same text for repr() and str(): "ID=<id> name=<name>"
        return "ID={} name={}".format(self.ID, self.name)

    def __str__(self):
        return "ID={} name={}".format(self.ID, self.name)
+
+
def index_already_there(index, given_list):
    """Generic function to check if an ID is already used in a list of AutoBaseObject.

    Returns True as soon as a list member has a matching .ID, False
    otherwise. Aborts the program if the list contains anything that is
    not an AutoBaseObject (the library files must be homogeneous).
    """
    for item in given_list:
        if not isinstance(item, AutoBaseObject):
            print("Issue with list: item is not AutoBaseObject")
            print(" index=\n",index)
            sys.exit()
        if item.ID == index:
            return True
    return False
+
+
def get_indexed_item_from_list(index, given_list):
    """Generic function to fetch the entry with a given ID from a list of AutoBaseObject.

    Returns the first matching item, or None when no member has that ID.
    Aborts the program if the list contains anything that is not an
    AutoBaseObject (the library files must be homogeneous).
    """
    for item in given_list:
        if not isinstance(item, AutoBaseObject):
            print("Issue with list: item is not AutoBaseObject")
            print(" index=\n",index)
            sys.exit()
        if item.ID == index:
            return item
    return None
+
+
def get_indexed_item_from_file(index, file_name):
    """Load a list of AutoBaseObject from a binary file and return the
    entry with the given ID (None if absent)."""
    return get_indexed_item_from_list(index, read_list_bin(file_name))
+
+
+
+######################################################################
+
class TestCase(AutoBaseObject):
    """Test Case class for Auto project: a named test case plus the URL
    of the Auto JIRA ticket that specifies it."""

    def __init__(self, test_case_ID, test_case_name,
                 test_case_JIRA_URL):
        # common ID/name attributes handled by the base class
        AutoBaseObject.__init__(self, test_case_ID, test_case_name)
        # link to the Auto JIRA ticket describing this test case
        self.JIRA_URL = test_case_JIRA_URL

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        indent = " " * indent_level * INDENTATION_MULTIPLIER
        print(f"{indent}Test Case ID:{self.ID}")
        print(f"{indent}|-name:{self.name}")
        print(f"{indent}|-JIRA URL:{self.JIRA_URL}")
+
+
# no need for functions to remove data: ever-growing library, arbitrary ID
# initial version: should not even add data dynamically, in case object signature changes
# better stick to initialization functions only to fill data, unless 100% sure signature does not change
def add_test_case_to_file(test_case_ID, test_case_name, test_case_JIRA_URL):
    """Function to add persistent data about test cases (in binary file).

    Reads the current test case library, appends the new entry if its ID
    is not already used, writes the library back, and returns the
    (possibly updated) list.
    """
    test_cases = read_list_bin(FILE_TEST_CASES)
    # robustness fix: read_list_bin() yields no list when the library file
    # does not exist yet; start from an empty library instead of crashing
    if test_cases is None:
        test_cases = []

    if index_already_there(test_case_ID, test_cases):
        print("Test Case ID=",test_case_ID," is already defined and can't be added")
    else:
        test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))
        write_list_bin(test_cases, FILE_TEST_CASES)

    return test_cases
+
+
+
def init_test_cases():
    """Function to initialize test case data.

    Builds the full list of known test cases from a compact
    (ID, name, JIRA URL) table — replacing ten copy-pasted stanzas —
    persists it to FILE_TEST_CASES, and returns the list.
    """
    # one row per test case, following the TestCase constructor signature
    test_case_data = [
        (1,  "auto-resiliency-pif-001", "https://jira.opnfv.org/browse/AUTO-9"),
        (2,  "auto-resiliency-pif-002", "https://jira.opnfv.org/browse/AUTO-10"),
        (3,  "auto-resiliency-pif-003", "https://jira.opnfv.org/browse/AUTO-11"),
        (4,  "auto-resiliency-pif-004", "https://jira.opnfv.org/browse/AUTO-12"),
        (5,  "auto-resiliency-vif-001", "https://jira.opnfv.org/browse/AUTO-13"),
        (6,  "auto-resiliency-vif-002", "https://jira.opnfv.org/browse/AUTO-14"),
        (7,  "auto-resiliency-vif-003", "https://jira.opnfv.org/browse/AUTO-15"),
        (8,  "auto-resiliency-sec-001", "https://jira.opnfv.org/browse/AUTO-16"),
        (9,  "auto-resiliency-sec-002", "https://jira.opnfv.org/browse/AUTO-17"),
        (10, "auto-resiliency-sec-003", "https://jira.opnfv.org/browse/AUTO-18"),
    ]

    test_cases = [TestCase(case_ID, case_name, case_URL)
                  for (case_ID, case_name, case_URL) in test_case_data]

    # write list to binary file
    write_list_bin(test_cases, FILE_TEST_CASES)

    return test_cases
+
+
+######################################################################
+
class TestDefinition(AutoBaseObject):
    """Test Definition class for Auto project.

    Binds together everything needed to run one resilience test:
    the Test Case it implements, the Challenge Definition used to inject
    a failure, the impacted VNFs, the metrics to compute, the recipients
    to send commands to, and the index of the test code method to run.
    """
    def __init__ (self, test_def_ID, test_def_name,
                  test_def_challengeDefID,
                  test_def_testCaseID,
                  test_def_VNFIDs,
                  test_def_associatedMetricsIDs,
                  test_def_recipientIDs,
                  test_def_testCLICommandSent,
                  test_def_testAPICommandSent,
                  test_def_codeID):

        # superclass constructor (sets self.ID and self.name)
        AutoBaseObject.__init__(self, test_def_ID, test_def_name)

        # specifics for this subclass

        # associated Challenge Definition (ID)
        self.challenge_def_ID = test_def_challengeDefID
        # associated Test Case (ID)
        self.test_case_ID = test_def_testCaseID
        # associated VNFs (list of IDs)
        self.VNF_ID_list = test_def_VNFIDs
        # associated Metrics (list of IDs)
        self.associated_metrics_ID_list = test_def_associatedMetricsIDs
        # associated Recipients (list of IDs)
        self.recipient_ID_list = test_def_recipientIDs
        # associated test CLI commands to Recipients (list of strings)
        self.test_CLI_command_sent_list = test_def_testCLICommandSent
        # associated test API commands to Recipients (list of data objects)
        self.test_API_command_sent_list = test_def_testAPICommandSent

        # constant for total number of test codes (one of them is used per TestDefinition instance); would be 1 per test case
        self.TOTAL_NUMBER_OF_TEST_CODES = 10
        # chosen test code ID (the ID is an index in a list of method names) for this instance; convention: [1;N]; in list, index is [0;N-1]
        # a test code could use for instance Python clients (for OpenStack, Kubernetes, etc.), or HTTP APIs, or some of the CLI/API commands
        try:
            if 1 <= test_def_codeID <= self.TOTAL_NUMBER_OF_TEST_CODES:
                self.test_code_ID = test_def_codeID
            else:
                print("TestDefinition constructor: incorrect test_def_codeID=",test_def_codeID)
                sys.exit() # stop entire program, because code ID MUST be correct
        except Exception as e:
            print(type(e), e)
            sys.exit() # stop entire program, because code ID MUST be correct

        self.test_code_list = [] # list of method names; leave as per-object method (i.e. not as class methods or as static methods)
        # add one by one, for easier later additions of new methods
        # index in this list is (test_code_ID - 1)
        self.test_code_list.append(self.test_code001)
        self.test_code_list.append(self.test_code002)
        self.test_code_list.append(self.test_code003)
        self.test_code_list.append(self.test_code004)
        self.test_code_list.append(self.test_code005)
        self.test_code_list.append(self.test_code006)
        self.test_code_list.append(self.test_code007)
        self.test_code_list.append(self.test_code008)
        self.test_code_list.append(self.test_code009)
        self.test_code_list.append(self.test_code010)


    def run_test_code(self, *test_code_args, **test_code_kwargs):
        """Run currently selected test code. Common code runs here, specific code is invoked through test_code_list and test_code_ID.
        Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code.

        Sequence: start challenge -> run the selected test code (which is
        expected to return once VNF restoration is observed) -> compute the
        Recovery Time metric -> stop challenge -> write CSV results.
        NOTE(review): ChallengeExecution / TestExecution are defined
        elsewhere in this module — confirm their constructor signatures
        match the calls below.
        """
        try:
            # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
            # specific monitoring of VNF, trigger stop code from challenge def

            time1 = datetime.now() # get time as soon as execution starts

            # create challenge execution instance
            chall_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of challenge executions somewhere. or could be random.
            chall_exec_name = 'challenge execution' # challenge def ID is already passed
            chall_exec_challDefID = self.challenge_def_ID
            chall_exec = ChallengeExecution(chall_exec_ID, chall_exec_name, chall_exec_challDefID)
            chall_exec.log.append_to_list('challenge execution created')

            # create test execution instance
            test_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of text executions somewhere. or could be random.
            test_exec_name = 'test execution' # test def ID is already passed
            test_exec_testDefID = self.ID
            test_exec_userID = '' # or get user name from getpass module: import getpass and test_exec_userID = getpass.getuser()
            test_exec = TestExecution(test_exec_ID, test_exec_name, test_exec_testDefID, chall_exec_ID, test_exec_userID)
            test_exec.log.append_to_list('test execution created')

            # get time1 before anything else, so the setup time is counted
            test_exec.start_time = time1

            # get challenge definition instance, and start challenge
            challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
            challenge_def.run_start_challenge_code()

            # memorize challenge start time
            chall_exec.start_time = datetime.now()
            test_exec.challenge_start_time = chall_exec.start_time

            # call specific test definition code, via table of functions; this code should monitor a VNF and return when restoration is observed
            test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1
            # invoke corresponding method, via index; could check for return code
            self.test_code_list[test_code_index](*test_code_args, **test_code_kwargs)

            # memorize restoration detection time and compute recovery time
            test_exec.restoration_detection_time = datetime.now()
            recovery_time_metric_def = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1
            test_exec.recovery_time = recovery_time_metric_def.compute(test_exec.challenge_start_time, test_exec.restoration_detection_time)

            # stop challenge
            challenge_def.run_stop_challenge_code()

            # memorize challenge stop time
            chall_exec.stop_time = datetime.now()
            chall_exec.log.append_to_list('challenge execution finished')

            # write results to CSV files, memorize test finish time
            chall_exec.write_to_csv()
            test_exec.finish_time = datetime.now()
            test_exec.log.append_to_list('test execution finished')
            test_exec.write_to_csv()


        except Exception as e:
            print(type(e), e)
            sys.exit()


    # library of test codes, probably 1 per test case, so test_case_ID would be the same as test_code_ID
    # (codes 001-004 and 006-010 are placeholders: they only trace their invocation)
    def test_code001(self, *test_code_args, **test_code_kwargs):
        """Test case code number 001."""
        print("This is test_code001 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code002(self, *test_code_args, **test_code_kwargs):
        """Test case code number 002."""
        print("This is test_code002 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code003(self, *test_code_args, **test_code_kwargs):
        """Test case code number 003."""
        print("This is test_code003 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code004(self, *test_code_args, **test_code_kwargs):
        """Test case code number 004."""
        print("This is test_code004 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code005(self, *test_code_args, **test_code_kwargs):
        """Test case code number 005: poll OpenStack until the test VM is ACTIVE again.

        Monitors a (currently hard-coded) VM and returns once its status
        is 'ACTIVE', i.e. once restoration is observed.
        """
        print("This is test_code005 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

        # specific VNF recovery monitoring, specific metrics if any
        # interact with ONAP, periodic query about VNF status; may also check VM or container status directly with VIM
        # return when VNF is recovered
        # may provision for failure to recover (max time to wait; return code: recovery OK boolean)

        # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0
        # VM is created arbitrarily, not yet with ONAP
        # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file
        # VM resume done in Horizon (to simulate an ONAP-based recovery)
        # retrieved status values: {'ACTIVE', 'SUSPENDED'}
        # loop: wait 2 seconds, check status, stop loop when status is ACTIVE
        # NOTE(review): cloud name, region and VM ID are hard-coded for this
        # experiment — parameterize before production use; no timeout means
        # the loop can spin forever if the VM never recovers
        conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
        test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack
        test_VM = conn.compute.get_server(test_VM_ID)
        print(' test_VM.name=',test_VM.name)
        print(' test_VM.status=',test_VM.status)
        test_VM_current_status = test_VM.status
        wait_seconds = 2
        nb_seconds_waited = 0
        while test_VM_current_status != 'ACTIVE':
            print(' waiting',wait_seconds,'seconds...')
            time.sleep(wait_seconds)
            test_VM = conn.compute.get_server(test_VM_ID) # need to get VM object ID, for an updated status attribute
            test_VM_current_status = test_VM.status
            nb_seconds_waited = nb_seconds_waited + wait_seconds
        print(' nb_seconds_waited=',nb_seconds_waited)


    def test_code006(self, *test_code_args, **test_code_kwargs):
        """Test case code number 006."""
        print("This is test_code006 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code007(self, *test_code_args, **test_code_kwargs):
        """Test case code number 007."""
        print("This is test_code007 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code008(self, *test_code_args, **test_code_kwargs):
        """Test case code number 008."""
        print("This is test_code008 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code009(self, *test_code_args, **test_code_kwargs):
        """Test case code number 009."""
        print("This is test_code009 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')

    def test_code010(self, *test_code_args, **test_code_kwargs):
        """Test case code number 010."""
        print("This is test_code010 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')


    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level.
        Also recursively prints the associated test case, challenge
        definition, VNFs, metrics and recipients, looked up by ID in the
        AutoResilGlobal lists (each one level deeper)."""
        indent = " "*indent_level*INDENTATION_MULTIPLIER

        print(indent, "\nTest Definition ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')

        print(indent, "|-associated test case ID:", self.test_case_ID, sep='')
        test_case = get_indexed_item_from_list(self.test_case_ID, AutoResilGlobal.test_case_list)
        if test_case != None:
            test_case.printout_all(indent_level+1)

        print(indent, "|-test code ID:", self.test_code_ID, sep='')

        print(indent, "|-associated challenge def ID:", self.challenge_def_ID, sep='')
        challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
        if challenge_def != None:
            challenge_def.printout_all(indent_level+1)

        if self.VNF_ID_list != None:
            if len(self.VNF_ID_list) >0:
                print(indent, "|-associated VNFs:", sep='')
                for VNF_ID in self.VNF_ID_list:
                    VNF_item = get_indexed_item_from_list(VNF_ID, AutoResilGlobal.VNF_Service_list)
                    if VNF_item != None:
                        VNF_item.printout_all(indent_level+1)

        if self.associated_metrics_ID_list != None:
            if len(self.associated_metrics_ID_list) >0:
                print(indent, "|-associated metrics:", sep='')
                for Metric_ID in self.associated_metrics_ID_list:
                    Metric_item = get_indexed_item_from_list(Metric_ID, AutoResilGlobal.metric_definition_list)
                    if Metric_item != None:
                        Metric_item.printout_all(indent_level+1)

        if self.recipient_ID_list != None:
            if len(self.recipient_ID_list) >0:
                print(indent, "|-associated recipients:", sep='')
                for recipient_ID in self.recipient_ID_list:
                    recipient_item = get_indexed_item_from_list(recipient_ID, AutoResilGlobal.recipient_list)
                    if recipient_item != None:
                        recipient_item.printout_all(indent_level+1)

        if self.test_CLI_command_sent_list != None:
            if len(self.test_CLI_command_sent_list) >0:
                print(indent, "|-associated CLI commands:", sep='')
                for CLI_command in self.test_CLI_command_sent_list:
                    print(" "*INDENTATION_MULTIPLIER, "|- ", CLI_command, sep='')

        # TODO: self.test_API_command_sent_list (depends how API commands are stored: likely a list of strings)
+
+
+
def init_test_definitions():
    """Function to initialize test definition data.

    Builds the initial list (currently a single definition, ID 5),
    persists it to FILE_TEST_DEFINITIONS, and returns the list.
    """
    # single initial definition: VM failure impact on the vFW VNF
    vm_failure_test_def = TestDefinition(
        5,                                                  # test definition ID
        "VM failure impact on virtual firewall (vFW VNF)",  # name
        5,                                                  # challenge definition ID
        5,                                                  # test case ID
        [1],                                                # VNF IDs
        [2],                                                # associated metric IDs
        [2],                                                # recipient IDs
        ["pwd","kubectl describe pods --include-uninitialized=false"],  # CLI commands
        ["data1","data2"],                                  # API commands
        5)                                                  # test code ID

    test_definitions = [vm_failure_test_def]

    # write list to binary file
    write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)

    return test_definitions
+
+
+######################################################################
+
class ChallengeType(Enum):
    """Categories of challenges; numeric values are grouped by failure
    domain (1XX physical, 2XX cloud, 3XX security), leaving room for
    additions within each range."""
    # physical server-level failures 1XX
    COMPUTE_HOST_FAILURE = 100
    DISK_FAILURE = 101
    LINK_FAILURE = 102
    NIC_FAILURE = 103

    # cloud-level failures 2XX
    CLOUD_COMPUTE_FAILURE = 200
    SDN_C_FAILURE = 201
    OVS_BRIDGE_FAILURE = 202
    CLOUD_STORAGE_FAILURE = 203
    CLOUD_NETWORK_FAILURE = 204

    # security stresses 3XX
    HOST_TAMPERING = 300
    HOST_INTRUSION = 301
    NETWORK_INTRUSION = 302
+
+
class ChallengeDefinition(AutoBaseObject):
    """Challenge Definition class for Auto project.

    Describes one failure to inject: its type, the recipient that
    starts/stops it, the impacted cloud and physical resources, the
    CLI/API commands involved, and the index of the start/stop code
    pair to execute.
    """
    def __init__ (self, chall_def_ID, chall_def_name,
                  chall_def_challengeType,
                  chall_def_recipientID,
                  chall_def_impactedCloudResourcesInfo,
                  chall_def_impactedCloudResourceIDs,
                  chall_def_impactedPhysResourcesInfo,
                  chall_def_impactedPhysResourceIDs,
                  chall_def_startChallengeCLICommandSent,
                  chall_def_stopChallengeCLICommandSent,
                  chall_def_startChallengeAPICommandSent,
                  chall_def_stopChallengeAPICommandSent,
                  chall_def_codeID):

        # superclass constructor (sets self.ID and self.name)
        AutoBaseObject.__init__(self, chall_def_ID, chall_def_name)

        # specifics for this subclass

        # info about challenge type, categorization (a ChallengeType enum value)
        self.challenge_type = chall_def_challengeType
        # recipient instance, to start/stop the challenge
        self.recipient_ID = chall_def_recipientID

        # free-form info about cloud virtual impacted resource(s)
        self.impacted_cloud_resources_info = chall_def_impactedCloudResourcesInfo
        # impacted resources (list of IDs, usually only 1)
        self.impacted_cloud_resource_ID_list = chall_def_impactedCloudResourceIDs

        # free-form info about physical impacted resource(s)
        self.impacted_phys_resources_info = chall_def_impactedPhysResourcesInfo
        # impacted resources (list of IDs, usually only 1)
        self.impacted_phys_resource_ID_list = chall_def_impactedPhysResourceIDs

        # if CLI; can include hard-coded references to resources
        self.start_challenge_CLI_command_sent = chall_def_startChallengeCLICommandSent
        # if CLI; to restore to normal
        self.stop_challenge_CLI_command_sent = chall_def_stopChallengeCLICommandSent
        # if API; can include hard-coded references to resources
        self.start_challenge_API_command_sent = chall_def_startChallengeAPICommandSent
        # if API; to restore to normal
        self.stop_challenge_API_command_sent = chall_def_stopChallengeAPICommandSent

        # constant for total number of challenge codes (one of them is used per ChallengeDefinition instance);
        # may be 1 per test case, maybe not (common challenges, could be re-used across test definitions and test cases)
        # start and stop challenges are strictly linked: exactly 1 Stop challenge for each Start challenge, so same ID for Start and for Stop
        self.TOTAL_NUMBER_OF_CHALLENGE_CODES = 10

        # chosen start/stop challenge code ID (the ID is an index in a list of method names) for this instance;
        # convention: [1;N]; in list, index is [0;N-1]
        # a challenge code could use for instance Python clients (for OpenStack, Kubernetes, etc.), or HTTP APIs, or some of the CLI/API commands
        try:
            if 1 <= chall_def_codeID <= self.TOTAL_NUMBER_OF_CHALLENGE_CODES:
                self.challenge_code_ID = chall_def_codeID
            else:
                print("ChallengeDefinition constructor: incorrect chall_def_codeID=",chall_def_codeID)
                sys.exit() # stop entire program, because code ID MUST be correct
        except Exception as e:
            print(type(e), e)
            sys.exit() # stop entire program, because code ID MUST be correct

        # list of method names; leave as per-object method (i.e. not as class methods or as static methods)
        self.start_challenge_code_list = []
        self.stop_challenge_code_list = []
        # add one by one, for easier later additions of new methods; MUST be same index for Start and for Stop
        # (both lists are indexed by challenge_code_ID - 1)
        self.start_challenge_code_list.append(self.start_challenge_code001)
        self.stop_challenge_code_list.append(self.stop_challenge_code001)
        self.start_challenge_code_list.append(self.start_challenge_code002)
        self.stop_challenge_code_list.append(self.stop_challenge_code002)
        self.start_challenge_code_list.append(self.start_challenge_code003)
        self.stop_challenge_code_list.append(self.stop_challenge_code003)
        self.start_challenge_code_list.append(self.start_challenge_code004)
        self.stop_challenge_code_list.append(self.stop_challenge_code004)
        self.start_challenge_code_list.append(self.start_challenge_code005)
        self.stop_challenge_code_list.append(self.stop_challenge_code005)
        self.start_challenge_code_list.append(self.start_challenge_code006)
        self.stop_challenge_code_list.append(self.stop_challenge_code006)
        self.start_challenge_code_list.append(self.start_challenge_code007)
        self.stop_challenge_code_list.append(self.stop_challenge_code007)
        self.start_challenge_code_list.append(self.start_challenge_code008)
        self.stop_challenge_code_list.append(self.stop_challenge_code008)
        self.start_challenge_code_list.append(self.start_challenge_code009)
        self.stop_challenge_code_list.append(self.stop_challenge_code009)
        self.start_challenge_code_list.append(self.start_challenge_code010)
        self.stop_challenge_code_list.append(self.stop_challenge_code010)
+
+
+ def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs):
+ """Run currently selected challenge code, start portion.
+ Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code."""
+
+ try:
+ code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
+ # invoke corresponding start method, via index
+ self.start_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+ def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):
+ """Run currently selected challenge code, stop portion.
+ Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code."""
+ try:
+ code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
+ # invoke corresponding stop method, via index
+ self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+
    # library of challenge codes
    # (codes 001-004 are placeholders for now: they only trace their
    # invocation; actual fault-injection logic is to be added per
    # challenge type)
    def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):
        """Start Challenge code number 001."""
        print("This is start_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
    def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):
        """Stop Challenge code number 001."""
        print("This is stop_challenge_code001 from ChallengeDefinition #",self.ID, sep='')

    def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):
        """Start Challenge code number 002."""
        print("This is start_challenge_code002 from ChallengeDefinition #",self.ID, sep='')
    def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):
        """Stop Challenge code number 002."""
        print("This is stop_challenge_code002 from ChallengeDefinition #",self.ID, sep='')

    def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):
        """Start Challenge code number 003."""
        print("This is start_challenge_code003 from ChallengeDefinition #",self.ID, sep='')
    def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):
        """Stop Challenge code number 003."""
        print("This is stop_challenge_code003 from ChallengeDefinition #",self.ID, sep='')

    def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):
        """Start Challenge code number 004."""
        print("This is start_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
    def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):
        """Stop Challenge code number 004."""
        print("This is stop_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
+
    def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):
        """Start Challenge code number 005: suspend a test VM via OpenStack,
        to simulate a VM failure affecting the vFW VNF."""
        print("This is start_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
        # challenge #5, related to test case #5, i.e. test def #5
        # cloud reference (name and region) should be in clouds.yaml file
        # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')
        # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef
        # ChallengeDef suspends/resumes VM
        # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name
        # conn.compute.suspend_server(this server id)

        # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0
        # VM is created arbitrarily, not yet with ONAP
        # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file
        # VM resume done in Horizon (to simulate an ONAP-based recovery)
        # NOTE(review): cloud name, region and VM ID are hard-coded for this
        # experiment — parameterize before production use
        conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
        test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack
        test_VM = conn.compute.get_server(test_VM_ID)
        print(' test_VM.name=',test_VM.name)
        print(' test_VM.status=',test_VM.status)
        print(' suspending...')
        conn.compute.suspend_server(test_VM_ID)
        # wait a bit before continuing: ensure VM is actually suspended
        wait_seconds = 10
        print(' waiting',wait_seconds,'seconds...')
        time.sleep(wait_seconds)
+
+ def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):
+ """Stop Challenge code number 005."""
+ print("This is stop_challenge_code005 from ChallengeDefinition #",self.ID, sep='')
+ # challenge #5, related to test case #5, i.e. test def #5
+ # cloud reference (name and region) should be in clouds.yaml file
+ # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')
+ # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef
+ # ChallengeDef suspends/resumes VM
+ # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name
+ # conn.compute.conn.compute.resume_server(this server id)
+
+ # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0
+ # this resume would be the normal challenge stop, but not in the case of this test
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
+ test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack
+ test_VM = conn.compute.get_server(test_VM_ID)
+ print(' test_VM.name=',test_VM.name)
+ print(' test_VM.status=',test_VM.status)
+ print(' suspending...')
+ conn.compute.resume_server(test_VM_ID)
+
+
+ def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):
+ """Start Challenge code number 006."""
+ print("This is start_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):
+ """Stop Challenge code number 006."""
+ print("This is stop_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):
+ """Start Challenge code number 007."""
+ print("This is start_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):
+ """Stop Challenge code number 007."""
+ print("This is stop_challenge_code007 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):
+ """Start Challenge code number 008."""
+ print("This is start_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):
+ """Stop Challenge code number 008."""
+ print("This is stop_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):
+ """Start Challenge code number 009."""
+ print("This is start_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):
+ """Stop Challenge code number 009."""
+ print("This is stop_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
+
+ def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):
+ """Start Challenge code number 010."""
+ print("This is start_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
+ def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):
+ """Stop Challenge code number 010."""
+ print("This is stop_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
+
+
+
+ def printout_all(self, indent_level):
+ """Print out all attributes, with an indentation level."""
+ indent = " "*indent_level*INDENTATION_MULTIPLIER
+
+ print(indent, "Challenge Definition ID:", self.ID, sep='')
+ print(indent, "|-name:", self.name, sep='')
+
+ print(indent, "|-challenge type:", self.challenge_type, sep='')
+
+ print(indent, "|-challenge code ID:", self.challenge_code_ID, sep='')
+
+ print(indent, "|-associated recipient ID:", self.recipient_ID, sep='')
+ recipient = get_indexed_item_from_list(self.recipient_ID, AutoResilGlobal.recipient_list)
+ if recipient != None:
+ recipient.printout_all(indent_level+1)
+
+ print(indent, "|-info about cloud virtual impacted resource(s):", self.impacted_cloud_resources_info, sep='')
+
+ if self.impacted_cloud_resource_ID_list != None:
+ if len(self.impacted_cloud_resource_ID_list) >0:
+ print(indent, "|-associated cloud virtual impacted resource(s):", sep='')
+ for cloud_resource_ID in self.impacted_cloud_resource_ID_list:
+ cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)
+ if cloud_resource_item != None:
+ cloud_resource_item.printout_all(indent_level+1)
+
+ print(indent, "|-info about physical virtual impacted resource(s):", self.impacted_phys_resources_info, sep='')
+
+ if self.impacted_phys_resource_ID_list != None:
+ if len(self.impacted_phys_resource_ID_list) >0:
+ print(indent, "|-associated physical impacted resource(s):", sep='')
+ for phys_resource_ID in self.impacted_phys_resource_ID_list:
+ phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)
+ if phys_resource_item != None:
+ phys_resource_item.printout_all(indent_level+1)
+
+ print(indent, "|-CLI command to start challenge:", self.start_challenge_CLI_command_sent, sep='')
+
+ print(indent, "|-CLI command to stop challenge:", self.stop_challenge_CLI_command_sent, sep='')
+
+ # TODO: self.start_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)
+ # TODO: self.stop_challenge_API_command_sent (depends how API commands are stored: likely a list of strings)
+
+
+
+
def init_challenge_definitions():
    """Create the initial ChallengeDefinition list, persist it to binary file, and return it."""
    challenge_defs = []

    # Challenge definition #5: suspend an OpenStack VM (ONAP is expected to detect and recover it).
    # OpenStack VM Suspend vs. Pause: suspend stores the state of VM on disk while pause stores it in memory (RAM).
    # CLI equivalents: "nova suspend NAME" / "nova resume NAME", but better use the OpenStack SDK.
    challenge_defs.append(ChallengeDefinition(
        5,                                    # challenge definition ID
        "VM failure",                         # name
        ChallengeType.CLOUD_COMPUTE_FAILURE,  # challenge type
        1,                                    # recipient ID
        "OpenStack VM on ctl02 in Arm pod",   # impacted cloud resources info
        [2],                                  # impacted cloud resource IDs
        "physical server XYZ",                # impacted physical resources info
        [1],                                  # impacted physical resource IDs
        "service nova-compute stop",          # CLI command to start challenge
        "service nova-compute restart",       # CLI command to stop challenge
        [],                                   # API commands to start challenge
        [],                                   # API commands to stop challenge
        5))                                   # challenge code ID

    # persist the list to a binary file
    write_list_bin(challenge_defs, FILE_CHALLENGE_DEFINITIONS)

    return challenge_defs
+
+
+######################################################################
+
class Recipient(AutoBaseObject):
    """Recipient class for Auto project."""
    def __init__(self, recipient_ID, recipient_name,
                 recipient_info,
                 recipient_versionInfo,
                 recipient_accessIPAddress,
                 recipient_accessURL,
                 recipient_userNameCreds,
                 recipient_passwordCreds,
                 recipient_keyCreds,
                 recipient_networkInfo):

        # common ID/name handling lives in the superclass
        AutoBaseObject.__init__(self, recipient_ID, recipient_name)

        # all fields below are optional
        self.info = recipient_info                           # free-form text info about recipient
        self.version_info = recipient_versionInfo            # version info
        self.access_IP_address = recipient_accessIPAddress   # IP address of recipient
        self.access_URL = recipient_accessURL                # URL of recipient
        self.username_creds = recipient_userNameCreds        # username for user/pwd credentials
        self.password_creds = recipient_passwordCreds        # password for user/pwd credentials
        self.key_creds = recipient_keyCreds                  # key credentials
        self.network_info = recipient_networkInfo            # recipient network info (VPN, VCN, VN, Neutron, ...)

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        indent = " " * indent_level * INDENTATION_MULTIPLIER

        print(indent, "Recipient ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')

        # remaining attributes, printed in a fixed order
        for label, value in (
                ("|-version info:", self.version_info),
                ("|-IP address:", self.access_IP_address),
                ("|-URL:", self.access_URL),
                ("|-username for user/pwd credentials:", self.username_creds),
                ("|-password for user/pwd credentials:", self.password_creds),
                ("|-key credentials:", self.key_creds),
                ("|-info about network:", self.network_info)):
            print(indent, label, value, sep='')
+
+
+
def init_recipients():
    """Create the initial Recipient list, persist it to binary file, and return it."""
    test_recipients = []

    # one row per recipient:
    # (ID, name, info, version, IP address, URL, username, password, key, network info)
    recipient_rows = [
        (1, "OpenStack on Arm pod",
         "controller resolves to one of the CTL VMs", "",
         "172.16.10.10", "",
         "ali", "baba", "ssh-rsa k7fjsnEFzESfg6phg",
         "UNH IOL 172.16.0.0/22"),
        (2, "Kubernetes on x86 pod",
         "bare metal", "v1.9",
         "8.9.7.6", "",
         "kuber", "netes", "ssh-rsa 0fjs7hjghsa37fhfs",
         "UNH IOL 10.10.30.157/22"),
    ]
    for row in recipient_rows:
        test_recipients.append(Recipient(*row))

    # persist the list to a binary file
    write_list_bin(test_recipients, FILE_RECIPIENTS)

    return test_recipients
+
+
+######################################################################
+
class MetricDefinition(AutoBaseObject):
    """Metric Definition class for Auto project. Actual metrics are subclasses with specific calculation methods."""
    def __init__(self, metric_def_ID, metric_def_name, metric_def_info):
        # ID and name are handled by the superclass
        AutoBaseObject.__init__(self, metric_def_ID, metric_def_name)
        # optional free-form text about the metric: formula, etc.
        self.info = metric_def_info

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        indent = " " * indent_level * INDENTATION_MULTIPLIER

        print(indent, "Metric Definition ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')
        print(indent, "|-info:", self.info, sep='')
+
+
class MetricValue:
    """Object for storing a measurement of a Metric Definition for Auto project, with common attributes
    (value, timestamp, metric_def_ID).
    """
    def __init__(self, param_value, param_timestamp, param_metric_def_ID):
        # measured value (type depends on the metric definition, e.g. timedelta or float)
        self.value = param_value
        # datetime at which the measurement was taken
        self.timestamp = param_timestamp
        # ID of the MetricDefinition this value belongs to
        self.metric_def_ID = param_metric_def_ID

    def __repr__(self):
        """Display form: metric def ID, value, and timestamp (without microseconds)."""
        return ("metric_def_ID="+str(self.metric_def_ID)+
                " value="+str(self.value)+
                " timestamp="+self.timestamp.strftime("%Y-%m-%d %H:%M:%S"))

    # __str__ previously duplicated __repr__ line-for-line; alias it instead (identical output, DRY)
    __str__ = __repr__
+
+
class RecoveryTimeDef(MetricDefinition):
    """Recovery Time Metric Definition class for Auto project.
    Formula: recovery_time = time_restoration_detected - time_challenge_started
    (measured duration between start of challenge (failure, stress, ...) and detection of restoration).
    Enter values as datetime objects.
    """
    def compute(self, time_challenge_started, time_restoration_detected):
        """time_challenge_started: datetime object, time at which challenge was started;
        time_restoration_detected: datetime object, time at which restoration was detected;
        returns a MetricValue containing a timedelta object as value.
        """
        # sanity check: restoration cannot be detected before the challenge started
        if time_challenge_started > time_restoration_detected:
            print("time_challenge_started should be <= time_restoration_detected")
            print("time_challenge_started=",time_challenge_started," time_restoration_detected=",time_restoration_detected)
            sys.exit()  # stop entire program, because formulas MUST be correct

        # the difference between two datetime objects is a timedelta
        recovery = time_restoration_detected - time_challenge_started
        return MetricValue(recovery, datetime.now(), self.ID)
+
+
class UptimePercentageDef(MetricDefinition):
    """Uptime Percentage Metric Definition class for Auto project.
    Formula: uptime / (reference_time - planned_downtime)
    Enter values in same unit (e.g., all in seconds, or all in minutes, or all in hours, etc.).
    """
    def compute(self, measured_uptime, reference_time, planned_downtime):
        """measured_uptime: amount of time the service/system/resource was up and running;
        reference_time: amount of time during which the measurement was made;
        planned_downtime: amount to time during reference_time, which was planned to be down;
        returns a MetricValue object, with a value between 0 and 100.
        """
        # sanity checks, evaluated in order; the first failing one prints and exits
        # (program is stopped on purpose, because formulas MUST be correct)
        checks = (
            (measured_uptime < 0.0, "measured_uptime should be >= 0.0"),
            (reference_time <= 0.0, "reference_time should be > 0.0"),
            (planned_downtime < 0.0, "planned_downtime should be >= 0.0"),
            (reference_time < planned_downtime, "reference_time should be >= planned_downtime"),
            (measured_uptime > reference_time, "measured_uptime should be <= reference_time"),
            (measured_uptime > (reference_time - planned_downtime),
             "measured_uptime should be <= (reference_time - planned_downtime)"),
        )
        for failed, message in checks:
            if failed:
                print(message)
                print("meas=",measured_uptime," ref=",reference_time," pla=",planned_downtime)
                sys.exit()  # stop entire program, because formulas MUST be correct

        percentage = 100 * measured_uptime / (reference_time - planned_downtime)
        return MetricValue(percentage, datetime.now(), self.ID)
+
+
+
def init_metric_definitions():
    """Create the initial metric definition list, persist it to binary file, and return it."""
    metric_definitions = [
        RecoveryTimeDef(1, "Recovery Time",
                        "Measures time taken by ONAP to restore a VNF"),
        UptimePercentageDef(2, "Uptime Percentage",
                            "Measures ratio of uptime to reference time, not counting planned downtime"),
    ]

    # persist the list to a binary file
    write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)

    return metric_definitions
+
+
+
+######################################################################
+
class PhysicalResource(AutoBaseObject):
    """Physical Resource class for Auto project."""
    def __init__(self, phys_resrc_ID, phys_resrc_name,
                 phys_resrc_info,
                 phys_resrc_IPAddress,
                 phys_resrc_MACAddress):

        # common ID/name handling lives in the superclass
        AutoBaseObject.__init__(self, phys_resrc_ID, phys_resrc_name)

        # all fields below are optional
        self.info = phys_resrc_info              # free-form text info about physical resource
        self.IP_address = phys_resrc_IPAddress   # main IP address (e.g. management interface for a server)
        self.MAC_address = phys_resrc_MACAddress # main MAC address

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        indent = " " * indent_level * INDENTATION_MULTIPLIER

        print(indent, "Physical Resource ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')
        print(indent, "|-info:", self.info, sep='')
        print(indent, "|-IP address:", self.IP_address, sep='')
        print(indent, "|-MAC address:", self.MAC_address, sep='')
+
+
+
def init_physical_resources():
    """Create the initial physical resource list, persist it to binary file, and return it."""
    test_physical_resources = []

    # one row per physical resource: (ID, name, info, IP address, MAC address)
    resource_rows = [
        (1, "small-cavium-1",
         "Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS",
         "10.10.50.12", "00-14-22-01-23-45"),
        (2, "medium-cavium-1",
         "Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS",
         "30.31.32.33", "0xb3:22:05:c1:aa:82"),
        (3, "mega-cavium-666",
         "Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS",
         "54.53.52.51", "01-23-45-67-89-ab"),
    ]
    for row in resource_rows:
        test_physical_resources.append(PhysicalResource(*row))

    # persist the list to a binary file
    write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)

    return test_physical_resources
+
+
+######################################################################
+
class CloudVirtualResource(AutoBaseObject):
    """Cloud Virtual Resource class for Auto project."""
    def __init__(self, cldvirtres_ID, cldvirtres_name,
                 cldvirtres_info,
                 cldvirtres_IPAddress,
                 cldvirtres_URL,
                 cldvirtres_related_phys_rsrcIDs):

        # common ID/name handling lives in the superclass
        AutoBaseObject.__init__(self, cldvirtres_ID, cldvirtres_name)

        # all fields below are optional
        self.info = cldvirtres_info             # free-form text info about cloud virtual resource
        self.IP_address = cldvirtres_IPAddress  # main IP address (e.g. management interface for a virtual router)
        self.URL = cldvirtres_URL               # URL address of cloud virtual resource
        # related/associated physical resources (list of integer IDs), if known and useful or interesting
        self.related_phys_rsrc_ID_list = cldvirtres_related_phys_rsrcIDs

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        indent = " " * indent_level * INDENTATION_MULTIPLIER

        print(indent, "Cloud Virtual Resource ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')
        print(indent, "|-info:", self.info, sep='')
        print(indent, "|-IP address:", self.IP_address, sep='')
        print(indent, "|-URL:", self.URL, sep='')

        # resolve and print related physical resources when the list is present and non-empty
        if self.related_phys_rsrc_ID_list is not None and len(self.related_phys_rsrc_ID_list) > 0:
            print(indent, "|-related/associated physical resource(s):", sep='')
            for resource_ID in self.related_phys_rsrc_ID_list:
                resource = get_indexed_item_from_list(resource_ID, AutoResilGlobal.physical_resource_list)
                if resource is not None:
                    resource.printout_all(indent_level + 1)
+
+
def init_cloud_virtual_resources():
    """Create the initial cloud virtual resource list, persist it to binary file, and return it."""
    test_cldvirt_resources = []

    # one row per cloud virtual resource:
    # (ID, name, info, IP address, URL, related physical resource IDs)
    resource_rows = [
        (1, "nova-compute-1", "nova VM in Arm pod",
         "50.60.70.80", "http://50.60.70.80:8080", [1, 3]),
        (2, "nova-compute-2", "nova VM in LaaS",
         "50.60.70.80", "http://50.60.70.80:8080", [2, 3]),
        (3, "nova-compute-3", "nova VM in x86 pod",
         "50.60.70.80", "http://50.60.70.80:8080", [1]),
    ]
    for row in resource_rows:
        test_cldvirt_resources.append(CloudVirtualResource(*row))

    # persist the list to a binary file
    write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)

    return test_cldvirt_resources
+
+
+######################################################################
+
class VNFService(AutoBaseObject):
    """VNF or e2e Service class for Auto project."""
    def __init__(self, vnf_serv_ID, vnf_serv_name,
                 vnf_serv_info,
                 vnf_serv_IPAddress,
                 vnf_serv_URL,
                 vnf_serv_related_phys_rsrcIDs,
                 vnf_serv_related_cloudvirt_rsrcIDs):

        # common ID/name handling lives in the superclass
        AutoBaseObject.__init__(self, vnf_serv_ID, vnf_serv_name)

        # all fields below are optional
        self.info = vnf_serv_info            # free-form text info about VNF / e2e Service
        self.IP_address = vnf_serv_IPAddress # main IP address (e.g. management interface for a vCPE)
        self.URL = vnf_serv_URL              # URL address of VNF / e2e Service
        # related/associated physical resources (list of integer IDs), if known and useful or interesting
        self.related_phys_rsrc_ID_list = vnf_serv_related_phys_rsrcIDs
        # related/associated cloud virtual resources (list of integer IDs), if known and useful or interesting
        self.related_cloud_virt_rsrc_ID_list = vnf_serv_related_cloudvirt_rsrcIDs

    def printout_all(self, indent_level):
        """Print out all attributes, with an indentation level."""
        indent = " " * indent_level * INDENTATION_MULTIPLIER

        print(indent, "VNF or e2e Service ID:", self.ID, sep='')
        print(indent, "|-name:", self.name, sep='')
        print(indent, "|-info:", self.info, sep='')
        print(indent, "|-IP address:", self.IP_address, sep='')
        print(indent, "|-URL:", self.URL, sep='')

        # resolve and print related physical resources when the list is present and non-empty
        if self.related_phys_rsrc_ID_list is not None and len(self.related_phys_rsrc_ID_list) > 0:
            print(indent, "|-related/associated physical resource(s):", sep='')
            for resource_ID in self.related_phys_rsrc_ID_list:
                resource = get_indexed_item_from_list(resource_ID, AutoResilGlobal.physical_resource_list)
                if resource is not None:
                    resource.printout_all(indent_level + 1)

        # resolve and print related cloud virtual resources when the list is present and non-empty
        if self.related_cloud_virt_rsrc_ID_list is not None and len(self.related_cloud_virt_rsrc_ID_list) > 0:
            print(indent, "|-related/associated cloud virtual resource(s):", sep='')
            for resource_ID in self.related_cloud_virt_rsrc_ID_list:
                resource = get_indexed_item_from_list(resource_ID, AutoResilGlobal.cloud_virtual_resource_list)
                if resource is not None:
                    resource.printout_all(indent_level + 1)
+
+
+
def init_VNFs_Services():
    """Create the initial VNF / e2e Service list, persist it to binary file, and return it."""
    test_VNFs_Services = []

    # one row per VNF / e2e Service:
    # (ID, name, info, IP address, URL, related physical resource IDs, related cloud virtual resource IDs)
    service_rows = [
        (1, "vCPE-1", "virtual CPE in Arm pod",
         "5.4.3.2", "http://5.4.3.2:8080", [1, 2], [1]),
        (2, "vFW-1", "virtual Firewall in x86 pod",
         "6.7.8.9", "http://6.7.8.9:8080", [3], [2, 3]),
    ]
    for row in service_rows:
        test_VNFs_Services.append(VNFService(*row))

    # persist the list to a binary file
    write_list_bin(test_VNFs_Services, FILE_VNFS_SERVICES)

    return test_VNFs_Services
+
+
+
+######################################################################
+
class TimeStampedStringList:
    """Utility class for Auto project, for execution classes (ChallengeExecution and TestExecution).
    Stores a list of strings, timestamping each one as it is appended.
    """
    def __init__(self):
        # parallel lists: timestamp at index i belongs to string at index i
        self.__string_list = []
        self.__timestamp_list = []

    def append_to_list(self, string_to_append):
        """Append a string to the list and record the current time for it.
        Exits the program if the argument is not a string.
        """
        # isinstance instead of type()==str: also accepts str subclasses (backward compatible)
        if isinstance(string_to_append, str):
            current_time = datetime.now()
            self.__string_list.append(string_to_append)
            self.__timestamp_list.append(current_time)  # timestamp will have the same index as string
        else:
            print("appended object must be a string, string_to_append=",string_to_append)
            sys.exit()  # stop entire program, because string MUST be correct

    def get_raw_list(self):
        """Return the raw list of strings (no timestamps)."""
        return self.__string_list

    def get_raw_list_timestamps(self):
        """Return the raw list of timestamps (datetime objects)."""
        return self.__timestamp_list

    def get_timestamped_strings(self):
        """Return a list of strings with timestamps as prefixes (not showing microseconds)."""
        return [ts.strftime("%Y-%m-%d %H:%M:%S") + " " + s
                for ts, s in zip(self.__timestamp_list, self.__string_list)]

    def length(self):
        """Return the number of stored strings."""
        return len(self.__string_list)
+
+
+######################################################################
+
class ChallengeExecution(AutoBaseObject):
    """Class for Auto project, tracking the execution details of a Challenge Definition,
    with a method to dump all results to a CSV file.
    """
    def __init__(self, chall_exec_ID, chall_exec_name, chall_exec_challDefID):

        # common ID/name handling lives in the superclass
        AutoBaseObject.__init__(self, chall_exec_ID, chall_exec_name)

        # associated Challenge Definition (ID)
        self.challenge_def_ID = chall_exec_challDefID

        # attributes below are filled in during execution
        self.start_time = None  # when the challenge was started
        self.stop_time = None   # when the challenge was stopped
        self.log = TimeStampedStringList()            # interesting or significant events
        self.CLI_responses = TimeStampedStringList()  # CLI responses
        self.API_responses = TimeStampedStringList()  # API responses (converted to strings)

    def write_to_csv(self):
        """Generic function to dump all Challenge Execution data in a CSV file."""

        # build rows one by one, each as a list, even if only 1 element
        rows = [
            ["challenge execution ID",self.ID],
            ["challenge execution name",self.name],
            ["challenge definition ID",self.challenge_def_ID],
        ]
        # NOTE(review): get_indexed_item_from_file appears to return the whole indexed item,
        # not just its name — confirm the label matches the value written here
        challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)
        rows.append(["challenge definition name",challenge_def_name])

        if self.start_time is not None:
            rows.append(["challenge start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
        if self.stop_time is not None:
            rows.append(["challenge stop time",self.stop_time.strftime("%Y-%m-%d %H:%M:%S")])

        # the three timestamped-string lists, each with a header row
        for header, ts_list in (("Log:", self.log),
                                ("CLI responses:", self.CLI_responses),
                                ("API responses:", self.API_responses)):
            if ts_list.length() > 0:
                rows.append([header])
                rows.extend([item] for item in ts_list.get_timestamped_strings())

        try:
            # output CSV file name: challDefExec + ID + start time + .csv
            # (assumes start_time was set; a None here raises and is reported below)
            file_name = "challDefExec" + "{0:0=3d}".format(self.challenge_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
            with open(file_name, "w", newline="") as file:
                csv.writer(file).writerows(rows)
        except Exception as e:
            print(type(e), e)
            sys.exit()
+
+
+
+######################################################################
+
class TimeStampedMetricValueList:
    """Utility class for Auto project, for the test execution class (TestExecution).
    Stores a list of MetricValue objects (each already carries its own timestamp).
    """
    def __init__(self):
        self.__metric_value_list = []

    def append_to_list(self, metric_value_to_append):
        """Append a metric value (MetricValue) to the list. MetricValue already has a timestamp attribute."""
        # exact-type check kept on purpose (matches original behavior)
        if type(metric_value_to_append) == MetricValue:
            self.__metric_value_list.append(metric_value_to_append)
        else:
            print("appended object must be a MetricValue, metric_value_to_append=",metric_value_to_append)
            sys.exit()  # stop entire program, because metric_value_to_append MUST be correct

    def get_raw_list(self):
        """Return the raw list of MetricValue objects."""
        return self.__metric_value_list

    def get_timestamped_metric_values_as_strings(self):
        """Return a list of strings with metric values and timestamps as prefixes (not showing microseconds).
        Also show the metric def ID in parentheses.
        """
        return [mv.timestamp.strftime("%Y-%m-%d %H:%M:%S") + " " +
                str(mv.value) +
                "(" + str(mv.metric_def_ID) + ")"
                for mv in self.__metric_value_list]

    def length(self):
        """Return the number of stored metric values."""
        return len(self.__metric_value_list)
+
+
+
+######################################################################
+
+class TestExecution(AutoBaseObject):
+ """Class for Auto project, tracking the execution details of a Test Definition,
+ with a method to dump all results to a CSV file.
+ """
+ def __init__ (self, test_exec_ID, test_exec_name,
+ test_exec_testDefID,
+ test_exec_challengeExecID,
+ test_exec_userID):
+
+ # superclass constructor
+ AutoBaseObject.__init__(self, test_exec_ID, test_exec_name)
+
+ # specifics for this subclass
+
+ # associated Test Definition (ID)
+ self.test_def_ID = test_exec_testDefID
+ # associated Challenge Execution (ID) (execution instance of a challenge definition); get challenge start time from it;
+ self.challenge_exec_ID = test_exec_challengeExecID
+ # associated User (ID)
+ self.user_ID = test_exec_userID
+
+ # attributes getting values during execution
+
+ # associated Start and Finish times (when test was started and finished)
+ self.start_time = None
+ self.finish_time = None
+ # time when the challenge was started [datetime]; same value as associated ChallengeExecution.start_time;
+ # keep a copy here for print convenience;
+ self.challenge_start_time = None
+ # time when the VNF/service restoration (by ONAP) was detected by the test code [datetime]
+ self.restoration_detection_time = None
+ # key metric: recovery time, defined as time elapsed between start of challenge and restoration detection [timedelta]
+ self.recovery_time = None
+ # list of associated metric values
+ self.associated_metric_values = TimeStampedMetricValueList()
+ # log: list of strings, to capture any interesting or significant event
+ self.log = TimeStampedStringList()
+ # list of CLI responses
+ self.CLI_responses = TimeStampedStringList()
+ # list of API responses (convert to strings)
+ self.API_responses = TimeStampedStringList()
+
+
+ def write_to_csv(self):
+ """Generic function to dump all Test Execution data in a CSV file."""
+
+ dump_list = []
+
+ # add rows one by one, each as a list, even if only 1 element
+
+ dump_list.append(["test execution ID",self.ID])
+ dump_list.append(["test execution name",self.name])
+
+ dump_list.append(["test definition ID",self.test_def_ID])
+ test_def_name = get_indexed_item_from_file(self.test_def_ID, FILE_TEST_DEFINITIONS)
+ dump_list.append(["test definition name",test_def_name])
+
+ dump_list.append(["associated challenge execution ID",self.challenge_exec_ID])
+ dump_list.append(["user ID",self.user_ID])
+
+ if self.start_time != None:
+ dump_list.append(["test start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
+
+ if self.finish_time != None:
+ dump_list.append(["test finish time",self.finish_time.strftime("%Y-%m-%d %H:%M:%S")])
+
+ if self.challenge_start_time != None:
+ dump_list.append(["challenge stop time",self.challenge_start_time.strftime("%Y-%m-%d %H:%M:%S")])
+ if self.restoration_detection_time != None:
+ dump_list.append(["restoration detection time",self.restoration_detection_time.strftime("%Y-%m-%d %H:%M:%S")])
+ if self.recovery_time != None:
+ if self.recovery_time.value != None:
+ if type(self.recovery_time.value)==timedelta:
+ # timedelta: days and seconds are attributes, total_seconds() is a method
+ dump_list.append(["MEASURED RECOVERY TIME (s)",self.recovery_time.value.total_seconds()])
+ rtday = self.recovery_time.value.days
+ rthrs = self.recovery_time.value.seconds // 3600
+ rtmin = (self.recovery_time.value.seconds % 3600) // 60
+ rtsec = self.recovery_time.value.seconds % 60
+ rtmil = self.recovery_time.value.microseconds
+ dump_list.append(["MEASURED RECOVERY TIME (days, hours, mins, seconds, microseconds)",
+ rtday, rthrs, rtmin, rtsec, rtmil])
+
+ if self.associated_metric_values.length() > 0 :
+ dump_list.append(["Metric Values:"])
+ for item in self.associated_metric_values.get_timestamped_metric_values_as_strings():
+ dump_list.append([item])
+
+ if self.log.length() > 0 :
+ dump_list.append(["Log:"])
+ for item in self.log.get_timestamped_strings():
+ dump_list.append([item])
+
+ if self.CLI_responses.length() > 0 :
+ dump_list.append(["CLI responses:"])
+ for item in self.CLI_responses.get_timestamped_strings():
+ dump_list.append([item])
+
+ if self.API_responses.length() > 0 :
+ dump_list.append(["API responses:"])
+ for item in self.API_responses.get_timestamped_strings():
+ dump_list.append([item])
+
+ try:
+ # output CSV file name: testDefExec + ID + start time + .csv
+ file_name = "testDefExec" + "{0:0=3d}".format(self.test_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
+ with open(file_name, "w", newline="") as file:
+ csv_file_writer = csv.writer(file)
+ csv_file_writer.writerows(dump_list)
+ except Exception as e:
+ print(type(e), e)
+ sys.exit()
+
+
+######################################################################
+def dump_all_binaries_to_CSV():
+    """Get all content from all Definition data binary files, and dump everything in a snapshot CSV file."""
+    ## TODO: not implemented yet; timenow captures the snapshot timestamp
+    ## (presumably for the future output file name) but is currently unused
+    timenow = datetime.now()
+
+
+######################################################################
+def main():
+    """Unit-test driver for this module: exercises the init_* loaders, the
+    indexed binary-file accessors, metric computations, and the
+    ChallengeExecution / TestExecution CSV dumps.
+    """
+
+    # everything here is for unit-testing of this module; not part of actual code
+
+    # --- Test Cases: load, append one entry, read entries back by index ---
+    tcs = init_test_cases()
+    print(tcs)
+
+    test_case_ID = 33
+    test_case_name = "auto-resiliency-xyz"
+    test_case_JIRA_URL = "https://jira.opnfv.org/browse/AUTO-400"
+    add_test_case_to_file(test_case_ID, test_case_name, test_case_JIRA_URL)
+    print(read_list_bin(FILE_TEST_CASES))
+
+    # index 257 is presumably out of range — exercises the lookup's miss path
+    print(get_indexed_item_from_file(3,FILE_TEST_CASES))
+    print(get_indexed_item_from_file(257,FILE_TEST_CASES))
+
+    print("tcs[4]=",tcs[4])
+    print(tcs[4].ID)
+    print(tcs[4].name)
+    print(tcs[4].JIRA_URL)
+
+    print()
+
+    # --- Challenge Definitions: fetch one and run its start/stop code ---
+    challgs = init_challenge_definitions()
+    print(challgs)
+    chall = get_indexed_item_from_file(5,FILE_CHALLENGE_DEFINITIONS)
+    print(chall)
+    chall.run_start_challenge_code()
+    chall.run_stop_challenge_code()
+
+    print()
+
+    # --- Test Definitions ---
+    tds = init_test_definitions()
+    print(tds)
+    td = get_indexed_item_from_file(5,FILE_TEST_DEFINITIONS)
+    print(td)
+    #td.printout_all(0)
+    #td.run_test_code()
+
+    print()
+
+    # --- Recipients ---
+    rcps = init_recipients()
+    print(rcps)
+    rcp = get_indexed_item_from_file(1,FILE_RECIPIENTS)
+    print(rcp)
+
+    print()
+
+    # --- Metric Definitions: ID=1 computed from two datetimes (recovery time) ---
+    metricdefs = init_metric_definitions()
+    print(metricdefs)
+
+    metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS)
+    print(metricdef)
+    t1 = datetime(2018,7,1,15,10,12,500000)
+    t2 = datetime(2018,7,1,15,13,43,200000)
+    r1 = metricdef.compute(t1,t2)
+    print(r1)
+    print()
+
+    # metric ID=2: three-argument compute, exercised with int and float inputs
+    metricdef = get_indexed_item_from_file(2,FILE_METRIC_DEFINITIONS)
+    print(metricdef)
+    r1 = metricdef.compute(735, 1000, 20)
+    r2 = metricdef.compute(980, 1000, 20)
+    r3 = metricdef.compute(920.0, 1000.0, 0.0)
+    r4 = metricdef.compute(920.0, 1500.0, 500.0)
+    r5 = metricdef.compute(919.99999, 1000.0, 0.000001)
+    print(r1)
+    print(r2)
+    print(r3)
+    print(r4)
+    print(r5)
+
+    print()
+
+    # --- Resources: physical, cloud-virtual, and VNFs/Services ---
+    physRs = init_physical_resources()
+    print(physRs)
+    physR = get_indexed_item_from_file(1,FILE_PHYSICAL_RESOURCES)
+    print(physR)
+
+    print()
+
+    cloudRs = init_cloud_virtual_resources()
+    print(cloudRs)
+    cloudR = get_indexed_item_from_file(1,FILE_CLOUD_RESOURCES)
+    print(cloudR)
+
+    print()
+
+    VNFs = init_VNFs_Services()
+    print(VNFs)
+    VNF = get_indexed_item_from_file(1,FILE_VNFS_SERVICES)
+    print(VNF)
+
+    print()
+
+    # --- ChallengeExecution: populate logs/responses, then dump to CSV ---
+    ce1 = ChallengeExecution(1,"essai challenge execution",5)
+    ce1.start_time = datetime.now()
+    ce1.log.append_to_list("challenge execution log event 1")
+    ce1.log.append_to_list("challenge execution log event 2")
+    ce1.CLI_responses.append_to_list("challenge execution CLI response 1")
+    ce1.log.append_to_list("challenge execution log event 3")
+    ce1.CLI_responses.append_to_list("challenge execution CLI response 2")
+    ce1.log.append_to_list("challenge execution log event 4")
+    ce1.log.append_to_list("challenge execution log event 5")
+    ce1.API_responses.append_to_list("challenge execution API response 1")
+    ce1.log.append_to_list("challenge execution log event 6")
+    print("log length: ", ce1.log.length())
+    print(ce1.log.get_timestamped_strings())
+    print("CLI_responses length: ", ce1.CLI_responses.length())
+    print(ce1.CLI_responses.get_timestamped_strings())
+    print("API_responses length: ", ce1.API_responses.length())
+    print(ce1.API_responses.get_timestamped_strings())
+    ce1.stop_time = datetime.now()
+    ce1.write_to_csv()
+
+    print()
+
+    # --- TestExecution: end-to-end walk-through, then dump to CSV ---
+    te1 = TestExecution(1,"essai test execution",5,1,"Gerard")
+    te1.start_time = datetime.now()
+    te1.challenge_start_time = ce1.start_time # illustrate how to set test execution challenge start time
+    print("te1.challenge_start_time:",te1.challenge_start_time)
+
+    te1.log.append_to_list("test execution log event 1")
+    te1.log.append_to_list("test execution log event 2")
+    te1.CLI_responses.append_to_list("test execution CLI response 1")
+    te1.CLI_responses.append_to_list("test execution CLI response 2")
+
+    metricdef = get_indexed_item_from_file(2,FILE_METRIC_DEFINITIONS) # get a metric definition, some ID
+    print(metricdef)
+    r1 = metricdef.compute(735, 1000, 20) # compute a metric value
+    print(r1)
+    te1.associated_metric_values.append_to_list(r1) # append a measured metric value to test execution
+    r1 = metricdef.compute(915, 1000, 20) # compute a metric value
+    print(r1)
+    te1.associated_metric_values.append_to_list(r1) # append a measured metric value to test execution
+
+    te1.log.append_to_list("test execution log event 3")
+    te1.API_responses.append_to_list("test execution API response 1")
+
+    print("log length: ", te1.log.length())
+    print(te1.log.get_timestamped_strings())
+    print("CLI_responses length: ", te1.CLI_responses.length())
+    print(te1.CLI_responses.get_timestamped_strings())
+    print("API_responses length: ", te1.API_responses.length())
+    print(te1.API_responses.get_timestamped_strings())
+    print("associated_metric_values length: ", te1.associated_metric_values.length())
+    print(te1.associated_metric_values.get_timestamped_metric_values_as_strings())
+
+    # simulate restoration detection and compute the recovery-time metric
+    te1.restoration_detection_time = datetime.now()
+    print("te1.restoration_detection_time:",te1.restoration_detection_time)
+    metricdef = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1
+    print(metricdef)
+    r1 = metricdef.compute(te1.challenge_start_time, te1.restoration_detection_time) # compute a metric value, for Recovery time
+    te1.recovery_time = r1 # assignment could be direct, i.e. te1.recovery_time = metricdef.compute(...)
+
+    te1.finish_time = datetime.now() # test execution is finished
+    te1.write_to_csv()
+
+    print()
+
+    print("\nCiao")
+
+if __name__ == "__main__":
+    main()
+
+
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/AutoResilRunTest.py b/lib/auto/testcase/resiliency/AutoResilRunTest.py
new file mode 100644
index 0000000..1364b4a
--- /dev/null
+++ b/lib/auto/testcase/resiliency/AutoResilRunTest.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+# Use case 02: Resilience Improvements
+# Use Case description: https://wiki.opnfv.org/display/AUTO/Auto+Use+Cases
+# Test case design: https://wiki.opnfv.org/display/AUTO/Use+case+2+%28Resilience+Improvements+through+ONAP%29+analysis
+
+# This module: execution of tests
+# (might merge this module with Main module)
+## Receive/retrieve chosen test def info
+##pre-test (pings, etc.)
+##launch test:
+## create execution instances of Test and Challenge
+## simulate challenge
+## get time T1
+## loop:
+## wait for VNF recovery
+## optional other metrics
+## store data and logs
+## get time T2
+## stop challenge
+## reset (with ONAP MSO)
+## store data and logs
+##post-tests
+##logs
+
+
+######################################################################
+# import statements
+import AutoResilGlobal
+
+def f1():
+    """Placeholder for the test-execution logic outlined in the header
+    comments of this module; currently a stub that always returns 0.
+    """
+    return 0
+
+
+
+
diff --git a/lib/auto/testcase/resiliency/clouds.yaml b/lib/auto/testcase/resiliency/clouds.yaml
new file mode 100644
index 0000000..7bfd717
--- /dev/null
+++ b/lib/auto/testcase/resiliency/clouds.yaml
@@ -0,0 +1,99 @@
+clouds:
+
+ # Openstack instance on Arm pod, controller IP@ 172.16.10.10
+ # Horizon: https://10.10.50.103/project/
+ # Identity API according to Horizon dashboard: https://10.10.50.103:5000/v2.0
+ # other potential auth_url: http://172.16.10.10:35357/v3
+ # (OS_AUTH_URL=http://controller:35357/v3)
+ # 2 project names: admin, service (project = tenant)
+ # project ID: 122caf64b3df4818bf2ce5ba793226b2
+ # EC2 URL: https://10.10.50.103:8773/services/Cloud
+ # EC2 access key: bcf3c69a7d1c405e9757f87f26faf19f
+ # 10.10.50.0/8: floating IP@
+ # 10.10.10.0/8: fixed IP@
+ armopenstack:
+ auth:
+ auth_url: https://10.10.50.103:5000/v2.0
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Euphrates, controller IP@ (mgt: 172.16.10.101; public: 10.16.0.101)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.101:35357/v2.0
+ # internal: http://172.16.10.101:5000/v2.0
+ # public: http://10.16.0.101:5000/v2.0 : works on LaaS hpe16, from hpe16
+ hpe16openstackEuphrates:
+ auth:
+ auth_url: http://10.16.0.101:5000/v2.0
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ region_name: RegionOne
+
+ # Openstack instance on generic LaaS hpe, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.36:35357/v3
+ # internal: http://172.16.10.36:5000/v3
+ # public: http://10.16.0.107:5000/v3
+ # Horizon: https://10.16.0.107:8078, but need SSH port forwarding through 10.10.100.26 to be reached from outside
+ # "If you are using Identity v3 you need to specify the user and the project domain name"
+
+ # generic cloud name, for a UNH IOL hpe server, for OPNFV Fraser, OpenStack installed by Fuel/MCP
+ unh-hpe-openstack-fraser:
+ auth:
+ auth_url: http://10.16.0.107:5000/v3
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ user_domain_name: Default
+ project_domain_name: Default
+ region_name: RegionOne
+ identity_api_version: 3
+
+# ubuntu@ctl01:~$ openstack project show admin
+# +-------------+----------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------+
+# | description | OpenStack Admin tenant |
+# | domain_id | default |
+# | enabled | True |
+# | id | 04fcfe7aa83f4df79ae39ca748aa8637 |
+# | is_domain | False |
+# | name | admin |
+# | parent_id | default |
+# +-------------+----------------------------------+
+
+# (openstack) domain show default
+# +-------------+----------------------------------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------------------------------+
+# | description | Domain created automatically to support V2.0 operations. |
+# | enabled | True |
+# | id | default |
+# | name | Default |
+# +-------------+----------------------------------------------------------+
+
+# (openstack) domain show heat_user_domain
+# +-------------+---------------------------------------------+
+# | Field | Value |
+# +-------------+---------------------------------------------+
+# | description | Contains users and projects created by heat |
+# | enabled | True |
+# | id | d9c29adac0fe4816922d783b257879d6 |
+# | name | heat_user_domain |
+# +-------------+---------------------------------------------+
+
+
+# export OS_AUTH_URL=http://10.16.0.107:5000/v3
+# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
+# export OS_PROJECT_NAME="admin"
+# export OS_USER_DOMAIN_NAME="Default"
+# export OS_USERNAME="admin"
+# export OS_PASSWORD="opnfv_secret"
+# export OS_REGION_NAME="RegionOne"
+# export OS_INTERFACE=public
+# export OS_IDENTITY_API_VERSION=3
+
+
diff --git a/lib/auto/testcase/vnf/vbng/MANIFEST.json b/lib/auto/testcase/vnf/vbng/MANIFEST.json
new file mode 100644
index 0000000..0b34111
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vbng.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vbng.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env
new file mode 100644
index 0000000..be4f972
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.env
@@ -0,0 +1,35 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ brgemu_bng_private_net_id: zdfw1bngin01_private
+ brgemu_bng_private_subnet_id: zdfw1bngin01_sub_private
+ bng_gmux_private_net_id: zdfw1bngmux01_private
+ bng_gmux_private_subnet_id: zdfw1bngmux01_sub_private
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ cpe_signal_net_id: zdfw1cpe01_private
+ cpe_signal_subnet_id: zdfw1cpe01_sub_private
+ brgemu_bng_private_net_cidr: 10.3.0.0/24
+ bng_gmux_private_net_cidr: 10.1.0.0/24
+ cpe_signal_private_net_cidr: 10.4.0.0/24
+ vbng_private_ip_0: 10.3.0.1
+ vbng_private_ip_1: 10.0.101.10
+ vbng_private_ip_2: 10.4.0.3
+ vbng_private_ip_3: 10.1.0.10
+ vbng_name_0: zdcpe1cpe01bng01
+ vnf_id: vCPE_Infrastructure_Metro_vBNG_demo_app
+ vf_module_id: vCPE_Intrastructure_Metro_vBNG
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vbng_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+  cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-radius-client-for-vbng/src/patches/Vpp-Integrate-FreeRADIUS-Client-for-vBNG.patch
diff --git a/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml
new file mode 100644
index 0000000..3dd7ca0
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbng/base_vcpe_vbng.yaml
@@ -0,0 +1,288 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE virtual Broadband Network Gateway (vBNG)
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ brgemu_bng_private_net_id:
+ type: string
+ label: vBNG IN private network name or ID
+ description: Private network that connects vBRG to vBNG
+ brgemu_bng_private_subnet_id:
+ type: string
+ label: vBNG IN private sub-network name or ID
+ description: vBNG IN private sub-network name or ID
+ brgemu_bng_private_net_cidr:
+ type: string
+ label: vBNG IN private network CIDR
+ description: The CIDR of the input side of vBNG private network
+ bng_gmux_private_net_id:
+ type: string
+ label: vBNG vGMUX private network name or ID
+ description: Private network that connects vBNG to vGMUX
+ bng_gmux_private_subnet_id:
+ type: string
+ label: vBNG vGMUX private sub-network name or ID
+ description: vBNG vGMUX private sub-network name or ID
+ bng_gmux_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the input side of vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ cpe_signal_net_id:
+ type: string
+ label: vCPE private network name or ID
+ description: Private network that connects vCPE elements with vCPE infrastructure elements
+ cpe_signal_subnet_id:
+ type: string
+ label: vCPE private sub-network name or ID
+ description: vCPE private sub-network name or ID
+ cpe_signal_private_net_cidr:
+ type: string
+ label: vAAA private network CIDR
+ description: The CIDR of the vAAA private network
+ vbng_private_ip_0:
+ type: string
+ label: vBNG IN private IP address
+ description: Private IP address that is assigned to the vBNG IN
+ vbng_private_ip_1:
+ type: string
+ label: vBNG private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vBNG to communicate with ONAP components
+ vbng_private_ip_2:
+ type: string
+ label: vBNG to CPE_SIGNAL private IP address
+ description: Private IP address that is assigned to the vBNG in the CPE_SIGNAL network
+ vbng_private_ip_3:
+ type: string
+ label: vBNG to vGMUX private IP address
+ description: Private IP address that is assigned to the vBNG to vGMUX port
+ vbng_name_0:
+ type: string
+ label: vBNG name
+ description: Name of the vBNG
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vFW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vBNG
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+
+ # Virtual BNG Instantiation
+ vbng_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: brgemu_bng_private_net_id }
+ fixed_ips: [{"subnet": { get_param: brgemu_bng_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_0 }}]
+
+ vbng_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_1 }}]
+
+ vbng_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: cpe_signal_net_id }
+ fixed_ips: [{"subnet": { get_param: cpe_signal_subnet_id }, "ip_address": { get_param: vbng_private_ip_2 }}]
+
+ vbng_private_3_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: bng_gmux_private_net_id }
+ fixed_ips: [{"subnet": { get_param: bng_gmux_private_subnet_id }, "ip_address": { get_param: vbng_private_ip_3 }}]
+
+ vbng_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vbng_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vbng_private_0_port }
+ - port: { get_resource: vbng_private_1_port }
+ - port: { get_resource: vbng_private_2_port }
+ - port: { get_resource: vbng_private_3_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __oam_ipaddr__: { get_param: vbng_private_ip_1 }
+ __brgemu_bng_net_ipaddr__: { get_param: vbng_private_ip_0 }
+ __cpe_signal_net_ipaddr__: { get_param: vbng_private_ip_2 }
+ __bng_gmux_net_ipaddr__: { get_param: vbng_private_ip_3 }
+ __oam_cidr__: { get_param: onap_private_net_cidr }
+ __brgemu_bng_cidr__: { get_param: brgemu_bng_private_net_cidr }
+ __cpe_signal_cidr__: { get_param: cpe_signal_private_net_cidr }
+ __bng_gmux_cidr__: { get_param: bng_gmux_private_net_cidr }
+ __dcae_collector_ip__: { get_param: dcae_collector_ip }
+ __dcae_collector_port__: { get_param: dcae_collector_port }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__brgemu_bng_net_ipaddr__" > /opt/config/brgemu_bng_net_ipaddr.txt
+ echo "__cpe_signal_net_ipaddr__" > /opt/config/cpe_signal_net_ipaddr.txt
+ echo "__bng_gmux_net_ipaddr__" > /opt/config/bng_gmux_net_ipaddr.txt
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__bng_gmux_cidr__" > /opt/config/bng_gmux_net_cidr.txt
+ echo "__cpe_signal_cidr__" > /opt/config/cpe_signal_net_cidr.txt
+ echo "__brgemu_bng_cidr__" > /opt/config/brgemu_bng_net_cidr.txt
+ echo "__dcae_collector_ip__" > /opt/config/dcae_collector_ip.txt
+ echo "__dcae_collector_port__" > /opt/config/dcae_collector_port.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_bng_install.sh -o /opt/v_bng_install.sh
+ cd /opt
+ chmod +x v_bng_install.sh
+ ./v_bng_install.sh
diff --git a/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json b/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json
new file mode 100644
index 0000000..3911256
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vbrgemu.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vbrgemu.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env
new file mode 100644
index 0000000..7719f55
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.env
@@ -0,0 +1,28 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 or vbrg-base-ubuntu-16-04 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ compile_state: PUT THE COMPILE STATE (done, auto or build)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ vbrgemu_bng_private_net_id: zdfw1bngin01_private
+ vbrgemu_bng_private_subnet_id: zdfw1bngin01_sub_private
+ vbrgemu_bng_private_net_cidr: 10.3.0.0/24
+ #vbrgemu_private_net_id: zdfw1vbrgemu01_private
+ #vbrgemu_private_net_cidr: 192.168.1.0/24
+ vbrgemu_private_ip_0: 10.3.0.4
+ #vbrgemu_private_ip_1: 192.168.1.1
+ sdnc_ip: 10.0.7.1
+ vbrgemu_name_0: zdcpe1cpe01brgemu01
+ vnf_id: vCPE_Infrastructure_BGREMU_demo_app
+ vf_module_id: vCPE_Customer_BRGEMU
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vbrgemu_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+  cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-option-82-for-vbrg/src/patches/VPP-Add-Option82-Nat-Filter-For-vBRG.patch
diff --git a/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml
new file mode 100644
index 0000000..a786995
--- /dev/null
+++ b/lib/auto/testcase/vnf/vbrgemu/base_vcpe_vbrgemu.yaml
@@ -0,0 +1,253 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE vBRG Emulator (vBRGEMU)
+
+#######################################################################
+# #
+# PARAMETERS #
+# #
+# 0_port should get IP address from DHCP discover through vBNG #
+# DCAE is not monitoring the BRGEMULATOR #
+#######################################################################
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ vbrgemu_bng_private_net_id:
+ type: string
+ label: vBNG private network name or ID
+ description: Private network that connects vBRGEMU to vBNG
+ vbrgemu_bng_private_subnet_id:
+ type: string
+ label: vBNG private sub-network name or ID
+ description: vBNG private sub-network name or ID
+ vbrgemu_bng_private_net_cidr:
+ type: string
+ label: vBNG IN private network CIDR
+ description: The CIDR of the input side of vBNG private network
+ # vbrgemu_private_net_id:
+ # type: string
+ # label: vBRGEMU Home private network name or ID
+ # description: Private network that connects vBRGEMU to local devices
+ #vbrgemu_private_net_cidr:
+ # type: string
+ # label: vBRGEMU Home private network CIDR
+ # description: The CIDR of the input side of vBRGEMU Home private network
+ vbrgemu_private_ip_0:
+ type: string
+ label: vGW private IP address
+ description: Private IP address towards the BRGEMU-BNG network
+ #vbrgemu_private_ip_1:
+ # type: string
+ # label: vGW private IP address
+ # description: Private IP address towards the BRGEMU private network
+ vbrgemu_name_0:
+ type: string
+ label: vGW name
+ description: Name of the vGW
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vFW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vBRG Emulator
+ sdnc_ip:
+ type: string
+ label: SDNC ip address
+ description: SDNC ip address used to set NAT
+ compile_state:
+ type: string
+ label: Compile State
+ description: State to compile code or not
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+ #vbrgemu_private_network:
+ # type: OS::Neutron::Net
+ # properties:
+ # name: { get_param: vbrgemu_private_net_id }
+
+ #vbrgemu_private_subnet:
+ # type: OS::Neutron::Subnet
+ # properties:
+ # name: { get_param: vbrgemu_private_net_id }
+ # network_id: { get_resource: vbrgemu_private_network }
+ # cidr: { get_param: vbrgemu_private_net_cidr }
+
+ # Virtual BRG Emulator Instantiation
+ # 0_port should get IP address from DHCP discover through vBNG once the VNF is running
+ vbrgemu_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: vbrgemu_bng_private_net_id }
+ fixed_ips: [{"subnet": { get_param: vbrgemu_bng_private_subnet_id }, "ip_address": { get_param: vbrgemu_private_ip_0 }}]
+
+ #vbrgemu_private_1_port:
+ # type: OS::Neutron::Port
+ # properties:
+ # network: { get_resource: vbrgemu_private_network }
+ # fixed_ips: [{"subnet": { get_resource: vbrgemu_private_subnet }, "ip_address": { get_param: vbrgemu_private_ip_1 }}]
+
+ vbrgemu_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vbrgemu_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vbrgemu_private_0_port }
+ #- port: { get_resource: vbrgemu_private_1_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ # __brgemu_net_ipaddr__: { get_param: vbrgemu_private_ip_1 }
+ # __brgemu_cidr__: { get_param: vbrgemu_private_net_cidr }
+ __brgemu_bng_private_net_cidr__: { get_param: vbrgemu_bng_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ __sdnc_ip__ : { get_param: sdnc_ip }
+ __compile_state__ : { get_param: compile_state }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ #echo "__brgemu_net_ipaddr__" > /opt/config/brgemu_net_ipaddr.txt
+ #echo "__brgemu_cidr__" > /opt/config/brgemu_net_cidr.txt
+ echo "__brgemu_bng_private_net_cidr__" > /opt/config/brgemu_bng_private_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+ echo "__sdnc_ip__" > /opt/config/sdnc_ip.txt
+ echo "__compile_state__" > /opt/config/compile_state.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_brgemu_install.sh -o /opt/v_brgemu_install.sh
+ cd /opt
+ chmod +x v_brgemu_install.sh
+ ./v_brgemu_install.sh
diff --git a/lib/auto/testcase/vnf/vgmux/MANIFEST.json b/lib/auto/testcase/vnf/vgmux/MANIFEST.json
new file mode 100644
index 0000000..1f62167
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vgmux.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vgmux.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env
new file mode 100644
index 0000000..e81afa7
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.env
@@ -0,0 +1,35 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ bng_gmux_private_net_id: zdfw1bngmux01_private
+ bng_gmux_private_subnet_id: zdfw1bngmux01_sub_private
+ mux_gw_private_net_id: zdfw1muxgw01_private
+ mux_gw_private_subnet_id: zdfw1muxgw01_sub_private
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ bng_gmux_private_net_cidr: 10.1.0.0/24
+ mux_gw_private_net_cidr: 10.5.0.0/24
+ vgmux_private_ip_0: 10.1.0.20
+ vgmux_private_ip_1: 10.0.101.20
+ vgmux_private_ip_2: 10.5.0.20
+ vgmux_name_0: zdcpe1cpe01mux01
+ vnf_id: vCPE_Infrastructure_vGMUX_demo_app
+ vf_module_id: vCPE_Intrastructure_Metro_vGMUX
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vgmux_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
+ vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Vpp-Add-VES-agent-for-vG-MUX.patch
+ hc2vpp_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/Hc2vpp-Add-VES-agent-for-vG-MUX.patch
+ libevel_patch_url: https://git.onap.org/demo/plain/vnfs/vCPE/vpp-ves-agent-for-vgmux/src/patches/vCPE-vG-MUX-libevel-fixup.patch
diff --git a/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml
new file mode 100644
index 0000000..ecdb1b1
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgmux/base_vcpe_vgmux.yaml
@@ -0,0 +1,281 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE Infrastructure Metro vGMUX
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ bng_gmux_private_net_id:
+ type: string
+ label: vBNG vGMUX private network name or ID
+ description: Private network that connects vBNG to vGMUX
+ bng_gmux_private_subnet_id:
+ type: string
+ label: vBNG vGMUX private sub-network name or ID
+ description: vBNG vGMUX private sub-network name or ID
+ bng_gmux_private_net_cidr:
+ type: string
+ label: vBNG vGMUX private network CIDR
+ description: The CIDR of the vBNG-vGMUX private network
+ mux_gw_private_net_id:
+ type: string
+ label: vGMUX vGWs network name or ID
+ description: Private network that connects vGMUX to vGWs
+ mux_gw_private_subnet_id:
+ type: string
+ label: vGMUX vGWs sub-network name or ID
+ description: vGMUX vGWs sub-network name or ID
+ mux_gw_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ vgmux_private_ip_0:
+ type: string
+ label: vGMUX private IP address towards the vBNG-vGMUX private network
+ description: Private IP address that is assigned to the vGMUX to communicate with the vBNG
+ vgmux_private_ip_1:
+ type: string
+ label: vGMUX private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vGMUX to communicate with ONAP components
+ vgmux_private_ip_2:
+ type: string
+ label: vGMUX private IP address towards the vGMUX-vGW private network
+ description: Private IP address that is assigned to the vGMUX to communicate with vGWs
+ vgmux_name_0:
+ type: string
+ label: vGMUX name
+ description: Name of the vGMUX
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vFW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+ vpp_patch_url:
+ type: string
+ label: VPP Patch URL
+ description: URL for VPP patch for vG-MUX
+ hc2vpp_patch_url:
+ type: string
+ label: Honeycomb Patch URL
+ description: URL for Honeycomb patch for vG-MUX
+ libevel_patch_url:
+ type: string
+ label: libevel Patch URL
+ description: URL for libevel patch for vG-MUX
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+
+ # Virtual GMUX Instantiation
+ vgmux_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: bng_gmux_private_net_id }
+ fixed_ips: [{"subnet": { get_param: bng_gmux_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_0 }}]
+
+ vgmux_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_1 }}]
+
+ vgmux_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: mux_gw_private_net_id }
+ fixed_ips: [{"subnet": { get_param: mux_gw_private_subnet_id }, "ip_address": { get_param: vgmux_private_ip_2 }}]
+
+ vgmux_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vgmux_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vgmux_private_0_port }
+ - port: { get_resource: vgmux_private_1_port }
+ - port: { get_resource: vgmux_private_2_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __bng_mux_net_ipaddr__ : { get_param: vgmux_private_ip_0 }
+ __oam_ipaddr__ : { get_param: vgmux_private_ip_1 }
+ __mux_gw_net_ipaddr__ : { get_param: vgmux_private_ip_2 }
+ __bng_mux_net_cidr__ : { get_param: bng_gmux_private_net_cidr }
+ __oam_cidr__ : { get_param: onap_private_net_cidr }
+ __mux_gw_net_cidr__ : { get_param: mux_gw_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ __vpp_patch_url__ : { get_param: vpp_patch_url }
+ __hc2vpp_patch_url__ : { get_param: hc2vpp_patch_url }
+ __libevel_patch_url__ : { get_param: libevel_patch_url }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__bng_mux_net_ipaddr__" > /opt/config/bng_mux_net_ipaddr.txt
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__mux_gw_net_ipaddr__" > /opt/config/mux_gw_net_ipaddr.txt
+ echo "__bng_mux_net_cidr__" > /opt/config/bng_mux_net_cidr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__mux_gw_net_cidr__" > /opt/config/mux_gw_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__vpp_patch_url__" > /opt/config/vpp_patch_url.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+ echo "__hc2vpp_patch_url__" > /opt/config/hc2vpp_patch_url.txt
+ echo "__libevel_patch_url__" > /opt/config/libevel_patch_url.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gmux_install.sh -o /opt/v_gmux_install.sh
+ cd /opt
+ chmod +x v_gmux_install.sh
+ ./v_gmux_install.sh
diff --git a/lib/auto/testcase/vnf/vgw/MANIFEST.json b/lib/auto/testcase/vnf/vgw/MANIFEST.json
new file mode 100644
index 0000000..8178b1e
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/MANIFEST.json
@@ -0,0 +1,17 @@
+{
+ "name": "",
+ "description": "",
+ "data": [
+ {
+ "file": "base_vcpe_vgw.yaml",
+ "type": "HEAT",
+ "isBase": "true",
+ "data": [
+ {
+ "file": "base_vcpe_vgw.env",
+ "type": "HEAT_ENV"
+ }
+ ]
+ }
+ ]
+}
diff --git a/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env
new file mode 100644
index 0000000..f1cadb8
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.env
@@ -0,0 +1,32 @@
+ parameters:
+ vcpe_image_name: PUT THE IMAGE NAME HERE (Ubuntu 1604 SUGGESTED)
+ vcpe_flavor_name: PUT THE FLAVOR NAME HERE (MEDIUM FLAVOR SUGGESTED)
+ public_net_id: PUT THE PUBLIC NETWORK ID HERE
+ mux_gw_private_net_id: zdfw1muxgw01_private
+ mux_gw_private_subnet_id: zdfw1muxgw01_sub_private
+ mux_gw_private_net_cidr: 10.5.0.0/24
+ cpe_public_net_id: zdfw1cpe01_public
+ cpe_public_subnet_id: zdfw1cpe01_sub_public
+ cpe_public_net_cidr: 10.2.0.0/24
+ onap_private_net_id: PUT THE ONAP PRIVATE NETWORK NAME HERE
+ onap_private_subnet_id: PUT THE ONAP PRIVATE SUBNETWORK NAME HERE
+ onap_private_net_cidr: 10.0.0.0/16
+ vgw_private_ip_0: 10.5.0.21
+ vgw_private_ip_1: 10.0.101.30
+ vgw_private_ip_2: 10.2.0.3
+ vgw_name_0: zdcpe1cpe01gw01
+ vnf_id: vCPE_Infrastructure_GW_demo_app
+ vf_module_id: vCPE_Customer_GW
+ dcae_collector_ip: 10.0.4.102
+ dcae_collector_port: 8080
+ repo_url_blob: https://nexus.onap.org/content/sites/raw
+ repo_url_artifacts: https://nexus.onap.org/content/groups/staging
+ demo_artifacts_version: 1.1.0
+ install_script_version: 1.1.0-SNAPSHOT
+ key_name: vgw_key
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQXYJYYi3/OUZXUiCYWdtc7K0m5C0dJKVxPG0eI8EWZrEHYdfYe6WoTSDJCww+1qlBSpA5ac/Ba4Wn9vh+lR1vtUKkyIC/nrYb90ReUd385Glkgzrfh5HdR5y5S2cL/Frh86lAn9r6b3iWTJD8wBwXFyoe1S2nMTOIuG4RPNvfmyCTYVh8XTCCE8HPvh3xv2r4egawG1P4Q4UDwk+hDBXThY2KS8M5/8EMyxHV0ImpLbpYCTBA6KYDIRtqmgS6iKyy8v2D1aSY5mc9J0T5t9S2Gv+VZQNWQDDKNFnxqYaAo1uEoq/i1q63XC5AD3ckXb2VT6dp23BQMdDfbHyUWfJN
+ cloud_env: PUT THE CLOUD PROVIDER HERE (openstack or rackspace)
+ vpp_source_repo_url: https://gerrit.fd.io/r/vpp
+ vpp_source_repo_branch: stable/1704
+ hc2vpp_source_repo_url: https://gerrit.fd.io/r/hc2vpp
+ hc2vpp_source_repo_branch: stable/1704
diff --git a/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml
new file mode 100644
index 0000000..173ba6d
--- /dev/null
+++ b/lib/auto/testcase/vnf/vgw/base_vcpe_vgw.yaml
@@ -0,0 +1,261 @@
+##########################################################################
+#
+#==================LICENSE_START==========================================
+#
+#
+# Copyright 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#==================LICENSE_END============================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+##########################################################################
+
+heat_template_version: 2013-05-23
+
+description: Heat template to deploy vCPE vGateway (vG)
+
+##############
+# #
+# PARAMETERS #
+# #
+##############
+
+parameters:
+ vcpe_image_name:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ vcpe_flavor_name:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ public_net_id:
+ type: string
+ label: Public network name or ID
+ description: Public network that enables remote connection to VNF
+ mux_gw_private_net_id:
+ type: string
+ label: vGMUX private network name or ID
+ description: Private network that connects vGMUX to vGWs
+ mux_gw_private_subnet_id:
+ type: string
+ label: vGMUX private sub-network name or ID
+ description: vGMUX private sub-network name or ID
+ mux_gw_private_net_cidr:
+ type: string
+ label: vGMUX private network CIDR
+ description: The CIDR of the vGMUX private network
+ onap_private_net_id:
+ type: string
+ label: ONAP management network name or ID
+ description: Private network that connects ONAP components and the VNF
+ onap_private_subnet_id:
+ type: string
+ label: ONAP management sub-network name or ID
+ description: Private sub-network that connects ONAP components and the VNF
+ onap_private_net_cidr:
+ type: string
+ label: ONAP private network CIDR
+ description: The CIDR of the protected private network
+ cpe_public_net_id:
+ type: string
+    label: vCPE network that emulates the internet - name or ID
+ description: Private network that connects vGW to emulated internet
+ cpe_public_subnet_id:
+ type: string
+ label: vCPE Public subnet
+ description: vCPE Public subnet
+ cpe_public_net_cidr:
+ type: string
+ label: vCPE public network CIDR
+    description: The CIDR of the vCPE public network
+ vgw_private_ip_0:
+ type: string
+ label: vGW private IP address towards the vGMUX
+ description: Private IP address that is assigned to the vGW to communicate with vGMUX
+ vgw_private_ip_1:
+ type: string
+ label: vGW private IP address towards the ONAP management network
+ description: Private IP address that is assigned to the vGW to communicate with ONAP components
+ vgw_private_ip_2:
+ type: string
+ label: vGW private IP address towards the vCPE public network
+ description: Private IP address that is assigned to the vGW to communicate with vCPE public network
+ vgw_name_0:
+ type: string
+ label: vGW name
+ description: Name of the vGW
+ vnf_id:
+ type: string
+ label: VNF ID
+ description: The VNF ID is provided by ONAP
+ vf_module_id:
+ type: string
+ label: vCPE module ID
+ description: The vCPE Module ID is provided by ONAP
+ dcae_collector_ip:
+ type: string
+ label: DCAE collector IP address
+ description: IP address of the DCAE collector
+ dcae_collector_port:
+ type: string
+ label: DCAE collector port
+ description: Port of the DCAE collector
+ key_name:
+ type: string
+ label: Key pair name
+ description: Public/Private key pair name
+ pub_key:
+ type: string
+ label: Public key
+ description: Public key to be installed on the compute instance
+ repo_url_blob:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ repo_url_artifacts:
+ type: string
+ label: Repository URL
+ description: URL of the repository that hosts the demo packages
+ install_script_version:
+ type: string
+ label: Installation script version number
+ description: Version number of the scripts that install the vFW demo app
+ demo_artifacts_version:
+ type: string
+ label: Artifacts version used in demo vnfs
+ description: Artifacts (jar, tar.gz) version used in demo vnfs
+ cloud_env:
+ type: string
+ label: Cloud environment
+ description: Cloud environment (e.g., openstack, rackspace)
+ vpp_source_repo_url:
+ type: string
+ label: VPP Source Git Repo
+ description: URL for VPP source codes
+ vpp_source_repo_branch:
+ type: string
+ label: VPP Source Git Branch
+ description: Git Branch for the VPP source codes
+ hc2vpp_source_repo_url:
+ type: string
+ label: Honeycomb Source Git Repo
+ description: URL for Honeycomb source codes
+ hc2vpp_source_repo_branch:
+ type: string
+ label: Honeycomb Source Git Branch
+ description: Git Branch for the Honeycomb source codes
+
+#############
+# #
+# RESOURCES #
+# #
+#############
+
+resources:
+
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ my_keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: { get_param: key_name }
+ rand: { get_resource: random-str }
+ public_key: { get_param: pub_key }
+ save_private_key: false
+
+ # Virtual GW Instantiation
+ vgw_private_0_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: mux_gw_private_net_id }
+ fixed_ips: [{"subnet": { get_param: mux_gw_private_subnet_id }, "ip_address": { get_param: vgw_private_ip_0 }}]
+
+ vgw_private_1_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: onap_private_net_id }
+ fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vgw_private_ip_1 }}]
+
+ vgw_private_2_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: cpe_public_net_id}
+ fixed_ips: [{"subnet": { get_param: cpe_public_subnet_id }, "ip_address": { get_param: vgw_private_ip_2 }}]
+
+ vgw_0:
+ type: OS::Nova::Server
+ properties:
+ image: { get_param: vcpe_image_name }
+ flavor: { get_param: vcpe_flavor_name }
+ name: { get_param: vgw_name_0 }
+ key_name: { get_resource: my_keypair }
+ networks:
+ - network: { get_param: public_net_id }
+ - port: { get_resource: vgw_private_0_port }
+ - port: { get_resource: vgw_private_1_port }
+ - port: { get_resource: vgw_private_2_port }
+ metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __mux_gw_private_net_ipaddr__ : { get_param: vgw_private_ip_0 }
+ __oam_ipaddr__ : { get_param: vgw_private_ip_1 }
+ __oam_cidr__ : { get_param: onap_private_net_cidr }
+ __cpe_public_net_cidr__ : { get_param: cpe_public_net_cidr }
+ __mux_gw_private_net_cidr__ : { get_param: mux_gw_private_net_cidr }
+ __repo_url_blob__ : { get_param: repo_url_blob }
+ __repo_url_artifacts__ : { get_param: repo_url_artifacts }
+ __demo_artifacts_version__ : { get_param: demo_artifacts_version }
+ __install_script_version__ : { get_param: install_script_version }
+ __cloud_env__ : { get_param: cloud_env }
+ __vpp_source_repo_url__ : { get_param: vpp_source_repo_url }
+ __vpp_source_repo_branch__ : { get_param: vpp_source_repo_branch }
+ __hc2vpp_source_repo_url__ : { get_param: hc2vpp_source_repo_url }
+ __hc2vpp_source_repo_branch__ : { get_param: hc2vpp_source_repo_branch }
+ template: |
+ #!/bin/bash
+
+ # Create configuration files
+ mkdir /opt/config
+ echo "__oam_ipaddr__" > /opt/config/oam_ipaddr.txt
+ echo "__oam_cidr__" > /opt/config/oam_cidr.txt
+ echo "__cpe_public_net_cidr__" > /opt/config/cpe_public_net_cidr.txt
+ echo "__mux_gw_private_net_ipaddr__" > /opt/config/mux_gw_private_net_ipaddr.txt
+ echo "__mux_gw_private_net_cidr__" > /opt/config/mux_gw_private_net_cidr.txt
+ echo "__repo_url_blob__" > /opt/config/repo_url_blob.txt
+ echo "__repo_url_artifacts__" > /opt/config/repo_url_artifacts.txt
+ echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
+ echo "__install_script_version__" > /opt/config/install_script_version.txt
+ echo "__cloud_env__" > /opt/config/cloud_env.txt
+ echo "__vpp_source_repo_url__" > /opt/config/vpp_source_repo_url.txt
+ echo "__vpp_source_repo_branch__" > /opt/config/vpp_source_repo_branch.txt
+ echo "__hc2vpp_source_repo_url__" > /opt/config/hc2vpp_source_repo_url.txt
+ echo "__hc2vpp_source_repo_branch__" > /opt/config/hc2vpp_source_repo_branch.txt
+
+ # Download and run install script
+ curl -k __repo_url_blob__/org.onap.demo/vnfs/vcpe/__install_script_version__/v_gw_install.sh -o /opt/v_gw_install.sh
+ cd /opt
+ chmod +x v_gw_install.sh
+ ./v_gw_install.sh
+
diff --git a/lib/auto/util/__init__.py b/lib/auto/util/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/auto/util/__init__.py
diff --git a/lib/auto/util/openstack_lib.py b/lib/auto/util/openstack_lib.py
new file mode 100644
index 0000000..4b62b72
--- /dev/null
+++ b/lib/auto/util/openstack_lib.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Module to manage OpenStack"""
+
+import os
+import re
+import sys
+import time
+import traceback
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from keystoneclient import client as keystoneclient
+from glanceclient import client as glanceclient
+from neutronclient.neutron import client as neutronclient
+from novaclient import client as novaclient
+from heatclient import client as heatclient
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+DEFAULT_API_VERSION = '2'
+DEFAULT_ORCHESTRATION_API_VERSION = '1'
+
+openrc_base_key = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
+
+openrc_v3_exkey = ['OS_PROJECT_NAME',
+ 'OS_USER_DOMAIN_NAME',
+ 'OS_PROJECT_DOMAIN_NAME']
+
+openrc_v2_exkey = ['OS_TENANT_NAME']
+
+openrc_vars_mapping = {
+ 'OS_USERNAME': 'username',
+ 'OS_PASSWORD': 'password',
+ 'OS_AUTH_URL': 'auth_url',
+ 'OS_TENANT_NAME': 'tenant_name',
+ 'OS_USER_DOMAIN_NAME': 'user_domain_name',
+ 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
+ 'OS_PROJECT_NAME': 'project_name',
+ }
+
+
def check_identity_api_version():
    """Determine the identity (keystone) API version from the environment.

    The version is parsed from the trailing path component of OS_AUTH_URL
    (e.g. ".../v3" -> "3").  If OS_IDENTITY_API_VERSION is also set, it
    must agree with the URL.

    Returns:
        str: the identity API version, e.g. '2.0' or '3'.
    Raises:
        RuntimeError: if OS_AUTH_URL is unset, or OS_IDENTITY_API_VERSION
            is set and disagrees with the URL.
    """
    identity_api_version = os.getenv('OS_IDENTITY_API_VERSION')
    auth_url = os.getenv('OS_AUTH_URL')
    if not auth_url:
        raise RuntimeError("Require env var: OS_AUTH_URL")
    auth_url_parse = auth_url.split('/')
    # Tolerate a trailing slash: fall back to the second-to-last component.
    url_tail = auth_url_parse[-1] if auth_url_parse[-1] else auth_url_parse[-2]
    # Only the leading 'v' marks the version (lstrip, not strip).
    url_identity_version = url_tail.lstrip('v')
    # BUG FIX: the original tested "not identity_api_version and
    # identity_api_version != url_identity_version", which is always true
    # when the env var is unset (None != str), so every call without
    # OS_IDENTITY_API_VERSION raised.  Only a *set* variable that
    # disagrees with the URL is an actual inconsistency.
    if identity_api_version and \
            identity_api_version != url_identity_version:
        raise RuntimeError("identity api version not consistent")
    return url_identity_version
+
+
def check_image_api_version():
    """Return OS_IMAGE_API_VERSION from the environment, or the default."""
    return os.getenv('OS_IMAGE_API_VERSION') or DEFAULT_API_VERSION


def check_network_api_version():
    """Return OS_NETWORK_API_VERSION from the environment, or the default."""
    return os.getenv('OS_NETWORK_API_VERSION') or DEFAULT_API_VERSION


def check_compute_api_version():
    """Return OS_COMPUTE_API_VERSION from the environment, or the default."""
    return os.getenv('OS_COMPUTE_API_VERSION') or DEFAULT_API_VERSION


def check_orchestration_api_version():
    """Return OS_ORCHESTRATION_API_VERSION, or the orchestration default."""
    return os.getenv('OS_ORCHESTRATION_API_VERSION') \
        or DEFAULT_ORCHESTRATION_API_VERSION
+
+
def get_project_name(creds):
    """Return the project (identity v3) or tenant (v2) name from creds."""
    version = check_identity_api_version()
    if version == '3':
        return creds["project_name"]
    if version == '2':
        return creds["tenant_name"]
    raise RuntimeError("Unsupported identity version")
+
+
def get_credentials():
    """Build a keystone credentials dict from the OS_* environment variables.

    The base variables (auth URL, user, password) are always required;
    the identity version decides whether the v3 (project/domain) or v2
    (tenant) extras are required as well.

    Returns:
        dict: keystoneauth-style credentials (auth_url, username, ...).
    Raises:
        RuntimeError: on an unsupported identity version or a missing
            environment variable.
    """
    creds = {}
    # BUG FIX: the original "creds_env_key = openrc_base_key" aliased the
    # module-level list, so the "+=" below appended the v2/v3 extra keys
    # to the global on every call and the list grew without bound.
    # Work on a copy instead.
    creds_env_key = list(openrc_base_key)
    identity_api_version = check_identity_api_version()

    if identity_api_version == '3':
        creds_env_key += openrc_v3_exkey
    elif identity_api_version == '2':
        creds_env_key += openrc_v2_exkey
    else:
        raise RuntimeError("Unsupported identity version")

    for env_key in creds_env_key:
        env_value = os.getenv(env_key)
        if env_value is None:
            raise RuntimeError("Require env var: %s" % env_key)
        creds_var = openrc_vars_mapping.get(env_key)
        creds.update({creds_var: env_value})

    return creds
+
+
def get_session_auth(creds):
    """Build a keystone password-auth plugin from a credentials dict."""
    return loading.get_plugin_loader('password').load_from_options(**creds)


def get_session(creds):
    """Open a keystone session, honouring OS_CACERT / OS_INSECURE.

    A CA bundle path in OS_CACERT is used for verification when present;
    otherwise verification is disabled only if OS_INSECURE == 'true'.
    """
    cacert = os.getenv('OS_CACERT')
    insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
    verify = cacert if cacert else not insecure
    return session.Session(auth=get_session_auth(creds), verify=verify)


def get_keystone_client(creds):
    """Return an identity client; OS_INTERFACE defaults to 'admin'."""
    return keystoneclient.Client(
        check_identity_api_version(),
        session=get_session(creds),
        interface=os.getenv('OS_INTERFACE', 'admin'))


def get_glance_client(creds):
    """Return an image-service (glance) client."""
    return glanceclient.Client(check_image_api_version(),
                               session=get_session(creds))


def get_neutron_client(creds):
    """Return a networking (neutron) client."""
    return neutronclient.Client(check_network_api_version(),
                                session=get_session(creds))


def get_nova_client(creds):
    """Return a compute (nova) client."""
    return novaclient.Client(check_compute_api_version(),
                             session=get_session(creds))


def get_heat_client(creds):
    """Return an orchestration (heat) client."""
    return heatclient.Client(check_orchestration_api_version(),
                             session=get_session(creds))
+
+
def get_domain_id(keystone_client, domain_name):
    """Return the id of the keystone domain named domain_name, else None."""
    for domain in keystone_client.domains.list():
        if domain.name == domain_name:
            return domain.id
    return None


def get_project_id(keystone_client, project_name):
    """Return the id of the named project (v3) / tenant (v2), else None."""
    version = check_identity_api_version()
    if version == '3':
        projects = keystone_client.projects.list()
    elif version == '2':
        projects = keystone_client.tenants.list()
    else:
        raise RuntimeError("Unsupported identity version")
    for project in projects:
        if project.name == project_name:
            return project.id
    return None


def get_image_id(glance_client, image_name):
    """Return the id of the glance image named image_name, else None."""
    for image in glance_client.images.list():
        if image.name == image_name:
            return image.id
    return None


def get_network_id(neutron_client, network_name):
    """Return the id of the neutron network named network_name, else None."""
    for network in neutron_client.list_networks()['networks']:
        if network['name'] == network_name:
            return network['id']
    return None
+
+
def get_security_group_id(neutron_client, secgroup_name, project_id=None):
    """Return the id of the security group named secgroup_name.

    If project_id is given, a group owned by that project is preferred;
    when no owner matches, the id of the last same-named group found is
    kept as a fallback (preserving the original lookup order).

    Returns:
        str or None: the group id, or None when no group matches.
            (BUG FIX: the original initialised the result to an empty
            list, so a miss returned [] — inconsistent with every other
            get_*_id helper in this module, which return None.)
    """
    security_groups = neutron_client.list_security_groups()['security_groups']
    secgroup_id = None
    for security_group in security_groups:
        if security_group['name'] == secgroup_name:
            secgroup_id = security_group['id']
            # Stop at the first exact owner match (or first name match
            # when no owner filter was requested).
            if project_id is None or \
                    security_group['project_id'] == project_id:
                break
    return secgroup_id
+
+
def get_secgroup_rule_id(neutron_client, secgroup_id, json_body):
    """Return the id of the rule matching every field in json_body, else None.

    json_body has the create_security_group_rule request shape:
    {'security_group_rule': {...}}; every key/value pair must equal the
    corresponding field of an existing rule for it to count as a match.
    """
    wanted = json_body['security_group_rule']
    rules = neutron_client.list_security_group_rules()['security_group_rules']
    for rule in rules:
        if all(rule[key] == value for key, value in wanted.items()):
            return rule['id']
    return None


def get_keypair_id(nova_client, keypair_name):
    """Return the id of the nova keypair named keypair_name, else None."""
    for keypair in nova_client.keypairs.list():
        if keypair.name == keypair_name:
            return keypair.id
    return None
+
+
def create_project(keystone_client, creds, project_name, project_desc):
    """Create a project (v3) / tenant (v2) and return its id.

    Idempotent: an existing project with the same name is reused.
    For identity v3 the project is created inside the domain named by
    creds["user_domain_name"].
    """
    existing_id = get_project_id(keystone_client, project_name)
    if existing_id:
        return existing_id

    version = check_identity_api_version()
    if version == '3':
        domain_id = get_domain_id(keystone_client, creds["user_domain_name"])
        project = keystone_client.projects.create(name=project_name,
                                                  description=project_desc,
                                                  domain=domain_id,
                                                  enabled=True)
    elif version == '2':
        project = keystone_client.tenants.create(project_name,
                                                 project_desc,
                                                 enabled=True)
    else:
        raise RuntimeError("Unsupported identity version")

    return project.id
+
+
def create_image(glance_client, image_name, image_path, disk_format="qcow2",
                 container_format="bare", visibility="public"):
    """Create a glance image from a local file and return its id.

    Idempotent: an existing image with the same name is reused and the
    file is not re-uploaded.

    Raises:
        RuntimeError: if image_path does not exist.
    """
    if not os.path.isfile(image_path):
        raise RuntimeError("Image file not found: %s" % image_path)
    image_id = get_image_id(glance_client, image_name)
    if not image_id:
        image = glance_client.images.create(name=image_name,
                                            visibility=visibility,
                                            disk_format=disk_format,
                                            container_format=container_format)
        image_id = image.id
        # BUG FIX: open in binary mode — image files (e.g. qcow2) are
        # binary; the original text-mode open breaks the upload under
        # Python 3 and can mangle data on platforms with newline
        # translation.
        with open(image_path, 'rb') as image_data:
            glance_client.images.upload(image_id, image_data)
    return image_id
+
+
def create_secgroup_rule(neutron_client, secgroup_id, protocol, direction,
                         port_range_min=None, port_range_max=None):
    """Create a security-group rule unless an identical one already exists.

    Returns:
        str: the id of the pre-existing matching rule, or of the newly
            created one.  (BUG FIX: the original overwrote secgroup_id
            with the rule-lookup result and returned None whenever the
            rule had to be created, so callers could not obtain the new
            rule's id.)
    Raises:
        RuntimeError: when only one end of the port range is given.
    """
    json_body = {'security_group_rule': {'direction': direction,
                                         'security_group_id': secgroup_id,
                                         'protocol': protocol}}

    # Both ends of the port range must be given together, or neither.
    if bool(port_range_min) != bool(port_range_max):
        raise RuntimeError("Start or end of protocol range is empty: [ %s, %s ]"
                           % (port_range_min, port_range_max))
    elif port_range_min and port_range_max:
        json_body['security_group_rule'].update(
            {'port_range_min': port_range_min,
             'port_range_max': port_range_max})

    rule_id = get_secgroup_rule_id(neutron_client, secgroup_id, json_body)
    if not rule_id:
        # neutron returns the created rule wrapped in the same
        # {'security_group_rule': {...}} envelope as the request body.
        created = neutron_client.create_security_group_rule(json_body)
        rule_id = created['security_group_rule']['id']
    return rule_id
+
+
def update_compute_quota(nova_client, project_id, quotas):
    """Apply the quota values in the quotas dict to the given project."""
    nova_client.quotas.update(project_id, **quotas)


def create_keypair(nova_client, keypair_name, keypair_path):
    """Register a nova keypair from a public-key file and return its id.

    Idempotent: an existing keypair with the same name is reused.
    ~ is expanded in keypair_path.
    """
    existing_id = get_keypair_id(nova_client, keypair_name)
    if existing_id:
        return existing_id
    with open(os.path.expanduser(keypair_path), 'r') as pubkey_file:
        pubkey_data = pubkey_file.read().decode('utf-8')
        new_keypair = nova_client.keypairs.create(name=keypair_name,
                                                  public_key=pubkey_data)
    return new_keypair.id
+
diff --git a/lib/auto/util/util.py b/lib/auto/util/util.py
new file mode 100644
index 0000000..0033900
--- /dev/null
+++ b/lib/auto/util/util.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Utility Module"""
+
+import os
+import git
+import urllib
+import yaml
+import traceback
+from Crypto.PublicKey import RSA
+from yaml_type import literal_unicode
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
def folded_unicode_representer(dumper, data):
    """Represent a folded_unicode value as a folded ('>') YAML scalar."""
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')


def literal_unicode_representer(dumper, data):
    """Represent a literal_unicode value as a literal ('|') YAML scalar."""
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')


def unicode_representer(dumper, uni):
    """Represent a unicode value as a plain YAML string node."""
    return yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)


def mkdir(path):
    """Create directory path (with parents) if it does not exist.

    Surrounding whitespace and trailing backslashes are stripped first.
    Returns True when the directory was created, False if it existed.
    """
    cleaned = path.strip().rstrip("\\")
    if os.path.exists(cleaned):
        return False
    os.makedirs(cleaned)
    return True
+
+
def download(url, file_path):
    """Download url to file_path unless the file already exists.

    Returns:
        bool: True when a download was performed, False when file_path
            was already present (the existing file is NOT re-fetched).
    """
    if os.path.exists(file_path):
        return False
    # PORTABILITY FIX: urllib.urlretrieve only exists on Python 2;
    # resolve the correct implementation at call time so this helper
    # works under both interpreter versions.
    try:
        from urllib import urlretrieve          # Python 2
    except ImportError:
        from urllib.request import urlretrieve  # Python 3
    urlretrieve(url, file_path)
    return True
+
+
def git_clone(git_repo, git_branch, clone_path):
    """Clone git_repo (at git_branch) into clone_path unless it exists."""
    if os.path.exists(clone_path):
        return
    git.Repo.clone_from(git_repo, clone_path, branch=git_branch)


def read_file(file_path):
    """Return the full contents of file_path (~ is expanded)."""
    with open(os.path.expanduser(file_path)) as handle:
        content = handle.read()
    return content


def read_yaml(yaml_path):
    """Parse the YAML file at yaml_path (~ is expanded) and return the data."""
    with open(os.path.expanduser(yaml_path)) as handle:
        data = yaml.safe_load(handle)
    return data


def write_yaml(yaml_data, yaml_path, default_style=False):
    """Dump yaml_data to yaml_path (~ is expanded).

    Registers representers so literal_unicode values render as '|'
    block scalars and plain unicode as ordinary strings.
    """
    yaml.add_representer(literal_unicode, literal_unicode_representer)
    yaml.add_representer(unicode, unicode_representer)
    with open(os.path.expanduser(yaml_path), 'w') as handle:
        return yaml.dump(yaml_data, handle,
                         default_flow_style=default_style)
+
+
def create_keypair(prikey_path, pubkey_path, size=2048):
    """Generate an RSA keypair and write it to prikey_path / pubkey_path.

    The private key is written PEM-encoded with 0600 permissions; the
    public key is written in OpenSSH format.  ~ is expanded in both paths.
    """
    key = RSA.generate(size)
    prikey_file_path = os.path.expanduser(prikey_path)
    with open(prikey_file_path, 'w') as prikey_file:
        # BUG FIX: the original used the literal "0600", which is a
        # SyntaxError on Python 3 ("0o600" works on 2.6+ and 3), and
        # chmod'ed the raw argument while opening the ~-expanded path;
        # chmod the same expanded path that is actually opened.
        os.chmod(prikey_file_path, 0o600)
        prikey_file.write(key.exportKey('PEM'))
    pubkey = key.publickey()
    with open(os.path.expanduser(pubkey_path), 'w') as pubkey_file:
        pubkey_file.write(pubkey.exportKey('OpenSSH'))
diff --git a/lib/auto/util/yaml_type.py b/lib/auto/util/yaml_type.py
new file mode 100644
index 0000000..352fc7d
--- /dev/null
+++ b/lib/auto/util/yaml_type.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
class folded_unicode(unicode):
    """Marker string type: util.py registers a representer that renders
    these as folded ('>') YAML scalars.  (unicode is the Python 2 builtin;
    this module targets Python 2.)"""


class literal_unicode(unicode):
    """Marker string type: util.py registers a representer that renders
    these as literal ('|') YAML block scalars."""
diff --git a/prepare.sh b/prepare.sh
new file mode 100755
index 0000000..75e1108
--- /dev/null
+++ b/prepare.sh
@@ -0,0 +1,24 @@
#!/bin/bash
########################################################################
# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Run this script to set up the Auto virtualenv and install the Auto
# modules into it.
# Usage:
#     bash prepare.sh
########################################################################

# Abort on the first failing command so a half-built virtualenv is not
# silently used by the following steps.
set -e

pip install virtualenv
virtualenv venv
source ./venv/bin/activate
pip install setuptools
AUTO_DIR=$(pwd)
# Export AUTO_DIR from the venv activate script; guard the append so
# re-running prepare.sh does not add duplicate export lines.
if ! grep -q '^export AUTO_DIR=' venv/bin/activate; then
cat << EOF >> venv/bin/activate
export AUTO_DIR=$AUTO_DIR
EOF
fi
python setup.py install
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..c213b80
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,561 @@
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=print-statement,
+ parameter-unpacking,
+ unpacking-in-except,
+ old-raise-syntax,
+ backtick,
+ long-suffix,
+ old-ne-operator,
+ old-octal-literal,
+ import-star-module-level,
+ non-ascii-bytes-literal,
+ invalid-unicode-literal,
+ raw-checker-failed,
+ bad-inline-option,
+ locally-disabled,
+ locally-enabled,
+ file-ignored,
+ suppressed-message,
+ useless-suppression,
+ deprecated-pragma,
+ apply-builtin,
+ basestring-builtin,
+ buffer-builtin,
+ cmp-builtin,
+ coerce-builtin,
+ execfile-builtin,
+ file-builtin,
+ long-builtin,
+ raw_input-builtin,
+ reduce-builtin,
+ standarderror-builtin,
+ unicode-builtin,
+ xrange-builtin,
+ coerce-method,
+ delslice-method,
+ getslice-method,
+ setslice-method,
+ no-absolute-import,
+ old-division,
+ dict-iter-method,
+ dict-view-method,
+ next-method-called,
+ metaclass-assignment,
+ indexing-exception,
+ raising-string,
+ reload-builtin,
+ oct-method,
+ hex-method,
+ nonzero-method,
+ cmp-method,
+ input-builtin,
+ round-builtin,
+ intern-builtin,
+ unichr-builtin,
+ map-builtin-not-iterating,
+ zip-builtin-not-iterating,
+ range-builtin-not-iterating,
+ filter-builtin-not-iterating,
+ using-cmp-argument,
+ eq-without-hash,
+ div-method,
+ idiv-method,
+ rdiv-method,
+ exception-message-attribute,
+ invalid-str-codec,
+ sys-max-int,
+ bad-python3-import,
+ deprecated-string-function,
+ deprecated-str-translate-call,
+ deprecated-itertools-function,
+ deprecated-types-field,
+ next-method-defined,
+ dict-items-not-iterating,
+ dict-keys-not-iterating,
+ dict-values-not-iterating,
+ deprecated-operator-function,
+ deprecated-urllib-function,
+ xreadlines-attribute,
+ deprecated-sys-function,
+ exception-escape,
+ comprehension-escape
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables 'error', 'warning', 'refactor',
+# 'convention' and 'statement', which respectively contain the number of
+# messages in each category and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio).You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=optparse.Values,sys.exit
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Maximum number of characters on a single line.
+max-line-length=160
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,
+ dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[BASIC]
+
+# Naming style matching correct argument names
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style
+#argument-rgx=
+
+# Naming style matching correct attribute names
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata
+
+# Naming style matching correct class attribute names
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style
+#class-attribute-rgx=
+
+# Naming style matching correct class names
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-style
+#class-rgx=
+
+# Naming style matching correct constant names
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,
+ j,
+ k,
+ e,
+ ex,
+ Run,
+ _
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming style matching correct inline iteration names
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style
+#inlinevar-rgx=
+
+# Naming style matching correct method names
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style
+#method-rgx=
+
+# Naming style matching correct module names
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style
+#module-rgx=
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style
+#variable-rgx=
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,
+ XXX,
+ TODO
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+ _cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+ __new__,
+ setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+ _fields,
+ _replace,
+ _source,
+ _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=optparse,tkinter.tix
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..1035c76
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,10 @@
+GitPython
+pycrypto
+keystoneauth1>=3.1.0
+python-keystoneclient>=3.8.0
+python-glanceclient>=2.8.0
+python-neutronclient>=6.3.0
+python-novaclient>=9.0.0
+python-heatclient>=1.6.1
+pylint==1.9.2
+yamllint==1.11.1
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..59a3c91
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+import os
+from setuptools import setup, find_packages
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
+requirement_path = os.path.join(
+    os.path.dirname(__file__), 'requirements.txt')  # requirements.txt lives next to this setup.py
+with open(requirement_path, 'r') as fd:
+    requirements = [line.strip() for line in fd if line != '\n']  # one requirement per non-blank line; version pins kept as-is
+
+setup(
+    name="auto",
+    version='1.0.0',
+    package_dir={'': 'lib'},  # packages are rooted under lib/, not the repo root
+    packages=find_packages('lib'),  # auto-discover all packages below lib/
+    include_package_data=True,
+    install_requires=requirements  # install deps parsed from requirements.txt above
+)
diff --git a/setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py b/setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py
new file mode 100644
index 0000000..e4b94f5
--- /dev/null
+++ b/setup/VIMs/OpenStack/auto_script_config_openstack_for_onap.py
@@ -0,0 +1,923 @@
+#!/usr/bin/env python3
+
+# ===============LICENSE_START=======================================================
+# Apache-2.0
+# ===================================================================================
+# Copyright (C) 2018 Wipro. All rights reserved.
+# ===================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============LICENSE_END=========================================================
+
+
+# OPNFV Auto project
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+#docstring
+"""This script configures an OpenStack instance to make it ready to interface with an ONAP instance, for example to host VM-based VNFs deployed by ONAP. It can also remove the created objects, when used in a clean-up procedure.
+Use -h option to see usage (-del to delete objects, -deb to print debug information).
+Requirements: python 3, OpenStack SDK (0.14 or greater), clouds.yaml file, .img files are downloaded
+Auto project: https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+"""
+
+######################################################################
+# This script configures an OpenStack instance (e.g. from an OPNFV installer like FUEL/MCP, Compass4nfv, ...)
+# to make it ready to interface with an ONAP instance, for example to host VM-based VNFs deployed by ONAP.
+# After running this script, the created OpenStack object names/IDs can be used for example to populate
+# YAML&ENV files used by ONAP (installation of ONAP itself, VNF descriptor files, etc.).
+
+
+######################################################################
+# Overview of the steps:
+#
+# 1) create an ONAP project/tenant (a tenant is a project; project is a more generic term than tenant)
+# (optional, probably not needed: create a new group, which can be associated to a project, and contains users)
+# 2) create an ONAP user within the ONAP project, so as not to use the "admin" user for ONAP
+# (associate user to group if applicable; credentials: name/pwd or name/APIkey, or token)
+# 3) create an ONAP security group, to allow ICMP traffic (for pings) and TCP port 22 (for SSH),
+# rather than changing default security group(s)
+# (optional, probably not needed: create a new region; default region RegionOne is OK)
+# 4) create a public network for ONAP VNFs, with subnet and CIDR block
+# (so components have access to the Internet, via router and gateway, on unnamed ports, dynamic IP@ allocation)
+# 5) create a private and an OAM network for ONAP VNFs or other ONAP components,
+# with their respective subnet and CIDR block
+# (ONAP VNFs will be deployed in this private and/or OAM network(s), usually with named ports
+# and static IP@ as per VNF configuration file)
+# 6) create an OpenStack router, with interfaces to the public, private and OAM networks,
+# and a reference to an external network (gateway) provided by the OpenStack instance installation
+# 7) create VM flavors as needed: m1.medium, etc.
+# 8) download VM images, as needed for ONAP-deployed VNFs: e.g. Ubuntu 14.04, 16.04, ...
+
+
+######################################################################
+# Assumptions:
+# - python3 is installed
+# - OpenStack SDK is installed for python3
+# - there is a clouds.yaml file (describing the OpenStack instance, especially the Auth URL and admin credentials)
+# - .img files (Ubuntu Trusty Tahr, Xenial Xerus, Cirros, ...) are downloaded, and stored in IMAGES_DIR
+# - the script connects to OpenStack as a user with admin rights
+
+# typical commands to install OpenStack SDK Python client:
+# apt install python3-pip
+# pip3 install --upgrade pip
+# hash -r
+# pip3 list
+# pip3 install openstacksdk
+# pip3 install --upgrade openstacksdk
+# pip3 show openstacksdk
+# pip3 check
+
+
+######################################################################
+# useful URLs
+# Identity API: https://docs.openstack.org/openstacksdk/latest/user/proxies/identity_v3.html
+# (User, Project, Group, region, Role, ...)
+# Network API: https://docs.openstack.org/openstacksdk/latest/user/proxies/network.html
+# (Network, Subnet, Port, Router, Floating IP, AZ, Flavor, ...)
+
+
+######################################################################
+# script parameters
+ONAP_USER_NAME = 'ONAP_user'
+ONAP_USER_PASSWORD = 'auto_topsecret'
+ONAP_USER_DESC = 'OpenStack User created for ONAP'
+
+ONAP_TENANT_NAME = 'ONAP_tenant'
+# note: "project" is a more generic concept than "tenant"; a tenant is a type of project; quotas are per project;
+ONAP_TENANT_DESC = 'OpenStack Project/Tenant created for ONAP'
+
+ONAP_SECU_GRP_NAME = 'ONAP_security_group'
+ONAP_SECU_GRP_DESC = 'Security Group created for ONAP'
+
+ONAP_PUBLIC_NET_NAME = 'ONAP_public_net'
+ONAP_PUBLIC_SUBNET_NAME = 'ONAP_public_subnet'
+ONAP_PUBLIC_SUBNET_CIDR = '192.168.99.0/24'
+# note: some arbitrary CIDR, but typically in a private (IANA-reserved) address range
+ONAP_PUBLIC_NET_DESC = 'Public network created for ONAP, for unnamed ports, dynamic IP@, access to the Internet (e.g., Nexus repo) via Gateway'
+
+ONAP_PRIVATE_NET_NAME = 'ONAP_private_net'
+ONAP_PRIVATE_SUBNET_NAME = 'ONAP_private_subnet'
+ONAP_PRIVATE_SUBNET_CIDR = '10.0.0.0/16'
+# note: CIDR should match ONAP installation; Private and OAM may be the same network
+ONAP_PRIVATE_NET_DESC = 'Private network created for ONAP, for named ports, static IP@, inter-component communication'
+
+ONAP_OAM_NET_NAME = 'ONAP_OAM_net'
+ONAP_OAM_SUBNET_NAME = 'ONAP_OAM_subnet'
+ONAP_OAM_SUBNET_CIDR = '10.99.0.0/16'
+# note: CIDR should match ONAP installation; Private and OAM may be the same network
+ONAP_OAM_NET_DESC = 'OAM network created for ONAP, for named ports, static IP@, inter-component communication'
+
+ONAP_ROUTER_NAME = 'ONAP_router'
+ONAP_ROUTER_DESC = 'Router created for ONAP'
+
+# OpenStack instance external network (gateway) name to be used as router's gateway
+EXTERNAL_NETWORK_NAME = 'floating_net'
+
+# keypair that can be used to SSH into created servers (VNF VMs)
+ONAP_KEYPAIR_NAME = 'ONAP_keypair'
+
+# OpenStack cloud name and region name, which should be the same as in the clouds.yaml file used by this script
+OPENSTACK_CLOUD_NAME = 'unh-hpe-openstack-fraser'
+OPENSTACK_REGION_NAME = 'RegionOne'
+# note: OpenStack domain is: Default
+
+
+######################################################################
+# constants which could be parameters
+DNS_SERVER_IP = '8.8.8.8'
+# IP addresses of free public DNS service from Google:
+# - IPv4: 8.8.8.8 and 8.8.4.4
+# - IPv6: 2001:4860:4860::8888 and 2001:4860:4860::8844
+
+######################################################################
+# global variables
+DEBUG_VAR = False
+
+######################################################################
+# import statements
+import openstack
+import argparse
+import sys, traceback
+
+######################################################################
+def print_debug(*args):  # print each argument on its own line, but only when the module-level DEBUG_VAR flag is True
+    if DEBUG_VAR:  # global toggle declared above (default False); presumably set by the -deb CLI option — confirm in arg parsing below
+        for arg in args:
+            print ('***',arg)  # '***' prefix marks debug output, distinguishing it from regular progress messages
+
+######################################################################
+def delete_all_ONAP():
+    """Delete all ONAP-specific OpenStack objects (normally not needed, but may be useful during tests, and for clean-up)."""
+    print('\nOPNFV Auto, script to delete ONAP objects in an OpenStack instance')
+
+    try:
+        # connect to OpenStack instance using Connection object from OpenStack SDK
+        print('Opening connection...')
+        conn = openstack.connect(
+            identity_api_version = 3, # must indicate Identity version (until fixed); can also be in clouds.yaml
+            cloud = OPENSTACK_CLOUD_NAME,
+            region_name = OPENSTACK_REGION_NAME)
+
+
+        # delete router; must delete router before networks (and must delete VMs before routers)
+        print('Deleting ONAP router...')
+        onap_router = conn.network.find_router(ONAP_ROUTER_NAME)  # find_* proxies return None when the object does not exist
+        print_debug('onap_router:',onap_router)
+        if onap_router != None:
+
+            # delete router interfaces before deleting router
+            router_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+            if router_network != None:
+                if router_network.subnet_ids != None:
+                    print_debug('router_network.subnet_ids:',router_network.subnet_ids)
+                    for subnet_id in router_network.subnet_ids:  # detach router from every subnet of the public network
+                        print(' Deleting interface to',ONAP_PUBLIC_NET_NAME,'...')
+                        conn.network.remove_interface_from_router(onap_router, subnet_id)
+
+            router_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)  # same detach pass for the private network
+            if router_network != None:
+                if router_network.subnet_ids != None:
+                    print_debug('router_network.subnet_ids:',router_network.subnet_ids)
+                    for subnet_id in router_network.subnet_ids:
+                        print(' Deleting interface to',ONAP_PRIVATE_NET_NAME,'...')
+                        conn.network.remove_interface_from_router(onap_router, subnet_id)
+
+            router_network = conn.network.find_network(ONAP_OAM_NET_NAME)  # same detach pass for the OAM network
+            if router_network != None:
+                if router_network.subnet_ids != None:
+                    print_debug('router_network.subnet_ids:',router_network.subnet_ids)
+                    for subnet_id in router_network.subnet_ids:
+                        print(' Deleting interface to',ONAP_OAM_NET_NAME,'...')
+                        conn.network.remove_interface_from_router(onap_router, subnet_id)
+
+            # and finally delete ONAP router
+            conn.network.delete_router(onap_router.id)  # safe now: all known ONAP-network interfaces were removed above
+
+        else:
+            print('No ONAP router found...')
+
+        # TODO@@@ verify if there are ports on networks (e.g., from VMs); if yes, can't delete network
+
+        # delete private network (which should also delete associated subnet if any)
+        print('Deleting ONAP private network...')
+        private_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+        print_debug('private_network:',private_network)
+        if private_network != None:
+            conn.network.delete_network(private_network.id)
+        else:
+            print('No ONAP private network found...')
+
+        # delete OAM network (which should also delete associated subnet if any)
+        print('Deleting ONAP OAM network...')
+        oam_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+        print_debug('oam_network:',oam_network)
+        if oam_network != None:
+            conn.network.delete_network(oam_network.id)
+        else:
+            print('No ONAP OAM network found...')
+
+        # delete public network (which should also delete associated subnet if any)
+        print('Deleting ONAP public network...')
+        public_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+        print_debug('public_network:',public_network)
+        if public_network != None:
+            conn.network.delete_network(public_network.id)
+        else:
+            print('No ONAP public network found...')
+
+        # TODO@@@ verify if security group is in use (e.g., by a VM), otherwise can't delete it
+
+        # delete security group
+        print('Deleting ONAP security group...')
+        onap_security_group = conn.network.find_security_group(ONAP_SECU_GRP_NAME)
+        print_debug('onap_security_group:',onap_security_group)
+        if onap_security_group != None:
+            conn.network.delete_security_group(onap_security_group.id)
+        else:
+            print('No ONAP security group found...')
+
+        # delete user
+        print('Deleting ONAP user...')
+        onap_user = conn.identity.find_user(ONAP_USER_NAME)  # identity proxy (Keystone v3), unlike the network calls above
+        print_debug('onap_user:',onap_user)
+        if onap_user != None:
+            conn.identity.delete_user(onap_user.id)
+        else:
+            print('No ONAP user found...')
+
+        # delete project/tenant
+        print('Deleting ONAP project...')
+        onap_project = conn.identity.find_project(ONAP_TENANT_NAME)  # user deleted first, since it referenced this project
+        print_debug('onap_project:',onap_project)
+        if onap_project != None:
+            conn.identity.delete_project(onap_project.id)
+        else:
+            print('No ONAP project found...')
+
+        # delete keypair
+        print('Deleting ONAP keypair...')
+        onap_keypair = conn.compute.find_keypair(ONAP_KEYPAIR_NAME)  # compute proxy (Nova)
+        print_debug('onap_keypair:',onap_keypair)
+        if onap_keypair != None:
+            conn.compute.delete_keypair(onap_keypair.id)
+        else:
+            print('No ONAP keypair found...')
+
+        # no need to delete images and flavors
+
+
+    except Exception as e:  # catch-all: report the failure with a full traceback instead of crashing mid-cleanup
+        print('*** Exception:',type(e), e)
+        exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
+        print('*** traceback.print_tb():')
+        traceback.print_tb(exceptionTraceback)
+        print('*** traceback.print_exception():')
+        traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback)
+        print('[Script terminated]\n')
+
+    print('OPNFV Auto, end of deletion script\n')
+
+
+######################################################################
+def configure_all_ONAP():
+ """Configure all ONAP-specific OpenStack objects."""
+ print('\nOPNFV Auto, script to configure an OpenStack instance for ONAP')
+
+ try:
+ # connect to OpenStack instance using Connection object from OpenStack SDK
+ print('Opening connection...')
+ conn = openstack.connect(
+ identity_api_version = 3, # must indicate Identity version (until fixed); can also be in clouds.yaml
+ cloud = OPENSTACK_CLOUD_NAME,
+ region_name = OPENSTACK_REGION_NAME)
+
+
+ print('Creating ONAP project/tenant...')
+ onap_project = conn.identity.find_project(ONAP_TENANT_NAME)
+ if onap_project != None:
+ print('ONAP project/tenant already exists')
+ else:
+ onap_project = conn.identity.create_project(
+ name = ONAP_TENANT_NAME,
+ description = ONAP_TENANT_DESC,
+ is_enabled = True)
+ # domain: leave default
+ # project quotas (max #vCPUs, #instances, etc.): as conn.network.<*quota*>, using project id for quota id
+ # https://docs.openstack.org/openstacksdk/latest/user/proxies/network.html#quota-operations
+ # https://docs.openstack.org/openstacksdk/latest/user/resources/network/v2/quota.html#openstack.network.v2.quota.Quota
+ # conn.network.update_quota(project_id = onap_project.id)
+ # SDK for quotas supports floating_ips, networks, ports, etc. but not vCPUs or instances
+ print_debug('onap_project:',onap_project)
+
+
+ print('Creating ONAP user...')
+ onap_user = conn.identity.find_user(ONAP_USER_NAME)
+ if onap_user != None:
+ print('ONAP user already exists')
+ else:
+ onap_user = conn.identity.create_user(
+ name = ONAP_USER_NAME,
+ description = ONAP_USER_DESC,
+ default_project_id = onap_project.id,
+ password = ONAP_USER_PASSWORD,
+ is_enabled = True)
+ # domain: leave default
+ # default_project_id: primary project
+ print_debug('onap_user:',onap_user)
+
+ # TODO@@@ assign Member role to ONAP user in ONAP project
+ # membership_role = conn.identity.find_role('Member')
+ # onap_project.assign_role_to_user(conn, onap_user, membership_role) # no project membership method yet in connection proxy
+
+ # TODO@@@ maybe logout and log back in as ONAP user
+
+ # make sure security group allows ICMP (for ping) and SSH (TCP port 22) traffic; also IPv4/v6 traffic ingress and egress
+ # create new onap_security_group (or maybe just "default" security group ? tests returned multiple "default" security groups)
+ # security group examples: check http://git.openstack.org/cgit/openstack/openstacksdk/tree/examples/network/security_group_rules.py
+ # if rule already exists, OpenStack returns an error, so just try (no harm); try each separately
+ # (SecurityGroup is a Resource)
+ print('Creating ONAP security group...')
+ onap_security_group = conn.network.find_security_group(ONAP_SECU_GRP_NAME)
+ if onap_security_group != None:
+ print('ONAP security group already exists')
+ else:
+ onap_security_group = conn.network.create_security_group(
+ #project_id = onap_project.id,
+ description = ONAP_SECU_GRP_DESC,
+ name = ONAP_SECU_GRP_NAME)
+ print_debug('onap_security_group:',onap_security_group)
+
+ try:
+ description_text = 'enable ICMP ingress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'ICMP',
+ direction = 'ingress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable ICMP egress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'ICMP',
+ direction = 'egress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable SSH (TCP port 22) ingress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'TCP',
+ direction = 'ingress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = '22',
+ port_range_max = '22')
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable SSH (TCP port 22) egress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = 'TCP',
+ direction = 'egress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = '22',
+ port_range_max = '22')
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable IP traffic ingress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'ingress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ try:
+ description_text = 'enable IP traffic ingress IPv6'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'ingress',
+ ethertype = 'IPv6',
+ remote_ip_prefix = '::/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ # IPv4 IP egress rule should already exist by default
+ try:
+ description_text = 'enable IP traffic egress IPv4'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'egress',
+ ethertype = 'IPv4',
+ remote_ip_prefix = '0.0.0.0/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+ # IPv6 IP egress rule should already exist by default
+ try:
+ description_text = 'enable IP traffic egress IPv6'
+ print(' Creating rule:',description_text,'...')
+ conn.network.create_security_group_rule(
+ security_group_id = onap_security_group.id,
+ description = description_text,
+ protocol = None,
+ direction = 'egress',
+ ethertype = 'IPv6',
+ remote_ip_prefix = '::/0',
+ port_range_min = None,
+ port_range_max = None)
+ except Exception as e:
+ print(' rule:', description_text, ' may already exist')
+ print_debug(description_text, ' Exception:', type(e), e)
+
+
+ # public network
+ print('Creating ONAP public network...')
+ public_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+ public_subnet = None
+ if public_network != None:
+ print('ONAP public network already exists')
+ else:
+ public_network = conn.network.create_network(
+ name = ONAP_PUBLIC_NET_NAME,
+ description = ONAP_PUBLIC_NET_DESC,
+ #project_id = onap_project.id,
+ is_admin_state_up = True,
+ is_shared = True)
+ # subnet_ids = []: not needed, subnet refers to network_id
+ print_debug('public_network: before subnet',public_network)
+
+ print(' Creating subnetwork for ONAP public network...')
+ public_subnet = conn.network.create_subnet(
+ name = ONAP_PUBLIC_SUBNET_NAME,
+ #project_id = onap_project.id,
+ network_id = public_network.id,
+ cidr = ONAP_PUBLIC_SUBNET_CIDR,
+ ip_version = 4,
+ is_dhcp_enabled = True,
+ dns_nameservers = [DNS_SERVER_IP]) # list of DNS IP@
+ print_debug('public_subnet:',public_subnet)
+ print_debug('public_network: after subnet',public_network)
+
+
+ # private network
+ print('Creating ONAP private network...')
+ private_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+ private_subnet = None
+ if private_network != None:
+ print('ONAP private network already exists')
+ else:
+ private_network = conn.network.create_network(
+ name = ONAP_PRIVATE_NET_NAME,
+ description = ONAP_PRIVATE_NET_DESC,
+ #project_id = onap_project.id,
+ is_admin_state_up = True,
+ is_shared = True)
+ print_debug('private_network: before subnet',private_network)
+
+ print(' Creating subnetwork for ONAP private network...')
+ private_subnet = conn.network.create_subnet(
+ name = ONAP_PRIVATE_SUBNET_NAME,
+ #project_id = onap_project.id,
+ network_id = private_network.id,
+ cidr = ONAP_PRIVATE_SUBNET_CIDR,
+ ip_version = 4,
+ is_dhcp_enabled = True,
+ dns_nameservers = [DNS_SERVER_IP]) # list of DNS IP@; maybe not needed for private network
+ print_debug('private_subnet:',private_subnet)
+ print_debug('private_network: after subnet',private_network)
+
+
+ # OAM network
+ print('Creating ONAP OAM network...')
+ oam_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+ oam_subnet = None
+ if oam_network != None:
+ print('ONAP OAM network already exists')
+ else:
+ oam_network = conn.network.create_network(
+ name = ONAP_OAM_NET_NAME,
+ description = ONAP_OAM_NET_DESC,
+ #project_id = onap_project.id,
+ is_admin_state_up = True,
+ is_shared = True)
+ print_debug('oam_network: before subnet',oam_network)
+
+ print(' Creating subnetwork for ONAP OAM network...')
+ oam_subnet = conn.network.create_subnet(
+ name = ONAP_OAM_SUBNET_NAME,
+ #project_id = onap_project.id,
+ network_id = oam_network.id,
+ cidr = ONAP_OAM_SUBNET_CIDR,
+ ip_version = 4,
+ is_dhcp_enabled = True,
+ dns_nameservers = [DNS_SERVER_IP]) # list of DNS IP@; maybe not needed for OAM network
+ print_debug('oam_subnet:',oam_subnet)
+ print_debug('oam_network: after subnet',oam_network)
+
+
+ # router
+ print('Creating ONAP router...')
+ onap_router = conn.network.find_router(ONAP_ROUTER_NAME)
+ if onap_router != None:
+ print('ONAP router already exists')
+ else:
+
+ # build dictionary for external network (gateway)
+ external_network = conn.network.find_network(EXTERNAL_NETWORK_NAME)
+ print_debug('external_network:',external_network)
+ external_subnet_ID_list = external_network.subnet_ids
+ print_debug('external_subnet_ID_list:',external_subnet_ID_list)
+ # build external_fixed_ips: list of dictionaries, each with 'subnet_id' key (and may have 'ip_address' key as well)
+ onap_gateway_external_subnets = []
+ for ext_subn_id in external_subnet_ID_list: # there should be only one subnet ID in the list, but go through each item, just in case
+ onap_gateway_external_subnets.append({'subnet_id':ext_subn_id})
+ print_debug('onap_gateway_external_subnets:',onap_gateway_external_subnets)
+ network_dict_body = {
+ 'network_id': external_network.id,
+ 'enable_snat': True, # True should be the default, so there should be no need to set it
+ 'external_fixed_ips': onap_gateway_external_subnets
+ }
+ print_debug('network_dict_body:',network_dict_body)
+
+ onap_router = conn.network.create_router(
+ name = ONAP_ROUTER_NAME,
+ description = ONAP_ROUTER_DESC,
+ #project_id = onap_project.id,
+ external_gateway_info = network_dict_body, # linking GW to router creation time (normally, could also use add_gateway_to_router)
+ is_admin_state_up = True)
+ print_debug('onap_router: after creation',onap_router)
+
+ # add interfaces to ONAP networks: Public, Private, and OAM
+ # syntax: add_interface_to_router(router, subnet_id=None, port_id=None)
+ print('Adding interface to ONAP router for ONAP public network...')
+ conn.network.add_interface_to_router(onap_router, subnet_id = public_subnet.id)
+ print('Adding interface to ONAP router for ONAP private network...')
+ conn.network.add_interface_to_router(onap_router, subnet_id = private_subnet.id)
+ print('Adding interface to ONAP router for ONAP OAM network...')
+ conn.network.add_interface_to_router(onap_router, subnet_id = oam_subnet.id)
+ print_debug('onap_router: after adding interfaces',onap_router)
+
+
+ # also create 5 flavors, from tiny to xlarge (hard-coded, no need for parameters)
+ # (Flavor is a Resource)
+ print('Creating flavors...')
+ print('Creating m1.tiny Flavor...')
+ tiny_flavor = conn.compute.find_flavor("m1.tiny")
+ if tiny_flavor != None:
+ print('m1.tiny Flavor already exists')
+ else:
+ tiny_flavor = conn.compute.create_flavor(
+ name = 'm1.tiny',
+ vcpus = 1,
+ disk = 1,
+ ram = 512,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('tiny_flavor: ',tiny_flavor)
+
+ print('Creating m1.small Flavor...')
+ small_flavor = conn.compute.find_flavor("m1.small")
+ if small_flavor != None:
+ print('m1.small Flavor already exists')
+ else:
+ small_flavor = conn.compute.create_flavor(
+ name = 'm1.small',
+ vcpus = 1,
+ disk = 20,
+ ram = 2048,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('small_flavor: ',small_flavor)
+
+ print('Creating m1.medium Flavor...')
+ medium_flavor = conn.compute.find_flavor("m1.medium")
+ if medium_flavor != None:
+ print('m1.medium Flavor already exists')
+ else:
+ medium_flavor = conn.compute.create_flavor(
+ name = 'm1.medium',
+ vcpus = 2,
+ disk = 40,
+ ram = 4096,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('medium_flavor: ',medium_flavor)
+
+ print('Creating m1.large Flavor...')
+ large_flavor = conn.compute.find_flavor("m1.large")
+ if large_flavor != None:
+ print('m1.large Flavor already exists')
+ else:
+ large_flavor = conn.compute.create_flavor(
+ name = 'm1.large',
+ vcpus = 4,
+ disk = 80,
+ ram = 8192,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('large_flavor: ',large_flavor)
+
+ print('Creating m1.xlarge Flavor...')
+ xlarge_flavor = conn.compute.find_flavor("m1.xlarge")
+ if xlarge_flavor != None:
+ print('m1.xlarge Flavor already exists')
+ else:
+ xlarge_flavor = conn.compute.create_flavor(
+ name = 'm1.xlarge',
+ vcpus = 8,
+ disk = 160,
+ ram = 16384,
+ ephemeral = 0,
+ #swap = 0,
+ #rxtx_factor = 1.0,
+ is_public = True)
+ print_debug('xlarge_flavor: ',xlarge_flavor)
+
+
+ # create images: Ubuntu 16.04, 14.04, CirrOS, ...
+ # store them in images/ directory
+ # 64-bit QCOW2 image for cirros-0.4.0-x86_64-disk.img
+ # description: CirrOS minimal Linux distribution
+ # http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+ # user/password: cirros/gocubsgo
+
+ # 64-bit QCOW2 image for Ubuntu 16.04 is xenial-server-cloudimg-amd64-disk1.img
+ # description: Ubuntu Server 16.04 LTS (Xenial Xerus)
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ # user: ubuntu
+
+ # 64-bit QCOW2 image for Ubuntu 14.04 is trusty-server-cloudimg-amd64-disk1.img
+ # description: Ubuntu Server 14.04 LTS (Trusty Tahr)
+ # http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ # user: ubuntu
+
+ # do not use compute proxy for images; there is an image proxy (v1, and v2);
+ # use shade layer, directly with Connection object: Connection.create_image()
+ # conn.get_image() returns a Python Munch object (subclass of Dictionary)
+ # However, URL download not supported yet; download image separately, place it in the directory
+ # https://docs.openstack.org/openstacksdk/latest/user/connection.html#openstack.connection.Connection.create_image
+ # image proxy: conn.image.upload_image()
+ # Image class:
+ # https://docs.openstack.org/openstacksdk/latest/user/resources/image/v2/image.html#openstack.image.v2.image.Image
+ # URL should be supported by image proxy
+
+ # TODO@@@ try image v2 proxy, if it supports URLs;
+ # maybe load only images for current CPU (i.e. only x86 images for x86, only Arm images for Arm)
+ # TODO@@@ list image names/URLs in dictionary, and load then in a loop
+
+ # Pattern: prepare an attribute dictionary, then call conn.image.upload_image()
+ # image_attributes_dict = {}
+ # image_attributes_dict['name']='cirros-0.4.0-aarch64-disk.img'
+ # image_attributes_dict['url']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img'
+ # conn.image.upload_image(disk_format='qcow2',**image_attributes_dict)
+
+ # With a dictionary of names/URLs :
+ # image_ref_dict = {}
+ # image_ref_dict['cirros-0.4.0-x86_64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img'
+ # image_ref_dict['cirros-0.4.0-arm-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img'
+ # image_ref_dict['cirros-0.4.0-aarch64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img'
+ # etc.
+ # for image_name in image_ref_dict:
+ # image_attributes_dict['name'] = image_name
+ # image_attributes_dict['url'] = image_ref_dict[image_name]
+ # conn.image.upload_image(disk_format='qcow2',**image_attributes_dict)
+
+
+ # Create and populate image dictionary
+ image_ref_dict = {}
+
+ # Ubuntu 16.04 LTS (Xenial Xerus) images
+ image_ref_dict['xenial-server-cloudimg-amd64-disk1.img']='https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img'
+ image_ref_dict['xenial-server-cloudimg-arm64-disk1.img']='https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img'
+
+ # Ubuntu 14.04.5 LTS (Trusty Tahr) images
+ image_ref_dict['trusty-server-cloudimg-amd64-disk1.img']='http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img'
+ image_ref_dict['trusty-server-cloudimg-arm64-disk1.img']='http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img'
+
+ # CirrOS images
+ image_ref_dict['cirros-0.4.0-x86_64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img'
+ image_ref_dict['cirros-0.4.0-arm-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img'
+ image_ref_dict['cirros-0.4.0-aarch64-disk.img']='http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img'
+
+
+ # if URL-based upload using image proxy works, it will replace the section below which assumes image files
+ # are in a subdirectory, and uses Connection.create_image() instead of Connection.image.upload_image()
+ IMAGES_DIR = 'images/'
+
+ IMAGE_NAME = 'CirrOS_0.4.0_minimal_Linux_distribution x86'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'cirros-0.4.0-x86_64-disk.img')
+
+ IMAGE_NAME = 'CirrOS_0.4.0_minimal_Linux_distribution ARM'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'cirros-0.4.0-arm-disk.img')
+
+ IMAGE_NAME = 'CirrOS_0.4.0_minimal_Linux_distribution AARCH64'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'cirros-0.4.0-aarch64-disk.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_16.04_LTS_Xenial_Xerus x86'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'xenial-server-cloudimg-amd64-disk1.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_16.04_LTS_Xenial_Xerus ARM64'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'xenial-server-cloudimg-arm64-disk1.img')
+
+ IMAGE_NAME = 'Ubuntu_Server_14.04_LTS_Trusty_Tahr x86'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'trusty-server-cloudimg-amd64-disk1.img')
+ # End section with Connection.create_image()
+
+ IMAGE_NAME = 'Ubuntu_Server_14.04_LTS_Trusty_Tahr ARM64'
+ print('Creating image:',IMAGE_NAME,'...')
+ if conn.get_image(IMAGE_NAME) != None:
+ print(IMAGE_NAME,'image already exists')
+ else:
+ conn.create_image(IMAGE_NAME, filename=IMAGES_DIR+'trusty-server-cloudimg-arm64-disk1.img')
+ # End section with Connection.create_image()
+
+
+ # create a keypair, if needed e.g. for VNF VMs; maybe to SSH for testing
+ # (Keypair is a Resource)
+ print('Creating ONAP keypair...')
+ onap_keypair = conn.compute.find_keypair(ONAP_KEYPAIR_NAME)
+ if onap_keypair != None:
+ print('ONAP keypair already exists')
+ else:
+ onap_keypair = conn.compute.create_keypair(name=ONAP_KEYPAIR_NAME)
+ print(' ONAP keypair fingerprint:')
+ print(onap_keypair.fingerprint)
+ print(' ONAP keypair public key:')
+ print(onap_keypair.public_key)
+ print(' \nONAP keypair private key: (save it in a file now: it cannot be retrieved later)')
+ print(onap_keypair.private_key)
+ print_debug('onap_keypair:',onap_keypair)
+
+
+ print('\nSUMMARY:')
+ # Grab live objects (don't reuse earlier references), in case the script is used on an already configured instance
+ # This way, the summary is still displayed even if the script execution did not create anything
+ # Also, this double-checks that displayed information is accurate, freshly retrieved from the OpenStack instance
+
+ public_network = conn.network.find_network(ONAP_PUBLIC_NET_NAME)
+ if public_network != None:
+ print('ONAP public network ID:',public_network.id)
+ for fetched_subnet_ID in public_network.subnet_ids:
+ fetched_subnet = conn.network.get_subnet(fetched_subnet_ID)
+ if fetched_subnet != None:
+ print(' ONAP public network subnet ID:',fetched_subnet.id)
+ print(' ONAP public network subnet CIDR:',fetched_subnet.cidr)
+ else:
+ print('no ONAP public network')
+
+ private_network = conn.network.find_network(ONAP_PRIVATE_NET_NAME)
+ if private_network != None:
+ print('ONAP private network ID:',private_network.id)
+ for fetched_subnet_ID in private_network.subnet_ids:
+ fetched_subnet = conn.network.get_subnet(fetched_subnet_ID)
+ if fetched_subnet != None:
+ print(' ONAP private network subnet ID:',fetched_subnet.id)
+ print(' ONAP private network subnet CIDR:',fetched_subnet.cidr)
+ else:
+ print('no ONAP private network')
+
+ oam_network = conn.network.find_network(ONAP_OAM_NET_NAME)
+ if oam_network != None:
+ print('ONAP OAM network ID:',oam_network.id)
+ for fetched_subnet_ID in oam_network.subnet_ids:
+ fetched_subnet = conn.network.get_subnet(fetched_subnet_ID)
+ if fetched_subnet != None:
+ print(' ONAP OAM network subnet ID:',fetched_subnet.id)
+ print(' ONAP OAM network subnet CIDR:',fetched_subnet.cidr)
+ else:
+ print('no ONAP OAM network')
+ print('END SUMMARY\n')
+
+
+ except Exception as e:
+ print('*** Exception:',type(e), e)
+ exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
+ print('*** traceback.print_tb():')
+ traceback.print_tb(exceptionTraceback)
+ print('*** traceback.print_exception():')
+ traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback)
+ print('[Script terminated]\n')
+
+
+ print('OPNFV Auto, end of configuration script\n')
+
+
+
+######################################################################
+def main():
+    """Command-line entry point.
+
+    Parses two optional flags and then either creates or deletes the
+    ONAP-supporting objects in the target OpenStack instance:
+      -deb / --debug  : print extra debug information (sets global DEBUG_VAR)
+      -del / --delete : delete the ONAP configuration instead of creating it
+    """
+    # configure argument parser: 2 optional arguments
+    # "-del" or "--delete" option to delete ONAP configuration in OpenStack
+    # (if no "-del" or "--delete", then configure OpenStack for ONAP)
+    # "-deb" or "--debug" option to display debug information
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-deb', '--debug',
+                        help = 'display debug information during execution',
+                        action = 'store_true')
+    parser.add_argument('-del', '--delete',
+                        help = 'delete ONAP configuration',
+                        action = 'store_true')
+
+    # parse arguments, modify global variable if need be, and use
+    # corresponding script (create objects, or delete objects)
+    args = parser.parse_args()
+    if args.debug:
+        # presumably consulted by print_debug() elsewhere in this script -- confirm
+        global DEBUG_VAR
+        DEBUG_VAR = True
+    if args.delete:
+        delete_all_ONAP()
+    else:
+        configure_all_ONAP()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/setup/VIMs/OpenStack/clouds.yaml b/setup/VIMs/OpenStack/clouds.yaml
new file mode 100644
index 0000000..7bfd717
--- /dev/null
+++ b/setup/VIMs/OpenStack/clouds.yaml
@@ -0,0 +1,99 @@
+clouds:
+
+ # Openstack instance on Arm pod, controller IP@ 172.16.10.10
+ # Horizon: https://10.10.50.103/project/
+ # Identity API according to Horizon dashboard: https://10.10.50.103:5000/v2.0
+ # other potential auth_url: http://172.16.10.10:35357/v3
+ # (OS_AUTH_URL=http://controller:35357/v3)
+ # 2 project names: admin, service (project = tenant)
+ # project ID: 122caf64b3df4818bf2ce5ba793226b2
+ # EC2 URL: https://10.10.50.103:8773/services/Cloud
+ # EC2 access key: bcf3c69a7d1c405e9757f87f26faf19f
+  # 10.10.50.0/8: floating IP range (NOTE: a /8 mask conflicts with the 10.10.50.x prefix -- likely /24; confirm)
+  # 10.10.10.0/8: fixed IP range (likewise probably /24; confirm)
+ armopenstack:
+ auth:
+ auth_url: https://10.10.50.103:5000/v2.0
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ region_name: RegionOne
+
+ # Openstack instance on LaaS hpe16, from OPNFV Euphrates, controller IP@ (mgt: 172.16.10.101; public: 10.16.0.101)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.101:35357/v2.0
+ # internal: http://172.16.10.101:5000/v2.0
+ # public: http://10.16.0.101:5000/v2.0 : works on LaaS hpe16, from hpe16
+ hpe16openstackEuphrates:
+ auth:
+ auth_url: http://10.16.0.101:5000/v2.0
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ region_name: RegionOne
+
+ # Openstack instance on generic LaaS hpe, from OPNFV Fraser, controller IP@ (mgt: 172.16.10.36; public: 10.16.0.107)
+ # keystone endpoints (openstack endpoint list --service keystone)
+ # admin: http://172.16.10.36:35357/v3
+ # internal: http://172.16.10.36:5000/v3
+ # public: http://10.16.0.107:5000/v3
+ # Horizon: https://10.16.0.107:8078, but need SSH port forwarding through 10.10.100.26 to be reached from outside
+ # "If you are using Identity v3 you need to specify the user and the project domain name"
+
+ # generic cloud name, for a UNH IOL hpe server, for OPNFV Fraser, OpenStack installed by Fuel/MCP
+ unh-hpe-openstack-fraser:
+ auth:
+ auth_url: http://10.16.0.107:5000/v3
+ project_name: admin
+ username: admin
+ password: opnfv_secret
+ user_domain_name: Default
+ project_domain_name: Default
+ region_name: RegionOne
+ identity_api_version: 3
+
+# ubuntu@ctl01:~$ openstack project show admin
+# +-------------+----------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------+
+# | description | OpenStack Admin tenant |
+# | domain_id | default |
+# | enabled | True |
+# | id | 04fcfe7aa83f4df79ae39ca748aa8637 |
+# | is_domain | False |
+# | name | admin |
+# | parent_id | default |
+# +-------------+----------------------------------+
+
+# (openstack) domain show default
+# +-------------+----------------------------------------------------------+
+# | Field | Value |
+# +-------------+----------------------------------------------------------+
+# | description | Domain created automatically to support V2.0 operations. |
+# | enabled | True |
+# | id | default |
+# | name | Default |
+# +-------------+----------------------------------------------------------+
+
+# (openstack) domain show heat_user_domain
+# +-------------+---------------------------------------------+
+# | Field | Value |
+# +-------------+---------------------------------------------+
+# | description | Contains users and projects created by heat |
+# | enabled | True |
+# | id | d9c29adac0fe4816922d783b257879d6 |
+# | name | heat_user_domain |
+# +-------------+---------------------------------------------+
+
+
+# export OS_AUTH_URL=http://10.16.0.107:5000/v3
+# export OS_PROJECT_ID=04fcfe7aa83f4df79ae39ca748aa8637
+# export OS_PROJECT_NAME="admin"
+# export OS_USER_DOMAIN_NAME="Default"
+# export OS_USERNAME="admin"
+# export OS_PASSWORD="opnfv_secret"
+# export OS_REGION_NAME="RegionOne"
+# export OS_INTERFACE=public
+# export OS_IDENTITY_API_VERSION=3
+
+
diff --git a/setup/onap_on_openstack/__init__.py b/setup/onap_on_openstack/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/setup/onap_on_openstack/__init__.py
diff --git a/setup/onap_on_openstack/config.yml b/setup/onap_on_openstack/config.yml
new file mode 100644
index 0000000..88c5db1
--- /dev/null
+++ b/setup/onap_on_openstack/config.yml
@@ -0,0 +1,64 @@
+---
+
+onap_stack_name: onap
+
+onap_demo_git:
+ repo: https://gerrit.onap.org/r/demo
+ branch: amsterdam
+ heat_template: heat/ONAP/onap_openstack.yaml
+ heat_env: heat/ONAP/onap_openstack.env
+
+onap_vm_images:
+ ubuntu_1404_image:
+ name: Ubuntu_14.04_trusty
+ url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ ubuntu_1604_image:
+ name: Ubuntu_16.04_xenial
+ url: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ dcae_centos_7_image:
+ name: Centos_7
+ url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1711.qcow2
+
+onap_secgroup_rules:
+ - protocol: tcp
+ direction: ingress
+ port_range_min: 1
+ port_range_max: 65535
+
+ - protocol: icmp
+ direction: ingress
+ port_range_min:
+ port_range_max:
+
+onap_quota:
+ instances: 100
+ cores: 100
+ ram: 204800
+
+onap_keypair:
+ name: onap_key
+ pubkey_path: ~/.ssh/id_rsa.pub
+
+onap_user_config:
+ public_net_name: ext-net
+ flavor_small: m1.small
+ flavor_medium: m1.medium
+ flavor_large: m1.large
+ flavor_xlarge: m1.xlarge
+ flavor_xxlarge: m1.xlarge
+ openstack_tenant_name: admin
+ openstack_username: admin
+ openstack_api_key: 49ef27251b38c5124378010e7be8758eb
+ horizon_url: https://192.168.22.222:80
+ keystone_url: https://192.168.22.222:5000
+ dns_list: ["8.8.8.8"]
+ external_dns: 8.8.8.8
+ dns_forwarder: 192.168.22.222
+ dnsaas_config_enabled: true
+ dnsaas_region: RegionOne
+ dnsaas_keystone_url: https://192.168.22.222:5000
+ dnsaas_tenant_name: service
+ dnsaas_username: designate
+ dnsaas_password: 853ff4c5315221ce5a042954eac38ea6692092a33c
+ dcae_keystone_url: https://192.168.22.222:5000
+ dcae_domain: dcaeg2.onap.org
diff --git a/setup/onap_on_openstack/launch_onap.py b/setup/onap_on_openstack/launch_onap.py
new file mode 100644
index 0000000..948adfc
--- /dev/null
+++ b/setup/onap_on_openstack/launch_onap.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""Launch ONAP on OpenStack"""
+
+import argparse
+from onap_os_builder import ONAP_os_builder
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
+def read_cli_args():
+    """Parse command-line arguments.
+
+    :return: argparse.Namespace with a single attribute ``config``,
+             the path to the YAML configuration file
+             (default: ``./config.yml``)
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--config', '-c',
+                        dest = 'config',
+                        action = 'store',
+                        default = './config.yml',
+                        help = 'config file')
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    # End-to-end ONAP deployment: each step below is delegated to the
+    # ONAP_os_builder helper (see onap_os_builder.py).
+    args = read_cli_args()
+    config = args.config
+    onap_builder = ONAP_os_builder(config)
+    onap_builder.clone_demo_code()             # fetch ONAP demo repo (Heat template/env)
+    onap_builder.create_onap_vm_images()       # download images and register them in Glance
+    onap_builder.create_onap_secgroup_rules()  # open up the project's default security group
+    onap_builder.set_quota()                   # raise compute quotas for the project
+    onap_builder.create_onap_key()             # register the Nova keypair
+    onap_builder.set_onap_stack_params()       # fill in the Heat environment file
+    onap_builder.create_onap_stack()           # launch the ONAP Heat stack
diff --git a/setup/onap_on_openstack/onap_os_builder.py b/setup/onap_on_openstack/onap_os_builder.py
new file mode 100644
index 0000000..b85d301
--- /dev/null
+++ b/setup/onap_on_openstack/onap_os_builder.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+########################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+"""ONAP builder for OpenStack"""
+
+import os
+import sys
+
+import auto.util.openstack_lib as os_lib
+import auto.util.util as util
+from auto.util.yaml_type import literal_unicode
+
+__author__ = "Harry Huang <huangxiangyu5@huawei.com>"
+
+
+class ONAP_os_builder(object):
+ """Prepare the OpenStack environment and launch ONAP stack"""
+ def __init__(self, config_file):
+
+ self.config = util.read_yaml(config_file)
+ self.stack_name = self.config['onap_stack_name']
+ self.demo_git = self.config['onap_demo_git']
+ self.vm_images = self.config['onap_vm_images']
+ self.secgroup_rules = self.config['onap_secgroup_rules']
+ self.quota = self.config['onap_quota']
+ self.keypair = self.config['onap_keypair']
+ self.user_config = self.config['onap_user_config']
+
+ self.creds = os_lib.get_credentials()
+ self.keystone_client = os_lib.get_keystone_client(self.creds)
+ self.glance_client = os_lib.get_glance_client(self.creds)
+ self.neutron_client = os_lib.get_neutron_client(self.creds)
+ self.nova_client = os_lib.get_nova_client(self.creds)
+ self.heat_client = os_lib.get_heat_client(self.creds)
+
+ self.auto_dir = os.getenv('AUTO_DIR')
+ self.work_dir = os.path.join(self.auto_dir, "work")
+ self.demo_repo_dir = os.path.join(self.work_dir, "demo")
+ self.heat_template = os.path.join(self.demo_repo_dir,
+ self.demo_git['heat_template'])
+ self.heat_env = os.path.join(self.demo_repo_dir,
+ self.demo_git['heat_env'])
+ self.image_dir = os.path.join(self.work_dir, "images")
+ self.keypair_dir = os.path.join(self.work_dir, "keypair")
+ util.mkdir(self.work_dir)
+
+
+ def clone_demo_code(self):
+ util.git_clone(self.demo_git['repo'], self.demo_git['branch'],
+ self.demo_repo_dir)
+
+
+ def prepare_images(self):
+ util.mkdir(self.image_dir)
+ for _, image_info in self.vm_images.items():
+ image_path = os.path.join(self.image_dir, image_info['name'])
+ util.download(image_info['url'], image_path)
+
+
+ def create_onap_vm_images(self):
+ self.prepare_images()
+ for _, image_info in self.vm_images.items():
+ image_path = os.path.join(self.image_dir, image_info['name'])
+ os_lib.create_image(self.glance_client,
+ image_info['name'],
+ image_path)
+
+
+ def create_onap_secgroup_rules(self):
+ project_name = os_lib.get_project_name(self.creds)
+ project_id = os_lib.get_project_id(self.keystone_client, project_name)
+ secgroup_id = os_lib.get_security_group_id(self.neutron_client,
+ "default", project_id)
+ for secgroup_rule in self.secgroup_rules:
+ os_lib.create_secgroup_rule(self.neutron_client, secgroup_id,
+ secgroup_rule['protocol'],
+ secgroup_rule['direction'],
+ secgroup_rule['port_range_min'],
+ secgroup_rule['port_range_max'])
+
+
+ def set_quota(self):
+ project_name = os_lib.get_project_name(self.creds)
+ project_id = os_lib.get_project_id(self.keystone_client, project_name)
+ os_lib.update_compute_quota(self.nova_client, project_id, self.quota)
+
+
+ def create_onap_key(self):
+ os_lib.create_keypair(self.nova_client, self.keypair['name'],
+ self.keypair['pubkey_path'])
+
+
+ def set_onap_stack_params(self):
+ stack_config = util.read_yaml(self.heat_env)['parameters']
+
+ user_config = self.user_config
+ user_config.update({'ubuntu_1404_image':
+ self.vm_images['ubuntu_1404_image']['name']})
+ user_config.update({'ubuntu_1604_image':
+ self.vm_images['ubuntu_1604_image']['name']})
+ user_config.update({'dcae_centos_7_image':
+ self.vm_images['dcae_centos_7_image']['name']})
+
+ pubkey_data = util.read_file(self.keypair['pubkey_path']).strip('\n')
+ user_config.update({'key_name': self.keypair['name']})
+ user_config.update({'pub_key': literal_unicode(pubkey_data)})
+
+ util.mkdir(self.keypair_dir)
+ prikey_path = os.path.join(self.keypair_dir, 'private.key')
+ pubkey_path = os.path.join(self.keypair_dir, 'public.key')
+ if not os.path.isfile(prikey_path) or not os.path.isfile(pubkey_path):
+ util.create_keypair(prikey_path, pubkey_path)
+
+ dcae_prikey_data = util.read_file(prikey_path).strip('\n')
+ dcae_pubkey_data = util.read_file(pubkey_path).strip('\n')
+ user_config.update({'dcae_public_key':
+ literal_unicode(dcae_pubkey_data)})
+ user_config.update({'dcae_private_key':
+ literal_unicode(dcae_prikey_data)})
+
+ public_net_id = os_lib.get_network_id(
+ self.neutron_client,
+ user_config['public_net_name']
+ )
+ user_config.update({'public_net_id': public_net_id})
+ project_id = os_lib.get_project_id(
+ self.keystone_client,
+ user_config['openstack_tenant_name']
+ )
+ user_config.update({'openstack_tenant_id': project_id})
+
+ for key, value in user_config.items():
+ stack_config[key] = value
+ heat_env_data = {'parameters': stack_config}
+ util.write_yaml(heat_env_data, self.heat_env)
+
+
+ def create_onap_stack(self):
+ stack_args = {}
+ stack_args['stack_name'] = self.stack_name
+ stack_args['template'] = util.read_file(self.heat_template)
+ stack_args['parameters'] = util.read_yaml(self.heat_env)['parameters']
+ self.heat_client.stacks.create(**stack_args)
+
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..69aa189
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
diff --git a/vcpe_spinup.sh b/vcpe_spinup.sh
new file mode 100644
index 0000000..0dd8a46
--- /dev/null
+++ b/vcpe_spinup.sh
@@ -0,0 +1,99 @@
+#!/bin/sh
+# spin up a new vcpe instance
+
+URLSPINUP = 'http://127.0.0.1:18003/vnf/v1'
+URLSTATUS = 'http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
+
+URLINTF = 'http://127.0.0.1:18002/resmgr/v1/dev/if'
+URLINTFCONF = 'http://127.0.0.1:18002/ifconfig/v1'
+URLROUTE = 'http://127.0.0.1:18002/rtconfig/v1'
+
+AUTH = 'admin:admin'
+
+dev_id = "2188032VRE2018011814131903B81436"
+vnf_name = "vcpe_20180118150535"
+esn = "2188032VRE2018011814131903B81436"
+
+function spinup {
+
+ result = curl -I -H "Content-type: application/json" -X POST -u $AUTH -d '{ "dev_id": $1, "vnf_name": $2, "ctrler_id": "HW_AC_CAMPUS_CQ2", "vnfm_id": "HW_VNFM_CQ", "dev_vendor": "HUAWEI", "dev_model": "VNFM", "vnf_type": "VCPE", "vnf_esn": $3, "netconf_cfg": { "ipv4": "172.17.11.122", "ipv4_gw": "172.17.11.1"}, "status": "Active" }' $URLSPINUP
+ echo 'trying to spin up a new vcpe instance'
+ return result
+
+}
+
+function checkstatus {
+
+ URL = {$URLSTATUS}{$1}
+
+ result = curl -I -H "Content-type: application/json" -X GET -u $AUTH $URL
+ status = jq '.status' $result
+ return status
+
+}
+
+function cfgwaninterface {
+
+ result = curl -I -H "Content-type: application/json" -X POST -u $AUTH -d '{"dev_id": $1, "if_name": $2, "if_lable": "WAN", "access_ipv4": "192.168.40.30"}' $URLINTF
+
+ if [ $result -eq 200]; then
+
+ result = curl -I -H "Content-type: application/json" -X POST -u $AUTH -d '{"dev_id": $1, "if_name": $2, "ip_cfg": {"ip":$3, "gateway": $4} }' $URLINTFCONF
+ return result
+
+ else
+ return result
+
+ fi
+
+}
+
+function cfgdefaultroute {
+
+ result = curl -I -H "Content-type: application/json" -X POST -u $AUTH -d '{"dev_id": $1, "static_rt": {"dst":"0.0.0.0/0", "nexthop": $2} }' $URLROUTE
+ return result
+
+}
+
+function enablewan {
+
+ result = cfgwaninterface $1 $2 $3 $4
+ if [ $result -eq 200]; then
+ result = cfgdefaultroute $1 $4
+ return result
+ else
+ return result
+ fi
+
+}
+
+data = json
+result = sinup $dev_id $vnf_name $esn
+
+if [ $result -eq 200 ]; then
+
+ echo 'vcpe is being spinned up, wait...'
+
+ while true
+ do
+ sleep 30
+ status = checkstatus $dev_id
+ if [ $status -eq "Active" ]; then
+ echo 'vcpe is active now!'
+ break
+ fi
+ done
+
+ result = enablewan $dev_id "GigabitEthernet0/0/1" "192.168.40.30" "192.168.40.254"
+ if [ $result -eq 200]; then
+ echo 'vcpe is ready for service!'
+ fi
+
+elif [ $result -gt 300 ]; then
+ echo 'error happens!'
+else
+ echo 'illegal json result!'
+fi
+
+
+
diff --git a/vfw_spinup.sh b/vfw_spinup.sh
new file mode 100644
index 0000000..9c9cd82
--- /dev/null
+++ b/vfw_spinup.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+# spin up a new vfw instance and wait until it becomes Active
+
+URLSPINUP='http://127.0.0.1:18003/vnf/v1'
+URLSTATUS='http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
+AUTH='admin:admin'
+
+dev_id="0488033DDN20180118150535B7F76420"
+vnf_name="vfw_20180118150535"
+esn="0488033DDN20180118150535B7F76420"
+
+# request creation of the vFW VNF; echoes only the HTTP status code
+# $1: dev_id  $2: vnf_name  $3: esn
+spinup() {
+    echo 'trying to spin up a new vfw instance' >&2
+    curl -s -o /dev/null -w '%{http_code}' \
+         -H "Content-type: application/json" -X POST -u "$AUTH" \
+         -d '{ "dev_id": "'"$1"'", "vnf_name": "'"$2"'", "ctrler_id": "HW_AC_CAMPUS_CQ2", "vnfm_id": "HW_VNFM_CQ", "dev_vendor": "HUAWEI", "dev_model": "VNFM", "vnf_type": "VFW", "vnf_esn": "'"$3"'", "netconf_cfg": { "ipv4": "192.168.20.129", "mask_bit": 24, "ipv4_gw": "192.168.20.254"}, "wan_cfg": {"ipv4": "192.168.40.40", "mask_bit": 24, "ipv4_gw": "192.168.40.254"}, "status": "Active" }' \
+         "$URLSPINUP"
+}
+
+# echo the device status reported by the resource manager
+# $1: dev_id
+checkstatus() {
+    curl -s -H "Content-type: application/json" -X GET -u "$AUTH" "${URLSTATUS}$1" | jq -r '.status'
+}
+
+result=$(spinup "$dev_id" "$vnf_name" "$esn")
+
+if [ "$result" -eq 200 ]; then
+
+    echo 'vfw is being spinned up, wait...'
+
+    # poll every 30s until the device reports Active
+    while true
+    do
+        sleep 30
+        status=$(checkstatus "$dev_id")
+        if [ "$status" = "Active" ]; then
+            echo 'vfw is active now!'
+            break
+        fi
+    done
+
+elif [ "$result" -gt 300 ]; then
+    echo 'error happens!'
+else
+    echo 'illegal json result!'
+fi
diff --git a/vpn_subscribe.sh b/vpn_subscribe.sh
new file mode 100644
index 0000000..fc45454
--- /dev/null
+++ b/vpn_subscribe.sh
@@ -0,0 +1,220 @@
+#!/bin/sh
+# test script for vpn subscribing
+
+L3VPN = 3
+
+AUTH = 'admin:admin'
+URLTENANT = 'http://127.0.0.1:8091/v1/tenant'
+URLCPE = 'http://127.0.0.1:8091/v1/cpe'
+URLSTATUS = 'http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
+URLINTERFACE = 'http://127.0.0.1:8091/v1/cpe/interface'
+URLSERVICE = 'http://127.0.0.1:8091/v1/vpn'
+
+tenantid = 'opnfv'
+tenantname = 'opnfv'
+
+esn1 = '21500102003GH5000971'
+interface1 = 'GigabitEthernet0/0/3'
+vlan1 = 3006
+subnet1 = '172.168.2.0'
+mask2 = 24
+gateway1 = '10.10.2.2'
+
+esn2 = '2102114469P0H3000011'
+interface2 = '10GE6/0/16'
+vlan2 = 3000
+subnet2 = '172.168.1.0'
+mask2 = 24
+gateway2 = '10.10.1.2'
+
+function createtenant {
+
+ result = curl -I -H 'Content-type:application/json' -X POST -d '{ "tenant_id": $1,
+ "tenant_name":$2, "cert_type": "A", "cert_num": "000000000000000000001"}' -u $AUTH $URLTENANT
+ echo 'tenant $1 is being created!'
+ return result
+
+}
+
+function enablecpe {
+
+ cpe_model = "4096"
+ if [ $3 -eq "IMG"]; then
+ cpe_model = "4098"
+ fi
+ if [ $3 -eq "UCPE"]; then
+ cpe_model = "4096"
+ fi
+
+ result = curl -I -H 'Content-type:application/json' -X POST -d ' { "cpe_vendor": "HUAWEI", "tenant_id": $2, "ctrler_id": "HW_AC_CAMPUS_CQ1", "access_type": 0, "cpe_model": $cpe_moel, "cpe_esn": $1 }' -u $URLCPE
+ echo 'cpe $1 is being activated!'
+ return result
+
+}
+
+function checkstatus {
+
+ URL = {$URLSTATUS}{$1}
+
+ result = curl -I -H "Content-type: application/json" -X GET -u $AUTH $URL
+ status = jq '.status' $result
+ return status
+
+}
+
+function cfglaninterface {
+
+ result = curl -I -H 'Content-type:application/json' -X POST -d '{ "cpe_esn": $1, "interfaces": [ { "if_name": $2, "if_vlan": $3, "if_ip":$4, "if_mask":"24"}] }' -u $URLINTERFACE
+ echo 'cpe $1 interface $2 vlan $3 is being configured!'
+ return result
+
+}
+
+function enablesite2site {
+
+ result = curl -I -H 'Content-type:application/json' -X POST -d '{
+ "tenant_id": $1,
+ "bandwidth": 51200,
+ "order_id": "20180116-16",
+ "operation": 1,
+ "order_name": "20180116-16",
+ "internet_cfg": null,
+ "vas_cfg": null,
+ "vpn_config": [
+ {
+ "tenant_id": $1,
+ "vpn_id": 1,
+ "vpn_type": $L3VPN,
+ "local_device": $2,
+ "dl_bw": 1000,
+ "ul_bw": 1000,
+ "route_policy": false,
+ "qos_grade": null,
+ "local_type": 0,
+ "local_access": {
+ "web_enable": 1,
+ "dhcp_server": 1,
+ "portvlan_list": [
+
+ {
+ "port": $3,
+ "vlan": $4
+ }
+ ],
+ "subnet_list": [
+ {
+ "ipv4": $5,
+ "mask_bit": "24",
+ "gateway": "$6
+ }
+ ]
+ },
+ "remote_device": $7,
+ "remote_type": 0,
+ "remote_access": {
+ "dhcp_server": 1,
+ "web_enable": 1,
+ "portvlan_list": [
+
+ {
+ "port": $8,
+ "vlan": $9
+ }
+ ],
+ "subnet_list": [
+
+ {
+ "ipv4": $10,
+ "mask_bit": 24,
+ "gateway": $11
+ }
+ ]
+ }
+ }
+ ]
+}' -u $URLSERVICE
+ echo 'site2site between cpe $2 and cpe $3 is being activated for tenant $1!'
+ return result
+
+}
+
+tenantresult = createtenant $tenantid $tenantname
+if [ $tenantresult -eq 201 ]; then
+
+ echo 'tenant opnfv has been successfully created!'
+
+ ucperesult = enablecpe $esn1 $tenantid "UCPE"
+ if [ $ucperesult -eq 201 ]; then
+ echo 'cpe $esn1 has been successfully enabled!'
+ elif [ $cpe1result -eq 404 ]; then
+ echo 'tenant $tenantid not exits!'
+ elif [ $cpe1result -eq 409 ]; then
+ echo 'cpe $esn1 already exists!'
+ else
+ echo 'illegal result!'
+
+ imgresult = enablecpe $esn2 $tenantid "IMG"
+ if [ $imgresult -eq 201 ]; then
+ echo 'cpe $esn2 has been successfully enabled!'
+ elif [ $cpe2result -eq 404 ]; then
+ echo 'tenant $tenantid not exits!'
+ elif [ $cpe2result -eq 409 ]; then
+ echo 'cpe $esn2 already exists!'
+ else
+ echo 'illegal result!'
+
+ while true
+ do
+ sleep 30
+ ucpestatus = checkstatus $esn1
+ imgstatus = checkstatus $esn2
+ if [ $ucpestatus -eq "Active" ] && [ $imgstatus -eq "Active"]; then
+ echo 'ucpe and img are both ready for service!'
+ break
+ fi
+ done
+
+
+ ucpeinterfaceresult = cfglaninterface $esn1 $interface1 $vlan1 $ip1
+ if [ $ucpeinterfaceresult -eq 200 ]; then
+ echo 'cpe $esn1 interface $interface1 has been successfully configured!'
+ elif [ $ucpeinterfaceresult -eq 404 ]; then
+ echo 'cpe $esn1 not exits!'
+ else
+ echo 'illegal result!'
+
+ imginterfaceresult = cfglaninterface $esn2 $interface2 $vlan2 $ip2
+ if [ $imginterfaceresult -eq 200 ]; then
+ echo 'cpe $esn2 interface $interface2 has been successfully configured!'
+ elif [ $imginterfaceresult -eq 404 ]; then
+ echo 'cpe $esn1 not exits!'
+ else
+ echo 'illegal result!'
+
+ serviceresult = enablesite2site $tenantid $esn1 $interface1 $vlan1 $subnet1 $gateway1 $esn2 $interface2 $vlan2 $subnet2 $gateway2
+ if [ $serviceresult -eq 201 ]; then
+ echo 'l3vpn has been successfully enabled between cpe $esn1 and cpe $esn2!'
+ elif [ $serviceresult -eq 404 ]; then
+ echo 'tenant or cpe not exits!'
+ elif [ $serviceresult -eq 409 ]; then
+ echo 'l3vpn already enabled!'
+ elif [ $serviceresult -eq 500 ]; then
+ echo $serviceresult
+ else
+ echo 'illegal result!'
+
+
+elif [ $result -eq 409 ]; then
+ echo 'tenant already exists!'
+else
+ echo 'illegal result!'
+fi
+
+
+
+
+
+
+
+
+
diff --git a/vpn_unsubscribe.sh b/vpn_unsubscribe.sh
new file mode 100644
index 0000000..905a2a3
--- /dev/null
+++ b/vpn_unsubscribe.sh
@@ -0,0 +1,220 @@
#!/bin/sh
# Test script for VPN unsubscribing (L3VPN site-to-site teardown).
# NOTE(review): the original header said "subscribing", but this file is
# vpn_unsubscribe.sh — confirm the intended direction with the author.

# sh assignments must not have spaces around '=' ("L3VPN = 3" runs L3VPN as a command).
L3VPN=3

# Credentials and REST endpoints of the local service orchestrator.
AUTH='admin:admin'
URLTENANT='http://127.0.0.1:8091/v1/tenant'
URLCPE='http://127.0.0.1:8091/v1/cpe'
URLSTATUS='http://127.0.0.1:18002/resmgr/v1/dev?dev_id='
URLINTERFACE='http://127.0.0.1:8091/v1/cpe/interface'
URLSERVICE='http://127.0.0.1:8091/v1/vpn'

tenantid='opnfv'
tenantname='opnfv'

# Site 1: uCPE access device.
esn1='21500102003GH5000971'
interface1='GigabitEthernet0/0/3'
vlan1=3006
subnet1='172.168.2.0'
mask1=24            # was a duplicate "mask2" in the original; site 1 uses mask1
gateway1='10.10.2.2'

# Site 2: IMG aggregation device.
esn2='2102114469P0H3000011'
interface2='10GE6/0/16'
vlan2=3000
subnet2='172.168.1.0'
mask2=24
gateway2='10.10.1.2'
+
# createtenant TENANT_ID TENANT_NAME
# Creates a tenant via the orchestrator REST API.
# Prints the HTTP status code on stdout so callers capture it with $(...)
# — 'return result' returned a literal, and 'return' is numeric-only and
# capped at 255, which cannot carry HTTP codes such as 404/409.
# 'function name {' is a bashism; POSIX sh uses 'name() {'.
createtenant() {
    # Double quotes so $1/$2 expand (the original single-quoted -d body sent
    # the literal text "$1"/"$2" to the server).  -s/-o/-w capture only the
    # HTTP status code instead of dumping response headers with -I.
    _ct_code=$(curl -s -o /dev/null -w '%{http_code}' \
        -H 'Content-Type: application/json' -X POST \
        -d "{ \"tenant_id\": \"$1\", \"tenant_name\": \"$2\", \"cert_type\": \"A\", \"cert_num\": \"000000000000000000001\" }" \
        -u "$AUTH" "$URLTENANT")
    # Progress message goes to stderr so it does not mix with the status code.
    echo "tenant $1 is being created!" >&2
    echo "$_ct_code"
}
+
# enablecpe ESN TENANT_ID ROLE
# Activates a CPE device for a tenant.  ROLE selects the device model:
# "IMG" -> 4098, anything else (e.g. "UCPE") -> 4096 (the default).
# Prints the HTTP status code on stdout.
enablecpe() {
    # String comparison uses '=' — the original used the numeric '-eq' and
    # was missing the space before ']'.  The default already covers "UCPE",
    # so the redundant second test is dropped.
    cpe_model="4096"
    if [ "$3" = "IMG" ]; then
        cpe_model="4098"
    fi

    # Fixes vs. original: "$cpe_moel" typo; '-u' was given the URL instead of
    # the credentials, and the request URL itself was missing entirely.
    _ec_code=$(curl -s -o /dev/null -w '%{http_code}' \
        -H 'Content-Type: application/json' -X POST \
        -d "{ \"cpe_vendor\": \"HUAWEI\", \"tenant_id\": \"$2\", \"ctrler_id\": \"HW_AC_CAMPUS_CQ1\", \"access_type\": 0, \"cpe_model\": \"$cpe_model\", \"cpe_esn\": \"$1\" }" \
        -u "$AUTH" "$URLCPE")
    echo "cpe $1 is being activated!" >&2
    echo "$_ec_code"
}
+
# checkstatus ESN
# Queries the resource manager for a device and prints its status field
# (e.g. "Active") on stdout.
checkstatus() {
    # Plain juxtaposition concatenates strings; the original '{$URLSTATUS}{$1}'
    # put literal braces into the URL.
    _cs_url="${URLSTATUS}${1}"
    # 'curl -I' fetches headers only (no JSON body for jq to parse), and
    # 'jq FILTER $var' treats the value as a *filename* — fetch the body and
    # pipe it to jq instead.  -r strips the JSON quotes so callers can do a
    # plain string compare against "Active".
    curl -s -H 'Content-Type: application/json' -X GET -u "$AUTH" "$_cs_url" \
        | jq -r '.status'
}
+
# cfglaninterface ESN IF_NAME VLAN IP
# Configures a LAN-side sub-interface on a CPE.  Prints the HTTP status code
# on stdout.
# NOTE(review): if_mask is hard-coded to "24" here, as in the original —
# confirm whether it should come from mask1/mask2.
cfglaninterface() {
    # Fixes vs. original: parameters now expand inside the JSON body (it was
    # single-quoted), and '-u' gets the credentials while the URL is passed
    # as the request target (the original had '-u $URLINTERFACE' and no URL).
    _cfg_code=$(curl -s -o /dev/null -w '%{http_code}' \
        -H 'Content-Type: application/json' -X POST \
        -d "{ \"cpe_esn\": \"$1\", \"interfaces\": [ { \"if_name\": \"$2\", \"if_vlan\": $3, \"if_ip\": \"$4\", \"if_mask\": \"24\" } ] }" \
        -u "$AUTH" "$URLINTERFACE")
    echo "cpe $1 interface $2 vlan $3 is being configured!" >&2
    echo "$_cfg_code"
}
+
# enablesite2site TENANT ESN1 IF1 VLAN1 SUBNET1 GW1 ESN2 IF2 VLAN2 SUBNET2 GW2
# Orders an L3VPN site-to-site service between two CPEs.
# Prints the HTTP status code on stdout.
enablesite2site() {
    # Build the payload in an unquoted heredoc so the positional parameters
    # expand (the original single-quoted -d body sent them literally).
    # Positions 10 and 11 need braces: "$10" parses as "$1" followed by "0".
    # The unbalanced quote at '"gateway": "$6' is also fixed here.
    _s2s_payload=$(cat <<EOF
{
  "tenant_id": "$1",
  "bandwidth": 51200,
  "order_id": "20180116-16",
  "operation": 0,
  "order_name": "20180116-16",
  "internet_cfg": null,
  "vas_cfg": null,
  "vpn_config": [
    {
      "tenant_id": "$1",
      "vpn_id": 1,
      "vpn_type": $L3VPN,
      "local_device": "$2",
      "dl_bw": 1000,
      "ul_bw": 1000,
      "route_policy": false,
      "qos_grade": null,
      "local_type": 0,
      "local_access": {
        "web_enable": 1,
        "dhcp_server": 1,
        "portvlan_list": [
          {
            "port": "$3",
            "vlan": $4
          }
        ],
        "subnet_list": [
          {
            "ipv4": "$5",
            "mask_bit": "24",
            "gateway": "$6"
          }
        ]
      },
      "remote_device": "$7",
      "remote_type": 0,
      "remote_access": {
        "dhcp_server": 1,
        "web_enable": 1,
        "portvlan_list": [
          {
            "port": "$8",
            "vlan": $9
          }
        ],
        "subnet_list": [
          {
            "ipv4": "${10}",
            "mask_bit": 24,
            "gateway": "${11}"
          }
        ]
      }
    }
  ]
}
EOF
)
    # NOTE(review): mask_bit is a string ("24") on the local side and a number
    # (24) on the remote side, exactly as in the original — confirm which form
    # the API expects.
    _s2s_code=$(curl -s -o /dev/null -w '%{http_code}' \
        -H 'Content-Type: application/json' -X POST \
        -d "$_s2s_payload" -u "$AUTH" "$URLSERVICE")
    # The two devices are $2 and $7 ($3, named by the original message, is an
    # interface name, not a CPE).
    echo "site2site between cpe $2 and cpe $7 is being activated for tenant $1!" >&2
    echo "$_s2s_code"
}
+
# ---------------------------------------------------------------------------
# Main flow: create tenant -> enable both CPEs -> wait until Active ->
# configure LAN interfaces -> order the site-to-site L3VPN.
# Functions print their HTTP status code on stdout; capture it with $(...).
# Messages use double quotes so $vars expand (single quotes did not).
# ---------------------------------------------------------------------------
tenantresult=$(createtenant "$tenantid" "$tenantname")
if [ "$tenantresult" -eq 201 ]; then

    echo "tenant opnfv has been successfully created!"

    # Each if/elif chain below now ends with 'fi' (all were missing), and the
    # elif branches test the variable actually set by the call above (the
    # originals referenced undefined $cpe1result/$cpe2result).
    ucperesult=$(enablecpe "$esn1" "$tenantid" "UCPE")
    if [ "$ucperesult" -eq 201 ]; then
        echo "cpe $esn1 has been successfully enabled!"
    elif [ "$ucperesult" -eq 404 ]; then
        echo "tenant $tenantid does not exist!"
    elif [ "$ucperesult" -eq 409 ]; then
        echo "cpe $esn1 already exists!"
    else
        echo "illegal result!"
    fi

    imgresult=$(enablecpe "$esn2" "$tenantid" "IMG")
    if [ "$imgresult" -eq 201 ]; then
        echo "cpe $esn2 has been successfully enabled!"
    elif [ "$imgresult" -eq 404 ]; then
        echo "tenant $tenantid does not exist!"
    elif [ "$imgresult" -eq 409 ]; then
        echo "cpe $esn2 already exists!"
    else
        echo "illegal result!"
    fi

    # Poll every 30s until both devices report "Active".
    while true
    do
        sleep 30
        ucpestatus=$(checkstatus "$esn1")
        imgstatus=$(checkstatus "$esn2")
        # Statuses are strings, so compare with '=' (the numeric '-eq' fails);
        # the original was also missing the space before the closing ']'.
        if [ "$ucpestatus" = "Active" ] && [ "$imgstatus" = "Active" ]; then
            echo "ucpe and img are both ready for service!"
            break
        fi
    done

    # NOTE(review): the original passed undefined $ip1/$ip2 here; the LAN-side
    # gateway addresses are the most plausible intent — confirm against the
    # interface API before merging.
    ucpeinterfaceresult=$(cfglaninterface "$esn1" "$interface1" "$vlan1" "$gateway1")
    if [ "$ucpeinterfaceresult" -eq 200 ]; then
        echo "cpe $esn1 interface $interface1 has been successfully configured!"
    elif [ "$ucpeinterfaceresult" -eq 404 ]; then
        echo "cpe $esn1 does not exist!"
    else
        echo "illegal result!"
    fi

    imginterfaceresult=$(cfglaninterface "$esn2" "$interface2" "$vlan2" "$gateway2")
    if [ "$imginterfaceresult" -eq 200 ]; then
        echo "cpe $esn2 interface $interface2 has been successfully configured!"
    elif [ "$imginterfaceresult" -eq 404 ]; then
        # Original message named $esn1 here; this branch is about $esn2.
        echo "cpe $esn2 does not exist!"
    else
        echo "illegal result!"
    fi

    serviceresult=$(enablesite2site "$tenantid" "$esn1" "$interface1" "$vlan1" "$subnet1" "$gateway1" "$esn2" "$interface2" "$vlan2" "$subnet2" "$gateway2")
    if [ "$serviceresult" -eq 201 ]; then
        echo "l3vpn has been successfully enabled between cpe $esn1 and cpe $esn2!"
    elif [ "$serviceresult" -eq 404 ]; then
        echo "tenant or cpe does not exist!"
    elif [ "$serviceresult" -eq 409 ]; then
        echo "l3vpn already enabled!"
    elif [ "$serviceresult" -eq 500 ]; then
        echo "$serviceresult"
    else
        echo "illegal result!"
    fi

elif [ "$tenantresult" -eq 409 ]; then
    # Original tested undefined $result; the create call stored $tenantresult.
    echo "tenant already exists!"
else
    echo "illegal result!"
fi
+
+
+
+
+
+
+
+
+
diff --git a/yamllintrc b/yamllintrc
new file mode 100644
index 0000000..a4f3d02
--- /dev/null
+++ b/yamllintrc
@@ -0,0 +1,25 @@
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Start from yamllint's built-in "relaxed" preset and tighten a few rules.
+extends: relaxed
+
+rules:
+  # Allow at most one blank line at the start and at the end of a file.
+  empty-lines:
+    max-start: 1
+    max-end: 1
+  # At most one space before and after a colon.
+  colons:
+    max-spaces-after: 1
+    max-spaces-before: 1
+  # Lines may be up to 160 characters long.
+  line-length:
+    max: 160