aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore3
-rw-r--r--INFO35
-rw-r--r--INFO.yaml28
-rwxr-xr-xci/build-auto.sh158
-rwxr-xr-xci/deploy-onap-fuel.sh238
-rwxr-xr-xci/deploy-onap-kubespray.sh339
-rwxr-xr-xci/deploy-onap.sh376
-rw-r--r--ci/deploy-opnfv-apex-centos.sh209
-rw-r--r--ci/deploy-opnfv-compass-ubuntu.sh201
-rw-r--r--ci/deploy-opnfv-daisy-centos.sh179
-rw-r--r--ci/deploy-opnfv-fuel-ubuntu.sh199
-rwxr-xr-xci/plot-results.sh101
-rw-r--r--docs/conf.py1
-rw-r--r--docs/conf.yaml3
-rw-r--r--docs/index.rst18
-rw-r--r--docs/release/configguide/Auto-featureconfig.rst112
-rw-r--r--docs/release/configguide/auto-installTarget-initial.pngbin31484 -> 35994 bytes
-rw-r--r--docs/release/configguide/index.rst1
-rw-r--r--docs/release/release-notes/Auto-release-notes.rst180
-rw-r--r--docs/release/release-notes/auto-proj-parameters.pngbin0 -> 32716 bytes
-rw-r--r--docs/release/release-notes/auto-project-activities.pngbin58789 -> 25995 bytes
-rw-r--r--docs/release/release-notes/index.rst1
-rw-r--r--docs/release/userguide/index.rst1
-rw-r--r--docs/requirements.txt2
-rw-r--r--tox.ini17
25 files changed, 2210 insertions, 192 deletions
diff --git a/.gitignore b/.gitignore
index 70a5649..f6b7eea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
/lib/auto.egg-info
/build
/dist
-/docs_build
/docs_output
/opnfvdocs
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
deleted file mode 100644
index b266f87..0000000
--- a/INFO
+++ /dev/null
@@ -1,35 +0,0 @@
-(obsolete: use only INFO.yaml)
-
-Project: ONAP-Automated OPNFV (Auto)
-Project Creation Date: August 15, 2017
-Project Category:
-Lifecycle State: Incubation
-Primary Contact: tina.tsou@arm.com
-Project Lead: tina.tsou@arm.com
-Jira Project Name: ONAP-Automated OPNFV
-Jira Project Prefix: AUTO
-Mailing list tag: [auto]
-IRC: Server:freenode.net Channel:#opnfv-auto
-Repository: auto
-
-Committers:
-Tina Tsou (tina.tsou@arm.com)
-Harry Huang (huangxiangyu5@huawei.com)
-Mohankumar Navaneethan (mnavaneethan@mvista.com)
-Song Zhu (song.zhu@arm.com)
-Liang Ou (oul.gd@chinatelecom.cn)
-Gerard Damm (gerard.damm@wipro.com)
-Joe Kidder (joe.kidder@5thlayer.com)
-Cristina Pauna (cristina.pauna@enea.com)
-Paul Vaduva (paul.vaduva@enea.com)
-Martin Klozik (martin.klozik@tieto.com)
-Richard Elias (richard.elias@tieto.com)
-
-Prasad Gorja (prasad.gorja@nxp.com)
-Lei Chen (chenlei@caict.ac.cn)
-Xiaoyu Wang (wxy_cttl@126.com)
-Xu Lu (luxu_hd@163.com)
-Eric Maye (eric.dmaye@wipro.com)
-Chen Zhang (zhangchen.bri@chinatelecom.cn)
-
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html
diff --git a/INFO.yaml b/INFO.yaml
index f4e1a04..69e5b01 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -38,30 +38,6 @@ committers:
email: 'huangxiangyu5@huawei.com'
company: 'huawei.com'
id: 'huangxiangyu'
- - name: 'Mohankumar Navaneethan'
- email: 'mnavaneethan@mvista.com'
- company: 'Cavium'
- id: 'nmohankumar'
- - name: 'Song Zhu'
- email: 'song.zhu@arm.com'
- company: 'arm.com'
- id: 'mail22song'
- - name: 'Liang Ou'
- email: 'oul.gd@chinatelecom.cn'
- company: 'chinatelecom.cn'
- id: 'ouliang1'
- - name: 'Gerard Damm'
- email: 'gerard.damm@wipro.com'
- company: 'Wipro'
- id: 'gerard_damm'
- - name: 'Joe Kidder'
- email: 'joe.kidder@5thlayer.com'
- company: '5thlayer.com'
- id: 'joe.kidder'
- - name: 'Cristina Pauna'
- email: 'cristina.pauna@enea.com'
- company: 'enea.com'
- id: 'cristinapauna'
- name: 'Paul Vaduva'
email: 'paul.vaduva@enea.com'
company: 'enea.com'
@@ -70,10 +46,6 @@ committers:
email: 'martin.klozik@tieto.com'
company: 'tieto.com'
id: 'mklozik'
- - name: 'Richard Elias'
- email: 'richard.elias@tieto.com'
- company: 'tieto.com'
- id: 'richardxelias'
tsc:
# yamllint disable rule:line-length
approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html'
diff --git a/ci/build-auto.sh b/ci/build-auto.sh
index 96588b9..00b67b1 100755
--- a/ci/build-auto.sh
+++ b/ci/build-auto.sh
@@ -20,10 +20,12 @@
# Usage:
# build-auto.sh job_type
-# where job_type is one of "verify", "merge", "daily"
+#
+# Parameters:
+# job_type - is one of "verify", "merge" or "daily"
#
# Example:
-# ./ci/build-auto.sh daily
+# ./ci/build-auto.sh verify
#
# exit codes
@@ -31,11 +33,21 @@
EXIT=0
EXIT_UNKNOWN_JOB_TYPE=1
EXIT_LINT_FAILED=2
+EXIT_FUEL_FAILED=10
#
# configuration
#
AUTOENV_DIR="$HOME/autoenv"
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+LOG_DIR=$HOME/auto_ci_daily_logs
+WORKSPACE=${WORKSPACE:-$PWD}
+
+# POD and SCENARIO details used during OPNFV deployment performed by daily job
+NODE_NAME=${NODE_NAME:-"ericsson-virtual1"}
+POD_LAB=$(echo $NODE_NAME | cut -d '-' -f1)
+POD_NAME=$(echo $NODE_NAME | cut -d '-' -f2)
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-onap-noha"}
#
# functions
@@ -47,6 +59,42 @@ function execute_auto_lint_check() {
fi
}
+# check and install required packages
+function dependencies_check() {
+ . /etc/os-release
+ if [ $ID == "ubuntu" ] ; then
+ echo "Dependencies check"
+ echo "=================="
+ # install system packages
+ for PACKAGE in "virtualenv" "pylint" "yamllint" "gnuplot" ; do
+ if dpkg -s $PACKAGE &> /dev/null ; then
+ printf " %-70s %-6s\n" $PACKAGE "OK"
+ else
+ printf " %-70s %-6s\n" $PACKAGE "missing"
+ sudo apt-get install -y $PACKAGE
+ fi
+ done
+ echo
+ fi
+}
+
+# create virtualenv if needed and enable it
+function virtualenv_prepare() {
+ if [ ! -e $AUTOENV_DIR ] ; then
+ echo "Create AUTO environment"
+ echo "======================="
+ virtualenv "$AUTOENV_DIR"
+ echo
+ fi
+
+ # activate and update virtualenv
+ echo "Update AUTO environment"
+ echo "======================="
+ source "$AUTOENV_DIR"/bin/activate
+ pip install -r ./requirements.txt
+ echo
+}
+
#
# main
#
@@ -55,20 +103,8 @@ echo
# enter workspace dir
cd $WORKSPACE
-# create virtualenv if needed
-if [ ! -e $AUTOENV_DIR ] ; then
- echo "Create AUTO environment"
- echo "======================="
- virtualenv "$AUTOENV_DIR"
- echo
-fi
-
-# activate and update virtualenv
-echo "Update AUTO environment"
-echo "======================="
-source "$AUTOENV_DIR"/bin/activate
-pip install -r ./requirements.txt
-echo
+# check if required packages are installed
+dependencies_check
# execute job based on passed parameter
case $1 in
@@ -77,15 +113,9 @@ case $1 in
echo "AUTO verify job"
echo "==============="
- # Example of verify job body. Functions can call
- # external scripts, etc.
-
+ virtualenv_prepare
execute_auto_lint_check
#execute_auto_doc_check
- #install_opnfv MCP
- #install_onap
- #execute_sanity_check
- #execute_tests $1
# Everything went well, so report SUCCESS to Jenkins
exit $EXIT
@@ -95,15 +125,9 @@ case $1 in
echo "AUTO merge job"
echo "=============="
- # Example of merge job body. Functions can call
- # external scripts, etc.
-
+ virtualenv_prepare
execute_auto_lint_check
#execute_auto_doc_check
- #install_opnfv MCP
- #install_onap
- #execute_sanity_check
- #execute_tests $1
# propagate result to the Jenkins job
exit $EXIT
@@ -112,15 +136,73 @@ case $1 in
echo "=============="
echo "AUTO daily job"
echo "=============="
+ echo
+ echo "Deployment details:"
+ echo " LAB: $POD_LAB"
+ echo " POD: $POD_NAME"
+ echo " Scenario: $DEPLOY_SCENARIO"
+ echo " WORKSPACE: $WORKSPACE"
+ echo
- # Example of daily job body. Functions can call
- # external scripts, etc.
-
- #install_opnfv MCP
- #install_onap
- #execute_sanity_check
- #execute_tests $1
- #push_results_and_logs_to_artifactory
+ # create log dir if needed
+ if [ ! -e $LOG_DIR ] ; then
+ echo "Create AUTO LOG DIRECTORY"
+ echo "========================="
+ echo "mkdir $LOG_DIR"
+ mkdir $LOG_DIR
+ echo
+ fi
+
+ echo "Installation of OPNFV and ONAP"
+ echo "=============================="
+ # clone fuel and execute installation of ONAP scenario to install
+ # ONAP on top of OPNFV deployment
+ [ -e fuel ] && rm -rf fuel
+ git clone https://gerrit.opnfv.org/gerrit/fuel
+ cd fuel
+ # Fuel master branch is currently broken; thus use stable/gambia
+ # branch with recent master version of ONAP scenario
+ git checkout stable/gambia
+ git checkout origin/master mcp/config/states/onap \
+ mcp/config/scenario/os-nosdn-onap-ha.yaml \
+ mcp/config/scenario/os-nosdn-onap-noha.yaml
+ # use larger disk size for virtual nodes
+ sed -i -re 's/(qemu-img resize.*)100G/\1400G/' mcp/scripts/lib_jump_deploy.sh
+
+ LOG_FILE="$LOG_DIR/deploy_${TIMESTAMP}.log"
+ echo "ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\
+ tee $LOG_FILE"
+ DEPLOY_START=$(date +%Y%m%d_%H%M%S)
+ ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\
+ tee $LOG_FILE
+
+ # report failure if fuel failed to install OPNFV or ONAP
+ [ $? -ne 0 ] && exit $EXIT_FUEL_FAILED
+
+ # process report
+ DEPLOY_END=$(date +%Y%m%d_%H%M%S)
+ REPORT_FILE="$LOG_DIR/deploy_report_${TIMESTAMP}.txt"
+ CSV_SUMMARY="$LOG_DIR/deploy_summary_${TIMESTAMP}.csv"
+ MARKER="ONAP INSTALLATION REPORT"
+ # cut report from installation log file
+ sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" \
+ $LOG_FILE > $REPORT_FILE
+ PODS_TOTAL=$(grep "PODs Total" $REPORT_FILE | sed -e 's/[^0-9]//g')
+ PODS_FAILED=$(grep "PODs Failed" $REPORT_FILE | sed -e 's/[^0-9]//g')
+ TC_SUM=$(grep "tests total" $REPORT_FILE | tail -n1 |\
+ sed -e 's/[^0-9,]//g')
+
+ echo "Start Time,End Time,Total PODs,Failed PODs,Total Tests,Passed"\
+ "Tests,Failed Tests" >> $CSV_SUMMARY
+ echo "$DEPLOY_START,$DEPLOY_END,$PODS_TOTAL,$PODS_FAILED,$TC_SUM"\
+ >> $CSV_SUMMARY
+
+ # plot graphs from result summaries and print txt versions if possible
+ cd $WORKSPACE
+ ci/plot-results.sh
+ for GRAPH in $(ls -1 graph*txt 2> /dev/null) ; do
+ cat $GRAPH
+ done
# propagate result to the Jenkins job
exit $EXIT
diff --git a/ci/deploy-onap-fuel.sh b/ci/deploy-onap-fuel.sh
new file mode 100755
index 0000000..c120e9c
--- /dev/null
+++ b/ci/deploy-onap-fuel.sh
@@ -0,0 +1,238 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP on top of OPNFV Fuel/MCP installation
+# In the future both OOM and heat install methods should be supported.
+# At the beginning OOM will be used for simplification.
+
+# TODO:
+# Configure ONAP to be able to control underlying OpenStack
+
+# Configuration to be passed to ci/deploy-onap.sh
+export SSH_USER="ubuntu"
+export SSH_IDENTITY="/root/.ssh/onap_key"
+
+# detect hypervisor details to be used as default values if needed
+OS_HYPER_CMD="openstack hypervisor list --long"
+echo -e "\nOpenStack Hypervisor list\n"
+$OS_HYPER_CMD
+
+DEFAULT_CMP_COUNT=$($OS_HYPER_CMD -f value -c "ID" | wc -l)
+DEFAULT_CMP_MIN_MEM=$($OS_HYPER_CMD -f value -c "Memory MB" | sort | head -n1)
+DEFAULT_CMP_MIN_CPUS=$($OS_HYPER_CMD -f value -c "vCPUs" | sort | head -n1)
+
+# Use default values if compute configuration was not set by FUEL installer
+AUTO_INSTALL_DIR=${AUTO_INSTALL_DIR:-"."}
+AUTO_IMAGE_DIR="${AUTO_INSTALL_DIR}/images"
+CMP_COUNT=${CMP_COUNT:-$DEFAULT_CMP_COUNT} # number of compute nodes
+CMP_MIN_MEM=${CMP_MIN_MEM:-$DEFAULT_CMP_MIN_MEM} # MB RAM of the weakest compute node
+CMP_MIN_CPUS=${CMP_MIN_CPUS:-$DEFAULT_CMP_MIN_CPUS} # CPU count of the weakest compute node
+# size of storage for instances
+CMP_STORAGE_TOTAL=${CMP_STORAGE_TOTAL:-$((80*$CMP_COUNT))}
+VM_COUNT=${VM_COUNT:-6} # number of VMs available for k8s cluster
+
+#
+# Functions
+#
+# function minimum accepts two numbers and prints smaller one
+function minimum(){
+ echo $(($1<$2?$1:$2))
+}
+
+# function remove_openstack_setup removes OS configuration performed by this
+# script; So previously created configuration and deployed VMs will be
+# removed before new ONAP deployment will be started.
+function remove_openstack_setup(){
+ # flavor is created 1st but removed last, so...
+ if ( ! openstack flavor list | grep 'onap.large' &> /dev/null ) ; then
+ #...no flavor means nothing to be removed
+ return
+ fi
+ echo -e "\nRemoving ONAP specific OpenStack configuration"
+ for a in $(openstack server list --name onap_vm -f value -c ID) ; do
+ openstack server delete $a
+ done
+ RULES=$(openstack security group rule list onap_security_group -f value -c ID)
+ for a in $RULES; do
+ openstack security group rule delete $a
+ done
+ openstack security group delete onap_security_group
+ for a in $(openstack floating ip list -f value -c ID) ; do
+ openstack floating ip delete $a
+ done
+ PORTS=$(openstack port list --network onap_private_network -f value -c ID)
+ for a in $PORTS ; do
+ openstack router remove port onap_router $a
+ done
+ PORTS=$(openstack port list --network onap_private_network -f value -c ID)
+ for a in $PORTS ; do
+ openstack port delete $a
+ done
+ openstack router delete onap_router
+ openstack subnet delete onap_private_subnet
+ openstack network delete onap_private_network
+ openstack image delete xenial
+ rm -rf $AUTO_IMAGE_DIR
+ openstack keypair delete onap_key
+ rm $SSH_IDENTITY
+ openstack flavor delete onap.large
+ echo
+}
+
+#
+# Script Main
+#
+
+# remove OpenStack configuration if it exists
+remove_openstack_setup
+
+echo -e "\nOpenStack configuration\n"
+
+# Calculate VM resources, so that flavor can be created
+echo "Configuration of compute node:"
+echo "Number of computes: CMP_COUNT=$CMP_COUNT"
+echo "Minimal RAM: CMP_MIN_MEM=$CMP_MIN_MEM"
+echo "Minimal CPUs count: CMP_MIN_CPUS=$CMP_MIN_CPUS"
+echo "Storage for instances: CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL"
+echo "Number of VMs: VM_COUNT=$VM_COUNT"
+# Calculate VM parameters; there will be up to 1 VM per Compute node
+# to maximize resources available for VMs
+PER=85 # % of compute resources will be consumed by VMs
+VM_DISK_MAX=100 # GB - max VM disk size
+VM_MEM_MAX=81920 # MB - max VM RAM size
+VM_CPUS_MAX=56 # max count of VM CPUs
+VM_MEM=$(minimum $(($CMP_MIN_MEM*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_MEM_MAX)
+VM_CPUS=$(minimum $(($CMP_MIN_CPUS*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_CPUS_MAX)
+VM_DISK=$(minimum $(($CMP_STORAGE_TOTAL*$PER/100/$VM_COUNT)) $VM_DISK_MAX)
+
+echo -e "\nFlavor configuration:"
+echo "CPUs : $VM_CPUS"
+echo "RAM [MB] : $VM_MEM"
+echo "DISK [GB] : $VM_DISK"
+
+# Create onap flavor
+openstack flavor create --ram $VM_MEM --vcpus $VM_CPUS --disk $VM_DISK \
+ onap.large
+
+# Generate a keypair and store private key
+openstack keypair create onap_key > $SSH_IDENTITY
+chmod 600 $SSH_IDENTITY
+
+# Download and import VM image(s)
+mkdir $AUTO_IMAGE_DIR
+wget -P $AUTO_IMAGE_DIR https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+openstack image create --disk-format qcow2 --container-format bare --public \
+ --file $AUTO_IMAGE_DIR/xenial-server-cloudimg-amd64-disk1.img xenial
+
+# Modify quotas (add 10% to required VM resources)
+openstack quota set --ram $(($VM_MEM*$VM_COUNT*110/100)) admin
+openstack quota set --cores $(($VM_CPUS*$VM_COUNT*110/100)) admin
+
+# Configure networking with DNS for access to the internet
+openstack network create onap_private_network --provider-network-type vxlan
+openstack subnet create onap_private_subnet --network onap_private_network \
+ --subnet-range 192.168.33.0/24 --ip-version 4 --dhcp --dns-nameserver "8.8.8.8"
+openstack router create onap_router
+openstack router add subnet onap_router onap_private_subnet
+openstack router set onap_router --external-gateway floating_net
+
+# Allow selected ports and protocols
+openstack security group create onap_security_group
+openstack security group rule create --protocol icmp onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 22:22 onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 8080:8080 onap_security_group # rancher
+openstack security group rule create --proto tcp \
+ --dst-port 8078:8078 onap_security_group # horizon
+openstack security group rule create --proto tcp \
+ --dst-port 8879:8879 onap_security_group # helm
+openstack security group rule create --proto tcp \
+ --dst-port 80:80 onap_security_group
+openstack security group rule create --proto tcp \
+ --dst-port 443:443 onap_security_group
+
+# Allow communication between k8s cluster nodes
+PUBLIC_NET=`openstack subnet list --name floating_subnet -f value -c Subnet`
+openstack security group rule create --remote-ip $PUBLIC_NET --proto tcp \
+ --dst-port 1:65535 onap_security_group
+openstack security group rule create --remote-ip $PUBLIC_NET --proto udp \
+ --dst-port 1:65535 onap_security_group
+
+# Get list of hypervisors and their zone
+HOST_ZONE=$(openstack host list -f value | grep compute | head -n1 | cut -d' ' -f3)
+HOST_NAME=($(openstack host list -f value | grep compute | cut -d' ' -f1))
+HOST_COUNT=$(echo ${HOST_NAME[@]} | wc -w)
+# Create VMs and assign floating IPs to them
+VM_ITER=1
+HOST_ITER=0
+while [ $VM_ITER -le $VM_COUNT ] ; do
+ openstack floating ip create floating_net
+ VM_NAME[$VM_ITER]="onap_vm${VM_ITER}"
+ VM_IP[$VM_ITER]=$(openstack floating ip list -c "Floating IP Address" \
+ -c "Port" -f value | grep None | cut -f1 -d " " | head -n1)
+ # dispatch new VMs among compute nodes in round robin fashion
+ openstack server create --flavor onap.large --image xenial \
+ --nic net-id=onap_private_network --security-group onap_security_group \
+ --key-name onap_key ${VM_NAME[$VM_ITER]} \
+ --availability-zone ${HOST_ZONE}:${HOST_NAME[$HOST_ITER]}
+ sleep 10 # wait for VM init before floating IP can be assigned
+ openstack server add floating ip ${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}
+ echo "Waiting for ${VM_NAME[$VM_ITER]} to start up for 1m at $(date)"
+ sleep 1m
+ VM_ITER=$(($VM_ITER+1))
+ HOST_ITER=$(($HOST_ITER+1))
+ [ $HOST_ITER -ge $HOST_COUNT ] && HOST_ITER=0
+done
+
+openstack server list -c ID -c Name -c Status -c Networks -c Host --long
+
+# check that SSH to all VMs is working
+SSH_OPTIONS="-i $SSH_IDENTITY -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+COUNTER=1
+while [ $COUNTER -le 10 ] ; do
+ VM_UP=0
+ VM_ITER=1
+ while [ $VM_ITER -le $VM_COUNT ] ; do
+ if ssh $SSH_OPTIONS -l $SSH_USER ${VM_IP[$VM_ITER]} exit &>/dev/null ; then
+ VM_UP=$(($VM_UP+1))
+ echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: up"
+ else
+ echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: down"
+ fi
+ VM_ITER=$(($VM_ITER+1))
+ done
+ COUNTER=$(($COUNTER+1))
+ if [ $VM_UP -eq $VM_COUNT ] ; then
+ break
+ fi
+ echo "Waiting for VMs to be accessible via ssh for 2m at $(date)"
+ sleep 2m
+done
+
+openstack server list -c ID -c Name -c Status -c Networks -c Host --long
+
+if [ $VM_UP -ne $VM_COUNT ] ; then
+ echo "Only $VM_UP from $VM_COUNT VMs are accessible via ssh. Installation will be terminated."
+ exit 1
+fi
+
+# Start ONAP installation
+DATE_START=$(date)
+echo -e "\nONAP Installation Started at $DATE_START\n"
+$AUTO_INSTALL_DIR/ci/deploy-onap.sh ${VM_IP[@]}
+echo -e "\nONAP Installation Started at $DATE_START"
+echo -e "ONAP Installation Finished at $(date)\n"
diff --git a/ci/deploy-onap-kubespray.sh b/ci/deploy-onap-kubespray.sh
new file mode 100755
index 0000000..a797388
--- /dev/null
+++ b/ci/deploy-onap-kubespray.sh
@@ -0,0 +1,339 @@
+#!/bin/bash
+#
+# Copyright 2018-2019 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+
+#
+# Configuration
+#
+export LC_ALL=C
+export LANG=C
+
+MASTER=$1
+SERVERS=$*
+shift
+SLAVES=$*
+
+ONAP_BRANCH=${ONAP_BRANCH:-'casablanca'}
+KUBESPRAY_COMMIT="bbfd2dc2bd088efc63747d903edd41fe692531d8"
+NAMESPACE='onap'
+SSH_USER=${SSH_USER:-"opnfv"}
+SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# use identity file from the environment SSH_IDENTITY
+if [ -n "$SSH_IDENTITY" ] ; then
+ SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS"
+ ANSIBLE_IDENTITY="--private-key=$SSH_IDENTITY"
+fi
+
+KUBESPRAY_OPTIONS='-e "kubelet_max_pods=250"'
+
+TMP_POD_LIST='/tmp/onap_pod_list.txt'
+
+case "$ONAP_BRANCH" in
+ "beijing")
+ HELM_VERSION=2.8.2
+ ;;
+ "casablanca")
+ HELM_VERSION=2.9.1
+ ;;
+ *)
+ HELM_VERSION=2.9.1
+ ;;
+esac
+
+ONAP_MINIMAL="aai dmaap portal robot sdc sdnc so vid"
+# by default install minimal ONAP installation
+# empty list of ONAP_COMPONENT means full ONAP installation
+ONAP_COMPONENT=${ONAP_COMPONENT:-$ONAP_MINIMAL}
+
+#
+# Functions
+#
+function usage() {
+ echo "usage"
+ cat <<EOL
+Usage:
+ $0 <MASTER> [ <SLAVE1> <SLAVE2> ... ]
+
+ where <MASTER> and <SLAVEx> are IP addresses of servers to be used
+ for ONAP installation.
+
+ Script behavior is affected by following environment variables:
+
+ ONAP_COMPONENT - a list of ONAP components to be installed, empty list
+ will trigger a full ONAP installation
+ VALUE: "$ONAP_COMPONENT"
+
+ ONAP_BRANCH - version of ONAP to be installed (OOM branch version)
+ VALUE: "$ONAP_BRANCH"
+
+ NAMESPACE - name of ONAP namespace in kubernetes cluster
+ VALUE: "$NAMESPACE"
+
+ SSH_USER - user name to be used to access <MASTER> and <SLAVEx>
+ servers
+ VALUE: "$SSH_USER"
+
+ SSH_IDENTITY - (optional) ssh identity file to be used to access
+ <MASTER> and <SLAVEx> servers as a SSH_USER
+ VALUE: "$SSH_IDENTITY"
+
+NOTE: Following must be assured for <MASTER> and <SLAVEx> servers before
+ $0 execution:
+ 1) SSH_USER must be able to access servers via ssh without a password
+ 2) SSH_USER must have a password-less sudo access
+EOL
+}
+
+# Check if server IPs of kubernetes nodes are configured at given server.
+# If it is not the case, then kubespray inventory file must be updated.
+function check_server_ips() {
+ for SERVER_IP in $(grep 'ip=' $1 | sed -re 's/^.*ip=([0-9\.]+).*$/\1/') ; do
+ IP_OK="false"
+ for IP in $(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip a | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g'") ; do
+ if [ "$IP" == "$SERVER_IP" ] ; then
+ IP_OK="true"
+ fi
+ done
+ # access IP (e.g. OpenStack floating IP) is not server local address, so update invetory
+ if [ $IP_OK == "false" ] ; then
+ # get server default GW dev
+ DEV=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip route ls" | grep ^default | sed -re 's/^.*dev (.*)$/\1/')
+ LOCAL_IP=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip -f inet addr show $DEV" | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g')
+ if [ "$LOCAL_IP" == "" ] ; then
+ echo "Can't read local IP for server with IP $SERVER_IP"
+ exit 1
+ fi
+ sed -i'' -e "s/ip=$SERVER_IP/ip=$LOCAL_IP access_ip=$SERVER_IP/" $1
+ fi
+ done
+}
+
+# sanity check
+if [ "$SERVERS" == "" ] ; then
+ usage
+ exit 1
+fi
+
+#
+# Installation
+#
+
+# detect CPU architecture to download correct helm binary
+CPU_ARCH=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "uname -p")
+case "$CPU_ARCH" in
+ "x86_64")
+ ARCH="amd64"
+ ;;
+ "aarch64")
+ ARCH="arm64"
+ ;;
+ *)
+ echo "Unsupported CPU architecture '$CPU_ARCH' was detected."
+ exit 1
+esac
+
+# print configuration
+cat << EOL
+list of configuration options:
+ SERVERS="$SERVERS"
+ ONAP_COMPONENT="$ONAP_COMPONENT"
+ ONAP_BRANCH="$ONAP_BRANCH"
+ NAMESPACE="$NAMESPACE"
+ SSH_USER="$SSH_USER"
+ SSH_IDENTITY="$SSH_IDENTITY"
+ ARCH="$ARCH"
+
+EOL
+
+# install K8S cluster by kubespray
+sudo apt-get -y update
+sudo apt-get -y install git ansible python-jinja2 python3-pip libffi-dev libssl-dev
+git clone https://github.com/kubernetes-incubator/kubespray.git
+cd kubespray
+git checkout $KUBESPRAY_COMMIT
+pip3 install -r requirements.txt
+export CONFIG_FILE=inventory/auto_hosts.ini
+rm $CONFIG_FILE
+python3 contrib/inventory_builder/inventory.py $SERVERS
+check_server_ips $CONFIG_FILE
+cat $CONFIG_FILE
+if ( ! ansible-playbook -i $CONFIG_FILE $KUBESPRAY_OPTIONS -b -u $SSH_USER $ANSIBLE_IDENTITY cluster.yml ) ; then
+ echo "Kubespray installation has failed at $(date)"
+ exit 1
+fi
+
+# use standalone K8S master if there are enough VMs available for the K8S cluster
+SERVERS_COUNT=$(echo $SERVERS | wc -w)
+if [ $SERVERS_COUNT -gt 2 ] ; then
+ K8S_NODES=$SLAVES
+else
+ K8S_NODES=$SERVERS
+fi
+
+echo "INSTALLATION TOPOLOGY:"
+echo "Kubernetes Master: $MASTER"
+echo "Kubernetes Nodes: $K8S_NODES"
+echo
+echo "CONFIGURING NFS ON SLAVES"
+echo "$SLAVES"
+
+for SLAVE in $SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS &
+ sudo su
+ apt-get install nfs-common -y
+ mkdir /dockerdata-nfs
+ chmod 777 /dockerdata-nfs
+ echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab
+ mount -a
+ mount | grep dockerdata-nfs
+CONFIGURENFS
+done
+wait
+
+echo "DEPLOYING OOM ON MASTER"
+echo "$MASTER"
+
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY
+sudo su
+echo "create namespace '$NAMESPACE'"
+cat <<EOF | kubectl create -f -
+{
+ "kind": "Namespace",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "$NAMESPACE",
+ "labels": {
+ "name": "$NAMESPACE"
+ }
+ }
+}
+EOF
+kubectl get namespaces --show-labels
+kubectl -n kube-system create sa tiller
+kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
+rm -rf oom
+echo "pulling new oom"
+git clone -b $ONAP_BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+
+# Enable selected ONAP components
+if [ -n "$ONAP_COMPONENT" ] ; then
+ # disable all components and enable only selected in next loop
+ sed -i '/^.*:$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml
+ echo -n "Enable following ONAP components:"
+ for COMPONENT in $ONAP_COMPONENT; do
+ echo -n " \$COMPONENT"
+ sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *false/enabled: true/' onap/values.yaml
+ done
+ echo
+else
+ echo "All ONAP components will be installed"
+fi
+
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz
+mv linux-${ARCH}/helm /usr/local/bin/helm
+helm init --upgrade --service-account tiller
+# run helm server on the background and detached from current shell
+nohup helm serve 0<&- &>/dev/null &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 5m
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+if ( ! helm install local/onap -n dev --namespace $NAMESPACE) ; then
+ echo "ONAP installation has failed at \$(date)"
+ exit 1
+fi
+
+cd ../../
+
+echo "Waiting for ONAP pods to be up \$(date)"
+echo "Ignore failure of sdnc-ansible-server, see SDNC-443"
+function get_onap_pods() {
+ kubectl get pods --namespace $NAMESPACE > $TMP_POD_LIST
+ return \$(cat $TMP_POD_LIST | wc -l)
+}
+FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs
+ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running
+WAIT_PERIOD=60 # wait period in seconds
+MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds
+MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD))
+COUNTER=0
+get_onap_pods
+ALL_PODS=\$?
+PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do
+ # print header every 20th line
+ if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then
+ printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs"
+ fi
+ COUNTER=\$((\$COUNTER+1))
+ printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS
+ sleep \$WAIT_PERIOD
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ ALL_PODS_LIMIT=0
+ fi
+ get_onap_pods
+ ALL_PODS=\$?
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+done
+
+get_onap_pods
+cp $TMP_POD_LIST ~/onap_all_pods.txt
+echo
+echo "========================"
+echo "ONAP INSTALLATION REPORT"
+echo "========================"
+echo
+echo "List of Failed PODs"
+echo "-------------------"
+grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt
+echo
+echo "Summary:"
+echo "--------"
+echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
+cd oom/kubernetes/robot
+./ete-k8s.sh $NAMESPACE health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
+OOMDEPLOY
+
+echo "Finished install, returned from Master at $(date)"
+exit 0
diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh
new file mode 100755
index 0000000..c34eb56
--- /dev/null
+++ b/ci/deploy-onap.sh
@@ -0,0 +1,376 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+# Usage:
+#   deploy-onap.sh <MASTER> <SLAVE1> <SLAVE2>
+#
+# where <MASTER>, <SLAVE1> and <SLAVE2> are IP addresses of servers to be used
+# for ONAP installation.
+#
+# NOTE: Following must be assured for all MASTER and SLAVE servers before
+# deploy-onap.sh execution:
+# 1) ssh access without a password
+#       2) a user account with password-less sudo access must be
+# available - default user is "opnfv"
+
+#
+# Configuration
+#
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+RANCHER_CLI_VER=0.6.11
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+MASTER=$1
+SERVERS=$*
+shift
+SLAVES=$*
+
+BRANCH='beijing'
+ENVIRON='onap'
+
+SSH_USER=${SSH_USER:-"opnfv"}
+SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# by default install full ONAP installation
+ONAP_COMPONENT_DISABLE=${ONAP_COMPONENT_DISABLE:-""}
+# example of minimal ONAP installation
+#ONAP_COMPONENT_DISABLE="clamp cli consul dcaegen2 esr log msb multicloud nbi oof policy uui vfc vnfsdk"
+
+# use identity file from the environment SSH_IDENTITY
+if [ -n "$SSH_IDENTITY" ] ; then
+ SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS"
+fi
+
+#
+# Installation
+#
+
+# use standalone K8S master if there are enough VMs available for the K8S cluster
+SERVERS_COUNT=$(echo $SERVERS | wc -w)
+if [ $SERVERS_COUNT -gt 2 ] ; then
+ RANCHER_SLAVES=$SLAVES
+else
+ RANCHER_SLAVES=$SERVERS
+fi
+
+echo "INSTALLATION TOPOLOGY:"
+echo "Rancher Master: $MASTER"
+echo "Rancher Slaves: $RANCHER_SLAVES"
+echo
+echo "INSTALLING DOCKER ON ALL MACHINES"
+echo "$SERVERS"
+
+for MACHINE in $SERVERS;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<DOCKERINSTALL &
+ sudo -i
+ sysctl -w vm.max_map_count=262144
+ apt-get update -y
+ curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+
+ mkdir -p /etc/systemd/system/docker.service.d/
+ echo "[Service]
+ ExecStart=
+ ExecStart=/usr/bin/dockerd -H fd:// \
+ --insecure-registry=nexus3.onap.org:10001"\
+ > /etc/systemd/system/docker.service.d/docker.conf
+
+ systemctl daemon-reload
+ systemctl restart docker
+ apt-mark hold docker-ce
+
+ for SERVER in $SERVERS;
+ do
+ echo "\$SERVER $ENVIRON\$(echo \$SERVER | cut -d. -f 4 )" >> /etc/hosts
+ done
+
+ hostname $ENVIRON\$(echo $MACHINE | cut -d. -f 4 )
+
+ echo "DOCKER INSTALLED ON $MACHINE"
+DOCKERINSTALL
+done
+wait
+
+echo "INSTALLING RANCHER ON MASTER"
+echo "$MASTER"
+
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<RANCHERINSTALL
+sudo -i
+echo "INSTALL STARTS"
+apt-get install -y jq make htop
+echo "Waiting for 30 seconds at \$(date)"
+sleep 30
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+docker run -d --restart=unless-stopped -p 8080:8080\
+ --name rancher_server rancher/server:v$RANCHER_VERSION
+curl -LO https://storage.googleapis.com/kubernetes-release/\
+release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+mv linux-amd64/helm /usr/local/bin/helm
+
+echo "Installing nfs server"
+# changed from nfs_share to dockerdata-nfs
+apt-get install nfs-kernel-server -y
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports
+service nfs-kernel-server restart
+
+echo "Waiting 10 minutes for Rancher to setup at \$(date)"
+sleep 10m
+echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER"
+wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\
+/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+cp rancher-v${RANCHER_CLI_VER}-rc2/rancher .
+
+API_RESPONSE=\`curl -s 'http://127.0.0.1:8080/v2-beta/apikey'\
+ -d '{"type":"apikey","accountId":"1a1","name":"autoinstall",\
+ "description":"autoinstall","created":null,"kind":null,\
+ "removeTime":null,"removed":null,"uuid":null}'\`
+# Extract and store token
+echo "API_RESPONSE: \${API_RESPONSE}"
+KEY_PUBLIC=\`echo \${API_RESPONSE} | jq -r .publicValue\`
+KEY_SECRET=\`echo \${API_RESPONSE} | jq -r .secretValue\`
+echo "publicValue: \$KEY_PUBLIC secretValue: \$KEY_SECRET"
+
+export RANCHER_URL=http://${MASTER}:8080
+export RANCHER_ACCESS_KEY=\$KEY_PUBLIC
+export RANCHER_SECRET_KEY=\$KEY_SECRET
+
+./rancher env ls
+echo "Creating kubernetes environment named ${ENVIRON}"
+./rancher env create -t kubernetes $ENVIRON > kube_env_id.json
+PROJECT_ID=\$(<kube_env_id.json)
+echo "env id: \$PROJECT_ID"
+
+echo "Waiting for ${ENVIRON} creation - 1 min at \$(date)"
+sleep 1m
+
+export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID
+echo "you should see an additional kubernetes environment"
+./rancher env ls
+
+REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\
+ -H 'Accept: application/json'\
+ -H 'ContentType: application/json'\
+ -d '{"name":"$MASTER"}'\
+ "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\`
+echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE"
+echo "Waiting for the server to finish url configuration - 1 min at \$(date)"
+sleep 1m
+# see registrationUrl in
+REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\`
+echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS"
+REGISTRATION_URL=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].registrationUrl\`
+REGISTRATION_DOCKER=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].image\`
+REGISTRATION_TOKEN=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].token\`
+echo "Registering host for image: \$REGISTRATION_DOCKER\
+ url: \$REGISTRATION_URL registrationToken: \$REGISTRATION_TOKEN"
+HOST_REG_COMMAND=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].command\`
+
+# base64 encode the kubectl token from the auth pair
+# generate this after the host is registered
+KUBECTL_TOKEN=\$(echo -n 'Basic '\$(echo\
+ -n "\$RANCHER_ACCESS_KEY:\$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+echo "KUBECTL_TOKEN base64 encoded: \${KUBECTL_TOKEN}"
+
+# add kubectl config - NOTE: the following spacing has to be "exact"
+# or kubectl will not connect - with a localhost:8080 error
+echo 'apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$MASTER:8080/r/projects/'\$PROJECT_ID'/kubernetes:6443"
+ name: "${ENVIRON}"
+contexts:
+- context:
+ cluster: "${ENVIRON}"
+ user: "${ENVIRON}"
+ name: "${ENVIRON}"
+current-context: "${ENVIRON}"
+users:
+- name: "${ENVIRON}"
+ user:
+ token: "'\${KUBECTL_TOKEN}'" ' > ~/.kube/config
+
+echo "docker run --rm --privileged\
+ -v /var/run/docker.sock:/var/run/docker.sock\
+ -v /var/lib/rancher:/var/lib/rancher\
+ \$REGISTRATION_DOCKER\
+ \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\
+ > /tmp/rancher_register_host
+chown $SSH_USER /tmp/rancher_register_host
+
+RANCHERINSTALL
+
+echo "REGISTER TOKEN"
+HOSTREGTOKEN=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" cat /tmp/rancher_register_host)
+echo "$HOSTREGTOKEN"
+
+echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'"
+echo "$RANCHER_SLAVES"
+
+for MACHINE in $RANCHER_SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<REGISTERHOST &
+ sudo -i
+ $HOSTREGTOKEN
+ sleep 5
+ echo "Host $MACHINE waiting for host registration 5 min at \$(date)"
+ sleep 5m
+REGISTERHOST
+done
+wait
+
+echo "CONFIGURING NFS ON SLAVES"
+echo "$SLAVES"
+
+for SLAVE in $SLAVES;
+do
+ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS &
+ sudo -i
+ apt-get install nfs-common -y
+ mkdir /dockerdata-nfs
+ chmod 777 /dockerdata-nfs
+ echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab
+ mount -a
+ mount | grep dockerdata-nfs
+CONFIGURENFS
+done
+wait
+
+echo "DEPLOYING OOM ON RANCHER WITH MASTER"
+echo "$MASTER"
+TMP_POD_LIST='/tmp/onap_pod_list.txt'
+
+ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY
+sudo -i
+rm -rf oom
+echo "pulling new oom"
+git clone -b $BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+
+# Disable ONAP components
+if [ -n "$ONAP_COMPONENT_DISABLE" ] ; then
+ echo -n "Disable following ONAP components:"
+ for COMPONENT in $ONAP_COMPONENT_DISABLE; do
+ echo -n " \$COMPONENT"
+ sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml
+ done
+ echo
+fi
+
+helm init --upgrade
+# run helm server on the background and detached from current shell
+nohup helm serve 0<&- &>/dev/null &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 5m
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+if ( ! helm install local/onap -n dev --namespace $ENVIRON) ; then
+ echo "ONAP installation has failed at \$(date)"
+ exit 1
+fi
+
+cd ../../
+
+echo "Waiting for ONAP pods to be up \$(date)"
+echo "Ignore failure of sdnc-ansible-server, see SDNC-443"
+function get_onap_pods() {
+ kubectl get pods --namespace $ENVIRON > $TMP_POD_LIST
+ return \$(cat $TMP_POD_LIST | wc -l)
+}
+FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs
+ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running
+WAIT_PERIOD=60 # wait period in seconds
+MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds
+MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD))
+COUNTER=0
+get_onap_pods
+ALL_PODS=\$?
+PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do
+ # print header every 20th line
+ if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then
+ printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs"
+ fi
+ COUNTER=\$((\$COUNTER+1))
+ printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS
+ sleep \$WAIT_PERIOD
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ ALL_PODS_LIMIT=0
+ fi
+ get_onap_pods
+ ALL_PODS=\$?
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l)
+done
+
+get_onap_pods
+cp $TMP_POD_LIST ~/onap_all_pods.txt
+echo
+echo "========================"
+echo "ONAP INSTALLATION REPORT"
+echo "========================"
+echo
+echo "List of Failed PODs"
+echo "-------------------"
+grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt
+echo
+echo "Summary:"
+echo "--------"
+echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
+cd oom/kubernetes/robot
+./ete-k8s.sh $ENVIRON health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
+OOMDEPLOY
+
+echo "Finished install, returned from Master at $(date)"
+exit 0
diff --git a/ci/deploy-opnfv-apex-centos.sh b/ci/deploy-opnfv-apex-centos.sh
new file mode 100644
index 0000000..a3a0433
--- /dev/null
+++ b/ci/deploy-opnfv-apex-centos.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##################################################################################
+## installation of OpenStack via OPNFV Apex/TripleO, on CentOS, virtual deployment
+##################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/virtual.html
+
+echo "*** begin AUTO install: OPNFV Apex/TripleO"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7 or more)"
+cat /etc/*release
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# 3 additional pre-installation preparations, lifted from OPNFV/storperf (they are post-installation there):
+# https://wiki.opnfv.org/display/storperf/LaaS+Setup+For+Development#LaaSSetupForDevelopment-InstallOPNFVApex
+# (may or may not be needed, to enable first-time Apex installation on blank server)
+
+# 1) Install Docker
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install -y docker-ce
+sudo systemctl start docker
+
+# 2) Install docker-compose
+sudo curl -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+
+# 3) Install Python
+sudo yum install -y python-virtualenv
+sudo yum groupinstall -y "Development Tools"
+sudo yum install -y openssl-devel
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+
+# download Apex packages
+echo "*** downloading packages:"
+sudo yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm
+sudo yum -y install epel-release
+# note: EPEL = Extra Packages for Enterprise Linux
+sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo
+
+# install three required RPMs (RedHat/RPM Package Managers); this takes several minutes
+sudo yum -y install http://artifacts.opnfv.org/apex/fraser/opnfv-apex-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-undercloud-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-python34-6.2.noarch.rpm
+
+# clean-up old Apex versions if any
+## precautionary opnfv-clean doesn't work... (even though packages are installed at this point)
+opnfv-clean
+
+# Manage DNS references
+# probably not needed on an already configured server: already has DNS references
+# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "*** printout of /etc/resolv.conf :"
+cat /etc/resolv.conf
+
+# prepare installation directory
+mkdir -p /opt/opnfv-TripleO-apex
+cd /opt/opnfv-TripleO-apex
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# 2 YAML files from /etc/opnfv-apex/ are needed for virtual deploys:
+# 1) network_settings.yaml : may need to update NIC names, to match the NIC names on the deployment server
+# 2) standard scenario file (os-nosdn-nofeature-noha.yaml, etc.), or customized deploy_settings.yaml
+
+# make a local copy of YAML files (not necessary: could deploy from /etc/opnfv-apex); local copies are just for clarity
+# 1) network settings
+cp /etc/opnfv-apex/network_settings.yaml .
+# 2) deploy settings
+# copy one of the 40+ pre-defined scenarios (one of the YAML files)
+# for extra customization, git clone Apex repo, and copy and customize the generic deploy_settings.yaml
+# git clone https://git.opnfv.org/apex
+# cp ./apex/config/deploy/deploy_settings.yaml .
+cp /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml ./deploy_settings.yaml
+# cp /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml ./deploy_settings.yaml
+
+# Note: content of os-nosdn-nofeature-noha.yaml
+# ---
+# global_params:
+# ha_enabled: false
+#
+# deploy_options:
+# sdn_controller: false
+# tacker: true
+# congress: true
+# sfc: false
+# vpn: false
+
+
+# modify NIC names in network settings YAML file, specific to your environment (e.g. replace em1 with ens4f0 in LaaS)
+# Note: actually, this should not matter for a virtual environment
+sed -i 's/em1/ens4f0/' network_settings.yaml
+
+# launch deploy (works if openvswitch module is installed, which may not be the case the first time around)
+echo "*** deploying OPNFV by TripleO/Apex:"
+# --debug for detailed debug info
+# -v: Enable virtual deployment
+# note: needs at least 10G RAM for controllers
+sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml
+# without --debug:
+# sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+
+# with specific sizing:
+# sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml --virtual-compute-ram 32 --virtual-cpus 16 --virtual-computes 4
+
+
+# verify that the openvswitch module is listed:
+lsmod | grep openvswitch
+grep openvswitch < /proc/modules
+
+##{
+## workaround: do 2 successive installations... not exactly optimal...
+## clean up, as now opnfv-clean should work
+#opnfv-clean
+## second deploy try, should succeed (whether first one failed or succeeded)
+#sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+##}
+
+
+
+# verifications: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/verification.html
+
+# {
+# if error after deploy.sh: "libvirt.libvirtError: Storage pool not found: no storage pool with matching name 'default'"
+
+# This usually happens if for some reason you are missing a default pool in libvirt:
+# $ virsh pool-list |grep default
+# You can recreate it manually:
+# $ virsh pool-define-as default dir --target /var/lib/libvirt/images/
+# $ virsh pool-autostart default
+# $ virsh pool-start default
+# }
+
+# {
+# if error after deploy.sh: iptc.ip4tc.IPTCError
+# check Apex jira ticket #521 https://jira.opnfv.org/browse/APEX-521
+# }
+
+# OpenvSwitch should not be missing, as it is a requirement from the RPM package:
+# https://github.com/opnfv/apex/blob/stable/fraser/build/rpm_specs/opnfv-apex-common.spec#L15
+
+
+
+# install python 3 on CentOS
+echo "*** begin install python 3.6 (3.4 should be already installed by default)"
+
+sudo yum -y install python36
+# install pip and setup tools
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+sudo /usr/bin/python3.6 get-pip.py --no-warn-script-location
+
+
+
+echo "*** end AUTO install: OPNFV Apex/TripleO"
+
diff --git a/ci/deploy-opnfv-compass-ubuntu.sh b/ci/deploy-opnfv-compass-ubuntu.sh
new file mode 100644
index 0000000..efccf78
--- /dev/null
+++ b/ci/deploy-opnfv-compass-ubuntu.sh
@@ -0,0 +1,201 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+#################################################################################
+## installation of OpenStack via OPNFV Compass4nfv, on Ubuntu, virtual deployment
+#################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Compass4nfv"
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-compass
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# preemptively install latest pip and clear $PATH cache
+# with apt-get (see apt-get -h and man apt-get for details)
+apt-get -y update
+apt-get -y upgrade
+apt-get -y install python-pip
+pip install --upgrade pip
+hash -r
+apt-get -y install python3-openstackclient
+apt-get -y autoremove
+
+## note: apt is more recent than apt-get (apt was formally introduced with Ubuntu 16.04)
+## APT: Advanced Packaging Tool; apt is more high-level, apt-get has more features;
+# apt -y update # Refreshes repository index
+# apt -y full-upgrade # Upgrades packages with auto-handling of dependencies
+# apt -y install python-pip
+# pip install --upgrade pip
+# hash -r
+# apt -y install python3-openstackclient
+# apt -y autoremove
+
+
+# 2 options: (option 1 is preferable)
+# 1) remain in master branch, use build.sh (which builds a tar ball), then launch deploy.sh
+# 2) download a tar ball and launch deploy.sh in a branch matching the tar ball release (e.g. fraser 6.2)
+
+
+##############
+# OPTION 1: build.sh + deploy.sh in master branch
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+echo "*** begin download Compass4nfv repository"
+git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+cd compass4nfv
+
+# launch build script
+echo "*** begin Compass4nfv build:"
+./build.sh |& tee log1-Build.txt
+
+# edit in deploy.sh specific to OPTION 1
+# set path to ISO file (tar ball), as built by build.sh previously
+# absolute path to tar ball file URL (MUST be absolute path)
+sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/compass4nfv/work/building/compass.tar.gz' deploy.sh
+
+# END OPTION 1
+##############
+
+
+##############
+# OPTION 2: tar ball + deploy.sh in matching releases/branches
+
+# download tarball of a certain release/version
+#echo "*** begin download Compass4nfv tar ball"
+#wget http://artifacts.opnfv.org/compass4nfv/fraser/opnfv-6.2.tar.gz
+# note: list of tar ball (ISO) files from Compass4NFV in https://artifacts.opnfv.org/compass4nfv.html
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+#echo "*** begin download Compass4nfv repository"
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+#cd compass4nfv
+# note: list of compass4nfv branch names in https://gerrit.opnfv.org/gerrit/#/admin/projects/compass4nfv,branches
+# checkout to branch (or tag) matching the tarball release
+#git checkout stable/fraser
+
+# edit in deploy.sh specific to OPTION 2
+# set path to ISO file (tar ball), as downloaded previously
+# absolute path to tar ball file URL (MUST be absolute path)
+# sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/opnfv-6.2.tar.gz' deploy.sh
+
+# END OPTION 2
+##############
+
+
+# edit remaining deploy.sh entries as needed
+
+# set operating system version: Ubuntu Xenial Xerus
+sed -i '/#export OS_VERSION=xenial\/centos7/a export OS_VERSION=xenial' deploy.sh
+
+# set path to OPNFV scenario / DHA (Deployment Hardware Adapter) YAML file
+# here, os-nosdn-nofeature-noha scenario
+sed -i '/#export DHA=/a export DHA=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/os-nosdn-nofeature-noha.yml' deploy.sh
+
+# set path to network YAML file
+sed -i '/#export NETWORK=/a export NETWORK=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/network.yml' deploy.sh
+
+# append parameters for virtual machines (for virtual deployments); e.g., 2 nodes for NOHA scenario, 5 for HA, etc.
+# note: this may not be needed in a future release of Compass4nfv
+
+# VIRT_NUMBER – the number of nodes for virtual deployment.
+# VIRT_CPUS – the number of CPUs allocated per virtual machine.
+# VIRT_MEM – the memory size (MB) allocated per virtual machine.
+# VIRT_DISK – the disk size allocated per virtual machine.
+
+# if OPTION 1 (master): OPENSTACK_VERSION is queens, so add the VIRT_NUMBER line after the queens match
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_CPUS=4' deploy.sh
+sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh
+
+# if OPTION 2 (stable/fraser): OPENSTACK_VERSION is pike, so add the VIRT_NUMBER line after the pike match
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_CPUS=4' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_NUMBER=5' deploy.sh
+
+
+# launch deploy script
+echo "*** begin Compass4nfv deploy:"
+./deploy.sh |& tee log2-Deploy.txt
+
+
+
+
+# To access OpenStack Horizon GUI in Virtual deployment
+# source: https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass
+
+# confirm IP@ of the current server (jump server, such as 10.10.100.xyz on LaaS: 10.10.100.42 for hpe32, etc.)
+external_nic=$(ip route |grep '^default'|awk '{print $5F}')
+echo "external_nic: $external_nic"
+ip addr show "$external_nic"
+
+# Config IPtables rules: pick an unused port number, e.g. 50000+machine number, 50032 for hpe32 at 10.10.100.42
+# 192.16.1.222:443 is the OpenStack Horizon GUI after a Compass installation
+# syntax: iptables -t nat -A PREROUTING -d $EX_IP -p tcp --dport $PORT -j DNAT --to 192.16.1.222:443
+# (note: this could be automated: retrieve IP@, pick port number)
+
+# example: hpe15
+# iptables -t nat -A PREROUTING -d 10.10.100.25 -p tcp --dport 50015 -j DNAT --to 192.16.1.222:443
+# example: hpe33
+# iptables -t nat -A PREROUTING -d 10.10.100.43 -p tcp --dport 50033 -j DNAT --to 192.16.1.222:443
+
+# display IPtables NAT rules
+iptables -t nat -L
+
+# Enter https://$EX_IP:$PORT in your browser to visit the OpenStack Horizon dashboard
+# examples: https://10.10.100.25:50015 , https://10.10.100.43:50033
+# The default user is "admin"
+# to get the Horizon password for "admin":
+sudo docker cp compass-tasks:/opt/openrc ./
+sudo cat openrc | grep OS_PASSWORD
+source ./openrc
+
+# for OpenStack CLI (generic content from openrc)
+export OS_ENDPOINT_TYPE=publicURL
+export OS_INTERFACE=publicURL
+export OS_USERNAME=admin
+export OS_PROJECT_NAME=admin
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://192.16.1.222:5000/v3
+export OS_NO_CACHE=1
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_REGION_NAME=RegionOne
+
+# For openstackclient
+export OS_IDENTITY_API_VERSION=3
+export OS_AUTH_VERSION=3
+
+
+
+echo "*** end AUTO install: OPNFV Compass4nfv"
+
diff --git a/ci/deploy-opnfv-daisy-centos.sh b/ci/deploy-opnfv-daisy-centos.sh
new file mode 100644
index 0000000..664ba55
--- /dev/null
+++ b/ci/deploy-opnfv-daisy-centos.sh
@@ -0,0 +1,179 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+
+###############################################################################
+## installation of OpenStack via OPNFV Daisy4nfv, on CentOS, virtual deployment
+###############################################################################
+# reference manual: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/index.html#daisy-installation
+# page for virtual deployment: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Daisy4nfv"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7.2 or more)"
+cat /etc/*release
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# download tools: git, kvm, libvirt, python-yaml
+sudo yum -y install git
+sudo yum -y install kvm
+sudo yum -y install libvirt
+sudo yum info libvirt
+sudo yum info qemu-kvm
+sudo yum -y install python-yaml
+
+
+# make sure SELinux is enforced (Security-Enhanced Linux)
+sudo setenforce 1
+echo "getenforce: $(getenforce)"
+
+# Restart the libvirtd daemon:
+sudo service libvirtd restart
+# Verify if the kvm module is loaded, you should see amd or intel depending on the hardware:
+lsmod | grep kvm
+# Note: to test, issue a virsh command to ensure local root connectivity:
+# sudo virsh sysinfo
+
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+# prepare Daisy installation directory
+export INSTALLDIR=/opt/opnfv-daisy
+mkdir $INSTALLDIR
+cd $INSTALLDIR
+
+# oslo-config, needed in daisy/deploy/get_conf.py
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+python get-pip.py --no-warn-script-location
+pip install --upgrade oslo-config
+
+
+# retrieve Daisy4nfv repository
+git clone https://gerrit.opnfv.org/gerrit/daisy
+cd daisy
+
+
+
+# OPTION 1: master repo and latest bin file: May 17th 2018
+# Download latest bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+curl http://artifacts.opnfv.org/daisy/opnfv-2018-05-17_14-00-32.bin -o opnfv.bin
+# make opnfv.bin executable
+chmod 777 opnfv.bin
+
+# OPTION 2: stable release: Fraser 6.0 (so, checkout to stable Fraser release opnfv-6.0)
+# Download matching bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+#git checkout opnfv.6.0 # as per Daisy4nfv instructions, but does not work
+#git checkout stable/fraser
+#curl http://artifacts.opnfv.org/daisy/fraser/opnfv-6.0.iso -o opnfv.bin
+# make opnfv.bin executable
+#chmod 777 opnfv.bin
+
+
+
+# The deploy.yaml file is the inventory template of deployment nodes:
+# error from doc: "./deploy/conf/vm_environment/zte-virtual1/deploy.yml"
+# correct path: "./deploy/config/vm_environment/zte-virtual1/deploy.yml"
+# You can write your own name/roles reference into it:
+# name – Host name for deployment node after installation.
+# roles – Components deployed.
+# note: ./templates/virtual_environment/ contains xml files, for networks and VMs
+
+
+# prepare config dir for Auto lab in daisy dir, and copy deploy and network YAML files from default files (virtual1 or virtual2)
+export AUTO_DAISY_LAB_CONFIG1=labs/auto_daisy_lab/virtual1/daisy/config
+export DAISY_DEFAULT_ENV1=deploy/config/vm_environment/zte-virtual1
+mkdir -p $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/deploy.yml $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/network.yml $AUTO_DAISY_LAB_CONFIG1
+
+export AUTO_DAISY_LAB_CONFIG2=labs/auto_daisy_lab/virtual2/daisy/config
+export DAISY_DEFAULT_ENV2=deploy/config/vm_environment/zte-virtual2
+mkdir -p $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/deploy.yml $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/network.yml $AUTO_DAISY_LAB_CONFIG2
+
+# Note:
+# - zte-virtual1 config files deploy openstack with five nodes (3 LB nodes and 2 compute nodes).
+# - zte-virtual2 config files deploy an all-in-one openstack
+
+# run deploy script, scenario os-nosdn-nofeature-ha, multinode OpenStack
+sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual1 -s os-nosdn-nofeature-ha
+
+# run deploy script, scenario os-nosdn-nofeature-noha, all-in-one OpenStack
+# sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual2 -s os-nosdn-nofeature-noha
+
+
+# Notes about deploy.sh:
+# The value after -L should be an absolute path which points to the directory which includes $AUTO_DAISY_LAB_CONFIG directory.
+# The value after -p parameter (virtual1 or virtual2) should match the one selected for $AUTO_DAISY_LAB_CONFIG.
+# The value after -l parameter (e.g. auto_daisy_lab) should match the lab name selected for $AUTO_DAISY_LAB_CONFIG, after labs/ .
+# Scenario (-s parameter): “os-nosdn-nofeature-ha” is used for deploying multinode openstack (virtual1)
+# Scenario (-s parameter): “os-nosdn-nofeature-noha” used for deploying all-in-one openstack (virtual2)
+
+# more details on deploy.sh OPTIONS:
+# -B PXE Bridge for booting Daisy Master, optional
+# -D Dry-run, does not perform deployment, will be deleted later
+# -L Securelab repo absolute path, optional
+# -l LAB name, necessary
+# -p POD name, necessary
+# -r Remote workspace in target server, optional
+# -w Workdir for temporary usage, optional
+# -h Print this message and exit
+# -s Deployment scenario
+# -S Skip recreate Daisy VM during deployment
+
+# When deployed successfully, the floating IP of openstack is 10.20.11.11, the login account is “admin” and the password is “keystone”
diff --git a/ci/deploy-opnfv-fuel-ubuntu.sh b/ci/deploy-opnfv-fuel-ubuntu.sh
new file mode 100644
index 0000000..db276b2
--- /dev/null
+++ b/ci/deploy-opnfv-fuel-ubuntu.sh
@@ -0,0 +1,199 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##############################################################################
+## installation of OpenStack via OPNFV Fuel/MCP, on Ubuntu, virtual deployment
+##############################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/installation.instruction.html#opnfv-software-installation-and-deployment
+
+# Steps:
+# step 1: download Fuel/MCP repository and run deploy script
+# (this example: x86, virtual deploy, os-nosdn-nofeature-noha scenario)
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+# step 3: add more resources to OpenStack instance (vCPUs, RAM)
+# step 4: download Auto repository
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+
+echo "*** begin AUTO install: OPNFV Fuel/MCP"
+
+
+# step 1: download Fuel/MCP repository and run deploy script
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-fuel
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# get Fuel repository
+git clone https://git.opnfv.org/fuel
+# cd in new fuel repository, which contains directories: mcp, ci, etc.
+# note: this is for x86_64 architectures; for aarch64 architectures, git clone https://git.opnfv.org/armband and cd armband instead
+cd fuel
+
+# edit NOHA scenario YAML file with more resources for compute nodes: 32 vCPUs, 192G RAM
+{ printf " cmp01:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n";\
+ printf " cmp02:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n"; } >> mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+# provide more storage space to VMs: 350G per compute node (default is 100G)
+sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+# launch OPNFV Fuel/MCP deploy script
+ci/deploy.sh -l local -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+
+
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+
+# install python 3 on Ubuntu
+echo "*** begin install python 3"
+sudo apt-get -y update
+sudo apt-get -y install python3
+# maybe clean-up packages
+# sudo apt -y autoremove
+# specific install of a python version, e.g. 3.6
+# sudo apt-get install python3.6
+
+# http://docs.python-guide.org/en/latest/starting/install3/linux/
+# sudo apt-get install software-properties-common
+# sudo add-apt-repository ppa:deadsnakes/ppa
+# sudo apt-get update
+# sudo apt-get install python3.6
+echo "python2 --version: $(python2 --version)"
+echo "python3 --version: $(python3 --version)"
+echo "which python: $(which python)"
+
+# install pip3 for python3; /usr/local/bin/pip3 vs. /usr/bin/pip3; solve with "hash -r"
+echo "*** begin install pip3 for python3"
+apt -y install python3-pip
+hash -r
+pip3 install --upgrade pip
+hash -r
+
+echo "\$PATH: $PATH"
+echo "which pip: $(which pip)"
+echo "which pip3: $(which pip3)"
+
+# install OpenStack SDK Python client
+echo "*** begin install OpenStack SDK Python client"
+pip3 install openstacksdk
+pip3 install --upgrade openstacksdk
+
+# install OpenStack CLI
+echo "*** begin install OpenStack CLI"
+pip3 install python-openstackclient
+pip3 install --upgrade python-openstackclient
+
+pip3 install --upgrade python-keystoneclient
+pip3 install --upgrade python-neutronclient
+pip3 install --upgrade python-novaclient
+pip3 install --upgrade python-glanceclient
+pip3 install --upgrade python-cinderclient
+
+# install OpenStack Heat (may not be installed by default), may be useful for VNF installation
+#apt install python3-heatclient
+echo "*** begin install OpenStack Heat"
+pip3 install --upgrade python-heatclient
+
+# package verification printouts
+echo "*** begin package verification printouts"
+pip3 list
+pip3 show openstacksdk
+pip3 check
+
+
+
+# step 3: add more resources to OpenStack instance
+
+# now that OpenStack CLI is installed, finish Fuel/MCP installation:
+# take extra resources indicated in os-nosdn-nofeature-noha.yaml into account as quotas in the OpenStack instance
+# (e.g. 2 compute nodes with 32 vCPUs and 192G RAM each => 64 cores and 384G=393,216M RAM)
+# enter environment variables hard-coded here, since always the same for Fuel/MCP; there could be better ways to do this :)
+
+export OS_AUTH_URL=http://10.16.0.107:5000/v3
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+export OS_USERNAME="admin"
+export OS_PASSWORD="opnfv_secret"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
+
+# at this point, openstack CLI commands should work
+echo "*** finish install OPNFV Fuel/MCP"
+openstack quota set --cores 64 admin
+openstack quota set --ram 393216 admin
+
+
+
+# step 4: download Auto repository
+
+# install OPNFV Auto
+# prepare install directory
+echo "*** begin install OPNFV Auto"
+mkdir -p /opt/opnfv-Auto
+cd /opt/opnfv-Auto
+# get Auto repository from Gerrit
+git clone https://gerrit.opnfv.org/gerrit/auto
+# cd in new auto repository, which contains directories: lib, setup, ci, etc.
+cd auto
+
+
+
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+# download images used by script, unless downloading images from URL works from the script
+echo "*** begin download images"
+cd setup/VIMs/OpenStack
+mkdir images
+cd images
+#CirrOS
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
+# Ubuntu 16.04 LTS (Xenial Xerus)
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img
+# Ubuntu 14.04.5 LTS (Trusty Tahr)
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img
+
+# launch script to populate the OpenStack instance
+echo "*** begin populate OpenStack instance with ONAP objects"
+cd ..
+python3 auto_script_config_openstack_for_onap.py
+
+echo "*** end AUTO install: OPNFV Fuel/MCP"
+
diff --git a/ci/plot-results.sh b/ci/plot-results.sh
new file mode 100755
index 0000000..22ab1d6
--- /dev/null
+++ b/ci/plot-results.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright 2017-2018 Intel Corporation., Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for graphical representation of AUTO result summaries
+#
+# Usage:
+#   ./plot-results.sh [directory]
+#
+# where:
+# "directory" is an optional directory name, where summary of auto
+# installation report is stored
+# Default value: "$HOME/auto_ci_daily_logs"
+
+NUMBER_OF_RESULTS=50 # max number of recent results to be compared in graph
+DIR="$HOME/auto_ci_daily_logs"
+
+function clean_data() {
+ rm -rf summary.csv
+ rm -rf graph*plot
+ rm -rf graph*txt
+ rm -rf graph*png
+}
+
+function prepare_data() {
+ FIRST=1
+ CSV_LIST=$(ls -1 ${DIR}/deploy_summary*csv | tail -n ${NUMBER_OF_RESULTS})
+ for result_file in $CSV_LIST ; do
+ tmp_dir=`dirname $result_file`
+ TIMESTAMP=`basename $tmp_dir | cut -d'_' -f2-`
+ if [ $FIRST -eq 1 ] ; then
+ head -n1 $result_file > summary.csv
+ FIRST=0
+ fi
+ tail -n+2 ${result_file} >> summary.csv
+ done
+}
+
+function plot_data() {
+ echo "Created graphs:"
+ for TYPE in png txt; do
+ for GRAPH in "graph_pods" "graph_tcs" ; do
+ OUTPUT="$GRAPH.plot"
+ GRAPH_NAME="${GRAPH}.${TYPE}"
+ cat > $OUTPUT <<- EOM
+set datafile separator ","
+set xdata time
+set timefmt "%Y%m%d_%H%M%S"
+set format x "%m-%d"
+set xlabel "date"
+set format y "%8.0f"
+EOM
+ if [ "$TYPE" == "png" ] ; then
+ echo 'set term png size 1024,768' >> $OUTPUT
+ else
+ echo 'set term dumb 100,30' >> $OUTPUT
+ fi
+
+ if [ "$GRAPH" == "graph_pods" ] ; then
+ echo 'set ylabel "PODs"' >> $OUTPUT
+ echo 'set yrange [0:]' >> $OUTPUT
+ echo "set title \"ONAP K8S PODs\"" >> $OUTPUT
+ COL1=3
+ COL2=4
+ else
+ echo 'set ylabel "testcases"' >> $OUTPUT
+ echo 'set yrange [0:]' >> $OUTPUT
+ echo "set title \"ONAP Health TestCases\"" >> $OUTPUT
+ COL1=5
+ COL2=6
+ fi
+
+ iter=0
+ echo "set output \"$GRAPH_NAME\"" >> $OUTPUT
+ echo -n "plot " >> $OUTPUT
+ echo $"'summary.csv' using 1:$COL1 with linespoints title columnheader($COL1) \\" >> $OUTPUT
+ echo $", 'summary.csv' using 1:$COL2 with linespoints title columnheader($COL2) \\" >> $OUTPUT
+ gnuplot $OUTPUT
+ echo -e "\t$GRAPH_NAME"
+ done
+ done
+}
+
+#
+# Main body
+#
+clean_data
+prepare_data
+plot_data
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..3c4453e
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import *
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..ba6ee9d
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: AUTO
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..9e0614b
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,18 @@
+.. _auto:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV Auto (ONAP-Automated OPNFV)
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/configguide/index
+ release/userguide/index
+ release/release-notes/index
diff --git a/docs/release/configguide/Auto-featureconfig.rst b/docs/release/configguide/Auto-featureconfig.rst
index ed68069..15126a8 100644
--- a/docs/release/configguide/Auto-featureconfig.rst
+++ b/docs/release/configguide/Auto-featureconfig.rst
@@ -14,12 +14,20 @@ and provides guidelines on how to perform configurations and additional installa
Goal
====
-The goal of `Auto <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/release-notes/index.html#auto-releasenotes>`_
+The goal of :ref:`Auto <auto-releasenotes>`
+
installation and configuration is to prepare an environment where the
-`Auto use cases <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
-can be assessed, i.e. where the corresponding test cases can be executed and their results can be collected.
+:ref:`Auto use cases <auto-userguide>`
+
+can be assessed, i.e. where the corresponding test cases can be executed and their results can be collected for analysis.
+See the :ref:`Auto Release Notes <auto-releasenotes>`
+
+for a discussion of the test results analysis loop.
An instance of ONAP needs to be present, as well as a number of deployed VNFs, in the scope of the use cases.
+Simulated traffic needs to be generated, and then test cases can be executed. There are multiple parameters to
+the Auto environment, and the same set of test cases will be executed on each environment, so as to be able to
+evaluate the influence of each environment parameter.
The initial Auto use cases cover:
@@ -29,35 +37,43 @@ The initial Auto use cases cover:
* **Enterprise vCPE** (automation, cost optimization, and performance assurance of enterprise connectivity to Data Centers
and the Internet)
-The general idea of Auto is to install an OPNFV environment (comprising at least one Cloud Manager),
+The general idea of the Auto feature configuration is to install an OPNFV environment (comprising at least one Cloud Manager),
an ONAP instance, ONAP-deployed VNFs as required by use cases, possibly additional cloud managers not
already installed during the OPNFV environment setup, traffic generators, and the Auto-specific software
-for the use cases (which can include test frameworks such as `Robot <http://robotframework.org/>`_ or
-`Functest <http://docs.opnfv.org/en/latest/submodules/functest/docs/release/release-notes/index.html#functest-releasenotes>`_).
+for the use cases (which can include test frameworks such as `Robot <http://robotframework.org/>`_ or :doc:`Functest <functest:release/release-notes>`
+
The ONAP instance needs to be configured with policies and closed-loop controls (also as required by use cases),
and the test framework controls the execution and result collection of all the test cases. Then, test case execution
-results are analyzed, so as to fine-tune policies and closed-loop controls.
+results can be analyzed, so as to fine-tune policies and closed-loop controls, and to compare environment parameters.
-The following diagram illustrates two execution environments, for x86 architectures and for Arm architectures.
+The following diagram illustrates execution environments, for x86 architectures and for Arm architectures,
+and other environment parameters (see the Release Notes for a more detailed discussion on the parameters).
The installation process depends on the underlying architecture, since certain components may require a
specific binary-compatible version for a given x86 or Arm architecture. The preferred variant of ONAP is one
that runs on Kubernetes, while all VNF types are of interest to Auto: VM-based or containerized (on any cloud
-manager), for x86 or for Arm. The initial VM-based VNFs will cover OpenStack, and in future versions,
-additional cloud managers will be considered. The configuration of ONAP and of test cases should not depend
-on the architecture.
+manager), for x86 or for Arm. In fact, even PNFs could be considered, to support the evaluation of hybrid PNF/VNF
+transition deployments (ONAP has the ability of also managing legacy PNFs).
+
+The initial VM-based VNFs will cover OpenStack, and in future Auto releases, additional cloud managers will be considered.
+The configuration of ONAP and of test cases should not depend on the underlying architecture and infrastructure.
.. image:: auto-installTarget-generic.png
-For each component, various installer tools will be selected (based on simplicity and performance), and
-may change from one Auto release to the next. For example, the most natural installer for ONAP should be
-OOM (ONAP Operations Manager).
+For each component, various installer tools will be considered (as environment parameters), so as to enable comparison,
+as well as ready-to-use setups for Auto end-users. For example, the most natural installer for ONAP would be
+OOM (ONAP Operations Manager). For the OPNFV infrastructure, supported installer projects will be used: Fuel/MCP,
+Compass4NFV, Apex/TripleO, Daisy4NFV. Note that JOID was last supported in OPNFV Fraser 6.2, and is not supported
+anymore as of Gambia 7.0.
The initial version of Auto will focus on OpenStack VM-based VNFs, onboarded and deployed via ONAP API
(not by ONAP GUI, for the purpose of automation). ONAP is installed on Kubernetes. Two or more servers from LaaS
are used: one or more to support an OpenStack instance as provided by the OPNFV installation via Fuel/MCP or other
-OPNFV installers (Compass4NFV, Apex/TripleO, Daisy4NFV, JOID), and the other(s) to support ONAP with Kubernetes
+OPNFV installers (Compass4NFV, Apex/TripleO, Daisy4NFV), and the other(s) to support ONAP with Kubernetes
and Docker. Therefore, the VNF execution environment is composed of the server(s) with the OpenStack instance(s).
+Initial tests will also include ONAP instances installed on bare-metal servers (i.e. not directly on an OPNFV
+infrastructure; the ONAP/OPNFV integration can start at the VNF environment level; but ultimately, ONAP should
+be installed within an OPNFV infrastructure, for full integration).
.. image:: auto-installTarget-initial.png
@@ -75,12 +91,17 @@ SDK, or OpenStack CLI, or even OpenStack Heat templates) would populate the Open
.. image:: auto-OS-config4ONAP.png
+That script can also delete these created objects, so it can be used in tear-down procedures as well
+(use -del or --delete option). It is located in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ ,
+under the setup/VIMs/OpenStack directory:
+
+* auto_script_config_openstack_for_onap.py
Jenkins (or more precisely JJB: Jenkins Job Builder) will be used for Continuous Integration in OPNFV releases,
to ensure that the latest master branch of Auto is always working. The first 3 tasks in the pipeline would be:
-install OpenStack instance via OPNFV installer (Fuel/MCP for example), configure the OpenStack instance for ONAP,
-install ONAP (using the OpenStack instance network IDs in the ONAP YAML file).
+install OpenStack instance via an OPNFV installer (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV), configure
+the OpenStack instance for ONAP, install ONAP (using the OpenStack instance network IDs in the ONAP YAML file).
Moreover, Auto will offer an API, which can be imported as a module, and can be accessed for example
by a web application. The following diagram shows the planned structure for the Auto Git repository,
@@ -96,8 +117,9 @@ Pre-configuration activities
The following resources will be required for the initial version of Auto:
* at least two LaaS (OPNFV Lab-as-a-Service) pods (or equivalent in another lab), with their associated network
- information. Later, other types of target pods will be supported, such as clusters (physical bare metal or virtual).
- The pods can be either x86 or Arm CPU architectures.
+ information. Later, other types of target pods will be supported, such as clusters (physical bare-metal or virtual).
+ The pods can be either x86 or Arm CPU architectures. An effort is currently ongoing (ONAP Integration team, and Auto team),
+ to ensure Arm binaries are available for all ONAP components in the official ONAP Docker registry.
* the `Auto Git repository <https://git.opnfv.org/auto/tree/>`_
(clone from `Gerrit Auto <https://gerrit.opnfv.org/gerrit/#/admin/projects/auto>`_)
@@ -106,7 +128,14 @@ The following resources will be required for the initial version of Auto:
Hardware configuration
======================
-<TBC; large servers, at least 512G RAM, 1TB storage, 80-100 CPU threads>
+ONAP needs relatively large servers (at least 512G RAM, 1TB storage, 80-100 CPU threads). Initial deployment
+attempts on single servers did not complete. Current attempts use 3-server clusters, on bare-metal.
+
+For initial VNF deployment environments, virtual deployments by OPNFV installers on a single server should suffice.
+Later, if many large VNFs are deployed for the Auto test cases, and if heavy traffic is generated, more servers
+might be necessary. Also, if many environment parameters are considered, full executions of all test cases
+on all environment configurations could take a long time, so parallel executions of independent test case batches
+on multiple sets of servers and clusters might be considered.
@@ -123,10 +152,10 @@ Current Auto work in progress is captured in the
OPNFV with OpenStack
~~~~~~~~~~~~~~~~~~~~
-The Auto installation uses the Fuel/MCP installer for the OPNFV environment (see the
+The first Auto installation used the Fuel/MCP installer for the OPNFV environment (see the
`OPNFV download page <https://www.opnfv.org/software/downloads>`_).
-The following figure summarizes the two installation cases: virtual or baremetal.
+The following figure summarizes the two installation cases for Fuel: virtual or bare-metal.
This OPNFV installer starts with installing a Salt Master, which then configures
subnets and bridges, and install VMs (e.g., for controllers and compute nodes)
and an OpenStack instance with predefined credentials.
@@ -134,8 +163,8 @@ and an OpenStack instance with predefined credentials.
.. image:: auto-OPFNV-fuel.png
-The Auto version of OPNFV installation configures additional resources for the OpenStack virtual pod,
-as compared to the default installation. Examples of manual steps are as follows:
+The Auto version of OPNFV installation configures additional resources for the OpenStack virtual pod
+(more virtual CPUs and more RAM), as compared to the default installation. Examples of manual steps are as follows:
.. code-block:: console
@@ -185,6 +214,17 @@ Note:
* however, in the case of ARM, the OPNFV installation will fail, because there isn't enough space to install all required packages into
the cloud image.
+Using the above as starting point, Auto-specific scripts have been developed, for each of the 4 OPNFV installers Fuel/MCP,
+Compass4NFV, Apex/TripleO, Daisy4NFV. Instructions for virtual deployments from each of these installers have been used, and
+sometimes expanded and clarified (missing details or steps from the instructions).
+They can be found in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ , under the ci directory:
+
+* deploy-opnfv-fuel-ubuntu.sh
+* deploy-opnfv-compass-ubuntu.sh
+* deploy-opnfv-apex-centos.sh
+* deploy-opnfv-daisy-centos.sh
+
+
ONAP on Kubernetes
~~~~~~~~~~~~~~~~~~
@@ -193,13 +233,13 @@ An ONAP installation on OpenStack has also been investigated, but we focus here
the ONAP on Kubernetes version.
The initial focus is on x86 architectures. The ONAP DCAE component for a while was not operational
-on Kubernetes, and had to be installed separately on OpenStack. So the ONAP instance was a hybrid,
-with all components except DCAE running on Kubernetes, and DCAE running separately on OpenStack.
+on Kubernetes (with ONAP Amsterdam), and had to be installed separately on OpenStack. So the ONAP
+instance was a hybrid, with all components except DCAE running on Kubernetes, and DCAE running
+separately on OpenStack. Starting with ONAP Beijing, DCAE also runs on Kubernetes.
For Arm architectures, specialized Docker images are being developed to provide Arm architecture
-binary compatibility.
-
-The goal for Auto is to use an ONAP instance where DCAE also runs on Kubernetes, for both architectures.
+binary compatibility. See the :ref:`Auto Release Notes <auto-releasenotes>`
+for more details on the availability status of these Arm images in the ONAP Docker registry.
The ONAP reference for this installation is detailed `here <http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_user_guide.html>`_.
@@ -218,6 +258,12 @@ Examples of manual steps for the deploy procedure are as follows:
9 cd ../oneclick
10 ./createAll.bash -n onap
+Several automation efforts to integrate the ONAP installation in Auto CI are in progress.
+One effort involves using a 3-server cluster at OPNFV Pharos LaaS (Lab-as-a-Service).
+The script is available in the `Auto repository <https://git.opnfv.org/auto/tree/>`_ , under the ci directory:
+
+* deploy-onap.sh
+
ONAP configuration
@@ -248,7 +294,9 @@ Traffic Generator configuration
Test Case software installation and execution control
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-<TBC>
+<TBC; mention the management of multiple environments (characterized by their parameters), execution of all test cases
+in each environment, only a subset in official OPNFV CI/CD Jenkins due to size and time limits; then posting and analysis
+of results; failures lead to bug-fixing, successes lead to analysis for comparisons and fine-tuning>
@@ -272,8 +320,8 @@ Auto Wiki pages:
OPNFV documentation on Auto:
-* `Auto release notes <http://docs.opnfv.org/en/latest/release/release-notes.html>`_
-* `Auto use case user guides <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
+* `Auto Release Notes <release-notes>`
+* `Auto use case user guides <auto-userguide>`
Git&Gerrit Auto repositories:
diff --git a/docs/release/configguide/auto-installTarget-initial.png b/docs/release/configguide/auto-installTarget-initial.png
index 380738d..465b468 100644
--- a/docs/release/configguide/auto-installTarget-initial.png
+++ b/docs/release/configguide/auto-installTarget-initial.png
Binary files differ
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
index ba1a3da..07b7ab6 100644
--- a/docs/release/configguide/index.rst
+++ b/docs/release/configguide/index.rst
@@ -10,7 +10,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide
*****************************************************
.. toctree::
- :numbered:
:maxdepth: 3
Auto-featureconfig.rst
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
index e10f497..ed6524d 100644
--- a/docs/release/release-notes/Auto-release-notes.rst
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -7,13 +7,13 @@
Auto Release Notes
==================
-This document provides the release notes for the Fraser release of Auto.
+This document provides the release notes for the Gambia 7.0 release of Auto.
Important notes for this release
================================
-The initial release for Auto was in Fraser 6.0 (project inception: July 2017). This is the second point release, in Fraser 6.2.
+The initial release for Auto was in Fraser 6.0 (project inception: July 2017).
Summary
@@ -23,14 +23,14 @@ Overview
^^^^^^^^
OPNFV is an SDNFV system integration project for open-source components, which so far have been mostly limited to
-the NFVI+VIM as generally described by ETSI.
+the NFVI+VIM as generally described by `ETSI <https://www.etsi.org/technologies-clusters/technologies/nfv>`_.
In particular, OPNFV has yet to integrate higher-level automation features for VNFs and end-to-end Services.
-As an OPNFV project, Auto ("ONAP-Automated OPNFV") will focus on ONAP component integration and verification with
+As an OPNFV project, Auto (*ONAP-Automated OPNFV*) will focus on ONAP component integration and verification with
OPNFV reference platforms/scenarios, through primarily a post-install process, in order to avoid impact to OPNFV
-installer projects. As much as possible, this will use a generic installation/integration process (not specific to
-any OPNFV installer's technology).
+installer projects (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV). As much as possible, this will use a generic
+installation/integration process (not specific to any OPNFV installer's technology).
* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers
robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of
@@ -38,7 +38,15 @@ any OPNFV installer's technology).
Auto aims at validating the business value of ONAP in general, but especially within an OPNFV infrastructure
(integration of ONAP and OPNFV). Business value is measured in terms of improved service quality (performance,
-reliability, ...) and OPEX reduction (VNF management simplification, power consumption reduction, ...).
+reliability, ...) and OPEX reduction (VNF management simplification, power consumption reduction, ...), as
+demonstrated by use cases.
+
+Auto also validates multi-architecture software (binary images and containers) availability of ONAP and OPNFV:
+CPUs (x86, ARM) and Clouds (MultiVIM).
+
+In other words, Auto is a turnkey approach to automatically deploy an integrated open-source virtual network
+based on OPNFV (as infrastructure) and ONAP (as end-to-end service manager), that demonstrates business value
+to end-users (IT/Telco service providers, enterprises).
While all of ONAP is in scope, as it proceeds, the Auto project will focus on specific aspects of this integration
@@ -48,7 +56,7 @@ and verification in each release. Some example topics and work items include:
* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the
upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to
interact with SDN-C. For interaction with multiple cloud infrastructures, the MultiVIM ONAP component will be used.
-* How ONAP leverages OPNFV installers (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV, JOID) to provide a cloud
+* How ONAP leverages OPNFV installers (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV) to provide a cloud
instance (starting with OpenStack) on which to install the tool ONAP
* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control
functions such as Policy Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI
@@ -72,7 +80,7 @@ It is understood that:
.. image:: auto-proj-rn01.png
-The current ONAP architecture overview can be found `here <http://onap.readthedocs.io/en/latest/guides/onap-developer/architecture/onap-architecture.html>`_.
+The current ONAP architecture overview can be found `here <https://onap.readthedocs.io/en/latest/guides/onap-developer/architecture/onap-architecture.html>`_.
For reference, the ONAP-Beijing architecture diagram is replicated here:
@@ -89,17 +97,18 @@ Within OPNFV, Auto leverages tools and collaborates with other projects:
* FuncTest for software verification (CI/CD, Pass/Fail)
* Yardstick for metric management (quantitative measurements)
* VES (VNF Event Stream) and Barometer for VNF monitoring (feed to ONAP/DCAE)
+ * Edge Cloud as use case
* leverage OPNFV tools and infrastructure:
* Pharos as LaaS: transient pods (3-week bookings) and permanent Arm pod (6 servers)
- * possibly other labs from the community
+ * `WorksOnArm <http://worksonarm.com/cluster>`_ (`GitHub link <http://github.com/worksonarm/cluster>`_)
+ * possibly other labs from the community (Huawei pod-12, 6 servers, x86)
* JJB/Jenkins for CI/CD (and follow OPNFV scenario convention)
* Gerrit/Git for code and documents reviewing and archiving (similar to ONAP: Linux Foundation umbrella)
* follow OPNFV releases (Releng group)
-
Testability
^^^^^^^^^^^
@@ -123,21 +132,26 @@ value (i.e., find/determine policies and controls which yield optimized ONAP bus
More precisely, the following list shows parameters that could be applied to an Auto full run of test cases:
* Auto test cases for given use cases
-* OPNFV installer {Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV, JOID}
+* OPNFV installer {Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV}
* OPNFV availability scenario {HA, noHA}
-* cloud where ONAP runs {OpenStack, AWS, GCP, Azure, ...}
-* ONAP installation type {bare metal or virtual server, VM or container, ...} and options {MultiVIM single|distributed, ...}
-* VNFs {vFW, vCPE, vAAA, vDHCP, vDNS, vHSS, ...} and VNF-based services {vIMS, vEPC, ...}
+* environment where ONAP runs {bare metal servers, VMs from clouds (OpenStack, AWS, GCP, Azure, ...), containers}
+* ONAP installation type {bare metal, VM, or container, ...} and options {MultiVIM single|distributed, ...}
+* VNF types {vFW, vCPE, vAAA, vDHCP, vDNS, vHSS, ...} and VNF-based services {vIMS, vEPC, ...}
* cloud where VNFs run {OpenStack, AWS, GCP, Azure, ...}
-* VNF type {VM-based, container}
-* CPU architectures {x86/AMD64, ARM/aarch64} for ONAP software and for VNFs
+* VNF host type {VM, container}
+* CPU architectures {x86/AMD64, ARM/aarch64} for ONAP software and for VNF software; not really important for Auto software;
* pod size and technology (RAM, storage, CPU cores/threads, NICs)
-* traffic types and amounts/volumes
+* traffic types and amounts/volumes; traffic generators (although that should not really matter);
* ONAP configuration {especially policies and closed-loop controls; monitoring types for DCAE: VES, ...}
* versions of every component {Linux OS (Ubuntu, CentOS), OPNFV release, clouds, ONAP, VNFs, ...}
+The diagram below shows Auto parameters:
+
+.. image:: auto-proj-parameters.png
-Illustration of Auto analysis loop based on test case executions:
+
+The next figure is an illustration of the Auto analysis loop (design, configuration, execution, result analysis)
+based on test cases covering as many parameters as possible:
.. image:: auto-proj-tests.png
@@ -150,14 +164,14 @@ Auto currently defines three use cases: Edge Cloud (UC1), Resiliency Improvement
including end-to-end composite services of which a Cloud Manager may not be aware (VMs or containers could be
recovered by a Cloud Manager, but not necessarily an end-to-end service built on top of VMs or containers).
* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with
- SLAs and HA as well as scaling).
+ SLAs and HA, as well as scaling).
The use cases define test cases, which initially will be independent, but which might eventually be integrated to `FuncTest <https://wiki.opnfv.org/display/functest/Opnfv+Functional+Testing>`_.
-Additional use cases can be added in the future, such as vIMS (example: project Clearwater) or residential vHGW (virtual
-Home Gateways). The interest for vHGW is to reduce overall power consumption: even in idle mode, physical HGWs in
-residential premises consume a lot of energy. Virtualizing that service to the Service Provider edge data center would
-allow to minimize that consumption.
+Additional use cases can be added in the future, such as vIMS (example: project `Clearwater <http://www.projectclearwater.org/>`_)
+or residential vHGW (virtual Home Gateways). The interest for vHGW is to reduce overall power consumption: even in idle mode,
+physical HGWs in residential premises consume a lot of energy. Virtualizing that service to the Service Provider edge data center
+would allow to minimize that consumption.
Lab environment
@@ -172,39 +186,53 @@ x86 pod at UNH IOL.
A transition is in progress, to leverage OPNFV LaaS (Lab-as-a-Service) pods (`Pharos <https://labs.opnfv.org/>`_).
These pods can be booked for 3 weeks only (with an extension for a maximum of 2 weeks), so they are not a permanent resource.
-A repeatable automated installation procedure is being developed.
+For ONAP-Beijing, a repeatable automated installation procedure is being developed, using 3 Pharos servers (x86 for now).
+Also, a more permanent ONAP installation is in progress at a Huawei lab (pod-12, consisting of 6 x86 servers,
+1 as jump server, the other 5 with this example allocation: 3 for ONAP components, and 2 for an OPNFV infrastructure:
+OpenStack installed by Compass4NFV).
ONAP-based onboarding and deployment of VNFs is in progress (ONAP-Amsterdam pre-loading of VNFs must still done outside
of ONAP: for VM-based VNFs, users need to prepare OpenStack stacks (using Heat templates), then make an instance snapshot
which serves as the binary image of the VNF).
-An initial version of a script to prepare an OpenStack instance for ONAP (creation of a public and a private network,
-with a router) has been developed. It leverages OpenStack SDK.
+A script to prepare an OpenStack instance for ONAP (creation of a public and a private network, with a router,
+pre-loading of images and flavors, creation of a security group and an ONAP user) has been developed. It leverages
+OpenStack SDK. It has a delete option, so it can be invoked to delete these objects for example in a tear-down procedure.
Integration with Arm servers has started (exploring binary compatibility):
-* OpenStack is currently installed on a 6-server pod of Arm servers
+* The Auto project has a specific 6-server pod of Arm servers, which is currently loaned to ONAP integration team,
+ to build ONAP images
* A set of 14 additional Arm servers was deployed at UNH, for increased capacity
-* Arm-compatible Docker images are in the process of being developed
+* ONAP Docker registry: ONAP-specific images for ARM are being built, with the purpose of populating ONAP nexus2
+ (Maven2 artifacts) and nexus3 (Docker containers) repositories at Linux Foundation. Docker images are
+ multi-architecture, and the manifest of an image may contain 1 or more layers (for example 2 layers: x86/AMD64
+ and ARM/aarch64). One of the ONAP-Casablanca architectural requirements is to be CPU-architecture independent.
+ There are almost 150 Docker containers in a complete ONAP instance. Currently, more disk space is being added
+ to the ARM nodes (configuration of Nova, and/or additional actual physical storage space).
+
-Test case implementation for the three use cases has started.
+Test case design and implementation for the three use cases has started.
OPNFV CI/CD integration with JJD (Jenkins Job Description) has started: see the Auto plan description
-`here <https://wiki.opnfv.org/display/AUTO/CI+Plan+for+Auto>`_. The permanent resource for that is the 6-server Arm
+`here <https://wiki.opnfv.org/display/AUTO/CI+for+Auto>`_. The permanent resource for that is the 6-server Arm
pod, hosted at UNH. The CI directory from the Auto repository is `here <https://git.opnfv.org/auto/tree/ci>`_
+
Finally, the following figure illustrates Auto in terms of project activities:
.. image:: auto-project-activities.png
-Note: a demo was delivered at the OpenStack Summit in Vancouver on May 21st 2018, to illustrate the deployment of a WordPress application
-(WordPress is a platform for websites and blogs) deployed on a multi-architecture cloud (mix of x86 and Arm servers).
+Note: a demo was delivered at the OpenStack Summit in Vancouver on May 21st 2018, to illustrate the deployment of
+a WordPress application (WordPress is a platform for websites and blogs) deployed on a multi-architecture cloud (mix
+of x86 and Arm servers).
This shows how service providers and enterprises can diversify their data centers with servers of different architectures,
-and select architectures best suited to each use case (mapping application components to architectures: DBs, interactive servers,
-number-crunching modules, ...).
+and select architectures best suited to each use case (mapping application components to architectures: DBs,
+interactive servers, number-crunching modules, ...).
This prefigures how other examples such as ONAP, VIMs, and VNFs could also be deployed on heterogeneous multi-architecture
-environments (open infrastructure), orchestrated by Kubernetes. The Auto installation scripts could expand on that approach.
+environments (open infrastructure), orchestrated by Kubernetes. The Auto installation scripts covering all the parameters
+described above could expand on that approach.
.. image:: auto-proj-openstacksummit1805.png
@@ -218,13 +246,13 @@ Release Data
| **Project** | Auto |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/commit-ID** | auto/opnfv-6.2.0 |
+| **Repo/commit-ID** | auto/opnfv-7.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser 6.2 |
+| **Release designation** | Gambia 7.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2018-06-29 |
+| **Release date** | 2018-11-02 |
| | |
+--------------------------------------+--------------------------------------+
| **Purpose of the delivery** | Official OPNFV release |
@@ -273,21 +301,66 @@ Point release 6.2:
* initial scripts for OPNFV CI/CD, registration of Jenkins slave on `Arm pod <https://build.opnfv.org/ci/view/auto/>`_
* updated script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.14
-Notable activities since release 6.1, which may result in new features for Gambia 7.0:
-* researching how to configure multiple Pharos servers in a cluster for Kubernetes
-* started to evaluate Compass4nfv as another OpenStack installer; issues with Python version (2 or 3)
-* common meeting with Functest
-* Plugfest: initiated collaboration with ONAP/MultiVIM (including support for ONAP installation)
+Point release 7.0:
+
+* progress on Docker registry of ONAP's Arm images
+* progress on ONAP installation script for 3-server cluster of UNH servers
+* CI scripts for OPNFV installers: Fuel/MCP (x86), Compass, Apex/TripleO (must run twice)
+* initial CI script for Daisy4NFV (work in progress)
+* JOID script, but supported only until R6.2, not Gambia 7.0
+* completed script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.17
+* use of an additional lab resource for Auto development: 6-server x86 pod (huawei-pod12)
+
+
+
**JIRA TICKETS for this release:**
+
+`JIRA Auto Gambia 7.0.0 Done <https://jira.opnfv.org/issues/?filter=12403>`_
+
+Manual selection of significant JIRA tickets for this version's highlights:
+
+--------------------------------------+--------------------------------------+
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| AUTO-38, auto-resiliency-vif-001: | UC2: validate VM suspension command |
-| 2/3 Test Logic | and measurement of Recovery Time |
+| AUTO-37 | Get DCAE running onto Pharos |
+| | deployment |
++--------------------------------------+--------------------------------------+
+| AUTO-42 | Use Compass4NFV to create an |
+| | OpenStack instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| AUTO-43 | String together scripts for Fuel, |
+| | Tool installation, ONAP preparation |
++--------------------------------------+--------------------------------------+
+| AUTO-44 | Build ONAP components for arm64 |
+| | platform |
++--------------------------------------+--------------------------------------+
+| AUTO-45 | CI: Jenkins definition of verify and |
+| | merge jobs |
++--------------------------------------+--------------------------------------+
+| AUTO-46 | Use Apex to create an OpenStack |
+| | instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| AUTO-47 | Install ONAP with Kubernetes on LaaS |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-48 | Create documentation for ONAP |
+| | deployment with Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-49 | Automate ONAP deployment with |
+| | Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-51 | huawei-pod12: Prepare IDF and PDF |
+| | files |
++--------------------------------------+--------------------------------------+
+| AUTO-52 | Deploy a running ONAP instance on |
+| | huawei-pod12 |
++--------------------------------------+--------------------------------------+
+| AUTO-54 | Use Daisy4nfv to create an OpenStack |
+| | instance on a UNH pod |
+--------------------------------------+--------------------------------------+
| | |
| | |
@@ -318,7 +391,7 @@ Deliverables
Software deliverables
^^^^^^^^^^^^^^^^^^^^^
-6.2 release: in-progress install scripts, CI scripts, and test case implementations.
+7.0 release: in-progress Docker ARM images, install scripts, CI scripts, and test case implementations.
Documentation deliverables
@@ -340,9 +413,6 @@ Known Limitations, Issues and Workarounds
System Limitations
^^^^^^^^^^^^^^^^^^
-* ONAP still to be validated for Arm servers (many Docker images are ready)
-* ONAP installation still to be automated in a repeatable way, and need to configure cluster of Pharos servers
-
Known issues
@@ -392,8 +462,8 @@ None at this point.
References
==========
-For more information on the OPNFV Fraser release, please see:
-http://opnfv.org/fraser
+For more information on the OPNFV Gambia release, please see:
+http://opnfv.org/gambia
Auto Wiki pages:
@@ -403,9 +473,9 @@ Auto Wiki pages:
OPNFV documentation on Auto:
-* `Auto release notes <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/release-notes/index.html#auto-releasenotes>`_
-* `Auto use case user guides <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/userguide/index.html#auto-userguide>`_
-* `Auto configuration guide <http://docs.opnfv.org/en/latest/submodules/auto/docs/release/configguide/index.html#auto-configguide>`_
+* `Auto release notes <auto-releasenotes>`
+* `Auto use case user guides <auto-userguide>`
+* `Auto configuration guide <auto-configguide>`
Git&Gerrit Auto repositories:
diff --git a/docs/release/release-notes/auto-proj-parameters.png b/docs/release/release-notes/auto-proj-parameters.png
new file mode 100644
index 0000000..a0cbe2e
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-parameters.png
Binary files differ
diff --git a/docs/release/release-notes/auto-project-activities.png b/docs/release/release-notes/auto-project-activities.png
index c50bd72..d25ac2a 100644
--- a/docs/release/release-notes/auto-project-activities.png
+++ b/docs/release/release-notes/auto-project-activities.png
Binary files differ
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 264f21c..4c879f7 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -9,7 +9,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Release Notes
===============================================
.. toctree::
- :numbered:
:maxdepth: 3
Auto-release-notes.rst
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index dd308dc..099622c 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -15,7 +15,6 @@ OPNFV Auto (ONAP-Automated OPNFV) User Guide
.. by the installer project.
.. toctree::
- :numbered:
:maxdepth: 3
UC01-feature.userguide.rst
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..69aa189
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck