-rwxr-xr-x  ci/deploy-onap.sh                                        342
-rw-r--r--  ci/deploy-opnfv-apex-centos.sh                           209
-rw-r--r--  ci/deploy-opnfv-compass-ubuntu.sh                        201
-rw-r--r--  ci/deploy-opnfv-daisy-centos.sh                          179
-rw-r--r--  ci/deploy-opnfv-fuel-ubuntu.sh                           199
-rw-r--r--  docs/release/release-notes/Auto-release-notes.rst        173
-rw-r--r--  docs/release/release-notes/auto-proj-parameters.png      bin 0 -> 32716 bytes
-rw-r--r--  docs/release/release-notes/auto-project-activities.png   bin 58789 -> 25995 bytes
8 files changed, 1251 insertions, 52 deletions
diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh
new file mode 100755
index 0000000..e886492
--- /dev/null
+++ b/ci/deploy-onap.sh
@@ -0,0 +1,342 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS
+# environment.
+#
+# Usage:
+# deploy-onap.sh <MASTER> <SLAVE1> <SLAVE2>
+#
+# where <MASTER>, <SLAVE1> and <SLAVE2> are IP addresses of servers to be used
+# for ONAP installation.
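+#
+# Example (hypothetical IP addresses, replace with your lab's servers):
+#        deploy-onap.sh 10.10.100.20 10.10.100.21 10.10.100.22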
+#
+# NOTE: The following must be assured for all MASTER and SLAVE servers before
+# deploy-onap.sh execution:
+# 1) ssh access without a password
+# 2) an "opnfv" user account with password-less sudo access must be
+# available
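+#
+# For reference, a minimal sketch of how these prerequisites could be prepared on
+# each server (one-time manual step; the "opnfv" account name and the local key
+# path below are assumptions, adjust as needed):
+#      ssh-copy-id -i ~/.ssh/id_rsa.pub opnfv@<SERVER_IP>
+#      echo 'opnfv ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/opnfv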
+
+#
+# Configuration
+#
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+RANCHER_CLI_VER=0.6.11
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+MASTER=$1
+SERVERS=$*
+
+BRANCH='master'
+ENVIRON='onap'
+
+#
+# Installation
+#
+echo "INSTALLING DOCKER ON ALL MACHINES"
+echo "$SERVERS"
+
+for MACHINE in $SERVERS;
+do
+ssh opnfv@"$MACHINE" "bash -s" <<DOCKERINSTALL &
+ sudo su
+ apt-get update
+ curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+
+ mkdir -p /etc/systemd/system/docker.service.d/
+ echo "[Service]
+ ExecStart=
+ ExecStart=/usr/bin/dockerd -H fd:// \
+ --insecure-registry=nexus3.onap.org:10001"\
+ > /etc/systemd/system/docker.service.d/docker.conf
+
+ systemctl daemon-reload
+ systemctl restart docker
+ apt-mark hold docker-ce
+
+ for SERVER in $SERVERS;
+ do
+ echo "\$SERVER $ENVIRON\$(echo \$SERVER | cut -d. -f 4 )" >> /etc/hosts
+ done
+
+ hostname $ENVIRON\$(echo $MACHINE | cut -d. -f 4 )
+
+ echo "DOCKER INSTALLED ON $MACHINE"
+DOCKERINSTALL
+done
+wait
+
+echo "INSTALLING RANCHER ON MASTER"
+echo "$MASTER"
+
+ssh opnfv@"$MASTER" "bash -s" <<RANCHERINSTALL &
+sudo su
+apt install jq -y
+echo "Waiting for 30 seconds at \$(date)"
+sleep 30
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+echo "INSTALL STARTS"
+apt-get install make -y
+
+docker run -d --restart=unless-stopped -p 8080:8080\
+ --name rancher_server rancher/server:v$RANCHER_VERSION
+curl -LO https://storage.googleapis.com/kubernetes-release/\
+release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+mv linux-amd64/helm /usr/local/bin/helm
+
+echo "Installing nfs server"
+# changed from nfs_share to dockerdata-nfs
+apt-get install nfs-kernel-server -y
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports
+service nfs-kernel-server restart
+
+echo "Waiting 10 minutes for Rancher to setup at \$(date)"
+sleep 600
+echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER"
+wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\
+/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+cp rancher-v${RANCHER_CLI_VER}-rc2/rancher .
+
+API_RESPONSE=\`curl -s 'http://127.0.0.1:8080/v2-beta/apikey'\
+ -d '{"type":"apikey","accountId":"1a1","name":"autoinstall",\
+ "description":"autoinstall","created":null,"kind":null,\
+ "removeTime":null,"removed":null,"uuid":null}'\`
+# Extract and store token
+echo "API_RESPONSE: \${API_RESPONSE}"
+KEY_PUBLIC=\`echo \${API_RESPONSE} | jq -r .publicValue\`
+KEY_SECRET=\`echo \${API_RESPONSE} | jq -r .secretValue\`
+echo "publicValue: \$KEY_PUBLIC secretValue: \$KEY_SECRET"
+
+export RANCHER_URL=http://${MASTER}:8080
+export RANCHER_ACCESS_KEY=\$KEY_PUBLIC
+export RANCHER_SECRET_KEY=\$KEY_SECRET
+
+./rancher env ls
+echo "Creating kubernetes environment named ${ENVIRON}"
+./rancher env create -t kubernetes $ENVIRON > kube_env_id.json
+PROJECT_ID=\$(<kube_env_id.json)
+echo "env id: \$PROJECT_ID"
+export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID
+echo "you should see an additional kubernetes environment"
+./rancher env ls
+
+REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\
+ -H 'Accept: application/json'\
+ -H 'ContentType: application/json'\
+ -d '{"name":"$MASTER"}'\
+ "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\`
+echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE"
+echo "Waiting for the server to finish url configuration - 1 min at \$(date)"
+sleep 60
+# see registrationUrl in
+REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\`
+echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS"
+REGISTRATION_URL=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].registrationUrl\`
+REGISTRATION_DOCKER=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].image\`
+REGISTRATION_TOKEN=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].token\`
+echo "Registering host for image: \$REGISTRATION_DOCKER\
+ url: \$REGISTRATION_URL registrationToken: \$REGISTRATION_TOKEN"
+HOST_REG_COMMAND=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].command\`
+
+# base64 encode the kubectl token from the auth pair
+# generate this after the host is registered
+KUBECTL_TOKEN=\$(echo -n 'Basic '\$(echo\
+ -n "\$RANCHER_ACCESS_KEY:\$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+echo "KUBECTL_TOKEN base64 encoded: \${KUBECTL_TOKEN}"
+
+# add kubectl config - NOTE: the following spacing has to be "exact"
+# or kubectl will not connect - with a localhost:8080 error
+echo 'apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$MASTER:8080/r/projects/'\$PROJECT_ID'/kubernetes:6443"
+ name: "${ENVIRON}"
+contexts:
+- context:
+ cluster: "${ENVIRON}"
+ user: "${ENVIRON}"
+ name: "${ENVIRON}"
+current-context: "${ENVIRON}"
+users:
+- name: "${ENVIRON}"
+ user:
+ token: "'\${KUBECTL_TOKEN}'" ' > ~/.kube/config
+
+echo "docker run --rm --privileged\
+ -v /var/run/docker.sock:/var/run/docker.sock\
+ -v /var/lib/rancher:/var/lib/rancher\
+ \$REGISTRATION_DOCKER\
+ \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\
+ > /tmp/rancher_register_host
+chown opnfv /tmp/rancher_register_host
+
+RANCHERINSTALL
+wait
+
+echo "REGISTER TOKEN"
+HOSTREGTOKEN=$(ssh opnfv@"$MASTER" cat /tmp/rancher_register_host)
+echo "$HOSTREGTOKEN"
+
+echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'"
+echo "$SERVERS"
+
+for MACHINE in $SERVERS;
+do
+ssh opnfv@"$MACHINE" "bash -s" <<REGISTERHOST &
+ sudo su
+ $HOSTREGTOKEN
+ sleep 5
+ echo "Host $MACHINE waiting for host registration 5 min at \$(date)"
+ sleep 300
+REGISTERHOST
+done
+wait
+
+echo "DEPLOYING OOM ON RANCHER WITH MASTER"
+echo "$MASTER"
+
+ssh opnfv@"$MASTER" "bash -s" <<OOMDEPLOY &
+sudo su
+sysctl -w vm.max_map_count=262144
+rm -rf oom
+echo "pulling new oom"
+git clone -b $BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+helm init --upgrade
+helm serve &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 300
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+helm install local/onap -n dev --namespace $ENVIRON
+cd ../../
+
+echo "Waiting for all pods to be up for 15-80 min at \$(date)"
+FAILED_PODS_LIMIT=0
+MAX_WAIT_PERIODS=480 # 120 MIN
+COUNTER=0
+PENDING_PODS=0
+while [ \$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) \
+-gt \$FAILED_PODS_LIMIT ]; do
+ PENDING=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l)
+ PENDING_PODS=\$PENDING
+ sleep 15
+ LIST_PENDING=\$(kubectl get pods --all-namespaces -o wide | grep -E '0/|1/2' )
+ echo "\${LIST_PENDING}"
+ echo "\${PENDING} pending > \${FAILED_PODS_LIMIT} at the \${COUNTER}th"\
+ " 15 sec interval out of \${MAX_WAIT_PERIODS}"
+ echo ""
+ COUNTER=\$((\$COUNTER + 1 ))
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ fi
+done
+
+echo "Report on non-running containers"
+PENDING=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2')
+PENDING_COUNT=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l)
+PENDING_COUNT_AAI=\$(kubectl get pods -n $ENVIRON | grep aai- \
+| grep -E '0/|1/2' | wc -l)
+
+echo "Check filebeat 2/2 count for ELK stack logging consumption"
+FILEBEAT=\$(kubectl get pods --all-namespaces -a | grep 2/)
+echo "\${FILEBEAT}"
+echo "sleep 5 min - to allow rest frameworks to finish at \$(date)"
+sleep 300
+echo "List of ONAP Modules"
+LIST_ALL=\$(kubectl get pods --all-namespaces -a --show-all )
+echo "\${LIST_ALL}"
+echo "run healthcheck 2 times to warm caches and frameworks"\
+ "so rest endpoints report properly - see OOM-447"
+
+echo "curl with aai cert to cloud-region PUT"
+curl -X PUT https://127.0.0.1:30233/aai/v11/cloud-infrastructure/\
+cloud-regions/cloud-region/CloudOwner/RegionOne \
+--data "@aai-cloud-region-put.json" \
+-H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" \
+-H "X-TransactionId:jimmy-postman" \
+-H "X-FromAppId:AAI" \
+-H "Content-Type:application/json" \
+-H "Accept:application/json" \
+--cacert aaiapisimpledemoopenecomporg_20171003.crt -k
+
+echo "get the cloud region back"
+curl -X GET https://127.0.0.1:30233/aai/v11/cloud-infrastructure/\
+cloud-regions/ \
+-H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" \
+-H "X-TransactionId:jimmy-postman" \
+-H "X-FromAppId:AAI" \
+-H "Content-Type:application/json" \
+-H "Accept:application/json" \
+--cacert aaiapisimpledemoopenecomporg_20171003.crt -k
+
+# OOM-484 - robot scripts moved
+cd oom/kubernetes/robot
+echo "run healthcheck prep 1"
+# OOM-722 adds namespace parameter
+if [ "$BRANCH" == "amsterdam" ]; then
+ ./ete-k8s.sh health > ~/health1.out
+else
+ ./ete-k8s.sh $ENVIRON health > ~/health1.out
+fi
+echo "sleep 5 min at \$(date)"
+sleep 300
+echo "run healthcheck prep 2"
+if [ "$BRANCH" == "amsterdam" ]; then
+ ./ete-k8s.sh health > ~/health2.out
+else
+ ./ete-k8s.sh $ENVIRON health > ~/health2.out
+fi
+echo "run healthcheck for real - wait a further 5 min at \$(date)"
+sleep 300
+if [ "$BRANCH" == "amsterdam" ]; then
+ ./ete-k8s.sh health
+else
+ ./ete-k8s.sh $ENVIRON health
+fi
+OOMDEPLOY
+wait
+echo "Finished install, ruturned from Master"
+exit 0
diff --git a/ci/deploy-opnfv-apex-centos.sh b/ci/deploy-opnfv-apex-centos.sh
new file mode 100644
index 0000000..a3a0433
--- /dev/null
+++ b/ci/deploy-opnfv-apex-centos.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##################################################################################
+## installation of OpenStack via OPNFV Apex/TripleO, on CentOS, virtual deployment
+##################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/virtual.html
+
+echo "*** begin AUTO install: OPNFV Apex/TripleO"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7 or more)"
+cat /etc/*release
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# 3 additional pre-installation preparations, lifted from OPNFV/storperf (they are post-installation there):
+# https://wiki.opnfv.org/display/storperf/LaaS+Setup+For+Development#LaaSSetupForDevelopment-InstallOPNFVApex
+# (may or may not be needed, to enable a first-time Apex installation on a blank server)
+
+# 1) Install Docker
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install -y docker-ce
+sudo systemctl start docker
+
+# 2) Install docker-compose
+sudo curl -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+
+# 3) Install Python
+sudo yum install -y python-virtualenv
+sudo yum groupinstall -y "Development Tools"
+sudo yum install -y openssl-devel
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+
+# download Apex packages
+echo "*** downloading packages:"
+sudo yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm
+sudo yum -y install epel-release
+# note: EPEL = Extra Packages for Enterprise Linux
+sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo
+
+# install three required RPMs (RPM: RPM Package Manager, originally Red Hat Package Manager); this takes several minutes
+sudo yum -y install http://artifacts.opnfv.org/apex/fraser/opnfv-apex-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-undercloud-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-python34-6.2.noarch.rpm
+
+# clean-up old Apex versions if any
+## precautionary opnfv-clean doesn't work... (even though packages are installed at this point)
+opnfv-clean
+
+# Manage DNS references
+# probably not needed on an already configured server: already has DNS references
+# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "*** printout of /etc/resolv.conf :"
+cat /etc/resolv.conf
+
+# prepare installation directory
+mkdir -p /opt/opnfv-TripleO-apex
+cd /opt/opnfv-TripleO-apex
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# 2 YAML files from /etc/opnfv-apex/ are needed for virtual deploys:
+# 1) network_settings.yaml : may need to update NIC names, to match the NIC names on the deployment server
+# 2) standard scenario file (os-nosdn-nofeature-noha.yaml, etc.), or customized deploy_settings.yaml
+
+# make a local copy of YAML files (not necessary: could deploy from /etc/opnfv-apex); local copies are just for clarity
+# 1) network settings
+cp /etc/opnfv-apex/network_settings.yaml .
+# 2) deploy settings
+# copy one of the 40+ pre-defined scenarios (one of the YAML files)
+# for extra customization, git clone Apex repo, and copy and customize the generic deploy_settings.yaml
+# git clone https://git.opnfv.org/apex
+# cp ./apex/config/deploy/deploy_settings.yaml .
+cp /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml ./deploy_settings.yaml
+# cp /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml ./deploy_settings.yaml
+
+# Note: content of os-nosdn-nofeature-noha.yaml
+# ---
+# global_params:
+# ha_enabled: false
+#
+# deploy_options:
+# sdn_controller: false
+# tacker: true
+# congress: true
+# sfc: false
+# vpn: false
+
+
+# modify NIC names in network settings YAML file, specific to your environment (e.g. replace em1 with ens4f0 in LaaS)
+# Note: actually, this should not matter for a virtual environment
+sed -i 's/em1/ens4f0/' network_settings.yaml
+
+# launch deploy (works if openvswitch module is installed, which may not be the case the first time around)
+echo "*** deploying OPNFV by TripleO/Apex:"
+# --debug for detailed debug info
+# -v: Enable virtual deployment
+# note: needs at least 10G RAM for controllers
+sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml
+# without --debug:
+# sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+
+# with specific sizing:
+# sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml --virtual-compute-ram 32 --virtual-cpus 16 --virtual-computes 4
+
+
+# verify that the openvswitch module is listed:
+lsmod | grep openvswitch
+grep openvswitch < /proc/modules
+
+##{
+## workaround: do 2 successive installations... not exactly optimal...
+## clean up, as now opnfv-clean should work
+#opnfv-clean
+## second deploy try, should succeed (whether first one failed or succeeded)
+#sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+##}
+
+
+
+# verifications: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/verification.html
+
+# {
+# if error after deploy.sh: "libvirt.libvirtError: Storage pool not found: no storage pool with matching name 'default'"
+
+# This usually happens if for some reason you are missing a default pool in libvirt:
+# $ virsh pool-list |grep default
+# You can recreate it manually:
+# $ virsh pool-define-as default dir --target /var/lib/libvirt/images/
+# $ virsh pool-autostart default
+# $ virsh pool-start default
+# }
+
+# {
+# if error after deploy.sh: iptc.ip4tc.IPTCError
+# check Apex jira ticket #521 https://jira.opnfv.org/browse/APEX-521
+# }
+
+# OpenvSwitch should not be missing, as it is a requirement from the RPM package:
+# https://github.com/opnfv/apex/blob/stable/fraser/build/rpm_specs/opnfv-apex-common.spec#L15
+
+
+
+# install python 3 on CentOS
+echo "*** begin install python 3.6 (3.4 should be already installed by default)"
+
+sudo yum -y install python36
+# install pip and setup tools
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+sudo /usr/bin/python3.6 get-pip.py --no-warn-script-location
+
+
+
+echo "*** end AUTO install: OPNFV Apex/TripleO"
+
diff --git a/ci/deploy-opnfv-compass-ubuntu.sh b/ci/deploy-opnfv-compass-ubuntu.sh
new file mode 100644
index 0000000..efccf78
--- /dev/null
+++ b/ci/deploy-opnfv-compass-ubuntu.sh
@@ -0,0 +1,201 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+#################################################################################
+## installation of OpenStack via OPNFV Compass4nfv, on Ubuntu, virtual deployment
+#################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Compass4nfv"
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-compass
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# preemptively install the latest pip and clear the $PATH cache
+# with apt-get (see apt-get -h and man apt-get for details)
+apt-get -y update
+apt-get -y upgrade
+apt-get -y install python-pip
+pip install --upgrade pip
+hash -r
+apt-get -y install python3-openstackclient
+apt-get -y autoremove
+
+## note: apt is more recent than apt-get (apt was formally introduced with Ubuntu 16.04)
+## APT: Advanced Packaging Tool; apt is more high-level, apt-get has more features;
+# apt -y update # Refreshes repository index
+# apt -y full-upgrade # Upgrades packages with auto-handling of dependencies
+# apt -y install python-pip
+# pip install --upgrade pip
+# hash -r
+# apt -y install python3-openstackclient
+# apt -y autoremove
+
+
+# 2 options: (option 1 is preferable)
+# 1) remain in master branch, use build.sh (which builds a tar ball), then launch deploy.sh
+# 2) download a tar ball and launch deploy.sh in a branch matching the tar ball release (e.g. fraser 6.2)
+
+
+##############
+# OPTION 1: build.sh + deploy.sh in master branch
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+echo "*** begin download Compass4nfv repository"
+git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+cd compass4nfv
+
+# launch build script
+echo "*** begin Compass4nfv build:"
+./build.sh |& tee log1-Build.txt
+
+# edit in deploy.sh specific to OPTION 1
+# set path to ISO file (tar ball), as built by build.sh previously
+# absolute path to tar ball file URL (MUST be absolute path)
+sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/compass4nfv/work/building/compass.tar.gz' deploy.sh
+
+# END OPTION 1
+##############
+
+
+##############
+# OPTION 2: tar ball + deploy.sh in matching releases/branches
+
+# download tarball of a certain release/version
+#echo "*** begin download Compass4nfv tar ball"
+#wget http://artifacts.opnfv.org/compass4nfv/fraser/opnfv-6.2.tar.gz
+# note: list of tar ball (ISO) files from Compass4NFV in https://artifacts.opnfv.org/compass4nfv.html
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+#echo "*** begin download Compass4nfv repository"
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+#cd compass4nfv
+# note: list of compass4nfv branch names in https://gerrit.opnfv.org/gerrit/#/admin/projects/compass4nfv,branches
+# checkout to branch (or tag) matching the tarball release
+#git checkout stable/fraser
+
+# edit in deploy.sh specific to OPTION 2
+# set path to ISO file (tar ball), as downloaded previously
+# absolute path to tar ball file URL (MUST be absolute path)
+# sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/opnfv-6.2.tar.gz' deploy.sh
+
+# END OPTION 2
+##############
+
+
+# edit remaining deploy.sh entries as needed
+
+# set operating system version: Ubuntu Xenial Xerus
+sed -i '/#export OS_VERSION=xenial\/centos7/a export OS_VERSION=xenial' deploy.sh
+
+# set path to OPNFV scenario / DHA (Deployment Hardware Adapter) YAML file
+# here, os-nosdn-nofeature-noha scenario
+sed -i '/#export DHA=/a export DHA=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/os-nosdn-nofeature-noha.yml' deploy.sh
+
+# set path to network YAML file
+sed -i '/#export NETWORK=/a export NETWORK=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/network.yml' deploy.sh
+
+# append parameters for virtual machines (for virtual deployments); e.g., 2 nodes for NOHA scenario, 5 for HA, etc.
+# note: this may not be needed in a future release of Compass4nfv
+
+# VIRT_NUMBER – the number of nodes for virtual deployment.
+# VIRT_CPUS – the number of CPUs allocated per virtual machine.
+# VIRT_MEM – the memory size (MB) allocated per virtual machine.
+# VIRT_DISK – the disk size allocated per virtual machine.
+
+# if OPTION 1 (master): OPENSTACK_VERSION is queens, so add the VIRT_NUMBER line after the queens match
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_CPUS=4' deploy.sh
+sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh
+
+# if OPTION 2 (stable/fraser): OPENSTACK_VERSION is pike, so add the VIRT_NUMBER line after the pike match
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_CPUS=4' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_NUMBER=5' deploy.sh
+
+
+# launch deploy script
+echo "*** begin Compass4nfv deploy:"
+./deploy.sh |& tee log2-Deploy.txt
+
+
+
+
+# To access OpenStack Horizon GUI in Virtual deployment
+# source: https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass
+
+# confirm IP@ of the current server (jump server, such as 10.10.100.xyz on LaaS: 10.10.100.42 for hpe32, etc.)
+external_nic=$(ip route |grep '^default'|awk '{print $5F}')
+echo "external_nic: $external_nic"
+ip addr show "$external_nic"
+
+# Config IPtables rules: pick an unused port number, e.g. 50000+machine number, 50032 for hpe32 at 10.10.100.42
+# 192.16.1.222:443 is the OpenStack Horizon GUI after a Compass installation
+# syntax: iptables -t nat -A PREROUTING -d $EX_IP -p tcp --dport $PORT -j DNAT --to 192.16.1.222:443
+# (note: this could be automated: retrieve IP@, pick port number)
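+# a minimal sketch of that automation, kept commented out like the examples below
+# (assumption drawn from those examples: machine number = last IP octet - 10, so
+# port = 50000 + last octet - 10; adjust the formula to your lab):
+# EX_IP=$(ip -4 addr show "$external_nic" | awk '/inet /{sub(/\/.*/,"",$2); print $2; exit}')
+# PORT=$((50000 + ${EX_IP##*.} - 10))
+# iptables -t nat -A PREROUTING -d "$EX_IP" -p tcp --dport "$PORT" -j DNAT --to 192.16.1.222:443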
+
+# example: hpe15
+# iptables -t nat -A PREROUTING -d 10.10.100.25 -p tcp --dport 50015 -j DNAT --to 192.16.1.222:443
+# example: hpe33
+# iptables -t nat -A PREROUTING -d 10.10.100.43 -p tcp --dport 50033 -j DNAT --to 192.16.1.222:443
+
+# display IPtables NAT rules
+iptables -t nat -L
+
+# Enter https://$EX_IP:$PORT in your browser to visit the OpenStack Horizon dashboard
+# examples: https://10.10.100.25:50015 , https://10.10.100.43:50033
+# The default user is "admin"
+# to get the Horizon password for "admin":
+sudo docker cp compass-tasks:/opt/openrc ./
+sudo cat openrc | grep OS_PASSWORD
+source ./openrc
+
+# for OpenStack CLI (generic content from openrc)
+export OS_ENDPOINT_TYPE=publicURL
+export OS_INTERFACE=publicURL
+export OS_USERNAME=admin
+export OS_PROJECT_NAME=admin
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://192.16.1.222:5000/v3
+export OS_NO_CACHE=1
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_REGION_NAME=RegionOne
+
+# For openstackclient
+export OS_IDENTITY_API_VERSION=3
+export OS_AUTH_VERSION=3
+
+
+
+echo "*** end AUTO install: OPNFV Compass4nfv"
+
diff --git a/ci/deploy-opnfv-daisy-centos.sh b/ci/deploy-opnfv-daisy-centos.sh
new file mode 100644
index 0000000..664ba55
--- /dev/null
+++ b/ci/deploy-opnfv-daisy-centos.sh
@@ -0,0 +1,179 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+
+###############################################################################
+## installation of OpenStack via OPNFV Daisy4nfv, on CentOS, virtual deployment
+###############################################################################
+# reference manual: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/index.html#daisy-installation
+# page for virtual deployment: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Daisy4nfv"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7.2 or more)"
+cat /etc/*release
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# download tools: git, kvm, libvirt, python-yaml
+sudo yum -y install git
+sudo yum -y install kvm
+sudo yum -y install libvirt
+sudo yum info libvirt
+sudo yum info qemu-kvm
+sudo yum -y install python-yaml
+
+
+# make sure SELinux is enforced (Security-Enhanced Linux)
+sudo setenforce 1
+echo "getenforce: $(getenforce)"
+
+# Restart the libvirtd daemon:
+sudo service libvirtd restart
+# Verify that the kvm module is loaded; you should see amd or intel depending on the hardware:
+lsmod | grep kvm
+# Note: to test, issue a virsh command to ensure local root connectivity:
+# sudo virsh sysinfo
+
+
+
+# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+# prepare Daisy installation directory
+export INSTALLDIR=/opt/opnfv-daisy
+mkdir $INSTALLDIR
+cd $INSTALLDIR
+
+# oslo-config, needed in daisy/deploy/get_conf.py
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+python get-pip.py --no-warn-script-location
+pip install --upgrade oslo-config
+
+
+# retrieve Daisy4nfv repository
+git clone https://gerrit.opnfv.org/gerrit/daisy
+cd daisy
+
+
+
+# OPTION 1: master repo and latest bin file: May 17th 2018
+# Download latest bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+curl http://artifacts.opnfv.org/daisy/opnfv-2018-05-17_14-00-32.bin -o opnfv.bin
+# make opnfv.bin executable
+chmod 777 opnfv.bin
+
+# OPTION 2: stable release: Fraser 6.0 (so, checkout to stable Fraser release opnfv-6.0)
+# Download matching bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+#git checkout opnfv.6.0 # as per Daisy4nfv instructions, but does not work
+#git checkout stable/fraser
+#curl http://artifacts.opnfv.org/daisy/fraser/opnfv-6.0.iso -o opnfv.bin
+# make opnfv.bin executable
+#chmod 777 opnfv.bin
+
+
+
+# The deploy.yml file is the inventory template of deployment nodes:
+# error in the doc: "./deploy/conf/vm_environment/zte-virtual1/deploy.yml"
+# correct path: "./deploy/config/vm_environment/zte-virtual1/deploy.yml"
+# You can write your own name/roles reference into it:
+# name – Host name for deployment node after installation.
+# roles – Components deployed.
+# note: ./templates/virtual_environment/ contains xml files, for networks and VMs
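+# purely illustrative sketch of such an inventory (role names are assumptions;
+# check the default zte-virtual1/deploy.yml copied below for the exact syntax):
+# hosts:
+#   - name: 'host1'
+#     roles:
+#       - 'CONTROLLER_LB'
+#   - name: 'host2'
+#     roles:
+#       - 'COMPUTER'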
+
+
+# prepare config dir for Auto lab in daisy dir, and copy deploy and network YAML files from default files (virtual1 or virtual2)
+export AUTO_DAISY_LAB_CONFIG1=labs/auto_daisy_lab/virtual1/daisy/config
+export DAISY_DEFAULT_ENV1=deploy/config/vm_environment/zte-virtual1
+mkdir -p $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/deploy.yml $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/network.yml $AUTO_DAISY_LAB_CONFIG1
+
+export AUTO_DAISY_LAB_CONFIG2=labs/auto_daisy_lab/virtual2/daisy/config
+export DAISY_DEFAULT_ENV2=deploy/config/vm_environment/zte-virtual2
+mkdir -p $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/deploy.yml $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/network.yml $AUTO_DAISY_LAB_CONFIG2
+
+# Note:
+# - zte-virtual1 config files deploy OpenStack with five nodes (3 LB nodes and 2 compute nodes).
+# - zte-virtual2 config files deploy an all-in-one OpenStack.
+
+# run deploy script, scenario os-nosdn-nofeature-ha, multinode OpenStack
+sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual1 -s os-nosdn-nofeature-ha
+
+# run deploy script, scenario os-nosdn-nofeature-noha, all-in-one OpenStack
+# sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual2 -s os-nosdn-nofeature-noha
+
+
+# Notes about deploy.sh:
+# The value after -L should be an absolute path which points to the directory containing the $AUTO_DAISY_LAB_CONFIG directory.
+# The value after the -p parameter (virtual1 or virtual2) should match the one selected for $AUTO_DAISY_LAB_CONFIG.
+# The value after the -l parameter (e.g. auto_daisy_lab) should match the lab name selected for $AUTO_DAISY_LAB_CONFIG, after labs/ .
+# Scenario (-s parameter): "os-nosdn-nofeature-ha" is used for deploying multinode OpenStack (virtual1)
+# Scenario (-s parameter): "os-nosdn-nofeature-noha" is used for deploying all-in-one OpenStack (virtual2)
+
+# more details on deploy.sh OPTIONS:
+# -B PXE Bridge for booting Daisy Master, optional
+# -D Dry-run, does not perform deployment, will be deleted later
+# -L Securelab repo absolute path, optional
+# -l LAB name, necessary
+# -p POD name, necessary
+# -r Remote workspace in target server, optional
+# -w Workdir for temporary usage, optional
+# -h Print this message and exit
+# -s Deployment scenario
+# -S Skip recreate Daisy VM during deployment
+
+# When deployed successfully, the floating IP of OpenStack is 10.20.11.11, the login account is "admin", and the password is "keystone"
diff --git a/ci/deploy-opnfv-fuel-ubuntu.sh b/ci/deploy-opnfv-fuel-ubuntu.sh
new file mode 100644
index 0000000..db276b2
--- /dev/null
+++ b/ci/deploy-opnfv-fuel-ubuntu.sh
@@ -0,0 +1,199 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##############################################################################
+## installation of OpenStack via OPNFV Fuel/MCP, on Ubuntu, virtual deployment
+##############################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/installation.instruction.html#opnfv-software-installation-and-deployment
+
+# Steps:
+# step 1: download Fuel/MCP repository and run deploy script
+# (this example: x86, virtual deploy, os-nosdn-nofeature-noha scenario)
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+# step 3: add more resources to OpenStack instance (vCPUs, RAM)
+# step 4: download Auto repository
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+
+echo "*** begin AUTO install: OPNFV Fuel/MCP"
+
+
+# step 1: download Fuel/MCP repository and run deploy script
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-fuel
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# get Fuel repository
+git clone https://git.opnfv.org/fuel
+# cd in new fuel repository, which contains directories: mcp, ci, etc.
+# note: this is for x86_64 architectures; for aarch64 architectures, git clone https://git.opnfv.org/armband and cd armband instead
+cd fuel
+
+# edit NOHA scenario YAML file with more resources for compute nodes: 32 vCPUs, 192G RAM
+{ printf " cmp01:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n";\
+ printf " cmp02:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n"; } >> mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+# provide more storage space to VMs: 350G per compute node (default is 100G)
+sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+# launch OPNFV Fuel/MCP deploy script
+ci/deploy.sh -l local -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+
+
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+
+# install python 3 on Ubuntu
+echo "*** begin install python 3"
+sudo apt-get -y update
+sudo apt-get -y install python3
+# maybe clean-up packages
+# sudo apt -y autoremove
+# specific install of a python version, e.g. 3.6
+# sudo apt-get install python3.6
+
+# http://docs.python-guide.org/en/latest/starting/install3/linux/
+# sudo apt-get install software-properties-common
+# sudo add-apt-repository ppa:deadsnakes/ppa
+# sudo apt-get update
+# sudo apt-get install python3.6
+echo "python2 --version: $(python2 --version)"
+echo "python3 --version: $(python3 --version)"
+echo "which python: $(which python)"
+
+# install pip3 for python3; /usr/local/bin/pip3 vs. /usr/bin/pip3; solve with "hash -r"
+echo "*** begin install pip3 for python3"
+apt -y install python3-pip
+hash -r
+pip3 install --upgrade pip
+hash -r
+
+echo "\$PATH: $PATH"
+echo "which pip: $(which pip)"
+echo "which pip3: $(which pip3)"
+
+# install OpenStack SDK Python client
+echo "*** begin install OpenStack SDK Python client"
+pip3 install openstacksdk
+pip3 install --upgrade openstacksdk
+
+# install OpenStack CLI
+echo "*** begin install OpenStack CLI"
+pip3 install python-openstackclient
+pip3 install --upgrade python-openstackclient
+
+pip3 install --upgrade python-keystoneclient
+pip3 install --upgrade python-neutronclient
+pip3 install --upgrade python-novaclient
+pip3 install --upgrade python-glanceclient
+pip3 install --upgrade python-cinderclient
+
+# install OpenStack Heat (may not be installed by default), may be useful for VNF installation
+#apt install python3-heatclient
+echo "*** begin install OpenStack Heat"
+pip3 install --upgrade python-heatclient
+
+# package verification printouts
+echo "*** begin package verification printouts"
+pip3 list
+pip3 show openstacksdk
+pip3 check
+
+
+
+# step 3: add more resources to OpenStack instance
+
+# now that OpenStack CLI is installed, finish Fuel/MCP installation:
+# take extra resources indicated in os-nosdn-nofeature-noha.yaml into account as quotas in the OpenStack instance
+# (e.g. 2 compute nodes with 32 vCPUs and 192G RAM each => 64 cores and 384G=393,216M RAM)
+# enter environment variables hard-coded here, since always the same for Fuel/MCP; there could be better ways to do this :)
+
+export OS_AUTH_URL=http://10.16.0.107:5000/v3
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+export OS_USERNAME="admin"
+export OS_PASSWORD="opnfv_secret"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
+
+# at this point, openstack CLI commands should work
+echo "*** finish install OPNFV Fuel/MCP"
+openstack quota set --cores 64 admin
+openstack quota set --ram 393216 admin
+
+
+
+# step 4: download Auto repository
+
+# install OPNFV Auto
+# prepare install directory
+echo "*** begin install OPNFV Auto"
+mkdir -p /opt/opnfv-Auto
+cd /opt/opnfv-Auto
+# get Auto repository from Gerrit
+git clone https://gerrit.opnfv.org/gerrit/auto
+# cd in new auto repository, which contains directories: lib, setup, ci, etc.
+cd auto
+
+
+
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+# download images used by script, unless downloading images from URL works from the script
+echo "*** begin download images"
+cd setup/VIMs/OpenStack
+mkdir images
+cd images
+#CirrOS
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
+# Ubuntu 16.04 LTS (Xenial Xerus)
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img
+# Ubuntu 14.04.5 LTS (Trusty Tahr)
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img
+
+# launch script to populate the OpenStack instance
+echo "*** begin populate OpenStack instance with ONAP objects"
+cd ..
+python3 auto_script_config_openstack_for_onap.py
+
+echo "*** end AUTO install: OPNFV Fuel/MCP"
+
diff --git a/docs/release/release-notes/Auto-release-notes.rst b/docs/release/release-notes/Auto-release-notes.rst
index 26445b6..c30bd4c 100644
--- a/docs/release/release-notes/Auto-release-notes.rst
+++ b/docs/release/release-notes/Auto-release-notes.rst
@@ -7,13 +7,13 @@
Auto Release Notes
==================
-This document provides the release notes for the Fraser release of Auto.
+This document provides the release notes for the Gambia 7.0 release of Auto.
Important notes for this release
================================
-The initial release for Auto was in Fraser 6.0 (project inception: July 2017). This is the second point release, in Fraser 6.2.
+The initial release for Auto was in Fraser 6.0 (project inception: July 2017).
Summary
@@ -23,14 +23,14 @@ Overview
^^^^^^^^
OPNFV is an SDNFV system integration project for open-source components, which so far have been mostly limited to
-the NFVI+VIM as generally described by ETSI.
+the NFVI+VIM as generally described by `ETSI <https://www.etsi.org/technologies-clusters/technologies/nfv>`_.
In particular, OPNFV has yet to integrate higher-level automation features for VNFs and end-to-end Services.
-As an OPNFV project, Auto ("ONAP-Automated OPNFV") will focus on ONAP component integration and verification with
+As an OPNFV project, Auto (*ONAP-Automated OPNFV*) will focus on ONAP component integration and verification with
OPNFV reference platforms/scenarios, through primarily a post-install process, in order to avoid impact to OPNFV
-installer projects. As much as possible, this will use a generic installation/integration process (not specific to
-any OPNFV installer's technology).
+installer projects (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV). As much as possible, this will use a generic
+installation/integration process (not specific to any OPNFV installer's technology).
* `ONAP <https://www.onap.org/>`_ (a Linux Foundation Project) is an open source software platform that delivers
robust capabilities for the design, creation, orchestration, monitoring, and life cycle management of
@@ -38,7 +38,15 @@ any OPNFV installer's technology).
Auto aims at validating the business value of ONAP in general, but especially within an OPNFV infrastructure
(integration of ONAP and OPNFV). Business value is measured in terms of improved service quality (performance,
-reliability, ...) and OPEX reduction (VNF management simplification, power consumption reduction, ...).
+reliability, ...) and OPEX reduction (VNF management simplification, power consumption reduction, ...), as
+demonstrated by use cases.
+
+Auto also validates the availability of multi-architecture software (binary images and containers) for ONAP and OPNFV:
+CPUs (x86, ARM) and clouds (MultiVIM).
+
+In other words, Auto is a turnkey approach to automatically deploy an integrated open-source virtual network
+based on OPNFV (as infrastructure) and ONAP (as end-to-end service manager), which demonstrates business value
+to end-users (IT/Telco service providers, enterprises).
While all of ONAP is in scope, as it proceeds, the Auto project will focus on specific aspects of this integration
@@ -48,7 +56,7 @@ and verification in each release. Some example topics and work items include:
* How ONAP SDN-C uses OPNFV existing features, e.g. NetReady, in a two-layer controller architecture in which the
upper layer (global controller) is replaceable, and the lower layer can use different vendor’s local controller to
interact with SDN-C. For interaction with multiple cloud infrastructures, the MultiVIM ONAP component will be used.
-* How ONAP leverages OPNFV installers (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV, JOID) to provide a cloud
+* How ONAP leverages OPNFV installers (Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV) to provide a cloud
instance (starting with OpenStack) on which to install the tool ONAP
* What data collection interface VNF and controllers provide to ONAP DCAE, and (through DCAE), to closed-loop control
functions such as Policy Tests which verify interoperability of ONAP automation/lifecycle features with specific NFVI
@@ -72,7 +80,7 @@ It is understood that:
.. image:: auto-proj-rn01.png
-The current ONAP architecture overview can be found `here <http://onap.readthedocs.io/en/latest/guides/onap-developer/architecture/onap-architecture.html>`_.
+The current ONAP architecture overview can be found `here <https://onap.readthedocs.io/en/latest/guides/onap-developer/architecture/onap-architecture.html>`_.
For reference, the ONAP-Beijing architecture diagram is replicated here:
@@ -89,17 +97,18 @@ Within OPNFV, Auto leverages tools and collaborates with other projects:
* FuncTest for software verification (CI/CD, Pass/Fail)
* Yardstick for metric management (quantitative measurements)
* VES (VNF Event Stream) and Barometer for VNF monitoring (feed to ONAP/DCAE)
+ * Edge Cloud as use case
* leverage OPNFV tools and infrastructure:
* Pharos as LaaS: transient pods (3-week bookings) and permanent Arm pod (6 servers)
- * possibly other labs from the community
+ * `WorksOnArm <http://worksonarm.com/cluster>`_ (`GitHub link <http://github.com/worksonarm/cluster>`_)
+ * possibly other labs from the community (Huawei pod-12, 6 servers, x86)
* JJB/Jenkins for CI/CD (and follow OPNFV scenario convention)
* Gerrit/Git for code and documents reviewing and archiving (similar to ONAP: Linux Foundation umbrella)
* follow OPNFV releases (Releng group)
-
Testability
^^^^^^^^^^^
@@ -123,21 +132,26 @@ value (i.e., find/determine policies and controls which yield optimized ONAP bus
More precisely, the following list shows parameters that could be applied to an Auto full run of test cases:
* Auto test cases for given use cases
-* OPNFV installer {Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV, JOID}
+* OPNFV installer {Fuel/MCP, Compass4NFV, Apex/TripleO, Daisy4NFV}
* OPNFV availability scenario {HA, noHA}
-* cloud where ONAP runs {OpenStack, AWS, GCP, Azure, ...}
-* ONAP installation type {bare metal or virtual server, VM or container, ...} and options {MultiVIM single|distributed, ...}
-* VNFs {vFW, vCPE, vAAA, vDHCP, vDNS, vHSS, ...} and VNF-based services {vIMS, vEPC, ...}
+* environment where ONAP runs {bare metal servers, VMs from clouds (OpenStack, AWS, GCP, Azure, ...), containers}
+* ONAP installation type {bare metal, VM, or container, ...} and options {MultiVIM single|distributed, ...}
+* VNF types {vFW, vCPE, vAAA, vDHCP, vDNS, vHSS, ...} and VNF-based services {vIMS, vEPC, ...}
* cloud where VNFs run {OpenStack, AWS, GCP, Azure, ...}
-* VNF type {VM-based, container}
-* CPU architectures {x86/AMD64, ARM/aarch64} for ONAP software and for VNFs
+* VNF host type {VM, container}
+* CPU architectures {x86/AMD64, ARM/aarch64} for ONAP software and for VNF software; not really important for Auto software;
* pod size and technology (RAM, storage, CPU cores/threads, NICs)
-* traffic types and amounts/volumes
+* traffic types and amounts/volumes; traffic generators (although that should not really matter);
* ONAP configuration {especially policies and closed-loop controls; monitoring types for DCAE: VES, ...}
* versions of every component {Linux OS (Ubuntu, CentOS), OPNFV release, clouds, ONAP, VNFs, ...}
+The diagram below shows Auto parameters:
+
+.. image:: auto-proj-parameters.png
-Illustration of Auto analysis loop based on test case executions:
+
+The next figure is an illustration of the Auto analysis loop (design, configuration, execution, result analysis)
+based on test cases covering as many parameters as possible:
.. image:: auto-proj-tests.png
@@ -150,14 +164,14 @@ Auto currently defines three use cases: Edge Cloud (UC1), Resiliency Improvement
including end-to-end composite services of which a Cloud Manager may not be aware (VMs or containers could be
recovered by a Cloud Manager, but not necessarily an end-to-end service built on top of VMs or containers).
* enterprise-grade performance of vCPEs (certification during onboarding, then real-time performance assurance with
- SLAs and HA as well as scaling).
+ SLAs and HA, as well as scaling).
The use cases define test cases, which initially will be independent, but which might eventually be integrated to `FuncTest <https://wiki.opnfv.org/display/functest/Opnfv+Functional+Testing>`_.
-Additional use cases can be added in the future, such as vIMS (example: project Clearwater) or residential vHGW (virtual
-Home Gateways). The interest for vHGW is to reduce overall power consumption: even in idle mode, physical HGWs in
-residential premises consume a lot of energy. Virtualizing that service to the Service Provider edge data center would
-allow to minimize that consumption.
+Additional use cases can be added in the future, such as vIMS (example: project `Clearwater <http://www.projectclearwater.org/>`_)
+or residential vHGW (virtual Home Gateways). The interest for vHGW is to reduce overall power consumption: even in idle mode,
+physical HGWs in residential premises consume a lot of energy. Virtualizing that service to the Service Provider edge data center
+would help minimize that consumption.
Lab environment
@@ -172,39 +186,53 @@ x86 pod at UNH IOL.
A transition is in progress, to leverage OPNFV LaaS (Lab-as-a-Service) pods (`Pharos <https://labs.opnfv.org/>`_).
These pods can be booked for 3 weeks only (with an extension for a maximum of 2 weeks), so they are not a permanent resource.
-A repeatable automated installation procedure is being developed.
+For ONAP-Beijing, a repeatable automated installation procedure is being developed, using 3 Pharos servers (x86 for now).
+Also, a more permanent ONAP installation is in progress at a Huawei lab (pod-12, consisting of 6 x86 servers,
+1 as jump server, the other 5 with this example allocation: 3 for ONAP components, and 2 for an OPNFV infrastructure:
+OpenStack installed by Compass4NFV).
ONAP-based onboarding and deployment of VNFs is in progress (ONAP-Amsterdam pre-loading of VNFs must still be done outside
of ONAP: for VM-based VNFs, users need to prepare OpenStack stacks (using Heat templates), then make an instance snapshot
which serves as the binary image of the VNF).
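
A minimal sketch of that manual pre-loading flow, using the OpenStack CLI (the template and
object names below are placeholders, not part of Auto)::

   # create a stack from a Heat template describing the VNF VM(s)
   openstack stack create -t vnf-heat-template.yaml vnf-stack
   # once the VM is configured, snapshot it; the snapshot serves as the VNF binary image
   openstack server image create --name vnf-vm-image <vnf-server-name-or-id>
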
-An initial version of a script to prepare an OpenStack instance for ONAP (creation of a public and a private network,
-with a router) has been developed. It leverages OpenStack SDK.
+A script to prepare an OpenStack instance for ONAP (creation of a public and a private network, with a router,
+pre-loading of images and flavors, creation of a security group and an ONAP user) has been developed. It leverages
+OpenStack SDK. It has a delete option, so it can be invoked to delete these objects for example in a tear-down procedure.
Integration with Arm servers has started (exploring binary compatibility):
-* OpenStack is currently installed on a 6-server pod of Arm servers
+* The Auto project has a specific 6-server pod of Arm servers, which is currently loaned to the ONAP integration team,
+ to build ONAP images
* A set of 14 additional Arm servers was deployed at UNH, for increased capacity
-* Arm-compatible Docker images are in the process of being developed
+* ONAP Docker registry: ONAP-specific images for ARM are being built, with the purpose of populating ONAP nexus2
+ (Maven2 artifacts) and nexus3 (Docker containers) repositories at Linux Foundation. Docker images are
+ multi-architecture, and the manifest of an image may contain 1 or more layers (for example 2 layers: x86/AMD64
+ and ARM/aarch64). One of the ONAP-Casablanca architectural requirements is to be CPU-architecture independent.
+ There are almost 150 Docker containers in a complete ONAP instance. Currently, more disk space is being added
+ to the ARM nodes (configuration of Nova, and/or additional actual physical storage space).
+
-Test case implementation for the three use cases has started.
+Test case design and implementation for the three use cases has started.
OPNFV CI/CD integration with JJB (Jenkins Job Builder) has started: see the Auto plan description
-`here <https://wiki.opnfv.org/display/AUTO/CI+Plan+for+Auto>`_. The permanent resource for that is the 6-server Arm
+`here <https://wiki.opnfv.org/display/AUTO/CI+for+Auto>`_. The permanent resource for that is the 6-server Arm
pod, hosted at UNH. The CI directory from the Auto repository is `here <https://git.opnfv.org/auto/tree/ci>`_.
+
Finally, the following figure illustrates Auto in terms of project activities:
.. image:: auto-project-activities.png
-Note: a demo was delivered at the OpenStack Summit in Vancouver on May 21st 2018, to illustrate the deployment of a WordPress application
-(WordPress is a platform for websites and blogs) deployed on a multi-architecture cloud (mix of x86 and Arm servers).
+Note: a demo was delivered at the OpenStack Summit in Vancouver on May 21st 2018, to illustrate the deployment of
+a WordPress application (WordPress is a platform for websites and blogs) on a multi-architecture cloud (mix
+of x86 and Arm servers).
This shows how service providers and enterprises can diversify their data centers with servers of different architectures,
-and select architectures best suited to each use case (mapping application components to architectures: DBs, interactive servers,
-number-crunching modules, ...).
+and select architectures best suited to each use case (mapping application components to architectures: DBs,
+interactive servers, number-crunching modules, ...).
This prefigures how other examples such as ONAP, VIMs, and VNFs could also be deployed on heterogeneous multi-architecture
-environments (open infrastructure), orchestrated by Kubernetes. The Auto installation scripts could expand on that approach.
+environments (open infrastructure), orchestrated by Kubernetes. The Auto installation scripts covering all the parameters
+described above could expand on that approach.
.. image:: auto-proj-openstacksummit1805.png
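+
+In practice, Kubernetes exposes each node's CPU architecture as a node label, which is what makes this kind of
+per-component placement possible; a quick way to inspect it (label key as used by the Kubernetes versions of that
+period):
+
+.. code-block:: bash
+
+    # list the nodes of the mixed cluster together with their advertised CPU architecture
+    kubectl get nodes -L beta.kubernetes.io/arch
+    # the same label value ("amd64" or "arm64") can then be used as a nodeSelector in a pod spec,
+    # pinning each application component (DB, front-end, ...) to the best-suited architecture
+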
@@ -218,13 +246,13 @@ Release Data
| **Project** | Auto |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/commit-ID** | auto/opnfv-6.2.0 |
+| **Repo/commit-ID** | auto/opnfv-7.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser 6.2 |
+| **Release designation** | Gambia 7.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2018-06-29 |
+| **Release date** | 2018-11-02 |
| | |
+--------------------------------------+--------------------------------------+
| **Purpose of the delivery** | Official OPNFV release |
@@ -273,22 +301,66 @@ Point release 6.2:
* initial scripts for OPNFV CI/CD, registration of Jenkins slave on `Arm pod <https://build.opnfv.org/ci/view/auto/>`_
* updated script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.14
-Notable activities since release 6.1, which may result in new features for Gambia 7.0:
+Point release 7.0:
+
+* progress on Docker registry of ONAP's Arm images
+* progress on ONAP installation script for 3-server cluster of UNH servers
+* CI scripts for OPNFV installers: Fuel/MCP (x86), Compass, Apex/TripleO (must run twice)
+* initial CI script for Daisy4NFV (work in progress)
+* JOID script, supported only up to release 6.2 (not in Gambia 7.0)
+* completed script for configuring OpenStack instance for ONAP, using OpenStack SDK 0.17
+* use of an additional lab resource for Auto development: 6-server x86 pod (huawei-pod12)
+
+
-* researching how to configure multiple Pharos servers in a cluster for Kubernetes
-* started to evaluate Compass4nfv as another OpenStack installer; issues with Python version (2 or 3)
-* common meeting with Functest
-* Plugfest: initiated collaboration with ONAP/MultiVIM (including support for ONAP installation)
**JIRA TICKETS for this release:**
+
+`JIRA Auto Gambia 7.0.0 Done <https://jira.opnfv.org/issues/?filter=12403>`_
+
+Manual selection of significant JIRA tickets for this version's highlights:
+
+--------------------------------------+--------------------------------------+
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| AUTO-38, auto-resiliency-vif-001: | UC2: validate VM suspension command |
-| 2/3 Test Logic | and measurement of Recovery Time |
+| AUTO-37 | Get DCAE running onto Pharos |
+| | deployment |
++--------------------------------------+--------------------------------------+
+| AUTO-42 | Use Compass4NFV to create an |
+| | OpenStack instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| AUTO-43 | String together scripts for Fuel, |
+| | Tool installation, ONAP preparation |
++--------------------------------------+--------------------------------------+
+| AUTO-44 | Build ONAP components for arm64 |
+| | platform |
++--------------------------------------+--------------------------------------+
+| AUTO-45 | CI: Jenkins definition of verify and |
+| | merge jobs |
++--------------------------------------+--------------------------------------+
+| AUTO-46 | Use Apex to create an OpenStack |
+| | instance on a UNH pod |
++--------------------------------------+--------------------------------------+
+| AUTO-47 | Install ONAP with Kubernetes on LaaS |
+| | |
++--------------------------------------+--------------------------------------+
+| AUTO-48 | Create documentation for ONAP |
+| | deployment with Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-49 | Automate ONAP deployment with |
+| | Kubernetes on LaaS |
++--------------------------------------+--------------------------------------+
+| AUTO-51 | huawei-pod12: Prepare IDF and PDF |
+| | files |
++--------------------------------------+--------------------------------------+
+| AUTO-52 | Deploy a running ONAP instance on |
+| | huawei-pod12 |
++--------------------------------------+--------------------------------------+
+| AUTO-54 | Use Daisy4nfv to create an OpenStack |
+| | instance on a UNH pod |
+--------------------------------------+--------------------------------------+
| | |
| | |
@@ -319,7 +391,7 @@ Deliverables
Software deliverables
^^^^^^^^^^^^^^^^^^^^^
-6.2 release: in-progress install scripts, CI scripts, and test case implementations.
+7.0 release: in-progress Docker ARM images, install scripts, CI scripts, and test case implementations.
Documentation deliverables
@@ -341,9 +413,6 @@ Known Limitations, Issues and Workarounds
System Limitations
^^^^^^^^^^^^^^^^^^
-* ONAP still to be validated for Arm servers (many Docker images are ready)
-* ONAP installation still to be automated in a repeatable way, and need to configure cluster of Pharos servers
-
Known issues
@@ -393,8 +462,8 @@ None at this point.
References
==========
-For more information on the OPNFV Fraser release, please see:
-http://opnfv.org/fraser
+For more information on the OPNFV Gambia release, please see:
+http://opnfv.org/gambia
Auto Wiki pages:
diff --git a/docs/release/release-notes/auto-proj-parameters.png b/docs/release/release-notes/auto-proj-parameters.png
new file mode 100644
index 0000000..a0cbe2e
--- /dev/null
+++ b/docs/release/release-notes/auto-proj-parameters.png
Binary files differ
diff --git a/docs/release/release-notes/auto-project-activities.png b/docs/release/release-notes/auto-project-activities.png
index c50bd72..d25ac2a 100644
--- a/docs/release/release-notes/auto-project-activities.png
+++ b/docs/release/release-notes/auto-project-activities.png
Binary files differ