Diffstat (limited to 'ci')
-rwxr-xr-x  ci/deploy-onap.sh                  342
-rw-r--r--  ci/deploy-opnfv-apex-centos.sh     209
-rw-r--r--  ci/deploy-opnfv-compass-ubuntu.sh  201
-rw-r--r--  ci/deploy-opnfv-daisy-centos.sh    179
-rw-r--r--  ci/deploy-opnfv-fuel-ubuntu.sh     199
5 files changed, 1130 insertions, 0 deletions
diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh
new file mode 100755
index 0000000..e886492
--- /dev/null
+++ b/ci/deploy-onap.sh
@@ -0,0 +1,342 @@
+#!/bin/bash
+#
+# Copyright 2018 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for automated deployment of ONAP with Kubernetes in the OPNFV LaaS
+# environment.
+#
+# Usage:
+#        deploy-onap.sh <MASTER> <SLAVE1> <SLAVE2>
+#
+# where <MASTER> and <SLAVEx> are IP addresses of the servers to be used
+# for the ONAP installation.
+#
+# NOTE: The following must be assured for all MASTER and SLAVE servers before
+# deploy-onap.sh is executed:
+#   1) ssh access without a password
+#   2) an "opnfv" user account with password-less sudo access must be
+#      available
+
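+# An illustrative prerequisite check (sketch only, not part of the original script;
+# the "opnfv" account and password-less sudo are the assumptions listed above):
+#   for S in "$@"; do
+#       ssh -o BatchMode=yes opnfv@"$S" 'sudo -n true' \
+#           || { echo "password-less ssh/sudo missing on $S" >&2; exit 1; }
+#   done
+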
+#
+# Configuration
+#
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+RANCHER_CLI_VER=0.6.11
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+MASTER=$1
+SERVERS=$*
+
+BRANCH='master'
+ENVIRON='onap'
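+
+# (illustrative sketch only, not in the original flow: a minimal argument check
+#  could be added here, e.g.
+#  [ $# -ge 1 ] || { echo "Usage: $0 <MASTER> [<SLAVE> ...]" >&2; exit 1; } )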
+
+#
+# Installation
+#
+echo "INSTALLING DOCKER ON ALL MACHINES"
+echo "$SERVERS"
+
+for MACHINE in $SERVERS;
+do
+ssh opnfv@"$MACHINE" "bash -s" <<DOCKERINSTALL &
+ sudo su
+ apt-get update
+ curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+
+ mkdir -p /etc/systemd/system/docker.service.d/
+ echo "[Service]
+ ExecStart=
+ ExecStart=/usr/bin/dockerd -H fd:// \
+ --insecure-registry=nexus3.onap.org:10001"\
+ > /etc/systemd/system/docker.service.d/docker.conf
+
+ systemctl daemon-reload
+ systemctl restart docker
+ apt-mark hold docker-ce
+
+ for SERVER in $SERVERS;
+ do
+ echo "\$SERVER $ENVIRON\$(echo \$SERVER | cut -d. -f 4 )" >> /etc/hosts
+ done
+
+ hostname $ENVIRON\$(echo $MACHINE | cut -d. -f 4 )
+
+ echo "DOCKER INSTALLED ON $MACHINE"
+DOCKERINSTALL
+done
+wait
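+
+# (worked example of the naming above, with hypothetical IPs: for
+#  SERVERS="10.1.0.21 10.1.0.22" the loop appends "10.1.0.21 onap21" and
+#  "10.1.0.22 onap22" to /etc/hosts on every machine, and each machine's
+#  hostname becomes onap<last-octet>, e.g. onap21)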
+
+echo "INSTALLING RANCHER ON MASTER"
+echo "$MASTER"
+
+ssh opnfv@"$MASTER" "bash -s" <<RANCHERINSTALL &
+sudo su
+apt install jq -y
+echo "Waiting for 30 seconds at \$(date)"
+sleep 30
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+echo "INSTALL STARTS"
+apt-get install make -y
+
+docker run -d --restart=unless-stopped -p 8080:8080\
+ --name rancher_server rancher/server:v$RANCHER_VERSION
+curl -LO https://storage.googleapis.com/kubernetes-release/\
+release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm\
+/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+mv linux-amd64/helm /usr/local/bin/helm
+
+echo "Installing nfs server"
+# changed from nfs_share to dockerdata-nfs
+apt-get install nfs-kernel-server -y
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports
+service nfs-kernel-server restart
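+# (note, illustrative only: the slave nodes are expected to share /dockerdata-nfs;
+#  a typical client-side mount would look like
+#  "mount -t nfs <MASTER_IP>:/dockerdata-nfs /dockerdata-nfs", which this script
+#  does not set up)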
+
+echo "Waiting 10 minutes for Rancher to setup at \$(date)"
+sleep 600
+echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER"
+wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\
+/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz
+cp rancher-v${RANCHER_CLI_VER}-rc2/rancher .
+
+API_RESPONSE=\`curl -s 'http://127.0.0.1:8080/v2-beta/apikey'\
+ -d '{"type":"apikey","accountId":"1a1","name":"autoinstall",\
+ "description":"autoinstall","created":null,"kind":null,\
+ "removeTime":null,"removed":null,"uuid":null}'\`
+# Extract and store token
+echo "API_RESPONSE: \${API_RESPONSE}"
+KEY_PUBLIC=\`echo \${API_RESPONSE} | jq -r .publicValue\`
+KEY_SECRET=\`echo \${API_RESPONSE} | jq -r .secretValue\`
+echo "publicValue: \$KEY_PUBLIC secretValue: \$KEY_SECRET"
+
+export RANCHER_URL=http://${MASTER}:8080
+export RANCHER_ACCESS_KEY=\$KEY_PUBLIC
+export RANCHER_SECRET_KEY=\$KEY_SECRET
+
+./rancher env ls
+echo "Creating kubernetes environment named ${ENVIRON}"
+./rancher env create -t kubernetes $ENVIRON > kube_env_id.json
+PROJECT_ID=\$(<kube_env_id.json)
+echo "env id: \$PROJECT_ID"
+export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID
+echo "you should see an additional kubernetes environment"
+./rancher env ls
+
+REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\
+ -H 'Accept: application/json'\
+ -H 'Content-Type: application/json'\
+ -d '{"name":"$MASTER"}'\
+ "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\`
+echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE"
+echo "Waiting for the server to finish url configuration - 1 min at \$(date)"
+sleep 60
+# see registrationUrl in the registrationtokens API response
+REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\`
+echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS"
+REGISTRATION_URL=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].registrationUrl\`
+REGISTRATION_DOCKER=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].image\`
+REGISTRATION_TOKEN=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].token\`
+echo "Registering host for image: \$REGISTRATION_DOCKER\
+ url: \$REGISTRATION_URL registrationToken: \$REGISTRATION_TOKEN"
+HOST_REG_COMMAND=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].command\`
+
+# base64 encode the kubectl token from the auth pair
+# generate this after the host is registered
+KUBECTL_TOKEN=\$(echo -n 'Basic '\$(echo\
+ -n "\$RANCHER_ACCESS_KEY:\$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+echo "KUBECTL_TOKEN base64 encoded: \${KUBECTL_TOKEN}"
+
+# add kubectl config - NOTE: the following spacing has to be "exact"
+# or kubectl will not connect - with a localhost:8080 error
+echo 'apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$MASTER:8080/r/projects/'\$PROJECT_ID'/kubernetes:6443"
+ name: "${ENVIRON}"
+contexts:
+- context:
+ cluster: "${ENVIRON}"
+ user: "${ENVIRON}"
+ name: "${ENVIRON}"
+current-context: "${ENVIRON}"
+users:
+- name: "${ENVIRON}"
+ user:
+ token: "'\${KUBECTL_TOKEN}'" ' > ~/.kube/config
+
+echo "docker run --rm --privileged\
+ -v /var/run/docker.sock:/var/run/docker.sock\
+ -v /var/lib/rancher:/var/lib/rancher\
+ \$REGISTRATION_DOCKER\
+ \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\
+ > /tmp/rancher_register_host
+chown opnfv /tmp/rancher_register_host
+
+RANCHERINSTALL
+wait
+
+echo "REGISTER TOKEN"
+HOSTREGTOKEN=$(ssh opnfv@"$MASTER" cat /tmp/rancher_register_host)
+echo "$HOSTREGTOKEN"
+
+echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'"
+echo "$SERVERS"
+
+for MACHINE in $SERVERS;
+do
+ssh opnfv@"$MACHINE" "bash -s" <<REGISTERHOST &
+ sudo su
+ $HOSTREGTOKEN
+ sleep 5
+ echo "Host $MACHINE waiting for host registration 5 min at \$(date)"
+ sleep 300
+REGISTERHOST
+done
+wait
+
+echo "DEPLOYING OOM ON RANCHER WITH MASTER"
+echo "$MASTER"
+
+ssh opnfv@"$MASTER" "bash -s" <<OOMDEPLOY &
+sudo su
+sysctl -w vm.max_map_count=262144
+rm -rf oom
+echo "pulling new oom"
+git clone -b $BRANCH http://gerrit.onap.org/r/oom
+
+# NFS FIX for aaf-locate
+sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\
+ ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml
+
+echo "Pre-pulling docker images at \$(date)"
+wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh
+chmod 777 prepull_docker.sh
+./prepull_docker.sh
+echo "starting onap pods"
+cd oom/kubernetes/
+helm init --upgrade
+helm serve &
+echo "Waiting for helm setup for 5 min at \$(date)"
+sleep 300
+helm version
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+helm install local/onap -n dev --namespace $ENVIRON
+cd ../../
+
+echo "Waiting for all pods to be up for 15-80 min at \$(date)"
+FAILED_PODS_LIMIT=0
+MAX_WAIT_PERIODS=480 # 120 MIN
+COUNTER=0
+PENDING_PODS=0
+while [ \$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) \
+-gt \$FAILED_PODS_LIMIT ]; do
+ PENDING=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l)
+ PENDING_PODS=\$PENDING
+ sleep 15
+ LIST_PENDING=\$(kubectl get pods --all-namespaces -o wide | grep -E '0/|1/2' )
+ echo "\${LIST_PENDING}"
+ echo "\${PENDING} pending > \${FAILED_PODS_LIMIT} at the \${COUNTER}th"\
+ " 15 sec interval out of \${MAX_WAIT_PERIODS}"
+ echo ""
+ COUNTER=\$((\$COUNTER + 1 ))
+ if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then
+ FAILED_PODS_LIMIT=800
+ fi
+done
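+# (note on the loop above: once COUNTER reaches MAX_WAIT_PERIODS, FAILED_PODS_LIMIT
+#  is raised to 800, so the while condition fails at the next check and the wait
+#  ends even if some pods are still not running)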
+
+echo "Report on non-running containers"
+PENDING=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2')
+PENDING_COUNT=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l)
+PENDING_COUNT_AAI=\$(kubectl get pods -n $ENVIRON | grep aai- \
+| grep -E '0/|1/2' | wc -l)
+
+echo "Check filebeat 2/2 count for ELK stack logging consumption"
+FILEBEAT=\$(kubectl get pods --all-namespaces -a | grep 2/)
+echo "\${FILEBEAT}"
+echo "sleep 5 min - to allow rest frameworks to finish at \$(date)"
+sleep 300
+echo "List of ONAP Modules"
+LIST_ALL=\$(kubectl get pods --all-namespaces -a --show-all )
+echo "\${LIST_ALL}"
+echo "run healthcheck 2 times to warm caches and frameworks"\
+ "so rest endpoints report properly - see OOM-447"
+
+echo "curl with aai cert to cloud-region PUT"
+curl -X PUT https://127.0.0.1:30233/aai/v11/cloud-infrastructure/\
+cloud-regions/cloud-region/CloudOwner/RegionOne \
+--data "@aai-cloud-region-put.json" \
+-H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" \
+-H "X-TransactionId:jimmy-postman" \
+-H "X-FromAppId:AAI" \
+-H "Content-Type:application/json" \
+-H "Accept:application/json" \
+--cacert aaiapisimpledemoopenecomporg_20171003.crt -k
+
+echo "get the cloud region back"
+curl -X GET https://127.0.0.1:30233/aai/v11/cloud-infrastructure/\
+cloud-regions/ \
+-H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" \
+-H "X-TransactionId:jimmy-postman" \
+-H "X-FromAppId:AAI" \
+-H "Content-Type:application/json" \
+-H "Accept:application/json" \
+--cacert aaiapisimpledemoopenecomporg_20171003.crt -k
+
+# OOM-484 - robot scripts moved
+cd oom/kubernetes/robot
+echo "run healthcheck prep 1"
+# OOM-722 adds namespace parameter
+if [ "$BRANCH" == "amsterdam" ]; then
+ ./ete-k8s.sh health > ~/health1.out
+else
+ ./ete-k8s.sh $ENVIRON health > ~/health1.out
+fi
+echo "sleep 5 min at \$(date)"
+sleep 300
+echo "run healthcheck prep 2"
+if [ "$BRANCH" == "amsterdam" ]; then
+ ./ete-k8s.sh health > ~/health2.out
+else
+ ./ete-k8s.sh $ENVIRON health > ~/health2.out
+fi
+echo "run healthcheck for real - wait a further 5 min at \$(date)"
+sleep 300
+if [ "$BRANCH" == "amsterdam" ]; then
+ ./ete-k8s.sh health
+else
+ ./ete-k8s.sh $ENVIRON health
+fi
+OOMDEPLOY
+wait
+echo "Finished install, ruturned from Master"
+exit 0
diff --git a/ci/deploy-opnfv-apex-centos.sh b/ci/deploy-opnfv-apex-centos.sh
new file mode 100644
index 0000000..a3a0433
--- /dev/null
+++ b/ci/deploy-opnfv-apex-centos.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##################################################################################
+## installation of OpenStack via OPNFV Apex/TripleO, on CentOS, virtual deployment
+##################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/virtual.html
+
+echo "*** begin AUTO install: OPNFV Apex/TripleO"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7 or more)"
+cat /etc/*release
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# 3 additional pre-installation preparations, lifted from OPNFV/storperf (they are post-installation there):
+# https://wiki.opnfv.org/display/storperf/LaaS+Setup+For+Development#LaaSSetupForDevelopment-InstallOPNFVApex
+# (may or may not be needed, to enable first-time Apex installation on a blank server)
+
+# 1) Install Docker
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install -y docker-ce
+sudo systemctl start docker
+
+# 2) Install docker-compose
+sudo curl -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+
+# 3) Install Python
+sudo yum install -y python-virtualenv
+sudo yum groupinstall -y "Development Tools"
+sudo yum install -y openssl-devel
+
+
+# update everything ("upgrade" is riskier than "update", as packages deemed unused/obsolete will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+
+# download Apex packages
+echo "*** downloading packages:"
+sudo yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm
+sudo yum -y install epel-release
+# note: EPEL = Extra Packages for Enterprise Linux
+sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo
+
+# install three required RPMs (RedHat/RPM Package Managers); this takes several minutes
+sudo yum -y install http://artifacts.opnfv.org/apex/fraser/opnfv-apex-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-undercloud-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-python34-6.2.noarch.rpm
+
+# clean-up old Apex versions if any
+## note: this precautionary opnfv-clean does not work... (even though the packages are installed at this point)
+opnfv-clean
+
+# Manage DNS references
+# probably not needed on an already configured server: already has DNS references
+# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "*** printout of /etc/resolv.conf :"
+cat /etc/resolv.conf
+
+# prepare installation directory
+mkdir -p /opt/opnfv-TripleO-apex
+cd /opt/opnfv-TripleO-apex
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
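+# (side note, sketch only: "unalias" returns an error when no such alias exists; a
+#  more tolerant variant would be, e.g.:  unalias cp 2>/dev/null || true )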
+
+# 2 YAML files from /etc/opnfv-apex/ are needed for virtual deploys:
+# 1) network_settings.yaml : may need to update NIC names, to match the NIC names on the deployment server
+# 2) standard scenario file (os-nosdn-nofeature-noha.yaml, etc.), or customized deploy_settings.yaml
+
+# make a local copy of YAML files (not necessary: could deploy from /etc/opnfv-apex); local copies are just for clarity
+# 1) network settings
+cp /etc/opnfv-apex/network_settings.yaml .
+# 2) deploy settings
+# copy one of the 40+ pre-defined scenarios (one of the YAML files)
+# for extra customization, git clone Apex repo, and copy and customize the generic deploy_settings.yaml
+# git clone https://git.opnfv.org/apex
+# cp ./apex/config/deploy/deploy_settings.yaml .
+cp /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml ./deploy_settings.yaml
+# cp /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml ./deploy_settings.yaml
+
+# Note: content of os-nosdn-nofeature-noha.yaml
+# ---
+# global_params:
+# ha_enabled: false
+#
+# deploy_options:
+# sdn_controller: false
+# tacker: true
+# congress: true
+# sfc: false
+# vpn: false
+
+
+# modify NIC names in network settings YAML file, specific to your environment (e.g. replace em1 with ens4f0 in LaaS)
+# Note: actually, this should not matter for a virtual environment
+sed -i 's/em1/ens4f0/' network_settings.yaml
+
+# launch deploy (works if openvswitch module is installed, which may not be the case the first time around)
+echo "*** deploying OPNFV by TripleO/Apex:"
+# --debug for detailed debug info
+# -v: Enable virtual deployment
+# note: needs at least 10G RAM for controllers
+sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml
+# without --debug:
+# sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+
+# with specific sizing:
+# sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml --virtual-compute-ram 32 --virtual-cpus 16 --virtual-computes 4
+
+
+# verify that the openvswitch module is listed:
+lsmod | grep openvswitch
+grep openvswitch < /proc/modules
+
+##{
+## workaround: do 2 successive installations... not exactly optimal...
+## clean up, as now opnfv-clean should work
+#opnfv-clean
+## second deploy try, should succeed (whether first one failed or succeeded)
+#sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml
+##}
+
+
+
+# verifications: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/verification.html
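+# (illustrative quick check, not taken from the guide above: after a successful
+#  virtual deploy, the undercloud/overcloud nodes should appear as running libvirt
+#  domains, e.g.:  sudo virsh list --all )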
+
+# {
+# if error after deploy.sh: "libvirt.libvirtError: Storage pool not found: no storage pool with matching name 'default'"
+
+# This usually happens if for some reason you are missing a default pool in libvirt:
+# $ virsh pool-list |grep default
+# You can recreate it manually:
+# $ virsh pool-define-as default dir --target /var/lib/libvirt/images/
+# $ virsh pool-autostart default
+# $ virsh pool-start default
+# }
+
+# {
+# if error after opnfv-deploy: iptc.ip4tc.IPTCError
+# check Apex jira ticket #521 https://jira.opnfv.org/browse/APEX-521
+# }
+
+# OpenvSwitch should not be missing, as it is a requirement from the RPM package:
+# https://github.com/opnfv/apex/blob/stable/fraser/build/rpm_specs/opnfv-apex-common.spec#L15
+
+
+
+# install python 3 on CentOS
+echo "*** begin install python 3.6 (3.4 should be already installed by default)"
+
+sudo yum -y install python36
+# install pip and setup tools
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+sudo /usr/bin/python3.6 get-pip.py --no-warn-script-location
+
+
+
+echo "*** end AUTO install: OPNFV Apex/TripleO"
+
diff --git a/ci/deploy-opnfv-compass-ubuntu.sh b/ci/deploy-opnfv-compass-ubuntu.sh
new file mode 100644
index 0000000..efccf78
--- /dev/null
+++ b/ci/deploy-opnfv-compass-ubuntu.sh
@@ -0,0 +1,201 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+#################################################################################
+## installation of OpenStack via OPNFV Compass4nfv, on Ubuntu, virtual deployment
+#################################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Compass4nfv"
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-compass
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# preemptively install latest pip and clear $PATH cache
+# with apt-get (see apt-get -h and man apt-get for details)
+apt-get -y update
+apt-get -y upgrade
+apt-get -y install python-pip
+pip install --upgrade pip
+hash -r
+apt-get -y install python3-openstackclient
+apt-get -y autoremove
+
+## note: apt is more recent than apt-get (apt was formally introduced with Ubuntu 16.04)
+## APT: Advanced Packaging Tool; apt is more high-level, apt-get has more features;
+# apt -y update # Refreshes repository index
+# apt -y full-upgrade # Upgrades packages with auto-handling of dependencies
+# apt -y install python-pip
+# pip install --upgrade pip
+# hash -r
+# apt -y install python3-openstackclient
+# apt -y autoremove
+
+
+# 2 options: (option 1 is preferable)
+# 1) remain in master branch, use build.sh (which builds a tar ball), then launch deploy.sh
+# 2) download a tar ball and launch deploy.sh in a branch matching the tar ball release (e.g. fraser 6.2)
+
+
+##############
+# OPTION 1: build.sh + deploy.sh in master branch
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+echo "*** begin download Compass4nfv repository"
+git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+cd compass4nfv
+
+# launch build script
+echo "*** begin Compass4nfv build:"
+./build.sh |& tee log1-Build.txt
+
+# edit in deploy.sh specific to OPTION 1
+# set path to ISO file (tar ball), as built by build.sh previously
+# absolute path to tar ball file URL (MUST be absolute path)
+sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/compass4nfv/work/building/compass.tar.gz' deploy.sh
+
+# END OPTION 1
+##############
+
+
+##############
+# OPTION 2: tar ball + deploy.sh in matching releases/branches
+
+# download tarball of a certain release/version
+#echo "*** begin download Compass4nfv tar ball"
+#wget http://artifacts.opnfv.org/compass4nfv/fraser/opnfv-6.2.tar.gz
+# note: list of tar ball (ISO) files from Compass4NFV in https://artifacts.opnfv.org/compass4nfv.html
+
+# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch
+#echo "*** begin download Compass4nfv repository"
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+#cd compass4nfv
+# note: list of compass4nfv branch names in https://gerrit.opnfv.org/gerrit/#/admin/projects/compass4nfv,branches
+# checkout to branch (or tag) matching the tarball release
+#git checkout stable/fraser
+
+# edit in deploy.sh specific to OPTION 2
+# set path to ISO file (tar ball), as downloaded previously
+# absolute path to tar ball file URL (MUST be absolute path)
+# sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/opnfv-6.2.tar.gz' deploy.sh
+
+# END OPTION 2
+##############
+
+
+# edit remaining deploy.sh entries as needed
+
+# set operating system version: Ubuntu Xenial Xerus
+sed -i '/#export OS_VERSION=xenial\/centos7/a export OS_VERSION=xenial' deploy.sh
+
+# set path to OPNFV scenario / DHA (Deployment Hardware Adapter) YAML file
+# here, os-nosdn-nofeature-noha scenario
+sed -i '/#export DHA=/a export DHA=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/os-nosdn-nofeature-noha.yml' deploy.sh
+
+# set path to network YAML file
+sed -i '/#export NETWORK=/a export NETWORK=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/network.yml' deploy.sh
+
+# append parameters for virtual machines (for virtual deployments); e.g., 2 nodes for NOHA scenario, 5 for HA, etc.
+# note: this may not be needed in a future release of Compass4nfv
+
+# VIRT_NUMBER – the number of nodes for virtual deployment.
+# VIRT_CPUS – the number of CPUs allocated per virtual machine.
+# VIRT_MEM – the memory size (MB) allocated per virtual machine.
+# VIRT_DISK – the disk size allocated per virtual machine.
+
+# if OPTION 1 (master): OPENSTACK_VERSION is queens, so add the VIRT_NUMBER line after the queens match
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_CPUS=4' deploy.sh
+sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh
+
+# if OPTION 2 (stable/fraser): OPENSTACK_VERSION is pike, so add the VIRT_NUMBER line after the pike match
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_DISK=200G' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_MEM=16384' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_CPUS=4' deploy.sh
+#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_NUMBER=5' deploy.sh
+
+
+# launch deploy script
+echo "*** begin Compass4nfv deploy:"
+./deploy.sh |& tee log2-Deploy.txt
+
+
+
+
+# To access OpenStack Horizon GUI in Virtual deployment
+# source: https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass
+
+# confirm the IP address of the current server (jump server, such as 10.10.100.xyz on LaaS: 10.10.100.42 for hpe32, etc.)
+external_nic=$(ip route | grep '^default' | awk '{print $5}')
+echo "external_nic: $external_nic"
+ip addr show "$external_nic"
+
+# Configure iptables rules: pick an unused port number, e.g. 50000 + machine number (50032 for hpe32 at 10.10.100.42)
+# 192.16.1.222:443 is the OpenStack Horizon GUI after a Compass installation
+# syntax: iptables -t nat -A PREROUTING -d $EX_IP -p tcp --dport $PORT -j DNAT --to 192.16.1.222:443
+# (note: this could be automated: retrieve the IP address, pick the port number; see the sketch after the examples below)
+
+# example: hpe15
+# iptables -t nat -A PREROUTING -d 10.10.100.25 -p tcp --dport 50015 -j DNAT --to 192.16.1.222:443
+# example: hpe33
+# iptables -t nat -A PREROUTING -d 10.10.100.43 -p tcp --dport 50033 -j DNAT --to 192.16.1.222:443
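+# (illustrative automation sketch, assuming the LaaS naming above where the machine
+#  number is the last IP octet minus 10; EX_IP and PORT are hypothetical helper names)
+# EX_IP=$(ip -4 addr show "$external_nic" | awk '/inet /{print $2}' | cut -d/ -f1 | head -n1)
+# PORT=$((50000 + $(echo "$EX_IP" | cut -d. -f4) - 10))    # e.g. 10.10.100.42 -> 50032
+# iptables -t nat -A PREROUTING -d "$EX_IP" -p tcp --dport "$PORT" -j DNAT --to 192.16.1.222:443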
+
+# display IPtables NAT rules
+iptables -t nat -L
+
+# Enter https://$EX_IP:$PORT in your browser to visit the OpenStack Horizon dashboard
+# examples: https://10.10.100.25:50015 , https://10.10.100.43:50033
+# The default user is "admin"
+# to get the Horizon password for "admin":
+sudo docker cp compass-tasks:/opt/openrc ./
+sudo cat openrc | grep OS_PASSWORD
+source ./openrc
+
+# for OpenStack CLI (generic content from openrc)
+export OS_ENDPOINT_TYPE=publicURL
+export OS_INTERFACE=publicURL
+export OS_USERNAME=admin
+export OS_PROJECT_NAME=admin
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://192.16.1.222:5000/v3
+export OS_NO_CACHE=1
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_REGION_NAME=RegionOne
+
+# For openstackclient
+export OS_IDENTITY_API_VERSION=3
+export OS_AUTH_VERSION=3
+
+
+
+echo "*** end AUTO install: OPNFV Compass4nfv"
+
diff --git a/ci/deploy-opnfv-daisy-centos.sh b/ci/deploy-opnfv-daisy-centos.sh
new file mode 100644
index 0000000..664ba55
--- /dev/null
+++ b/ci/deploy-opnfv-daisy-centos.sh
@@ -0,0 +1,179 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+
+###############################################################################
+## installation of OpenStack via OPNFV Daisy4nfv, on CentOS, virtual deployment
+###############################################################################
+# reference manual: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/index.html#daisy-installation
+# page for virtual deployment: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/vmdeploy.html
+
+echo "*** begin AUTO install: OPNFV Daisy4nfv"
+
+# check OS version
+echo "*** print OS version (must be CentOS, version 7.2 or more)"
+cat /etc/*release
+
+# make sure cp is not aliased or a function; same for mv and rm
+unalias cp
+unset -f cp
+unalias mv
+unset -f mv
+unalias rm
+unset -f rm
+
+# Manage Nested Virtualization
+echo "*** ensure Nested Virtualization is enabled on Intel x86"
+echo "*** nested flag before:"
+cat /sys/module/kvm_intel/parameters/nested
+rm -f /etc/modprobe.d/kvm-nested.conf
+{ printf "options kvm-intel nested=1\n";\
+ printf "options kvm-intel enable_shadow_vmcs=1\n";\
+ printf "options kvm-intel enable_apicv=1\n";\
+ printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf
+sudo modprobe -r kvm_intel
+sudo modprobe -a kvm_intel
+echo "*** nested flag after:"
+cat /sys/module/kvm_intel/parameters/nested
+
+echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines"
+lsmod | grep kvm_
+grep kvm_ < /proc/modules
+
+# download tools: git, kvm, libvirt, python-yaml
+sudo yum -y install git
+sudo yum -y install kvm
+sudo yum -y install libvirt
+sudo yum info libvirt
+sudo yum info qemu-kvm
+sudo yum -y install python-yaml
+
+
+# make sure SELinux is enforced (Security-Enhanced Linux)
+sudo setenforce 1
+echo "getenforce: $(getenforce)"
+
+# Restart the libvirtd daemon:
+sudo service libvirtd restart
+# Verify that the kvm module is loaded; you should see amd or intel depending on the hardware:
+lsmod | grep kvm
+# Note: to test, issue a virsh command to ensure local root connectivity:
+# sudo virsh sysinfo
+
+
+
+# update everything ("upgrade" is riskier than "update", as packages deemed unused/obsolete will be deleted)
+# (note: can take several minutes; may not be necessary)
+sudo yum -y update
+
+# prepare Daisy installation directory
+export INSTALLDIR=/opt/opnfv-daisy
+mkdir $INSTALLDIR
+cd $INSTALLDIR
+
+# oslo-config, needed in daisy/deploy/get_conf.py
+sudo curl -O https://bootstrap.pypa.io/get-pip.py
+hash -r
+python get-pip.py --no-warn-script-location
+pip install --upgrade oslo-config
+
+
+# retrieve Daisy4nfv repository
+git clone https://gerrit.opnfv.org/gerrit/daisy
+cd daisy
+
+
+
+# OPTION 1: master repo and latest bin file: May 17th 2018
+# Download latest bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+curl http://artifacts.opnfv.org/daisy/opnfv-2018-05-17_14-00-32.bin -o opnfv.bin
+# make opnfv.bin executable
+chmod 777 opnfv.bin
+
+# OPTION 2: stable release: Fraser 6.0 (so, checkout to stable Fraser release opnfv-6.0)
+# Download matching bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+#git checkout opnfv.6.0 # as per Daisy4nfv instructions, but does not work
+#git checkout stable/fraser
+#curl http://artifacts.opnfv.org/daisy/fraser/opnfv-6.0.iso -o opnfv.bin
+# make opnfv.bin executable
+#chmod 777 opnfv.bin
+
+
+
+# The deploy.yml file is the inventory template of the deployment nodes (see the illustrative sketch below):
+# error from doc: ”./deploy/conf/vm_environment/zte-virtual1/deploy.yml”
+# correct path: "./deploy/config/vm_environment/zte-virtual1/deploy.yml”
+# You can write your own name/roles reference into it:
+# name – Host name for deployment node after installation.
+# roles – Components deployed.
+# note: ./templates/virtual_environment/ contains xml files, for networks and VMs
+
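+# (illustrative sketch of a deploy.yml entry with hypothetical host names; the
+#  role names should be checked against the zte-virtual defaults copied below)
+# hosts:
+#   - name: 'controller01'
+#     roles:
+#       - 'CONTROLLER_LB'
+#   - name: 'computer01'
+#     roles:
+#       - 'COMPUTER'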
+
+# prepare config dir for Auto lab in daisy dir, and copy deploy and network YAML files from default files (virtual1 or virtual2)
+export AUTO_DAISY_LAB_CONFIG1=labs/auto_daisy_lab/virtual1/daisy/config
+export DAISY_DEFAULT_ENV1=deploy/config/vm_environment/zte-virtual1
+mkdir -p $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/deploy.yml $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/network.yml $AUTO_DAISY_LAB_CONFIG1
+
+export AUTO_DAISY_LAB_CONFIG2=labs/auto_daisy_lab/virtual2/daisy/config
+export DAISY_DEFAULT_ENV2=deploy/config/vm_environment/zte-virtual2
+mkdir -p $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/deploy.yml $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/network.yml $AUTO_DAISY_LAB_CONFIG2
+
+# Note:
+# - zte-virtual1 config files deploy openstack with five nodes (3 LB nodes and 2 compute nodes).
+# - zte-virtual2 config files deploy an all-in-one openstack
+
+# run deploy script, scenario os-nosdn-nofeature-ha, multinode OpenStack
+sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual1 -s os-nosdn-nofeature-ha
+
+# run deploy script, scenario os-nosdn-nofeature-noha, all-in-one OpenStack
+# sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual2 -s os-nosdn-nofeature-noha
+
+
+# Notes about deploy.sh:
+# The value after -L should be an absolute path which points to the directory which includes $AUTO_DAISY_LAB_CONFIG directory.
+# The value after -p parameter (virtual1 or virtual2) should match the one selected for $AUTO_DAISY_LAB_CONFIG.
+# The value after -l parameter (e.g. auto_daisy_lab) should match the lab name selected for $AUTO_DAISY_LAB_CONFIG, after labs/ .
+# Scenario (-s parameter): “os-nosdn-nofeature-ha” is used for deploying multinode openstack (virtual1)
+# Scenario (-s parameter): “os-nosdn-nofeature-noha” used for deploying all-in-one openstack (virtual2)
+
+# more details on deploy.sh OPTIONS:
+# -B PXE Bridge for booting Daisy Master, optional
+# -D Dry-run, does not perform deployment, will be deleted later
+# -L Securelab repo absolute path, optional
+# -l LAB name, necessary
+# -p POD name, necessary
+# -r Remote workspace in target server, optional
+# -w Workdir for temporary usage, optional
+# -h Print this message and exit
+# -s Deployment scenario
+# -S Skip recreate Daisy VM during deployment
+
+# When deployed successfully, the floating IP of openstack is 10.20.11.11, the login account is “admin” and the password is “keystone”
diff --git a/ci/deploy-opnfv-fuel-ubuntu.sh b/ci/deploy-opnfv-fuel-ubuntu.sh
new file mode 100644
index 0000000..db276b2
--- /dev/null
+++ b/ci/deploy-opnfv-fuel-ubuntu.sh
@@ -0,0 +1,199 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent
+# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force script to use default language for output, and force sorting to be bytewise
+# ("C" is from C language, represents "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match)
+export LANG=C
+export LC_ALL=C
+
+##############################################################################
+## installation of OpenStack via OPNFV Fuel/MCP, on Ubuntu, virtual deployment
+##############################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/installation.instruction.html#opnfv-software-installation-and-deployment
+
+# Steps:
+# step 1: download Fuel/MCP repository and run deploy script
+# (this example: x86, virtual deploy, os-nosdn-nofeature-noha scenario)
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+# step 3: add more resources to OpenStack instance (vCPUs, RAM)
+# step 4: download Auto repository
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+
+echo "*** begin AUTO install: OPNFV Fuel/MCP"
+
+
+# step 1: download Fuel/MCP repository and run deploy script
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-fuel
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# get Fuel repository
+git clone https://git.opnfv.org/fuel
+# cd in new fuel repository, which contains directories: mcp, ci, etc.
+# note: this is for x86_64 architectures; for aarch64 architectures, git clone https://git.opnfv.org/armband and cd armband instead
+cd fuel
+
+# edit NOHA scenario YAML file with more resources for compute nodes: 32 vCPUs, 192G RAM
+{ printf " cmp01:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n";\
+ printf " cmp02:\n";\
+ printf " vcpus: 32\n";\
+ printf " ram: 196608\n"; } >> mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+# provide more storage space to VMs: 350G per compute node (default is 100G)
+sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+# launch OPNFV Fuel/MCP deploy script
+ci/deploy.sh -l local -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+
+
+# step 2: download additional packages (python3, OpenStackSDK, OpenStack clients, ...)
+
+# install python 3 on Ubuntu
+echo "*** begin install python 3"
+sudo apt-get -y update
+sudo apt-get -y install python3
+# maybe clean-up packages
+# sudo apt -y autoremove
+# specific install of a python version, e.g. 3.6
+# sudo apt-get install python3.6
+
+# http://docs.python-guide.org/en/latest/starting/install3/linux/
+# sudo apt-get install software-properties-common
+# sudo add-apt-repository ppa:deadsnakes/ppa
+# sudo apt-get update
+# sudo apt-get install python3.6
+echo "python2 --version: $(python2 --version)"
+echo "python3 --version: $(python3 --version)"
+echo "which python: $(which python)"
+
+# install pip3 for python3; /usr/local/bin/pip3 vs. /usr/bin/pip3; solve with "hash -r"
+echo "*** begin install pip3 for python3"
+apt -y install python3-pip
+hash -r
+pip3 install --upgrade pip
+hash -r
+
+echo "\$PATH: $PATH"
+echo "which pip: $(which pip)"
+echo "which pip3: $(which pip3)"
+
+# install OpenStack SDK Python client
+echo "*** begin install OpenStack SDK Python client"
+pip3 install openstacksdk
+pip3 install --upgrade openstacksdk
+
+# install OpenStack CLI
+echo "*** begin install OpenStack CLI"
+pip3 install python-openstackclient
+pip3 install --upgrade python-openstackclient
+
+pip3 install --upgrade python-keystoneclient
+pip3 install --upgrade python-neutronclient
+pip3 install --upgrade python-novaclient
+pip3 install --upgrade python-glanceclient
+pip3 install --upgrade python-cinderclient
+
+# install OpenStack Heat (may not be installed by default), may be useful for VNF installation
+#apt install python3-heatclient
+echo "*** begin install OpenStack Heat"
+pip3 install --upgrade python-heatclient
+
+# package verification printouts
+echo "*** begin package verification printouts"
+pip3 list
+pip3 show openstacksdk
+pip3 check
+
+
+
+# step 3: add more resources to OpenStack instance
+
+# now that OpenStack CLI is installed, finish Fuel/MCP installation:
+# take extra resources indicated in os-nosdn-nofeature-noha.yaml into account as quotas in the OpenStack instance
+# (e.g. 2 compute nodes with 32 vCPUs and 192G RAM each => 64 cores and 384G=393,216M RAM)
+# enter environment variables hard-coded here, since always the same for Fuel/MCP; there could be better ways to do this :)
+
+export OS_AUTH_URL=http://10.16.0.107:5000/v3
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+export OS_USERNAME="admin"
+export OS_PASSWORD="opnfv_secret"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
+
+# at this point, openstack CLI commands should work
+echo "*** finish install OPNFV Fuel/MCP"
+openstack quota set --cores 64 admin
+openstack quota set --ram 393216 admin
+
+
+
+# step 4: download Auto repository
+
+# install OPNFV Auto
+# prepare install directory
+echo "*** begin install OPNFV Auto"
+mkdir -p /opt/opnfv-Auto
+cd /opt/opnfv-Auto
+# get Auto repository from Gerrit
+git clone https://gerrit.opnfv.org/gerrit/auto
+# cd in new auto repository, which contains directories: lib, setup, ci, etc.
+cd auto
+
+
+
+# step 5: run Auto python script to populate OpenStack instance with objects expected by ONAP
+
+# download images used by script, unless downloading images from URL works from the script
+echo "*** begin download images"
+cd setup/VIMs/OpenStack
+mkdir images
+cd images
+#CirrOS
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
+# Ubuntu 16.04 LTS (Xenial Xerus)
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img
+# Ubuntu 14.04.5 LTS (Trusty Tahr)
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img
+
+# launch script to populate the OpenStack instance
+echo "*** begin populate OpenStack instance with ONAP objects"
+cd ..
+python3 auto_script_config_openstack_for_onap.py
+
+echo "*** end AUTO install: OPNFV Fuel/MCP"
+