Diffstat (limited to 'ci')
-rwxr-xr-x   ci/build-auto.sh                  | 158
-rwxr-xr-x   ci/deploy-onap-fuel.sh            | 238
-rwxr-xr-x   ci/deploy-onap-kubespray.sh       | 339
-rwxr-xr-x   ci/deploy-onap.sh                 | 376
-rw-r--r--   ci/deploy-opnfv-apex-centos.sh    | 209
-rw-r--r--   ci/deploy-opnfv-compass-ubuntu.sh | 201
-rw-r--r--   ci/deploy-opnfv-daisy-centos.sh   | 179
-rw-r--r--   ci/deploy-opnfv-fuel-ubuntu.sh    | 199
-rwxr-xr-x   ci/plot-results.sh                | 101
9 files changed, 1962 insertions, 38 deletions
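For orientation, the entry points added or extended by this change are invoked as follows; the calls are taken from the scripts' own usage headers, and the IP addresses are illustrative placeholders:

    # CI wrapper job (lint checks for verify/merge, full OPNFV+ONAP deployment for daily)
    ./ci/build-auto.sh verify
    # ONAP on existing servers (master IP first, then slave IPs)
    ./ci/deploy-onap.sh 10.1.0.10 10.1.0.11 10.1.0.12
    ./ci/deploy-onap-kubespray.sh 10.1.0.10 10.1.0.11 10.1.0.12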
diff --git a/ci/build-auto.sh b/ci/build-auto.sh index 96588b9..00b67b1 100755 --- a/ci/build-auto.sh +++ b/ci/build-auto.sh @@ -20,10 +20,12 @@ # Usage: # build-auto.sh job_type -# where job_type is one of "verify", "merge", "daily" +# +# Parameters: +# job_type - is one of "verify", "merge" or "daily" # # Example: -# ./ci/build-auto.sh daily +# ./ci/build-auto.sh verify # # exit codes @@ -31,11 +33,21 @@ EXIT=0 EXIT_UNKNOWN_JOB_TYPE=1 EXIT_LINT_FAILED=2 +EXIT_FUEL_FAILED=10 # # configuration # AUTOENV_DIR="$HOME/autoenv" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +LOG_DIR=$HOME/auto_ci_daily_logs +WORKSPACE=${WORKSPACE:-$PWD} + +# POD and SCENARIO details used during OPNFV deployment performed by daily job +NODE_NAME=${NODE_NAME:-"ericsson-virtual1"} +POD_LAB=$(echo $NODE_NAME | cut -d '-' -f1) +POD_NAME=$(echo $NODE_NAME | cut -d '-' -f2) +DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-onap-noha"} # # functions @@ -47,6 +59,42 @@ function execute_auto_lint_check() { fi } +# check and install required packages +function dependencies_check() { + . /etc/os-release + if [ $ID == "ubuntu" ] ; then + echo "Dependencies check" + echo "==================" + # install system packages + for PACKAGE in "virtualenv" "pylint" "yamllint" "gnuplot" ; do + if dpkg -s $PACKAGE &> /dev/null ; then + printf " %-70s %-6s\n" $PACKAGE "OK" + else + printf " %-70s %-6s\n" $PACKAGE "missing" + sudo apt-get install -y $PACKAGE + fi + done + echo + fi +} + +# create virtualenv if needed and enable it +function virtualenv_prepare() { + if [ ! -e $AUTOENV_DIR ] ; then + echo "Create AUTO environment" + echo "=======================" + virtualenv "$AUTOENV_DIR" + echo + fi + + # activate and update virtualenv + echo "Update AUTO environment" + echo "=======================" + source "$AUTOENV_DIR"/bin/activate + pip install -r ./requirements.txt + echo +} + # # main # @@ -55,20 +103,8 @@ echo # enter workspace dir cd $WORKSPACE -# create virtualenv if needed -if [ ! -e $AUTOENV_DIR ] ; then - echo "Create AUTO environment" - echo "=======================" - virtualenv "$AUTOENV_DIR" - echo -fi - -# activate and update virtualenv -echo "Update AUTO environment" -echo "=======================" -source "$AUTOENV_DIR"/bin/activate -pip install -r ./requirements.txt -echo +# check if required packages are installed +dependencies_check # execute job based on passed parameter case $1 in @@ -77,15 +113,9 @@ case $1 in echo "AUTO verify job" echo "===============" - # Example of verify job body. Functions can call - # external scripts, etc. - + virtualenv_prepare execute_auto_lint_check #execute_auto_doc_check - #install_opnfv MCP - #install_onap - #execute_sanity_check - #execute_tests $1 # Everything went well, so report SUCCESS to Jenkins exit $EXIT @@ -95,15 +125,9 @@ case $1 in echo "AUTO merge job" echo "==============" - # Example of merge job body. Functions can call - # external scripts, etc. - + virtualenv_prepare execute_auto_lint_check #execute_auto_doc_check - #install_opnfv MCP - #install_onap - #execute_sanity_check - #execute_tests $1 # propagate result to the Jenkins job exit $EXIT @@ -112,15 +136,73 @@ case $1 in echo "==============" echo "AUTO daily job" echo "==============" + echo + echo "Deployment details:" + echo " LAB: $POD_LAB" + echo " POD: $POD_NAME" + echo " Scenario: $DEPLOY_SCENARIO" + echo " WORKSPACE: $WORKSPACE" + echo - # Example of daily job body. Functions can call - # external scripts, etc. 
- - #install_opnfv MCP - #install_onap - #execute_sanity_check - #execute_tests $1 - #push_results_and_logs_to_artifactory + # create log dir if needed + if [ ! -e $LOG_DIR ] ; then + echo "Create AUTO LOG DIRECTORY" + echo "=========================" + echo "mkdir $LOG_DIR" + mkdir $LOG_DIR + echo + fi + + echo "Installation of OPNFV and ONAP" + echo "==============================" + # clone fuel and execute installation of ONAP scenario to install + # ONAP on top of OPNFV deployment + [ -e fuel ] && rm -rf fuel + git clone https://gerrit.opnfv.org/gerrit/fuel + cd fuel + # Fuel master branch is currently broken; thus use stable/gambia + # branch with recent master version of ONAP scenario + git checkout stable/gambia + git checkout origin/master mcp/config/states/onap \ + mcp/config/scenario/os-nosdn-onap-ha.yaml \ + mcp/config/scenario/os-nosdn-onap-noha.yaml + # use larger disk size for virtual nodes + sed -i -re 's/(qemu-img resize.*)100G/\1400G/' mcp/scripts/lib_jump_deploy.sh + + LOG_FILE="$LOG_DIR/deploy_${TIMESTAMP}.log" + echo "ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\ + tee $LOG_FILE" + DEPLOY_START=$(date +%Y%m%d_%H%M%S) + ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\ + tee $LOG_FILE + + # report failure if fuel failed to install OPNFV or ONAP + [ $? -ne 0 ] && exit $EXIT_FUEL_FAILED + + # process report + DEPLOY_END=$(date +%Y%m%d_%H%M%S) + REPORT_FILE="$LOG_DIR/deploy_report_${TIMESTAMP}.txt" + CSV_SUMMARY="$LOG_DIR/deploy_summary_${TIMESTAMP}.csv" + MARKER="ONAP INSTALLATION REPORT" + # cut report from installation log file + sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" \ + $LOG_FILE > $REPORT_FILE + PODS_TOTAL=$(grep "PODs Total" $REPORT_FILE | sed -e 's/[^0-9]//g') + PODS_FAILED=$(grep "PODs Failed" $REPORT_FILE | sed -e 's/[^0-9]//g') + TC_SUM=$(grep "tests total" $REPORT_FILE | tail -n1 |\ + sed -e 's/[^0-9,]//g') + + echo "Start Time,End Time,Total PODs,Failed PODs,Total Tests,Passed"\ + "Tests,Failed Tests" >> $CSV_SUMMARY + echo "$DEPLOY_START,$DEPLOY_END,$PODS_TOTAL,$PODS_FAILED,$TC_SUM"\ + >> $CSV_SUMMARY + + # plot graphs from result summaries and print txt versions if possible + cd $WORKSPACE + ci/plot-results.sh + for GRAPH in $(ls -1 graph*txt 2> /dev/null) ; do + cat $GRAPH + done # propagate result to the Jenkins job exit $EXIT diff --git a/ci/deploy-onap-fuel.sh b/ci/deploy-onap-fuel.sh new file mode 100755 index 0000000..c120e9c --- /dev/null +++ b/ci/deploy-onap-fuel.sh @@ -0,0 +1,238 @@ +#!/bin/bash +# +# Copyright 2018 Tieto +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script for automated deployment of ONAP on top of OPNFV Fuel/MCP installation +# In the future both OOM and heat install methods should be supported. +# At the beginning OOM will be used for simplification. 
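The daily job above extracts its summary by slicing the deploy log between marker lines and stripping non-digits from labelled counters. A minimal standalone sketch of that sed/grep idiom, with illustrative file names:

    MARKER="ONAP INSTALLATION REPORT"
    # print from the opening marker to the closing marker, then stop reading the log
    sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" deploy.log > report.txt
    # reduce a labelled line such as "PODs Total: 61" to its numeric value
    PODS_TOTAL=$(grep "PODs Total" report.txt | sed -e 's/[^0-9]//g')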
+ +# TODO: +# Configure ONAP to be able to control underlying OpenStack + +# Configuration to be passed to ci/deploy-onap.sh +export SSH_USER="ubuntu" +export SSH_IDENTITY="/root/.ssh/onap_key" + +# detect hypervisor details to be used as default values if needed +OS_HYPER_CMD="openstack hypervisor list --long" +echo -e "\nOpenStack Hypervisor list\n" +$OS_HYPER_CMD + +DEFAULT_CMP_COUNT=$($OS_HYPER_CMD -f value -c "ID" | wc -l) +DEFAULT_CMP_MIN_MEM=$($OS_HYPER_CMD -f value -c "Memory MB" | sort | head -n1) +DEFAULT_CMP_MIN_CPUS=$($OS_HYPER_CMD -f value -c "vCPUs" | sort | head -n1) + +# Use default values if compute configuration was not set by the Fuel installer +AUTO_INSTALL_DIR=${AUTO_INSTALL_DIR:-"."} +AUTO_IMAGE_DIR="${AUTO_INSTALL_DIR}/images" +CMP_COUNT=${CMP_COUNT:-$DEFAULT_CMP_COUNT} # number of compute nodes +CMP_MIN_MEM=${CMP_MIN_MEM:-$DEFAULT_CMP_MIN_MEM} # MB RAM of the weakest compute node +CMP_MIN_CPUS=${CMP_MIN_CPUS:-$DEFAULT_CMP_MIN_CPUS} # CPU count of the weakest compute node +# size of storage for instances +CMP_STORAGE_TOTAL=${CMP_STORAGE_TOTAL:-$((80*$CMP_COUNT))} +VM_COUNT=${VM_COUNT:-6} # number of VMs available for k8s cluster + +# +# Functions +# +# function minimum accepts two numbers and prints the smaller one +function minimum(){ + echo $(($1<$2?$1:$2)) +} + +# function remove_openstack_setup removes OS configuration performed by this +# script, so previously created configuration and deployed VMs will be +# removed before a new ONAP deployment is started. +function remove_openstack_setup(){ + # flavor is created 1st but removed last, so... + if ( ! openstack flavor list | grep 'onap.large' &> /dev/null ) ; then + #...no flavor means nothing to be removed + return + fi + echo -e "\nRemoving ONAP specific OpenStack configuration" + for a in $(openstack server list --name onap_vm -f value -c ID) ; do + openstack server delete $a + done + RULES=$(openstack security group rule list onap_security_group -f value -c ID) + for a in $RULES; do + openstack security group rule delete $a + done + openstack security group delete onap_security_group + for a in $(openstack floating ip list -f value -c ID) ; do + openstack floating ip delete $a + done + PORTS=$(openstack port list --network onap_private_network -f value -c ID) + for a in $PORTS ; do + openstack router remove port onap_router $a + done + PORTS=$(openstack port list --network onap_private_network -f value -c ID) + for a in $PORTS ; do + openstack port delete $a + done + openstack router delete onap_router + openstack subnet delete onap_private_subnet + openstack network delete onap_private_network + openstack image delete xenial + rm -rf $AUTO_IMAGE_DIR + openstack keypair delete onap_key + rm $SSH_IDENTITY + openstack flavor delete onap.large + echo +} + +# +# Script Main +# + +# remove OpenStack configuration if it exists +remove_openstack_setup + +echo -e "\nOpenStack configuration\n" + +# Calculate VM resources, so that flavor can be created +echo "Configuration of compute node:" +echo "Number of computes: CMP_COUNT=$CMP_COUNT" +echo "Minimal RAM: CMP_MIN_MEM=$CMP_MIN_MEM" +echo "Minimal CPUs count: CMP_MIN_CPUS=$CMP_MIN_CPUS" +echo "Storage for instances: CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL" +echo "Number of VMs: VM_COUNT=$VM_COUNT" +# Calculate VM parameters; there will be up to 1 VM per Compute node +# to maximize resources available for VMs +PER=85 # % of compute resources will be consumed by VMs +VM_DISK_MAX=100 # GB - max VM disk size +VM_MEM_MAX=81920 # MB - max VM RAM size +VM_CPUS_MAX=56 # max count of
VM CPUs +VM_MEM=$(minimum $(($CMP_MIN_MEM*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_MEM_MAX) +VM_CPUS=$(minimum $(($CMP_MIN_CPUS*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_CPUS_MAX) +VM_DISK=$(minimum $(($CMP_STORAGE_TOTAL*$PER/100/$VM_COUNT)) $VM_DISK_MAX) + +echo -e "\nFlavor configuration:" +echo "CPUs : $VM_CPUS" +echo "RAM [MB] : $VM_MEM" +echo "DISK [GB] : $VM_DISK" + +# Create onap flavor +openstack flavor create --ram $VM_MEM --vcpus $VM_CPUS --disk $VM_DISK \ + onap.large + +# Generate a keypair and store private key +openstack keypair create onap_key > $SSH_IDENTITY +chmod 600 $SSH_IDENTITY + +# Download and import VM image(s) +mkdir $AUTO_IMAGE_DIR +wget -P $AUTO_IMAGE_DIR https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img +openstack image create --disk-format qcow2 --container-format bare --public \ + --file $AUTO_IMAGE_DIR/xenial-server-cloudimg-amd64-disk1.img xenial + +# Modify quotas (add 10% to required VM resources) +openstack quota set --ram $(($VM_MEM*$VM_COUNT*110/100)) admin +openstack quota set --cores $(($VM_CPUS*$VM_COUNT*110/100)) admin + +# Configure networking with DNS for access to the internet +openstack network create onap_private_network --provider-network-type vxlan +openstack subnet create onap_private_subnet --network onap_private_network \ + --subnet-range 192.168.33.0/24 --ip-version 4 --dhcp --dns-nameserver "8.8.8.8" +openstack router create onap_router +openstack router add subnet onap_router onap_private_subnet +openstack router set onap_router --external-gateway floating_net + +# Allow selected ports and protocols +openstack security group create onap_security_group +openstack security group rule create --protocol icmp onap_security_group +openstack security group rule create --proto tcp \ + --dst-port 22:22 onap_security_group +openstack security group rule create --proto tcp \ + --dst-port 8080:8080 onap_security_group # rancher +openstack security group rule create --proto tcp \ + --dst-port 8078:8078 onap_security_group # horizon +openstack security group rule create --proto tcp \ + --dst-port 8879:8879 onap_security_group # helm +openstack security group rule create --proto tcp \ + --dst-port 80:80 onap_security_group +openstack security group rule create --proto tcp \ + --dst-port 443:443 onap_security_group + +# Allow communication between k8s cluster nodes +PUBLIC_NET=`openstack subnet list --name floating_subnet -f value -c Subnet` +openstack security group rule create --remote-ip $PUBLIC_NET --proto tcp \ + --dst-port 1:65535 onap_security_group +openstack security group rule create --remote-ip $PUBLIC_NET --proto udp \ + --dst-port 1:65535 onap_security_group + +# Get list of hypervisors and their zone +HOST_ZONE=$(openstack host list -f value | grep compute | head -n1 | cut -d' ' -f3) +HOST_NAME=($(openstack host list -f value | grep compute | cut -d' ' -f1)) +HOST_COUNT=$(echo ${HOST_NAME[@]} | wc -w) +# Create VMs and assign floating IPs to them +VM_ITER=1 +HOST_ITER=0 +while [ $VM_ITER -le $VM_COUNT ] ; do + openstack floating ip create floating_net + VM_NAME[$VM_ITER]="onap_vm${VM_ITER}" + VM_IP[$VM_ITER]=$(openstack floating ip list -c "Floating IP Address" \ + -c "Port" -f value | grep None | cut -f1 -d " " | head -n1) + # dispatch new VMs among compute nodes in round robin fashion + openstack server create --flavor onap.large --image xenial \ + --nic net-id=onap_private_network --security-group onap_security_group \ + --key-name onap_key ${VM_NAME[$VM_ITER]} \ + --availability-zone 
${HOST_ZONE}:${HOST_NAME[$HOST_ITER]} + sleep 10 # wait for VM init before floating IP can be assigned + openstack server add floating ip ${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]} + echo "Waiting for ${VM_NAME[$VM_ITER]} to start up for 1m at $(date)" + sleep 1m + VM_ITER=$(($VM_ITER+1)) + HOST_ITER=$(($HOST_ITER+1)) + [ $HOST_ITER -ge $HOST_COUNT ] && HOST_ITER=0 +done + +openstack server list -c ID -c Name -c Status -c Networks -c Host --long + +# check that SSH to all VMs is working +SSH_OPTIONS="-i $SSH_IDENTITY -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" +COUNTER=1 +while [ $COUNTER -le 10 ] ; do + VM_UP=0 + VM_ITER=1 + while [ $VM_ITER -le $VM_COUNT ] ; do + if ssh $SSH_OPTIONS -l $SSH_USER ${VM_IP[$VM_ITER]} exit &>/dev/null ; then + VM_UP=$(($VM_UP+1)) + echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: up" + else + echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: down" + fi + VM_ITER=$(($VM_ITER+1)) + done + COUNTER=$(($COUNTER+1)) + if [ $VM_UP -eq $VM_COUNT ] ; then + break + fi + echo "Waiting for VMs to be accessible via ssh for 2m at $(date)" + sleep 2m +done + +openstack server list -c ID -c Name -c Status -c Networks -c Host --long + +if [ $VM_UP -ne $VM_COUNT ] ; then + echo "Only $VM_UP of $VM_COUNT VMs are accessible via ssh. Installation will be terminated." + exit 1 +fi + +# Start ONAP installation +DATE_START=$(date) +echo -e "\nONAP Installation Started at $DATE_START\n" +$AUTO_INSTALL_DIR/ci/deploy-onap.sh ${VM_IP[@]} +echo -e "\nONAP Installation Started at $DATE_START" +echo -e "ONAP Installation Finished at $(date)\n" diff --git a/ci/deploy-onap-kubespray.sh b/ci/deploy-onap-kubespray.sh new file mode 100755 index 0000000..a797388 --- /dev/null +++ b/ci/deploy-onap-kubespray.sh @@ -0,0 +1,339 @@ +#!/bin/bash +# +# Copyright 2018-2019 Tieto +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS +# environment. +# + +# +# Configuration +# +export LC_ALL=C +export LANG=C + +MASTER=$1 +SERVERS=$* +shift +SLAVES=$* + +ONAP_BRANCH=${ONAP_BRANCH:-'casablanca'} +KUBESPRAY_COMMIT="bbfd2dc2bd088efc63747d903edd41fe692531d8" +NAMESPACE='onap' +SSH_USER=${SSH_USER:-"opnfv"} +SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' +# use identity file from the environment SSH_IDENTITY +if [ -n "$SSH_IDENTITY" ] ; then + SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS" + ANSIBLE_IDENTITY="--private-key=$SSH_IDENTITY" +fi + +KUBESPRAY_OPTIONS='-e "kubelet_max_pods=250"' + +TMP_POD_LIST='/tmp/onap_pod_list.txt' + +case "$ONAP_BRANCH" in + "beijing") + HELM_VERSION=2.8.2 + ;; + "casablanca") + HELM_VERSION=2.9.1 + ;; + *) + HELM_VERSION=2.9.1 + ;; +esac + +ONAP_MINIMAL="aai dmaap portal robot sdc sdnc so vid" +# by default install a minimal ONAP installation +# an empty list of ONAP_COMPONENT means a full ONAP installation +ONAP_COMPONENT=${ONAP_COMPONENT:-$ONAP_MINIMAL} + +# +# Functions +# +function usage() { + echo "usage" + cat <<EOL +Usage: + $0 <MASTER> [ <SLAVE1> <SLAVE2> ...
] + + where <MASTER> and <SLAVEx> are IP addresses of servers to be used + for ONAP installation. + + Script behavior is affected by the following environment variables: + + ONAP_COMPONENT - a list of ONAP components to be installed, empty list + will trigger a full ONAP installation + VALUE: "$ONAP_COMPONENT" + + ONAP_BRANCH - version of ONAP to be installed (OOM branch version) + VALUE: "$ONAP_BRANCH" + + NAMESPACE - name of ONAP namespace in kubernetes cluster + VALUE: "$NAMESPACE" + + SSH_USER - user name to be used to access <MASTER> and <SLAVEx> + servers + VALUE: "$SSH_USER" + + SSH_IDENTITY - (optional) ssh identity file to be used to access + <MASTER> and <SLAVEx> servers as a SSH_USER + VALUE: "$SSH_IDENTITY" + +NOTE: The following must be assured for <MASTER> and <SLAVEx> servers before + $0 execution: + 1) SSH_USER must be able to access servers via ssh without a password + 2) SSH_USER must have a password-less sudo access +EOL +} + +# Check if server IPs of kubernetes nodes are configured at a given server. +# If that is not the case, then the kubespray inventory file must be updated. +function check_server_ips() { + for SERVER_IP in $(grep 'ip=' $1 | sed -re 's/^.*ip=([0-9\.]+).*$/\1/') ; do + IP_OK="false" + for IP in $(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip a | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g'") ; do + if [ "$IP" == "$SERVER_IP" ] ; then + IP_OK="true" + fi + done + # access IP (e.g. OpenStack floating IP) is not a server-local address, so update the inventory + if [ $IP_OK == "false" ] ; then + # get server default GW dev + DEV=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip route ls" | grep ^default | sed -re 's/^.*dev (.*)$/\1/') + LOCAL_IP=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip -f inet addr show $DEV" | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g') + if [ "$LOCAL_IP" == "" ] ; then + echo "Can't read local IP for server with IP $SERVER_IP" + exit 1 + fi + sed -i'' -e "s/ip=$SERVER_IP/ip=$LOCAL_IP access_ip=$SERVER_IP/" $1 + fi + done +} + +# sanity check +if [ "$SERVERS" == "" ] ; then + usage + exit 1 +fi + +# +# Installation +# + +# detect CPU architecture to download correct helm binary +CPU_ARCH=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "uname -p") +case "$CPU_ARCH" in + "x86_64") + ARCH="amd64" + ;; + "aarch64") + ARCH="arm64" + ;; + *) + echo "Unsupported CPU architecture '$CPU_ARCH' was detected." + exit 1 +esac + +# print configuration +cat << EOL +list of configuration options: + SERVERS="$SERVERS" + ONAP_COMPONENT="$ONAP_COMPONENT" + ONAP_BRANCH="$ONAP_BRANCH" + NAMESPACE="$NAMESPACE" + SSH_USER="$SSH_USER" + SSH_IDENTITY="$SSH_IDENTITY" + ARCH="$ARCH" + +EOL + +# install K8S cluster by kubespray +sudo apt-get -y update +sudo apt-get -y install git ansible python-jinja2 python3-pip libffi-dev libssl-dev +git clone https://github.com/kubernetes-incubator/kubespray.git +cd kubespray +git checkout $KUBESPRAY_COMMIT +pip3 install -r requirements.txt +export CONFIG_FILE=inventory/auto_hosts.ini +rm $CONFIG_FILE +python3 contrib/inventory_builder/inventory.py $SERVERS +check_server_ips $CONFIG_FILE +cat $CONFIG_FILE +if ( ! 
ansible-playbook -i $CONFIG_FILE $KUBESPRAY_OPTIONS -b -u $SSH_USER $ANSIBLE_IDENTITY cluster.yml ) ; then + echo "Kubespray installation has failed at $(date)" + exit 1 +fi + +# use standalone K8S master if there are enough VMs available for the K8S cluster +SERVERS_COUNT=$(echo $SERVERS | wc -w) +if [ $SERVERS_COUNT -gt 2 ] ; then + K8S_NODES=$SLAVES +else + K8S_NODES=$SERVERS +fi + +echo "INSTALLATION TOPOLOGY:" +echo "Kubernetes Master: $MASTER" +echo "Kubernetes Nodes: $K8S_NODES" +echo +echo "CONFIGURING NFS ON SLAVES" +echo "$SLAVES" + +for SLAVE in $SLAVES; +do +ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS & + sudo su + apt-get install nfs-common -y + mkdir /dockerdata-nfs + chmod 777 /dockerdata-nfs + echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab + mount -a + mount | grep dockerdata-nfs +CONFIGURENFS +done +wait + +echo "DEPLOYING OOM ON MASTER" +echo "$MASTER" + +ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY +sudo su +echo "create namespace '$NAMESPACE'" +cat <<EOF | kubectl create -f - +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "$NAMESPACE", + "labels": { + "name": "$NAMESPACE" + } + } +} +EOF +kubectl get namespaces --show-labels +kubectl -n kube-system create sa tiller +kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller +rm -rf oom +echo "pulling new oom" +git clone -b $ONAP_BRANCH http://gerrit.onap.org/r/oom + +# NFS FIX for aaf-locate +sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml +sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\ + ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml + +echo "Pre-pulling docker images at \$(date)" +wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh +chmod 777 prepull_docker.sh +./prepull_docker.sh +echo "starting onap pods" +cd oom/kubernetes/ + +# Enable selected ONAP components +if [ -n "$ONAP_COMPONENT" ] ; then + # disable all components and enable only selected in next loop + sed -i '/^.*:$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml + echo -n "Enable following ONAP components:" + for COMPONENT in $ONAP_COMPONENT; do + echo -n " \$COMPONENT" + sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *false/enabled: true/' onap/values.yaml + done + echo +else + echo "All ONAP components will be installed" +fi + +wget http://storage.googleapis.com/kubernetes-helm\ +/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz +tar -zxvf helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz +mv linux-${ARCH}/helm /usr/local/bin/helm +helm init --upgrade --service-account tiller +# run helm server on the background and detached from current shell +nohup helm serve 0<&- &>/dev/null & +echo "Waiting for helm setup for 5 min at \$(date)" +sleep 5m +helm version +helm repo add local http://127.0.0.1:8879 +helm repo list +make all +if ( ! 
helm install local/onap -n dev --namespace $NAMESPACE) ; then + echo "ONAP installation has failed at \$(date)" + exit 1 +fi + +cd ../../ + +echo "Waiting for ONAP pods to be up \$(date)" +echo "Ignore failure of sdnc-ansible-server, see SDNC-443" +function get_onap_pods() { + kubectl get pods --namespace $NAMESPACE > $TMP_POD_LIST + return \$(cat $TMP_POD_LIST | wc -l) +} +FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs +ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running +WAIT_PERIOD=60 # wait period in seconds +MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds +MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD)) +COUNTER=0 +get_onap_pods +ALL_PODS=\$? +PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do + # print header every 20th line + if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then + printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs" + fi + COUNTER=\$((\$COUNTER+1)) + printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS + sleep \$WAIT_PERIOD + if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then + FAILED_PODS_LIMIT=800 + ALL_PODS_LIMIT=0 + fi + get_onap_pods + ALL_PODS=\$? + PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +done + +get_onap_pods +cp $TMP_POD_LIST ~/onap_all_pods.txt +echo +echo "========================" +echo "ONAP INSTALLATION REPORT" +echo "========================" +echo +echo "List of Failed PODs" +echo "-------------------" +grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt +echo +echo "Summary:" +echo "--------" +echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)" +echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)" +echo +echo "ONAP health TC results" +echo "----------------------" +cd oom/kubernetes/robot +./ete-k8s.sh $NAMESPACE health | tee ~/onap_health.txt +echo "===============================" +echo "END OF ONAP INSTALLATION REPORT" +echo "===============================" +OOMDEPLOY + +echo "Finished install, returned from Master at $(date)" +exit 0 diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh new file mode 100755 index 0000000..c34eb56 --- /dev/null +++ b/ci/deploy-onap.sh @@ -0,0 +1,376 @@ +#!/bin/bash +# +# Copyright 2018 Tieto +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS +# environment. +# +# Usage: +# deploy-onap.sh <MASTER> <SLAVE1> <SLAVE2> +# +# where <MASTER> and <SLAVEx> are IP addresses of servers to be used +# for ONAP installation.
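The wait loop above treats a pod as not ready while its READY column shows '0/' or '1/2', and counts such pods with grep. A standalone sketch of the same polling idea, assuming an 'onap' namespace and leaving out the script's header printing and timeout escalation:

    PENDING=$(kubectl get pods --namespace onap | grep -cE '0/|1/2')
    while [ "$PENDING" -gt 1 ] ; do   # limit of 1 tolerates the known sdnc-ansible-server failure
        sleep 60
        PENDING=$(kubectl get pods --namespace onap | grep -cE '0/|1/2')
    done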
+# +# NOTE: The following must be assured for all MASTER and SLAVE servers before +# deploy-onap.sh execution: +# 1) ssh access without a password +# 2) a user account with password-less sudo access must be +# available - default user is "opnfv" + +# +# Configuration +# +DOCKER_VERSION=17.03 +RANCHER_VERSION=1.6.14 +RANCHER_CLI_VER=0.6.11 +KUBECTL_VERSION=1.8.10 +HELM_VERSION=2.8.2 + +MASTER=$1 +SERVERS=$* +shift +SLAVES=$* + +BRANCH='beijing' +ENVIRON='onap' + +SSH_USER=${SSH_USER:-"opnfv"} +SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' +# by default install full ONAP installation +ONAP_COMPONENT_DISABLE=${ONAP_COMPONENT_DISABLE:-""} +# example of minimal ONAP installation +#ONAP_COMPONENT_DISABLE="clamp cli consul dcaegen2 esr log msb multicloud nbi oof policy uui vfc vnfsdk" + +# use identity file from the environment SSH_IDENTITY +if [ -n "$SSH_IDENTITY" ] ; then + SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS" +fi + +# +# Installation +# + +# use standalone K8S master if there are enough VMs available for the K8S cluster +SERVERS_COUNT=$(echo $SERVERS | wc -w) +if [ $SERVERS_COUNT -gt 2 ] ; then + RANCHER_SLAVES=$SLAVES +else + RANCHER_SLAVES=$SERVERS +fi + +echo "INSTALLATION TOPOLOGY:" +echo "Rancher Master: $MASTER" +echo "Rancher Slaves: $RANCHER_SLAVES" +echo +echo "INSTALLING DOCKER ON ALL MACHINES" +echo "$SERVERS" + +for MACHINE in $SERVERS; +do +ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<DOCKERINSTALL & + sudo -i + sysctl -w vm.max_map_count=262144 + apt-get update -y + curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh + + mkdir -p /etc/systemd/system/docker.service.d/ + echo "[Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// \ + --insecure-registry=nexus3.onap.org:10001"\ + > /etc/systemd/system/docker.service.d/docker.conf + + systemctl daemon-reload + systemctl restart docker + apt-mark hold docker-ce + + for SERVER in $SERVERS; + do + echo "\$SERVER $ENVIRON\$(echo \$SERVER | cut -d. -f 4 )" >> /etc/hosts + done + + hostname $ENVIRON\$(echo $MACHINE | cut -d. 
-f 4 ) + + echo "DOCKER INSTALLED ON $MACHINE" +DOCKERINSTALL +done +wait + +echo "INSTALLING RANCHER ON MASTER" +echo "$MASTER" + +ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<RANCHERINSTALL +sudo -i +echo "INSTALL STARTS" +apt-get install -y jq make htop +echo "Waiting for 30 seconds at \$(date)" +sleep 30 + +docker login -u docker -p docker nexus3.onap.org:10001 + +docker run -d --restart=unless-stopped -p 8080:8080\ + --name rancher_server rancher/server:v$RANCHER_VERSION +curl -LO https://storage.googleapis.com/kubernetes-release/\ +release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl +chmod +x ./kubectl +mv ./kubectl /usr/local/bin/kubectl +mkdir ~/.kube +wget http://storage.googleapis.com/kubernetes-helm\ +/helm-v${HELM_VERSION}-linux-amd64.tar.gz +tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz +mv linux-amd64/helm /usr/local/bin/helm + +echo "Installing nfs server" +# changed from nfs_share to dockerdata-nfs +apt-get install nfs-kernel-server -y + +mkdir -p /dockerdata-nfs +chmod 777 /dockerdata-nfs +echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports +service nfs-kernel-server restart + +echo "Waiting 10 minutes for Rancher to setup at \$(date)" +sleep 10m +echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER" +wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\ +/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz +tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz +cp rancher-v${RANCHER_CLI_VER}-rc2/rancher . + +API_RESPONSE=\`curl -s 'http://127.0.0.1:8080/v2-beta/apikey'\ + -d '{"type":"apikey","accountId":"1a1","name":"autoinstall",\ + "description":"autoinstall","created":null,"kind":null,\ + "removeTime":null,"removed":null,"uuid":null}'\` +# Extract and store token +echo "API_RESPONSE: \${API_RESPONSE}" +KEY_PUBLIC=\`echo \${API_RESPONSE} | jq -r .publicValue\` +KEY_SECRET=\`echo \${API_RESPONSE} | jq -r .secretValue\` +echo "publicValue: \$KEY_PUBLIC secretValue: \$KEY_SECRET" + +export RANCHER_URL=http://${MASTER}:8080 +export RANCHER_ACCESS_KEY=\$KEY_PUBLIC +export RANCHER_SECRET_KEY=\$KEY_SECRET + +./rancher env ls +echo "Creating kubernetes environment named ${ENVIRON}" +./rancher env create -t kubernetes $ENVIRON > kube_env_id.json +PROJECT_ID=\$(<kube_env_id.json) +echo "env id: \$PROJECT_ID" + +echo "Waiting for ${ENVIRON} creation - 1 min at \$(date)" +sleep 1m + +export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID +echo "you should see an additional kubernetes environment" +./rancher env ls + +REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\ + -H 'Accept: application/json'\ + -H 'ContentType: application/json'\ + -d '{"name":"$MASTER"}'\ + "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\` +echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE" +echo "Waiting for the server to finish url configuration - 1 min at \$(date)" +sleep 1m +# see registrationUrl in +REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\` +echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS" +REGISTRATION_URL=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].registrationUrl\` +REGISTRATION_DOCKER=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].image\` +REGISTRATION_TOKEN=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].token\` +echo "Registering host for image: \$REGISTRATION_DOCKER\ + url: \$REGISTRATION_URL registrationToken: \$REGISTRATION_TOKEN" +HOST_REG_COMMAND=\`echo \$REGISTRATION_TOKENS | jq -r .data[0].command\` + +# base64 encode the kubectl token from the auth pair 
+# generate this after the host is registered +KUBECTL_TOKEN=\$(echo -n 'Basic '\$(echo\ + -n "\$RANCHER_ACCESS_KEY:\$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0) +echo "KUBECTL_TOKEN base64 encoded: \${KUBECTL_TOKEN}" + +# add kubectl config - NOTE: the following spacing has to be "exact" +# or kubectl will not connect - with a localhost:8080 error +echo 'apiVersion: v1 +kind: Config +clusters: +- cluster: + api-version: v1 + insecure-skip-tls-verify: true + server: "https://$MASTER:8080/r/projects/'\$PROJECT_ID'/kubernetes:6443" + name: "${ENVIRON}" +contexts: +- context: + cluster: "${ENVIRON}" + user: "${ENVIRON}" + name: "${ENVIRON}" +current-context: "${ENVIRON}" +users: +- name: "${ENVIRON}" + user: + token: "'\${KUBECTL_TOKEN}'" ' > ~/.kube/config + +echo "docker run --rm --privileged\ + -v /var/run/docker.sock:/var/run/docker.sock\ + -v /var/lib/rancher:/var/lib/rancher\ + \$REGISTRATION_DOCKER\ + \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\ + > /tmp/rancher_register_host +chown $SSH_USER /tmp/rancher_register_host + +RANCHERINSTALL + +echo "REGISTER TOKEN" +HOSTREGTOKEN=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" cat /tmp/rancher_register_host) +echo "$HOSTREGTOKEN" + +echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'" +echo "$RANCHER_SLAVES" + +for MACHINE in $RANCHER_SLAVES; +do +ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<REGISTERHOST & + sudo -i + $HOSTREGTOKEN + sleep 5 + echo "Host $MACHINE waiting for host registration 5 min at \$(date)" + sleep 5m +REGISTERHOST +done +wait + +echo "CONFIGURING NFS ON SLAVES" +echo "$SLAVES" + +for SLAVE in $SLAVES; +do +ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS & + sudo -i + apt-get install nfs-common -y + mkdir /dockerdata-nfs + chmod 777 /dockerdata-nfs + echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab + mount -a + mount | grep dockerdata-nfs +CONFIGURENFS +done +wait + +echo "DEPLOYING OOM ON RANCHER WITH MASTER" +echo "$MASTER" +TMP_POD_LIST='/tmp/onap_pod_list.txt' + +ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY +sudo -i +rm -rf oom +echo "pulling new oom" +git clone -b $BRANCH http://gerrit.onap.org/r/oom + +# NFS FIX for aaf-locate +sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml +sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\ + ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml + +echo "Pre-pulling docker images at \$(date)" +wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh +chmod 777 prepull_docker.sh +./prepull_docker.sh +echo "starting onap pods" +cd oom/kubernetes/ + +# Disable ONAP components +if [ -n "$ONAP_COMPONENT_DISABLE" ] ; then + echo -n "Disable following ONAP components:" + for COMPONENT in $ONAP_COMPONENT_DISABLE; do + echo -n " \$COMPONENT" + sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml + done + echo +fi + +helm init --upgrade +# run helm server on the background and detached from current shell +nohup helm serve 0<&- &>/dev/null & +echo "Waiting for helm setup for 5 min at \$(date)" +sleep 5m +helm version +helm repo add local http://127.0.0.1:8879 +helm repo list +make all +if ( ! 
helm install local/onap -n dev --namespace $ENVIRON) ; then + echo "ONAP installation has failed at \$(date)" + exit 1 +fi + +cd ../../ + +echo "Waiting for ONAP pods to be up \$(date)" +echo "Ignore failure of sdnc-ansible-server, see SDNC-443" +function get_onap_pods() { + kubectl get pods --namespace $ENVIRON > $TMP_POD_LIST + return \$(cat $TMP_POD_LIST | wc -l) +} +FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs +ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running +WAIT_PERIOD=60 # wait period in seconds +MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds +MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD)) +COUNTER=0 +get_onap_pods +ALL_PODS=\$? +PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do + # print header every 20th line + if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then + printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs" + fi + COUNTER=\$((\$COUNTER+1)) + printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS + sleep \$WAIT_PERIOD + if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then + FAILED_PODS_LIMIT=800 + ALL_PODS_LIMIT=0 + fi + get_onap_pods + ALL_PODS=\$? + PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +done + +get_onap_pods +cp $TMP_POD_LIST ~/onap_all_pods.txt +echo +echo "========================" +echo "ONAP INSTALLATION REPORT" +echo "========================" +echo +echo "List of Failed PODs" +echo "-------------------" +grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt +echo +echo "Summary:" +echo "--------" +echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)" +echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)" +echo +echo "ONAP health TC results" +echo "----------------------" +cd oom/kubernetes/robot +./ete-k8s.sh $ENVIRON health | tee ~/onap_health.txt +echo "===============================" +echo "END OF ONAP INSTALLATION REPORT" +echo "===============================" +OOMDEPLOY + +echo "Finished install, returned from Master at $(date)" +exit 0 diff --git a/ci/deploy-opnfv-apex-centos.sh b/ci/deploy-opnfv-apex-centos.sh new file mode 100644 index 0000000..a3a0433 --- /dev/null +++ b/ci/deploy-opnfv-apex-centos.sh @@ -0,0 +1,209 @@ +#!/usr/bin/env bash + +# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent +# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors + +############################################################################## +# Copyright (c) 2018 Wipro Limited and others. +# +# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# OPNFV contribution guidelines Wiki page: +# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines + +# OPNFV/Auto project: +# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095 + + +# localization control: force script to use default language for output, and force sorting to be bytewise +# ("C" is from C language, represents "safe" locale everywhere) +# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match) +export LANG=C +export LC_ALL=C + +################################################################################## +## installation of OpenStack via OPNFV Apex/TripleO, on CentOS, virtual deployment +################################################################################## +# reference manual: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/index.html +# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/virtual.html + +echo "*** begin AUTO install: OPNFV Apex/TripleO" + +# check OS version +echo "*** print OS version (must be CentOS, version 7 or more)" +cat /etc/*release + +# Manage Nested Virtualization +echo "*** ensure Nested Virtualization is enabled on Intel x86" +echo "*** nested flag before:" +cat /sys/module/kvm_intel/parameters/nested +rm -f /etc/modprobe.d/kvm-nested.conf +{ printf "options kvm-intel nested=1\n";\ + printf "options kvm-intel enable_shadow_vmcs=1\n";\ + printf "options kvm-intel enable_apicv=1\n";\ + printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf +sudo modprobe -r kvm_intel +sudo modprobe -a kvm_intel +echo "*** nested flag after:" +cat /sys/module/kvm_intel/parameters/nested + +echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines" +lsmod | grep kvm_ +grep kvm_ < /proc/modules + +# 3 additional pre-installation preparations, lifted from OPNFV/storperf (they are post-installation there): +# https://wiki.opnfv.org/display/storperf/LaaS+Setup+For+Development#LaaSSetupForDevelopment-InstallOPNFVApex +# (may or may not be needed, to enable first-time Apex installation on a blank server) + +# 1) Install Docker +sudo yum install -y yum-utils device-mapper-persistent-data lvm2 +sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo +sudo yum install -y docker-ce +sudo systemctl start docker + +# 2) Install docker-compose +sudo curl -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose +sudo chmod +x /usr/local/bin/docker-compose + +# 3) Install Python +sudo yum install -y python-virtualenv +sudo yum groupinstall -y "Development Tools" +sudo yum install -y openssl-devel + + +# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted) +# (note: can take several minutes; may not be necessary) +sudo yum -y update + + +# download Apex packages +echo "*** downloading packages:" +sudo yum -y install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm +sudo yum -y install epel-release +# note: EPEL = Extra Packages for Enterprise Linux +sudo curl -o 
/etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo + +# install three required RPMs (RedHat/RPM Package Managers); this takes several minutes +sudo yum -y install http://artifacts.opnfv.org/apex/fraser/opnfv-apex-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-undercloud-6.2.noarch.rpm http://artifacts.opnfv.org/apex/fraser/opnfv-apex-python34-6.2.noarch.rpm + +# clean-up old Apex versions if any +## precautionary opnfv-clean doesn't work... (even though packages are installed at this point) +opnfv-clean + +# Manage DNS references +# probably not needed on an already configured server: already has DNS references +# echo "nameserver 8.8.8.8" >> /etc/resolv.conf +echo "*** printout of /etc/resolv.conf :" +cat /etc/resolv.conf + +# prepare installation directory +mkdir -p /opt/opnfv-TripleO-apex +cd /opt/opnfv-TripleO-apex + +# make sure cp is not aliased or a function; same for mv and rm +unalias cp +unset -f cp +unalias mv +unset -f mv +unalias rm +unset -f rm + +# 2 YAML files from /etc/opnfv-apex/ are needed for virtual deploys: +# 1) network_settings.yaml : may need to update NIC names, to match the NIC names on the deployment server +# 2) standard scenario file (os-nosdn-nofeature-noha.yaml, etc.), or customized deploy_settings.yaml + +# make a local copy of YAML files (not necessary: could deploy from /etc/opnfv-apex); local copies are just for clarity +# 1) network settings +cp /etc/opnfv-apex/network_settings.yaml . +# 2) deploy settings +# copy one of the 40+ pre-defined scenarios (one of the YAML files) +# for extra customization, git clone Apex repo, and copy and customize the generic deploy_settings.yaml +# git clone https://git.opnfv.org/apex +# cp ./apex/config/deploy/deploy_settings.yaml . +cp /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml ./deploy_settings.yaml +# cp /etc/opnfv-apex/os-nosdn-nofeature-ha.yaml ./deploy_settings.yaml + +# Note: content of os-nosdn-nofeature-noha.yaml +# --- +# global_params: +# ha_enabled: false +# +# deploy_options: +# sdn_controller: false +# tacker: true +# congress: true +# sfc: false +# vpn: false + + +# modify NIC names in network settings YAML file, specific to your environment (e.g. replace em1 with ens4f0 in LaaS) +# Note: actually, this should not matter for a virtual environment +sed -i 's/em1/ens4f0/' network_settings.yaml + +# launch deploy (works if openvswitch module is installed, which may not be the case the first time around) +echo "*** deploying OPNFV by TripleO/Apex:" +# --debug for detailed debug info +# -v: Enable virtual deployment +# note: needs at least 10G RAM for controllers +sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml +# without --debug: +# sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml + +# with specific sizing: +# sudo opnfv-deploy --debug -v -n network_settings.yaml -d deploy_settings.yaml --virtual-compute-ram 32 --virtual-cpus 16 --virtual-computes 4 + + +# verify that the openvswitch module is listed: +lsmod | grep openvswitch +grep openvswitch < /proc/modules + +##{ +## workaround: do 2 successive installations... not exactly optimal... 
+## clean up, as now opnfv-clean should work +#opnfv-clean +## second deploy try, should succeed (whether first one failed or succeeded) +#sudo opnfv-deploy -v -n network_settings.yaml -d deploy_settings.yaml +##} + + + +# verifications: https://docs.opnfv.org/en/latest/submodules/apex/docs/release/installation/verification.html + +# { +# if error after deploy.sh: "libvirt.libvirtError: Storage pool not found: no storage pool with matching name 'default'" + +# This usually happens if for some reason you are missing a default pool in libvirt: +# $ virsh pool-list |grep default +# You can recreate it manually: +# $ virsh pool-define-as default dir --target /var/lib/libvirt/images/ +# $ virsh pool-autostart default +# $ virsh pool-start default +# } + +# { +# if error after deploy.sh: iptc.ip4tc.IPTCError +# check Apex jira ticket #521 https://jira.opnfv.org/browse/APEX-521 +# } + +# OpenvSwitch should not be missing, as it is a requirement from the RPM package: +# https://github.com/opnfv/apex/blob/stable/fraser/build/rpm_specs/opnfv-apex-common.spec#L15 + + + +# install python 3 on CentOS +echo "*** begin install python 3.6 (3.4 should be already installed by default)" + +sudo yum -y install python36 +# install pip and setup tools +sudo curl -O https://bootstrap.pypa.io/get-pip.py +hash -r +sudo /usr/bin/python3.6 get-pip.py --no-warn-script-location + + + +echo "*** end AUTO install: OPNFV Apex/TripleO" + diff --git a/ci/deploy-opnfv-compass-ubuntu.sh b/ci/deploy-opnfv-compass-ubuntu.sh new file mode 100644 index 0000000..efccf78 --- /dev/null +++ b/ci/deploy-opnfv-compass-ubuntu.sh @@ -0,0 +1,201 @@ +#!/usr/bin/env bash + +# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent +# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors + +############################################################################## +# Copyright (c) 2018 Wipro Limited and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# OPNFV contribution guidelines Wiki page: +# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines + +# OPNFV/Auto project: +# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095 + + +# localization control: force script to use default language for output, and force sorting to be bytewise +# ("C" is from C language, represents "safe" locale everywhere) +# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match) +export LANG=C +export LC_ALL=C + +################################################################################# +## installation of OpenStack via OPNFV Compass4nfv, on Ubuntu, virtual deployment +################################################################################# +# reference manual: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/index.html +# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/compass4nfv/docs/release/installation/vmdeploy.html + +echo "*** begin AUTO install: OPNFV Compass4nfv" + +# prepare install directory +export INSTALLDIR=/opt/opnfv-compass +mkdir -p $INSTALLDIR +cd $INSTALLDIR + +# preemptively install latest pip and clear $PATH cache +# with apt-get (see apt-get -h and man apt-get for details) +apt-get -y update +apt-get -y upgrade +apt-get -y install python-pip +pip install --upgrade pip +hash -r +apt-get -y install python3-openstackclient +apt-get -y autoremove + +## note: apt is more recent than apt-get (apt was formally introduced with Ubuntu 16.04) +## APT: Advanced Packaging Tool; apt is more high-level, apt-get has more features; +# apt -y update # Refreshes repository index +# apt -y full-upgrade # Upgrades packages with auto-handling of dependencies +# apt -y install python-pip +# pip install --upgrade pip +# hash -r +# apt -y install python3-openstackclient +# apt -y autoremove + + +# 2 options: (option 1 is preferable) +# 1) remain in master branch, use build.sh (which builds a tar ball), then launch deploy.sh +# 2) download a tar ball and launch deploy.sh in a branch matching the tar ball release (e.g. 
fraser 6.2) + + +############## +# OPTION 1: build.sh + deploy.sh in master branch + +# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch +echo "*** begin download Compass4nfv repository" +git clone https://gerrit.opnfv.org/gerrit/compass4nfv +cd compass4nfv + +# launch build script +echo "*** begin Compass4nfv build:" +./build.sh |& tee log1-Build.txt + +# edit in deploy.sh specific to OPTION 1 +# set path to ISO file (tar ball), as built by build.sh previously +# absolute path to tar ball file URL (MUST be absolute path) +sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/compass4nfv/work/building/compass.tar.gz' deploy.sh + +# END OPTION 1 +############## + + +############## +# OPTION 2: tar ball + deploy.sh in matching releases/branches + +# download tarball of a certain release/version +#echo "*** begin download Compass4nfv tar ball" +#wget http://artifacts.opnfv.org/compass4nfv/fraser/opnfv-6.2.tar.gz +# note: list of tar ball (ISO) files from Compass4NFV in https://artifacts.opnfv.org/compass4nfv.html + +# retrieve the repository of Compass4nfv code (this creates a compass4nfv subdir in the installation directory), current master branch +#echo "*** begin download Compass4nfv repository" +#git clone https://gerrit.opnfv.org/gerrit/compass4nfv +#cd compass4nfv +# note: list of compass4nfv branch names in https://gerrit.opnfv.org/gerrit/#/admin/projects/compass4nfv,branches +# checkout to branch (or tag) matching the tarball release +#git checkout stable/fraser + +# edit in deploy.sh specific to OPTION 2 +# set path to ISO file (tar ball), as downloaded previously +# absolute path to tar ball file URL (MUST be absolute path) +# sed -i '/#export TAR_URL=/a export TAR_URL=file:///opt/opnfv-compass/opnfv-6.2.tar.gz' deploy.sh + +# END OPTION 2 +############## + + +# edit remaining deploy.sh entries as needed + +# set operating system version: Ubuntu Xenial Xerus +sed -i '/#export OS_VERSION=xenial\/centos7/a export OS_VERSION=xenial' deploy.sh + +# set path to OPNFV scenario / DHA (Deployment Hardware Adapter) YAML file +# here, os-nosdn-nofeature-noha scenario +sed -i '/#export DHA=/a export DHA=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/os-nosdn-nofeature-noha.yml' deploy.sh + +# set path to network YAML file +sed -i '/#export NETWORK=/a export NETWORK=/opt/opnfv-compass/compass4nfv/deploy/conf/vm_environment/network.yml' deploy.sh + +# append parameters for virtual machines (for virtual deployments); e.g., 2 nodes for NOHA scenario, 5 for HA, etc. +# note: this may not be needed in a future release of Compass4nfv + +# VIRT_NUMBER – the number of nodes for virtual deployment. +# VIRT_CPUS – the number of CPUs allocated per virtual machine. +# VIRT_MEM – the memory size (MB) allocated per virtual machine. +# VIRT_DISK – the disk size allocated per virtual machine. 
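The commented parameter list above is applied to deploy.sh through the repeated sed edits below, all of which rely on the same GNU sed append idiom. A minimal sketch of the pattern (file and variable names follow the surrounding script):

    # GNU sed: '/PATTERN/a TEXT' appends TEXT as a new line after each line matching PATTERN
    sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh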
+ +# if OPTION 1 (master): OPENSTACK_VERSION is queens, so add the VIRT_NUMBER line after the queens match +#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_DISK=200G' deploy.sh +#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_MEM=16384' deploy.sh +#sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_CPUS=4' deploy.sh +sed -i '/export OPENSTACK_VERSION=queens/a export VIRT_NUMBER=2' deploy.sh + +# if OPTION 2 (stable/fraser): OPENSTACK_VERSION is pike, so add the VIRT_NUMBER line after the pike match +#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_DISK=200G' deploy.sh +#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_MEM=16384' deploy.sh +#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_CPUS=4' deploy.sh +#sed -i '/export OPENSTACK_VERSION=pike/a export VIRT_NUMBER=5' deploy.sh + + +# launch deploy script +echo "*** begin Compass4nfv deploy:" +./deploy.sh |& tee log2-Deploy.txt + + + + +# To access OpenStack Horizon GUI in Virtual deployment +# source: https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass + +# confirm IP@ of the current server (jump server, such as 10.10.100.xyz on LaaS: 10.10.100.42 for hpe32, etc.) +external_nic=$(ip route |grep '^default'|awk '{print $5F}') +echo "external_nic: $external_nic" +ip addr show "$external_nic" + +# Config IPtables rules: pick an unused port number, e.g. 50000+machine number, 50032 for hpe32 at 10.10.100.42 +# 192.16.1.222:443 is the OpenStack Horizon GUI after a Compass installation +# syntax: iptables -t nat -A PREROUTING -d $EX_IP -p tcp --dport $PORT -j DNAT --to 192.16.1.222:443 +# (note: this could be automated: retrieve IP@, pick port number) + +# example: hpe15 +# iptables -t nat -A PREROUTING -d 10.10.100.25 -p tcp --dport 50015 -j DNAT --to 192.16.1.222:443 +# example: hpe33 +# iptables -t nat -A PREROUTING -d 10.10.100.43 -p tcp --dport 50033 -j DNAT --to 192.16.1.222:443 + +# display IPtables NAT rules +iptables -t nat -L + +# Enter https://$EX_IP:$PORT in your browser to visit the OpenStack Horizon dashboard +# examples: https://10.10.100.25:50015 , https://10.10.100.43:50033 +# The default user is "admin" +# to get the Horizon password for "admin": +sudo docker cp compass-tasks:/opt/openrc ./ +sudo cat openrc | grep OS_PASSWORD +source ./openrc + +# for OpenStack CLI (generic content from openrc) +export OS_ENDPOINT_TYPE=publicURL +export OS_INTERFACE=publicURL +export OS_USERNAME=admin +export OS_PROJECT_NAME=admin +export OS_TENANT_NAME=admin +export OS_AUTH_URL=https://192.16.1.222:5000/v3 +export OS_NO_CACHE=1 +export OS_USER_DOMAIN_NAME=Default +export OS_PROJECT_DOMAIN_NAME=Default +export OS_REGION_NAME=RegionOne + +# For openstackclient +export OS_IDENTITY_API_VERSION=3 +export OS_AUTH_VERSION=3 + + + +echo "*** end AUTO install: OPNFV Compass4nfv" + diff --git a/ci/deploy-opnfv-daisy-centos.sh b/ci/deploy-opnfv-daisy-centos.sh new file mode 100644 index 0000000..664ba55 --- /dev/null +++ b/ci/deploy-opnfv-daisy-centos.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +# /usr/bin/env bash or /bin/bash ? /usr/bin/env bash is more environment-independent +# beware of files which were edited in Windows, and have invisible \r end-of-line characters, causing Linux errors + +############################################################################## +# Copyright (c) 2018 Wipro Limited and others. +# +# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# OPNFV contribution guidelines Wiki page: +# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines + +# OPNFV/Auto project: +# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095 + + +# localization control: force script to use default language for output, and force sorting to be bytewise +# ("C" is from C language, represents "safe" locale everywhere) +# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte match) +export LANG=C +export LC_ALL=C + + +############################################################################### +## installation of OpenStack via OPNFV Daisy4nfv, on CentOS, virtual deployment +############################################################################### +# reference manual: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/index.html#daisy-installation +# page for virtual deployment: https://docs.opnfv.org/en/stable-fraser/submodules/daisy/docs/release/installation/vmdeploy.html + +echo "*** begin AUTO install: OPNFV Daisy4nfv" + +# check OS version +echo "*** print OS version (must be CentOS, version 7.2 or more)" +cat /etc/*release + +# make sure cp is not aliased or a function; same for mv and rm +unalias cp +unset -f cp +unalias mv +unset -f mv +unalias rm +unset -f rm + +# Manage Nested Virtualization +echo "*** ensure Nested Virtualization is enabled on Intel x86" +echo "*** nested flag before:" +cat /sys/module/kvm_intel/parameters/nested +rm -f /etc/modprobe.d/kvm-nested.conf +{ printf "options kvm-intel nested=1\n";\ + printf "options kvm-intel enable_shadow_vmcs=1\n";\ + printf "options kvm-intel enable_apicv=1\n";\ + printf "options kvm-intel ept=1\n"; } >> /etc/modprobe.d/kvm-nested.conf +sudo modprobe -r kvm_intel +sudo modprobe -a kvm_intel +echo "*** nested flag after:" +cat /sys/module/kvm_intel/parameters/nested + +echo "*** verify status of modules in the Linux Kernel: kvm_intel module should be loaded for x86_64 machines" +lsmod | grep kvm_ +grep kvm_ < /proc/modules + +# download tools: git, kvm, libvirt, python-yaml +sudo yum -y install git +sudo yum -y install kvm +sudo yum -y install libvirt +sudo yum info libvirt +sudo yum info qemu-kvm +sudo yum -y install python-yaml + + +# make sure SELinux is enforced (Security-Enhanced Linux) +sudo setenforce 1 +echo "getenforce: $(getenforce)" + +# Restart the libvirtd daemon: +sudo service libvirtd restart +# Verify if the kvm module is loaded, you should see amd or intel depending on the hardware: +lsmod | grep kvm +# Note: to test, issue a virsh command to ensure local root connectivity: +# sudo virsh sysinfo + + + +# update everything (upgrade: riskier than update, as packages supposed to be unused will be deleted) +# (note: can take several minutes; may not be necessary) +sudo yum -y update + +# prepare Daisy installation directory +export INSTALLDIR=/opt/opnfv-daisy +mkdir $INSTALLDIR +cd $INSTALLDIR + +# oslo-config, needed in daisy/deploy/get_conf.py +sudo curl -O https://bootstrap.pypa.io/get-pip.py +hash -r +python get-pip.py --no-warn-script-location +pip install --upgrade oslo-config + + +# retrieve Daisy4nfv repository +git clone https://gerrit.opnfv.org/gerrit/daisy +cd daisy + + + +# OPTION 1: 
+# OPTION 1: master repo and latest bin file (May 17th, 2018)
+# Download the latest bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+curl http://artifacts.opnfv.org/daisy/opnfv-2018-05-17_14-00-32.bin -o opnfv.bin
+# make opnfv.bin executable
+chmod 777 opnfv.bin
+
+# OPTION 2: stable release: Fraser 6.0 (i.e. check out the stable Fraser release opnfv-6.0)
+# Download the matching bin file from http://artifacts.opnfv.org/daisy.html and name it opnfv.bin
+#git checkout opnfv.6.0 # as per the Daisy4nfv instructions, but does not work
+#git checkout stable/fraser
+#curl http://artifacts.opnfv.org/daisy/fraser/opnfv-6.0.iso -o opnfv.bin
+# make opnfv.bin executable
+#chmod 777 opnfv.bin
+
+
+
+# The deploy.yml file is the inventory template of the deployment nodes:
+# erroneous path in the doc: "./deploy/conf/vm_environment/zte-virtual1/deploy.yml"
+# correct path: "./deploy/config/vm_environment/zte-virtual1/deploy.yml"
+# You can write your own name/roles reference into it:
+#   name - host name of the deployment node after installation
+#   roles - components deployed
+# note: ./templates/virtual_environment/ contains XML files, for networks and VMs
+
+
+# prepare config dirs for the Auto lab in the daisy dir, and copy the deploy and network YAML files from the default files (virtual1 or virtual2)
+export AUTO_DAISY_LAB_CONFIG1=labs/auto_daisy_lab/virtual1/daisy/config
+export DAISY_DEFAULT_ENV1=deploy/config/vm_environment/zte-virtual1
+mkdir -p $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/deploy.yml $AUTO_DAISY_LAB_CONFIG1
+cp $DAISY_DEFAULT_ENV1/network.yml $AUTO_DAISY_LAB_CONFIG1
+
+export AUTO_DAISY_LAB_CONFIG2=labs/auto_daisy_lab/virtual2/daisy/config
+export DAISY_DEFAULT_ENV2=deploy/config/vm_environment/zte-virtual2
+mkdir -p $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/deploy.yml $AUTO_DAISY_LAB_CONFIG2
+cp $DAISY_DEFAULT_ENV2/network.yml $AUTO_DAISY_LAB_CONFIG2
+
+# Note:
+# - the zte-virtual1 config files deploy OpenStack with five nodes (3 LB nodes and 2 compute nodes)
+# - the zte-virtual2 config files deploy an all-in-one OpenStack
+
+# run deploy script, scenario os-nosdn-nofeature-ha, multinode OpenStack
+sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual1 -s os-nosdn-nofeature-ha
+
+# run deploy script, scenario os-nosdn-nofeature-noha, all-in-one OpenStack
+# sudo ./ci/deploy/deploy.sh -L "$(cd ./;pwd)" -l auto_daisy_lab -p virtual2 -s os-nosdn-nofeature-noha
+
+
+# Notes about deploy.sh:
+# The value after -L must be an absolute path pointing to the directory which contains the $AUTO_DAISY_LAB_CONFIG directory.
+# The value after the -p parameter (virtual1 or virtual2) must match the one selected for $AUTO_DAISY_LAB_CONFIG.
+# The value after the -l parameter (e.g. auto_daisy_lab) must match the lab name selected for $AUTO_DAISY_LAB_CONFIG (the part after labs/).
+# Scenario (-s parameter): "os-nosdn-nofeature-ha" is used for deploying multinode OpenStack (virtual1).
+# Scenario (-s parameter): "os-nosdn-nofeature-noha" is used for deploying all-in-one OpenStack (virtual2).
+
+# more details on the deploy.sh OPTIONS:
+# -B  PXE Bridge for booting the Daisy Master, optional
+# -D  Dry-run, does not perform deployment, will be deleted later
+# -L  Securelab repo absolute path, optional
+# -l  LAB name, necessary
+# -p  POD name, necessary
+# -r  Remote workspace in the target server, optional
+# -w  Workdir for temporary usage, optional
+# -h  Print this message and exit
+# -s  Deployment scenario
+# -S  Skip recreating the Daisy VM during deployment
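+
+# Illustrative variant of the deploy call (an assumption, not from the Daisy4nfv
+# docs): keep a log of the deployment and abort the CI run on failure.
+# sudo ./ci/deploy/deploy.sh -L "$(pwd)" -l auto_daisy_lab -p virtual1 \
+#     -s os-nosdn-nofeature-ha |& tee deploy.log
+# [ "${PIPESTATUS[0]}" -eq 0 ] || { echo "ERROR: Daisy deployment failed" ; exit 1 ; }
+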
+# When deployed successfully, the OpenStack floating IP is 10.20.11.11; the login account is "admin" and the password is "keystone"
diff --git a/ci/deploy-opnfv-fuel-ubuntu.sh b/ci/deploy-opnfv-fuel-ubuntu.sh
new file mode 100644
index 0000000..db276b2
--- /dev/null
+++ b/ci/deploy-opnfv-fuel-ubuntu.sh
@@ -0,0 +1,199 @@
+#!/usr/bin/env bash
+
+# /usr/bin/env bash or /bin/bash? /usr/bin/env bash is preferred, as it is more environment-independent
+# beware of files edited in Windows: invisible \r end-of-line characters cause errors on Linux
+
+##############################################################################
+# Copyright (c) 2018 Wipro Limited and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# OPNFV contribution guidelines Wiki page:
+# https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+
+# OPNFV/Auto project:
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=12389095
+
+
+# localization control: force the script to use the default language for output, and force bytewise sorting
+# ("C" is the locale from the C language, and represents the "safe" locale everywhere)
+# (result: the script will consider only basic ASCII characters and disable UTF-8 multibyte matching)
+export LANG=C
+export LC_ALL=C
+
+##############################################################################
+## installation of OpenStack via OPNFV Fuel/MCP, on Ubuntu, virtual deployment
+##############################################################################
+# reference manual: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/index.html
+# page for virtual deployment: https://docs.opnfv.org/en/latest/submodules/fuel/docs/release/installation/installation.instruction.html#opnfv-software-installation-and-deployment
+
+# Steps:
+# step 1: download the Fuel/MCP repository and run the deploy script
+#         (this example: x86, virtual deploy, os-nosdn-nofeature-noha scenario)
+# step 2: download additional packages (python3, OpenStack SDK, OpenStack clients, ...)
+# step 3: add more resources to the OpenStack instance (vCPUs, RAM)
+# step 4: download the Auto repository
+# step 5: run the Auto python script to populate the OpenStack instance with the objects expected by ONAP
+
+
+echo "*** begin AUTO install: OPNFV Fuel/MCP"
+
+
+# step 1: download the Fuel/MCP repository and run the deploy script
+
+# prepare install directory
+export INSTALLDIR=/opt/opnfv-fuel
+mkdir -p $INSTALLDIR
+cd $INSTALLDIR
+
+# get the Fuel repository
+git clone https://git.opnfv.org/fuel
+# cd into the new fuel repository, which contains directories: mcp, ci, etc.
+# note: this is for x86_64 architectures; for aarch64 architectures, git clone https://git.opnfv.org/armband and cd armband instead
+cd fuel
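+
+# Optionally pin the repository to a release branch instead of master (illustrative;
+# the branch name is an assumption, list valid names with "git branch -r"):
+# git checkout stable/fraser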
+
+# edit the NOHA scenario YAML file with more resources for the compute nodes: 32 vCPUs, 192G RAM
+{ printf "    cmp01:\n";\
+  printf "      vcpus: 32\n";\
+  printf "      ram: 196608\n";\
+  printf "    cmp02:\n";\
+  printf "      vcpus: 32\n";\
+  printf "      ram: 196608\n"; } >> mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+
+# provide more storage space to the VMs: 350G per compute node (default is 100G)
+sed -i mcp/scripts/lib.sh -e 's/\(qemu-img create.*\) 100G/\1 350G/g'
+
+# launch the OPNFV Fuel/MCP deploy script
+ci/deploy.sh -l local -p virtual1 -s os-nosdn-nofeature-noha -D |& tee deploy.log
+
+
+
+# step 2: download additional packages (python3, OpenStack SDK, OpenStack clients, ...)
+
+# install python 3 on Ubuntu
+echo "*** begin install python 3"
+sudo apt-get -y update
+sudo apt-get -y install python3
+# optional package clean-up
+# sudo apt -y autoremove
+# specific install of a python version, e.g. 3.6:
+# sudo apt-get install python3.6
+
+# http://docs.python-guide.org/en/latest/starting/install3/linux/
+# sudo apt-get install software-properties-common
+# sudo add-apt-repository ppa:deadsnakes/ppa
+# sudo apt-get update
+# sudo apt-get install python3.6
+echo "python2 --version: $(python2 --version 2>&1)"
+echo "python3 --version: $(python3 --version)"
+echo "which python: $(which python)"
+
+# install pip3 for python3 (note: /usr/local/bin/pip3 can shadow /usr/bin/pip3; "hash -r" refreshes the shell's command cache)
+echo "*** begin install pip3 for python3"
+sudo apt-get -y install python3-pip
+hash -r
+pip3 install --upgrade pip
+hash -r
+
+echo "\$PATH: $PATH"
+echo "which pip: $(which pip)"
+echo "which pip3: $(which pip3)"
+
+# install the OpenStack SDK Python client
+echo "*** begin install OpenStack SDK Python client"
+pip3 install openstacksdk
+pip3 install --upgrade openstacksdk
+
+# install the OpenStack CLI
+echo "*** begin install OpenStack CLI"
+pip3 install python-openstackclient
+pip3 install --upgrade python-openstackclient
+
+pip3 install --upgrade python-keystoneclient
+pip3 install --upgrade python-neutronclient
+pip3 install --upgrade python-novaclient
+pip3 install --upgrade python-glanceclient
+pip3 install --upgrade python-cinderclient
+
+# install OpenStack Heat (may not be installed by default); may be useful for VNF installation
+#apt install python3-heatclient
+echo "*** begin install OpenStack Heat"
+pip3 install --upgrade python-heatclient
+
+# package verification printouts
+echo "*** begin package verification printouts"
+pip3 list
+pip3 show openstacksdk
+pip3 check
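+
+# quick sanity check (illustrative): the CLI entry point and the SDK module
+# should both be usable at this point; either command failing means the
+# pip3 installs above did not complete cleanly
+# openstack --version
+# python3 -c "import openstack"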
+
+
+# step 3: add more resources to the OpenStack instance
+
+# now that the OpenStack CLI is installed, finish the Fuel/MCP installation:
+# take the extra resources indicated in os-nosdn-nofeature-noha.yaml into account as quotas in the OpenStack instance
+# (e.g. 2 compute nodes with 32 vCPUs and 192G RAM each => 64 cores and 384G = 393216M RAM)
+# the environment variables are hard-coded here, since they are always the same for a default Fuel/MCP virtual deployment; there could be better ways to do this
+
+export OS_AUTH_URL=http://10.16.0.107:5000/v3
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+export OS_USERNAME="admin"
+export OS_PASSWORD="opnfv_secret"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
+
+# at this point, openstack CLI commands should work
+echo "*** finish install OPNFV Fuel/MCP"
+openstack quota set --cores 64 admin
+openstack quota set --ram 393216 admin
+
+
+
+# step 4: download the Auto repository
+
+# install OPNFV Auto
+# prepare install directory
+echo "*** begin install OPNFV Auto"
+mkdir -p /opt/opnfv-Auto
+cd /opt/opnfv-Auto
+# get the Auto repository from Gerrit
+git clone https://gerrit.opnfv.org/gerrit/auto
+# cd into the new auto repository, which contains directories: lib, setup, ci, etc.
+cd auto
+
+
+
+# step 5: run the Auto python script to populate the OpenStack instance with the objects expected by ONAP
+
+# download the images used by the script beforehand (in case downloading from the URLs inside the script does not work)
+echo "*** begin download images"
+cd setup/VIMs/OpenStack
+mkdir -p images
+cd images
+# CirrOS
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-arm-disk.img
+curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
+# Ubuntu 16.04 LTS (Xenial Xerus)
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+curl -O https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img
+# Ubuntu 14.04.5 LTS (Trusty Tahr)
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+curl -O http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img
+
+# launch the script to populate the OpenStack instance
+echo "*** begin populate OpenStack instance with ONAP objects"
+cd ..
+python3 auto_script_config_openstack_for_onap.py
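+
+# optional verification sketch (illustrative, not part of the Auto script):
+# list a few object types to confirm that the population script created them
+# openstack image list
+# openstack network list
+# openstack security group list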
+
+echo "*** end AUTO install: OPNFV Fuel/MCP"
+
diff --git a/ci/plot-results.sh b/ci/plot-results.sh
new file mode 100755
index 0000000..22ab1d6
--- /dev/null
+++ b/ci/plot-results.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright 2017-2018 Intel Corporation, Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Script for graphical representation of AUTO result summaries
+#
+# Usage:
+#   ./plot-results.sh [directory]
+#
+# where:
+#   "directory" is an optional directory name, where summaries of AUTO
+#   installation reports are stored
+#   Default value: "$HOME/auto_ci_daily_logs"
+
+NUMBER_OF_RESULTS=50    # max number of recent results to be compared in graphs
+DIR=${1:-"$HOME/auto_ci_daily_logs"}    # honor the optional directory argument
+
+function clean_data() {
+    rm -f summary.csv
+    rm -f graph*plot
+    rm -f graph*txt
+    rm -f graph*png
+}
+
+function prepare_data() {
+    FIRST=1
+    CSV_LIST=$(ls -1 ${DIR}/deploy_summary*csv | tail -n ${NUMBER_OF_RESULTS})
+    for result_file in $CSV_LIST ; do
+        tmp_dir=$(dirname $result_file)
+        TIMESTAMP=$(basename $tmp_dir | cut -d'_' -f2-)
+        if [ $FIRST -eq 1 ] ; then
+            head -n1 $result_file > summary.csv
+            FIRST=0
+        fi
+        tail -n+2 ${result_file} >> summary.csv
+    done
+}
+
+function plot_data() {
+    echo "Created graphs:"
+    for TYPE in png txt; do
+        for GRAPH in "graph_pods" "graph_tcs" ; do
+            OUTPUT="$GRAPH.plot"
+            GRAPH_NAME="${GRAPH}.${TYPE}"
+            cat > $OUTPUT <<- EOM
+set datafile separator ","
+set xdata time
+set timefmt "%Y%m%d_%H%M%S"
+set format x "%m-%d"
+set xlabel "date"
+set format y "%8.0f"
+EOM
+            if [ "$TYPE" == "png" ] ; then
+                echo 'set term png size 1024,768' >> $OUTPUT
+            else
+                echo 'set term dumb 100,30' >> $OUTPUT
+            fi
+
+            if [ "$GRAPH" == "graph_pods" ] ; then
+                echo 'set ylabel "PODs"' >> $OUTPUT
+                echo 'set yrange [0:]' >> $OUTPUT
+                echo "set title \"ONAP K8S PODs\"" >> $OUTPUT
+                COL1=3
+                COL2=4
+            else
+                echo 'set ylabel "testcases"' >> $OUTPUT
+                echo 'set yrange [0:]' >> $OUTPUT
+                echo "set title \"ONAP Health TestCases\"" >> $OUTPUT
+                COL1=5
+                COL2=6
+            fi
+
+            echo "set output \"$GRAPH_NAME\"" >> $OUTPUT
+            echo -n "plot " >> $OUTPUT
+            echo "'summary.csv' using 1:$COL1 with linespoints title columnheader($COL1) \\" >> $OUTPUT
+            echo ", 'summary.csv' using 1:$COL2 with linespoints title columnheader($COL2)" >> $OUTPUT
+            gnuplot $OUTPUT
+            echo -e "\t$GRAPH_NAME"
+        done
+    done
+}
+
+#
+# Main body
+#
+clean_data
+prepare_data
+plot_data
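+
+# Example usage (illustrative): plot the default log directory, or an explicit one
+# containing deploy_summary*csv files:
+# ./ci/plot-results.sh
+# ./ci/plot-results.sh /tmp/auto_ci_daily_logs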