diff options
-rw-r--r-- | INFO | 35 | ||||
-rw-r--r-- | INFO.yaml | 24 | ||||
-rwxr-xr-x | ci/build-auto.sh | 158 | ||||
-rwxr-xr-x | ci/deploy-onap-fuel.sh | 238 | ||||
-rwxr-xr-x | ci/deploy-onap-kubespray.sh | 339 | ||||
-rwxr-xr-x | ci/deploy-onap.sh | 246 | ||||
-rwxr-xr-x | ci/plot-results.sh | 101 | ||||
-rw-r--r-- | docs/index.rst | 18 | ||||
-rw-r--r-- | docs/release/configguide/index.rst | 1 | ||||
-rw-r--r-- | docs/release/release-notes/index.rst | 1 | ||||
-rw-r--r-- | docs/release/userguide/index.rst | 1 |
11 files changed, 956 insertions, 206 deletions
@@ -1,35 +0,0 @@ -(obsolete: use only INFO.yaml) - -Project: ONAP-Automated OPNFV (Auto) -Project Creation Date: August 15, 2017 -Project Category: -Lifecycle State: Incubation -Primary Contact: tina.tsou@arm.com -Project Lead: tina.tsou@arm.com -Jira Project Name: ONAP-Automated OPNFV -Jira Project Prefix: AUTO -Mailing list tag: [auto] -IRC: Server:freenode.net Channel:#opnfv-auto -Repository: auto - -Committers: -Tina Tsou (tina.tsou@arm.com) -Harry Huang (huangxiangyu5@huawei.com) -Mohankumar Navaneethan (mnavaneethan@mvista.com) -Song Zhu (song.zhu@arm.com) -Liang Ou (oul.gd@chinatelecom.cn) -Gerard Damm (gerard.damm@wipro.com) -Joe Kidder (joe.kidder@5thlayer.com) -Cristina Pauna (cristina.pauna@enea.com) -Paul Vaduva (paul.vaduva@enea.com) -Martin Klozik (martin.klozik@tieto.com) -Richard Elias (richard.elias@tieto.com) - -Prasad Gorja (prasad.gorja@nxp.com) -Lei Chen (chenlei@caict.ac.cn) -Xiaoyu Wang (wxy_cttl@126.com) -Xu Lu (luxu_hd@163.com) -Eric Maye (eric.dmaye@wipro.com) -Chen Zhang (zhangchen.bri@chinatelecom.cn) - -Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html @@ -38,26 +38,6 @@ committers: email: 'huangxiangyu5@huawei.com' company: 'huawei.com' id: 'huangxiangyu' - - name: 'Song Zhu' - email: 'song.zhu@arm.com' - company: 'arm.com' - id: 'mail22song' - - name: 'Liang Ou' - email: 'oul.gd@chinatelecom.cn' - company: 'chinatelecom.cn' - id: 'ouliang1' - - name: 'Gerard Damm' - email: 'gerard.damm@wipro.com' - company: 'Wipro' - id: 'gerard_damm' - - name: 'Joe Kidder' - email: 'joe.kidder@5thlayer.com' - company: '5thlayer.com' - id: 'joe.kidder' - - name: 'Cristina Pauna' - email: 'cristina.pauna@enea.com' - company: 'enea.com' - id: 'cristinapauna' - name: 'Paul Vaduva' email: 'paul.vaduva@enea.com' company: 'enea.com' @@ -66,10 +46,6 @@ committers: email: 'martin.klozik@tieto.com' company: 'tieto.com' id: 'mklozik' - - name: 'Richard Elias' - email: 
'richard.elias@tieto.com' - company: 'tieto.com' - id: 'richardxelias' tsc: # yamllint disable rule:line-length approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.html' diff --git a/ci/build-auto.sh b/ci/build-auto.sh index 96588b9..00b67b1 100755 --- a/ci/build-auto.sh +++ b/ci/build-auto.sh @@ -20,10 +20,12 @@ # Usage: # build-auto.sh job_type -# where job_type is one of "verify", "merge", "daily" +# +# Parameters: +# job_type - is one of "verify", "merge" or "daily" # # Example: -# ./ci/build-auto.sh daily +# ./ci/build-auto.sh verify # # exit codes @@ -31,11 +33,21 @@ EXIT=0 EXIT_UNKNOWN_JOB_TYPE=1 EXIT_LINT_FAILED=2 +EXIT_FUEL_FAILED=10 # # configuration # AUTOENV_DIR="$HOME/autoenv" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +LOG_DIR=$HOME/auto_ci_daily_logs +WORKSPACE=${WORKSPACE:-$PWD} + +# POD and SCENARIO details used during OPNFV deployment performed by daily job +NODE_NAME=${NODE_NAME:-"ericsson-virtual1"} +POD_LAB=$(echo $NODE_NAME | cut -d '-' -f1) +POD_NAME=$(echo $NODE_NAME | cut -d '-' -f2) +DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-onap-noha"} # # functions @@ -47,6 +59,42 @@ function execute_auto_lint_check() { fi } +# check and install required packages +function dependencies_check() { + . /etc/os-release + if [ $ID == "ubuntu" ] ; then + echo "Dependencies check" + echo "==================" + # install system packages + for PACKAGE in "virtualenv" "pylint" "yamllint" "gnuplot" ; do + if dpkg -s $PACKAGE &> /dev/null ; then + printf " %-70s %-6s\n" $PACKAGE "OK" + else + printf " %-70s %-6s\n" $PACKAGE "missing" + sudo apt-get install -y $PACKAGE + fi + done + echo + fi +} + +# create virtualenv if needed and enable it +function virtualenv_prepare() { + if [ ! 
-e $AUTOENV_DIR ] ; then + echo "Create AUTO environment" + echo "=======================" + virtualenv "$AUTOENV_DIR" + echo + fi + + # activate and update virtualenv + echo "Update AUTO environment" + echo "=======================" + source "$AUTOENV_DIR"/bin/activate + pip install -r ./requirements.txt + echo +} + # # main # @@ -55,20 +103,8 @@ echo # enter workspace dir cd $WORKSPACE -# create virtualenv if needed -if [ ! -e $AUTOENV_DIR ] ; then - echo "Create AUTO environment" - echo "=======================" - virtualenv "$AUTOENV_DIR" - echo -fi - -# activate and update virtualenv -echo "Update AUTO environment" -echo "=======================" -source "$AUTOENV_DIR"/bin/activate -pip install -r ./requirements.txt -echo +# check if required packages are installed +dependencies_check # execute job based on passed parameter case $1 in @@ -77,15 +113,9 @@ case $1 in echo "AUTO verify job" echo "===============" - # Example of verify job body. Functions can call - # external scripts, etc. - + virtualenv_prepare execute_auto_lint_check #execute_auto_doc_check - #install_opnfv MCP - #install_onap - #execute_sanity_check - #execute_tests $1 # Everything went well, so report SUCCESS to Jenkins exit $EXIT @@ -95,15 +125,9 @@ case $1 in echo "AUTO merge job" echo "==============" - # Example of merge job body. Functions can call - # external scripts, etc. - + virtualenv_prepare execute_auto_lint_check #execute_auto_doc_check - #install_opnfv MCP - #install_onap - #execute_sanity_check - #execute_tests $1 # propagate result to the Jenkins job exit $EXIT @@ -112,15 +136,73 @@ case $1 in echo "==============" echo "AUTO daily job" echo "==============" + echo + echo "Deployment details:" + echo " LAB: $POD_LAB" + echo " POD: $POD_NAME" + echo " Scenario: $DEPLOY_SCENARIO" + echo " WORKSPACE: $WORKSPACE" + echo - # Example of daily job body. Functions can call - # external scripts, etc. 
- - #install_opnfv MCP - #install_onap - #execute_sanity_check - #execute_tests $1 - #push_results_and_logs_to_artifactory + # create log dir if needed + if [ ! -e $LOG_DIR ] ; then + echo "Create AUTO LOG DIRECTORY" + echo "=========================" + echo "mkdir $LOG_DIR" + mkdir $LOG_DIR + echo + fi + + echo "Installation of OPNFV and ONAP" + echo "==============================" + # clone fuel and execute installation of ONAP scenario to install + # ONAP on top of OPNFV deployment + [ -e fuel ] && rm -rf fuel + git clone https://gerrit.opnfv.org/gerrit/fuel + cd fuel + # Fuel master branch is currently broken; thus use stable/gambia + # branch with recent master version of ONAP scenario + git checkout stable/gambia + git checkout origin/master mcp/config/states/onap \ + mcp/config/scenario/os-nosdn-onap-ha.yaml \ + mcp/config/scenario/os-nosdn-onap-noha.yaml + # use larger disk size for virtual nodes + sed -i -re 's/(qemu-img resize.*)100G/\1400G/' mcp/scripts/lib_jump_deploy.sh + + LOG_FILE="$LOG_DIR/deploy_${TIMESTAMP}.log" + echo "ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\ + tee $LOG_FILE" + DEPLOY_START=$(date +%Y%m%d_%H%M%S) + ci/deploy.sh -l $POD_LAB -p $POD_NAME -s $DEPLOY_SCENARIO |&\ + tee $LOG_FILE + + # report failure if fuel failed to install OPNFV or ONAP + [ $? 
-ne 0 ] && exit $EXIT_FUEL_FAILED + + # process report + DEPLOY_END=$(date +%Y%m%d_%H%M%S) + REPORT_FILE="$LOG_DIR/deploy_report_${TIMESTAMP}.txt" + CSV_SUMMARY="$LOG_DIR/deploy_summary_${TIMESTAMP}.csv" + MARKER="ONAP INSTALLATION REPORT" + # cut report from installation log file + sed -n "/^$MARKER/,/^END OF $MARKER/p;/^END OF $MARKER/q" \ + $LOG_FILE > $REPORT_FILE + PODS_TOTAL=$(grep "PODs Total" $REPORT_FILE | sed -e 's/[^0-9]//g') + PODS_FAILED=$(grep "PODs Failed" $REPORT_FILE | sed -e 's/[^0-9]//g') + TC_SUM=$(grep "tests total" $REPORT_FILE | tail -n1 |\ + sed -e 's/[^0-9,]//g') + + echo "Start Time,End Time,Total PODs,Failed PODs,Total Tests,Passed"\ + "Tests,Failed Tests" >> $CSV_SUMMARY + echo "$DEPLOY_START,$DEPLOY_END,$PODS_TOTAL,$PODS_FAILED,$TC_SUM"\ + >> $CSV_SUMMARY + + # plot graphs from result summaries and print txt versions if possible + cd $WORKSPACE + ci/plot-results.sh + for GRAPH in $(ls -1 graph*txt 2> /dev/null) ; do + cat $GRAPH + done # propagate result to the Jenkins job exit $EXIT diff --git a/ci/deploy-onap-fuel.sh b/ci/deploy-onap-fuel.sh new file mode 100755 index 0000000..c120e9c --- /dev/null +++ b/ci/deploy-onap-fuel.sh @@ -0,0 +1,238 @@ +#!/bin/bash +# +# Copyright 2018 Tieto +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script for automated deployment of ONAP on top of OPNFV Fuel/MCP installation +# In the future both OOM and heat install methods should be supported. +# At the beginning OOM will be used for simplification. 
+ +# TODO: +# Configure ONAP to be able to control underlying OpenStack + +# Configuration to be passed to ci/deploy-onap.sh +export SSH_USER="ubuntu" +export SSH_IDENTITY="/root/.ssh/onap_key" + +# detect hypervisor details to be used as default values if needed +OS_HYPER_CMD="openstack hypervisor list --long" +echo -e "\nOpenStack Hepervisor list\n" +$OS_HYPER_CMD + +DEFAULT_CMP_COUNT=$($OS_HYPER_CMD -f value -c "ID" | wc -l) +DEFAULT_CMP_MIN_MEM=$($OS_HYPER_CMD -f value -c "Memory MB" | sort | head -n1) +DEFAULT_CMP_MIN_CPUS=$($OS_HYPER_CMD -f value -c "vCPUs" | sort | head -n1) + +# Use default values if compute configuration was not set by FUEL installer +AUTO_INSTALL_DIR=${AUTO_INSTALL_DIR:-"."} +AUTO_IMAGE_DIR="${AUTO_INSTALL_DIR}/images" +CMP_COUNT=${CMP_COUNT:-$DEFAULT_CMP_COUNT} # number of compute nodes +CMP_MIN_MEM=${CMP_MIN_MEM:-$DEFAULT_CMP_MIN_MEM} # MB RAM of the weakest compute node +CMP_MIN_CPUS=${CMP_MIN_CPUS:-$DEFAULT_CMP_MIN_CPUS} # CPU count of the weakest compute node +# size of storage for instances +CMP_STORAGE_TOTAL=${CMP_STORAGE_TOTAL:-$((80*$CMP_COUNT))} +VM_COUNT=${VM_COUNT:-6} # number of VMs available for k8s cluster + +# +# Functions +# +# function minimum accepts two numbers and prints smaller one +function minimum(){ + echo $(($1<$2?$1:$2)) +} + +# function remove_openstack_setup removes OS configuration performed by this +# script; So previously created configuration and deployed VMs will be +# removed before new ONAP deployment will be started. +function remove_openstack_setup(){ + # flavor is created 1st but removed last, so... + if ( ! 
openstack flavor list | grep 'onap.large' &> /dev/null ) ; then + #...no flavor means nothing to be removed + return + fi + echo -e "\nRemoving ONAP specific OpenStack configuration" + for a in $(openstack server list --name onap_vm -f value -c ID) ; do + openstack server delete $a + done + RULES=$(openstack security group rule list onap_security_group -f value -c ID) + for a in $RULES; do + openstack security group rule delete $a + done + openstack security group delete onap_security_group + for a in $(openstack floating ip list -f value -c ID) ; do + openstack floating ip delete $a + done + PORTS=$(openstack port list --network onap_private_network -f value -c ID) + for a in $PORTS ; do + openstack router remove port onap_router $a + done + PORTS=$(openstack port list --network onap_private_network -f value -c ID) + for a in $PORTS ; do + openstack port delete $a + done + openstack router delete onap_router + openstack subnet delete onap_private_subnet + openstack network delete onap_private_network + openstack image delete xenial + rm -rf $AUTO_IMAGE_DIR + openstack keypair delete onap_key + rm $SSH_IDENTITY + openstack flavor delete onap.large + echo +} + +# +# Script Main +# + +# remove OpenStack configuration if it exists +remove_openstack_setup + +echo -e "\nOpenStack configuration\n" + +# Calculate VM resources, so that flavor can be created +echo "Configuration of compute node:" +echo "Number of computes: CMP_COUNT=$CMP_COUNT" +echo "Minimal RAM: CMP_MIN_MEM=$CMP_MIN_MEM" +echo "Minimal CPUs count: CMP_MIN_CPUS=$CMP_MIN_CPUS" +echo "Storage for instances: CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL" +echo "Number of VMs: VM_COUNT=$VM_COUNT" +# Calculate VM parameters; there will be up to 1 VM per Compute node +# to maximize resources available for VMs +PER=85 # % of compute resources will be consumed by VMs +VM_DISK_MAX=100 # GB - max VM disk size +VM_MEM_MAX=81920 # MB - max VM RAM size +VM_CPUS_MAX=56 # max count of VM CPUs +VM_MEM=$(minimum 
$(($CMP_MIN_MEM*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_MEM_MAX) +VM_CPUS=$(minimum $(($CMP_MIN_CPUS*$CMP_COUNT*$PER/100/$VM_COUNT)) $VM_CPUS_MAX) +VM_DISK=$(minimum $(($CMP_STORAGE_TOTAL*$PER/100/$VM_COUNT)) $VM_DISK_MAX) + +echo -e "\nFlavor configuration:" +echo "CPUs : $VM_CPUS" +echo "RAM [MB] : $VM_MEM" +echo "DISK [GB] : $VM_DISK" + +# Create onap flavor +openstack flavor create --ram $VM_MEM --vcpus $VM_CPUS --disk $VM_DISK \ + onap.large + +# Generate a keypair and store private key +openstack keypair create onap_key > $SSH_IDENTITY +chmod 600 $SSH_IDENTITY + +# Download and import VM image(s) +mkdir $AUTO_IMAGE_DIR +wget -P $AUTO_IMAGE_DIR https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img +openstack image create --disk-format qcow2 --container-format bare --public \ + --file $AUTO_IMAGE_DIR/xenial-server-cloudimg-amd64-disk1.img xenial + +# Modify quotas (add 10% to required VM resources) +openstack quota set --ram $(($VM_MEM*$VM_COUNT*110/100)) admin +openstack quota set --cores $(($VM_CPUS*$VM_COUNT*110/100)) admin + +# Configure networking with DNS for access to the internet +openstack network create onap_private_network --provider-network-type vxlan +openstack subnet create onap_private_subnet --network onap_private_network \ + --subnet-range 192.168.33.0/24 --ip-version 4 --dhcp --dns-nameserver "8.8.8.8" +openstack router create onap_router +openstack router add subnet onap_router onap_private_subnet +openstack router set onap_router --external-gateway floating_net + +# Allow selected ports and protocols +openstack security group create onap_security_group +openstack security group rule create --protocol icmp onap_security_group +openstack security group rule create --proto tcp \ + --dst-port 22:22 onap_security_group +openstack security group rule create --proto tcp \ + --dst-port 8080:8080 onap_security_group # rancher +openstack security group rule create --proto tcp \ + --dst-port 8078:8078 onap_security_group # 
horizon +openstack security group rule create --proto tcp \ + --dst-port 8879:8879 onap_security_group # helm +openstack security group rule create --proto tcp \ + --dst-port 80:80 onap_security_group +openstack security group rule create --proto tcp \ + --dst-port 443:443 onap_security_group + +# Allow communication between k8s cluster nodes +PUBLIC_NET=`openstack subnet list --name floating_subnet -f value -c Subnet` +openstack security group rule create --remote-ip $PUBLIC_NET --proto tcp \ + --dst-port 1:65535 onap_security_group +openstack security group rule create --remote-ip $PUBLIC_NET --proto udp \ + --dst-port 1:65535 onap_security_group + +# Get list of hypervisors and their zone +HOST_ZONE=$(openstack host list -f value | grep compute | head -n1 | cut -d' ' -f3) +HOST_NAME=($(openstack host list -f value | grep compute | cut -d' ' -f1)) +HOST_COUNT=$(echo ${HOST_NAME[@]} | wc -w) +# Create VMs and assign floating IPs to them +VM_ITER=1 +HOST_ITER=0 +while [ $VM_ITER -le $VM_COUNT ] ; do + openstack floating ip create floating_net + VM_NAME[$VM_ITER]="onap_vm${VM_ITER}" + VM_IP[$VM_ITER]=$(openstack floating ip list -c "Floating IP Address" \ + -c "Port" -f value | grep None | cut -f1 -d " " | head -n1) + # dispatch new VMs among compute nodes in round robin fashion + openstack server create --flavor onap.large --image xenial \ + --nic net-id=onap_private_network --security-group onap_security_group \ + --key-name onap_key ${VM_NAME[$VM_ITER]} \ + --availability-zone ${HOST_ZONE}:${HOST_NAME[$HOST_ITER]} + sleep 10 # wait for VM init before floating IP can be assigned + openstack server add floating ip ${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]} + echo "Waiting for ${VM_NAME[$VM_ITER]} to start up for 1m at $(date)" + sleep 1m + VM_ITER=$(($VM_ITER+1)) + HOST_ITER=$(($HOST_ITER+1)) + [ $HOST_ITER -ge $HOST_COUNT ] && HOST_ITER=0 +done + +openstack server list -c ID -c Name -c Status -c Networks -c Host --long + +# check that SSH to all VMs is working 
+SSH_OPTIONS="-i $SSH_IDENTITY -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" +COUNTER=1 +while [ $COUNTER -le 10 ] ; do + VM_UP=0 + VM_ITER=1 + while [ $VM_ITER -le $VM_COUNT ] ; do + if ssh $SSH_OPTIONS -l $SSH_USER ${VM_IP[$VM_ITER]} exit &>/dev/null ; then + VM_UP=$(($VM_UP+1)) + echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: up" + else + echo "${VM_NAME[$VM_ITER]} ${VM_IP[$VM_ITER]}: down" + fi + VM_ITER=$(($VM_ITER+1)) + done + COUNTER=$(($COUNTER+1)) + if [ $VM_UP -eq $VM_COUNT ] ; then + break + fi + echo "Waiting for VMs to be accessible via ssh for 2m at $(date)" + sleep 2m +done + +openstack server list -c ID -c Name -c Status -c Networks -c Host --long + +if [ $VM_UP -ne $VM_COUNT ] ; then + echo "Only $VM_UP from $VM_COUNT VMs are accessible via ssh. Installation will be terminated." + exit 1 +fi + +# Start ONAP installation +DATE_START=$(date) +echo -e "\nONAP Installation Started at $DATE_START\n" +$AUTO_INSTALL_DIR/ci/deploy-onap.sh ${VM_IP[@]} +echo -e "\nONAP Installation Started at $DATE_START" +echo -e "ONAP Installation Finished at $(date)\n" diff --git a/ci/deploy-onap-kubespray.sh b/ci/deploy-onap-kubespray.sh new file mode 100755 index 0000000..a797388 --- /dev/null +++ b/ci/deploy-onap-kubespray.sh @@ -0,0 +1,339 @@ +#!/bin/bash +# +# Copyright 2018-2019 Tieto +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script for automated deployment of ONAP with Kubernetes at OPNFV LAAS +# environment. 
+# + +# +# Configuration +# +export LC_ALL=C +export LANG=C + +MASTER=$1 +SERVERS=$* +shift +SLAVES=$* + +ONAP_BRANCH=${ONAP_BRANCH:-'casablanca'} +KUBESPRAY_COMMIT="bbfd2dc2bd088efc63747d903edd41fe692531d8" +NAMESPACE='onap' +SSH_USER=${SSH_USER:-"opnfv"} +SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' +# use identity file from the environment SSH_IDENTITY +if [ -n "$SSH_IDENTITY" ] ; then + SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS" + ANSIBLE_IDENTITY="--private-key=$SSH_IDENTITY" +fi + +KUBESPRAY_OPTIONS='-e "kubelet_max_pods=250"' + +TMP_POD_LIST='/tmp/onap_pod_list.txt' + +case "$ONAP_BRANCH" in + "beijing") + HELM_VERSION=2.8.2 + ;; + "casablanca") + HELM_VERSION=2.9.1 + ;; + *) + HELM_VERSION=2.9.1 + ;; +esac + +ONAP_MINIMAL="aai dmaap portal robot sdc sdnc so vid" +# by defalult install minimal ONAP installation +# empty list of ONAP_COMPONENT means full ONAP installation +ONAP_COMPONENT=${ONAP_COMPONENT:-$ONAP_MINIMAL} + +# +# Functions +# +function usage() { + echo "usage" + cat <<EOL +Usage: + $0 <MASTER> [ <SLAVE1> <SLAVE2> ... ] + + where <MASTER> and <SLAVEx> are IP addresses of servers to be used + for ONAP installation. 
+ + Script behavior is affected by following environment variables: + + ONAP_COMPONENT - a list of ONAP components to be installed, empty list + will trigger a full ONAP installation + VALUE: "$ONAP_COMPONENT" + + ONAP_BRANCH - version of ONAP to be installed (OOM branch version) + VALUE: "$ONAP_BRANCH" + + NAMESPACE - name of ONAP namespace in kubernetes cluster + VALUE: "$NAMESPACE" + + SSH_USER - user name to be used to access <MASTER> and <SLAVEx> + servers + VALUE: "$SSH_USER" + + SSH_IDENTITY - (optional) ssh identity file to be used to access + <MASTER> and <SLAVEx> servers as a SSH_USER + VALUE: "$SSH_IDENTITY" + +NOTE: Following must be assured for <MASTER> and <SLAVEx> servers before + $0 execution: + 1) SSH_USER must be able to access servers via ssh without a password + 2) SSH_USER must have a password-less sudo access +EOL +} + +# Check if server IPs of kubernetes nodes are configured at given server. +# If it is not the case, then kubespray invetory file must be updated. +function check_server_ips() { + for SERVER_IP in $(grep 'ip=' $1 | sed -re 's/^.*ip=([0-9\.]+).*$/\1/') ; do + IP_OK="false" + for IP in $(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip a | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g'") ; do + if [ "$IP" == "$SERVER_IP" ] ; then + IP_OK="true" + fi + done + # access IP (e.g. 
OpenStack floating IP) is not server local address, so update invetory + if [ $IP_OK == "false" ] ; then + # get server default GW dev + DEV=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip route ls" | grep ^default | sed -re 's/^.*dev (.*)$/\1/') + LOCAL_IP=$(ssh $SSH_OPTIONS $SSH_USER@$SERVER_IP "ip -f inet addr show $DEV" | grep -Ew 'inet' | sed -re 's/^ *inet ([0-9\.]+).*$/\1/g') + if [ "$LOCAL_IP" == "" ] ; then + echo "Can't read local IP for server with IP $SERVER_IP" + exit 1 + fi + sed -i'' -e "s/ip=$SERVER_IP/ip=$LOCAL_IP access_ip=$SERVER_IP/" $1 + fi + done +} + +# sanity check +if [ "$SERVERS" == "" ] ; then + usage + exit 1 +fi + +# +# Installation +# + +# detect CPU architecture to download correct helm binary +CPU_ARCH=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "uname -p") +case "$CPU_ARCH" in + "x86_64") + ARCH="amd64" + ;; + "aarch64") + ARCH="arm64" + ;; + *) + echo "Unsupported CPU architecture '$CPU_ARCH' was detected." + exit 1 +esac + +# print configuration +cat << EOL +list of configuration options: + SERVERS="$SERVERS" + ONAP_COMPONENT="$ONAP_COMPONENT" + ONAP_BRANCH="$ONAP_BRANCH" + NAMESPACE="$NAMESPACE" + SSH_USER="$SSH_USER" + SSH_IDENTITY="$SSH_IDENTITY" + ARCH="$ARCH" + +EOL + +# install K8S cluster by kubespray +sudo apt-get -y update +sudo apt-get -y install git ansible python-jinja2 python3-pip libffi-dev libssl-dev +git clone https://github.com/kubernetes-incubator/kubespray.git +cd kubespray +git checkout $KUBESPRAY_COMMIT +pip3 install -r requirements.txt +export CONFIG_FILE=inventory/auto_hosts.ini +rm $CONFIG_FILE +python3 contrib/inventory_builder/inventory.py $SERVERS +check_server_ips $CONFIG_FILE +cat $CONFIG_FILE +if ( ! 
ansible-playbook -i $CONFIG_FILE $KUBESPRAY_OPTIONS -b -u $SSH_USER $ANSIBLE_IDENTITY cluster.yml ) ; then + echo "Kubespray installation has failed at $(date)" + exit 1 +fi + +# use standalone K8S master if there are enough VMs available for the K8S cluster +SERVERS_COUNT=$(echo $SERVERS | wc -w) +if [ $SERVERS_COUNT -gt 2 ] ; then + K8S_NODES=$SLAVES +else + K8S_NODES=$SERVERS +fi + +echo "INSTALLATION TOPOLOGY:" +echo "Kubernetes Master: $MASTER" +echo "Kubernetes Nodes: $K8S_NODES" +echo +echo "CONFIGURING NFS ON SLAVES" +echo "$SLAVES" + +for SLAVE in $SLAVES; +do +ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS & + sudo su + apt-get install nfs-common -y + mkdir /dockerdata-nfs + chmod 777 /dockerdata-nfs + echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab + mount -a + mount | grep dockerdata-nfs +CONFIGURENFS +done +wait + +echo "DEPLOYING OOM ON MASTER" +echo "$MASTER" + +ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY +sudo su +echo "create namespace '$NAMESPACE'" +cat <<EOF | kubectl create -f - +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "$NAMESPACE", + "labels": { + "name": "$NAMESPACE" + } + } +} +EOF +kubectl get namespaces --show-labels +kubectl -n kube-system create sa tiller +kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller +rm -rf oom +echo "pulling new oom" +git clone -b $ONAP_BRANCH http://gerrit.onap.org/r/oom + +# NFS FIX for aaf-locate +sed -i '/persistence:/s/^#//' ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml +sed -i '/mountPath: \/dockerdata/c\ mountPath: \/dockerdata-nfs'\ + ./oom/kubernetes/aaf/charts/aaf-locate/values.yaml + +echo "Pre-pulling docker images at \$(date)" +wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh +chmod 777 prepull_docker.sh +./prepull_docker.sh +echo "starting onap pods" +cd oom/kubernetes/ + +# Enable selected ONAP components +if [ -n "$ONAP_COMPONENT" ] ; 
then + # disable all components and enable only selected in next loop + sed -i '/^.*:$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml + echo -n "Enable following ONAP components:" + for COMPONENT in $ONAP_COMPONENT; do + echo -n " \$COMPONENT" + sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *false/enabled: true/' onap/values.yaml + done + echo +else + echo "All ONAP components will be installed" +fi + +wget http://storage.googleapis.com/kubernetes-helm\ +/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz +tar -zxvf helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz +mv linux-${ARCH}/helm /usr/local/bin/helm +helm init --upgrade --service-account tiller +# run helm server on the background and detached from current shell +nohup helm serve 0<&- &>/dev/null & +echo "Waiting for helm setup for 5 min at \$(date)" +sleep 5m +helm version +helm repo add local http://127.0.0.1:8879 +helm repo list +make all +if ( ! helm install local/onap -n dev --namespace $NAMESPACE) ; then + echo "ONAP installation has failed at \$(date)" + exit 1 +fi + +cd ../../ + +echo "Waiting for ONAP pods to be up \$(date)" +echo "Ignore failure of sdnc-ansible-server, see SDNC-443" +function get_onap_pods() { + kubectl get pods --namespace $NAMESPACE > $TMP_POD_LIST + return \$(cat $TMP_POD_LIST | wc -l) +} +FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs +ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running +WAIT_PERIOD=60 # wait period in seconds +MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds +MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD)) +COUNTER=0 +get_onap_pods +ALL_PODS=\$? +PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do + # print header every 20th line + if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then + printf "%-3s %-29s %-3s/%s\n" "Nr." 
"Datetime of check" "Err" "Total PODs" + fi + COUNTER=\$((\$COUNTER+1)) + printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS + sleep \$WAIT_PERIOD + if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then + FAILED_PODS_LIMIT=800 + ALL_PODS_LIMIT=0 + fi + get_onap_pods + ALL_PODS=\$? + PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +done + +get_onap_pods +cp $TMP_POD_LIST ~/onap_all_pods.txt +echo +echo "========================" +echo "ONAP INSTALLATION REPORT" +echo "========================" +echo +echo "List of Failed PODs" +echo "-------------------" +grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt +echo +echo "Summary:" +echo "--------" +echo " PODs Failed: \$(cat ~/onap_failed_pods.txt | wc -l)" +echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)" +echo +echo "ONAP health TC results" +echo "----------------------" +cd oom/kubernetes/robot +./ete-k8s.sh $NAMESPACE health | tee ~/onap_health.txt +echo "===============================" +echo "END OF ONAP INSTALLATION REPORT" +echo "===============================" +OOMDEPLOY + +echo "Finished install, ruturned from Master at $(date)" +exit 0 diff --git a/ci/deploy-onap.sh b/ci/deploy-onap.sh index e886492..c34eb56 100755 --- a/ci/deploy-onap.sh +++ b/ci/deploy-onap.sh @@ -26,8 +26,8 @@ # NOTE: Following must be assured for all MASTER and SLAVE servers before # onap-deploy.sh execution: # 1) ssh access without a password -# 2) an "opnfv" user account with password-less sudo access must be -# available +# 2) an user account with password-less sudo access must be +# available - default user is "opnfv" # # Configuration @@ -40,21 +40,49 @@ HELM_VERSION=2.8.2 MASTER=$1 SERVERS=$* +shift +SLAVES=$* -BRANCH='master' +BRANCH='beijing' ENVIRON='onap' +SSH_USER=${SSH_USER:-"opnfv"} +SSH_OPTIONS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' +# by defalult install full ONAP installation +ONAP_COMPONENT_DISABLE=${ONAP_COMPONENT_DISABLE:-""} +# example of minimal ONAP 
installation +#ONAP_COMPONENT_DISABLE="clamp cli consul dcaegen2 esr log msb multicloud nbi oof policy uui vfc vnfsdk" + +# use identity file from the environment SSH_IDENTITY +if [ -n "$SSH_IDENTITY" ] ; then + SSH_OPTIONS="-i $SSH_IDENTITY $SSH_OPTIONS" +fi + # # Installation # + +# use standalone K8S master if there are enough VMs available for the K8S cluster +SERVERS_COUNT=$(echo $SERVERS | wc -w) +if [ $SERVERS_COUNT -gt 2 ] ; then + RANCHER_SLAVES=$SLAVES +else + RANCHER_SLAVES=$SERVERS +fi + +echo "INSTALLATION TOPOLOGY:" +echo "Rancher Master: $MASTER" +echo "Rancher Slaves: $RANCHER_SLAVES" +echo echo "INSTALLING DOCKER ON ALL MACHINES" echo "$SERVERS" for MACHINE in $SERVERS; do -ssh opnfv@"$MACHINE" "bash -s" <<DOCKERINSTALL & - sudo su - apt-get update +ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<DOCKERINSTALL & + sudo -i + sysctl -w vm.max_map_count=262144 + apt-get update -y curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh mkdir -p /etc/systemd/system/docker.service.d/ @@ -83,17 +111,15 @@ wait echo "INSTALLING RANCHER ON MASTER" echo "$MASTER" -ssh opnfv@"$MASTER" "bash -s" <<RANCHERINSTALL & -sudo su -apt install jq -y +ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<RANCHERINSTALL +sudo -i +echo "INSTALL STARTS" +apt-get install -y jq make htop echo "Waiting for 30 seconds at \$(date)" sleep 30 docker login -u docker -p docker nexus3.onap.org:10001 -echo "INSTALL STARTS" -apt-get install make -y - docker run -d --restart=unless-stopped -p 8080:8080\ --name rancher_server rancher/server:v$RANCHER_VERSION curl -LO https://storage.googleapis.com/kubernetes-release/\ @@ -116,7 +142,7 @@ echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)">>/etc/exports service nfs-kernel-server restart echo "Waiting 10 minutes for Rancher to setup at \$(date)" -sleep 600 +sleep 10m echo "Installing RANCHER CLI, KUBERNETES ENV on RANCHER" wget https://github.com/rancher/cli/releases/download/v${RANCHER_CLI_VER}-rc2\ 
/rancher-linux-amd64-v${RANCHER_CLI_VER}-rc2.tar.gz @@ -142,6 +168,10 @@ echo "Creating kubernetes environment named ${ENVIRON}" ./rancher env create -t kubernetes $ENVIRON > kube_env_id.json PROJECT_ID=\$(<kube_env_id.json) echo "env id: \$PROJECT_ID" + +echo "Waiting for ${ENVIRON} creation - 1 min at \$(date)" +sleep 1m + export RANCHER_HOST_URL=http://${MASTER}:8080/v1/projects/\$PROJECT_ID echo "you should see an additional kubernetes environment" ./rancher env ls @@ -153,7 +183,7 @@ REG_URL_RESPONSE=\`curl -X POST -u \$KEY_PUBLIC:\$KEY_SECRET\ "http://$MASTER:8080/v1/projects/\$PROJECT_ID/registrationtokens"\` echo "REG_URL_RESPONSE: \$REG_URL_RESPONSE" echo "Waiting for the server to finish url configuration - 1 min at \$(date)" -sleep 60 +sleep 1m # see registrationUrl in REGISTRATION_TOKENS=\`curl http://$MASTER:8080/v2-beta/registrationtokens\` echo "REGISTRATION_TOKENS: \$REGISTRATION_TOKENS" @@ -197,36 +227,52 @@ echo "docker run --rm --privileged\ \$REGISTRATION_DOCKER\ \$RANCHER_URL/v1/scripts/\$REGISTRATION_TOKEN"\ > /tmp/rancher_register_host -chown opnfv /tmp/rancher_register_host +chown $SSH_USER /tmp/rancher_register_host RANCHERINSTALL -wait echo "REGISTER TOKEN" -HOSTREGTOKEN=$(ssh opnfv@"$MASTER" cat /tmp/rancher_register_host) +HOSTREGTOKEN=$(ssh $SSH_OPTIONS $SSH_USER@"$MASTER" cat /tmp/rancher_register_host) echo "$HOSTREGTOKEN" echo "REGISTERING HOSTS WITH RANCHER ENVIRONMENT '$ENVIRON'" -echo "$SERVERS" +echo "$RANCHER_SLAVES" -for MACHINE in $SERVERS; +for MACHINE in $RANCHER_SLAVES; do -ssh opnfv@"$MACHINE" "bash -s" <<REGISTERHOST & - sudo su +ssh $SSH_OPTIONS $SSH_USER@"$MACHINE" "bash -s" <<REGISTERHOST & + sudo -i $HOSTREGTOKEN sleep 5 echo "Host $MACHINE waiting for host registration 5 min at \$(date)" - sleep 300 + sleep 5m REGISTERHOST done wait +echo "CONFIGURING NFS ON SLAVES" +echo "$SLAVES" + +for SLAVE in $SLAVES; +do +ssh $SSH_OPTIONS $SSH_USER@"$SLAVE" "bash -s" <<CONFIGURENFS & + sudo -i + apt-get install nfs-common -y + 
mkdir /dockerdata-nfs + chmod 777 /dockerdata-nfs + echo "$MASTER:/dockerdata-nfs /dockerdata-nfs nfs auto 0 0" >> /etc/fstab + mount -a + mount | grep dockerdata-nfs +CONFIGURENFS +done +wait + echo "DEPLOYING OOM ON RANCHER WITH MASTER" echo "$MASTER" +TMP_POD_LIST='/tmp/onap_pod_list.txt' -ssh opnfv@"$MASTER" "bash -s" <<OOMDEPLOY & -sudo su -sysctl -w vm.max_map_count=262144 +ssh $SSH_OPTIONS $SSH_USER@"$MASTER" "bash -s" <<OOMDEPLOY +sudo -i rm -rf oom echo "pulling new oom" git clone -b $BRANCH http://gerrit.onap.org/r/oom @@ -242,101 +288,89 @@ chmod 777 prepull_docker.sh ./prepull_docker.sh echo "starting onap pods" cd oom/kubernetes/ + +# Disable ONAP components +if [ -n "$ONAP_COMPONENT_DISABLE" ] ; then + echo -n "Disable following ONAP components:" + for COMPONENT in $ONAP_COMPONENT_DISABLE; do + echo -n " \$COMPONENT" + sed -i '/^'\${COMPONENT}':$/!b;n;s/enabled: *true/enabled: false/' onap/values.yaml + done + echo +fi + helm init --upgrade -helm serve & +# run helm server on the background and detached from current shell +nohup helm serve 0<&- &>/dev/null & echo "Waiting for helm setup for 5 min at \$(date)" -sleep 300 +sleep 5m helm version helm repo add local http://127.0.0.1:8879 helm repo list make all -helm install local/onap -n dev --namespace $ENVIRON +if ( ! 
helm install local/onap -n dev --namespace $ENVIRON) ; then + echo "ONAP installation has failed at \$(date)" + exit 1 +fi + cd ../../ -echo "Waiting for all pods to be up for 15-80 min at \$(date)" -FAILED_PODS_LIMIT=0 -MAX_WAIT_PERIODS=480 # 120 MIN +echo "Waiting for ONAP pods to be up \$(date)" +echo "Ignore failure of sdnc-ansible-server, see SDNC-443" +function get_onap_pods() { + kubectl get pods --namespace $ENVIRON > $TMP_POD_LIST + return \$(cat $TMP_POD_LIST | wc -l) +} +FAILED_PODS_LIMIT=1 # maximal number of failed ONAP PODs +ALL_PODS_LIMIT=20 # minimum ONAP PODs to be up & running +WAIT_PERIOD=60 # wait period in seconds +MAX_WAIT_TIME=\$((3600*3)) # max wait time in seconds +MAX_WAIT_PERIODS=\$((\$MAX_WAIT_TIME/\$WAIT_PERIOD)) COUNTER=0 -PENDING_PODS=0 -while [ \$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) \ --gt \$FAILED_PODS_LIMIT ]; do - PENDING=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) - PENDING_PODS=\$PENDING - sleep 15 - LIST_PENDING=\$(kubectl get pods --all-namespaces -o wide | grep -E '0/|1/2' ) - echo "\${LIST_PENDING}" - echo "\${PENDING} pending > \${FAILED_PODS_LIMIT} at the \${COUNTER}th"\ - " 15 sec interval out of \${MAX_WAIT_PERIODS}" - echo "" - COUNTER=\$((\$COUNTER + 1 )) +get_onap_pods +ALL_PODS=\$? +PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) +while [ \$PENDING -gt \$FAILED_PODS_LIMIT -o \$ALL_PODS -lt \$ALL_PODS_LIMIT ]; do + # print header every 20th line + if [ \$COUNTER -eq \$((\$COUNTER/20*20)) ] ; then + printf "%-3s %-29s %-3s/%s\n" "Nr." "Datetime of check" "Err" "Total PODs" + fi + COUNTER=\$((\$COUNTER+1)) + printf "%3s %-29s %3s/%-3s\n" \$COUNTER "\$(date)" \$PENDING \$ALL_PODS + sleep \$WAIT_PERIOD if [ "\$MAX_WAIT_PERIODS" -eq \$COUNTER ]; then FAILED_PODS_LIMIT=800 + ALL_PODS_LIMIT=0 fi + get_onap_pods + ALL_PODS=\$? 
+ PENDING=\$(grep -E '0/|1/2' $TMP_POD_LIST | wc -l) done -echo "Report on non-running containers" -PENDING=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2') -PENDING_COUNT=\$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) -PENDING_COUNT_AAI=\$(kubectl get pods -n $ENVIRON | grep aai- \ -| grep -E '0/|1/2' | wc -l) - -echo "Check filebeat 2/2 count for ELK stack logging consumption" -FILEBEAT=\$(kubectl get pods --all-namespaces -a | grep 2/) -echo "\${FILEBEAT}" -echo "sleep 5 min - to allow rest frameworks to finish at \$(date)" -sleep 300 -echo "List of ONAP Modules" -LIST_ALL=\$(kubectl get pods --all-namespaces -a --show-all ) -echo "\${LIST_ALL}" -echo "run healthcheck 2 times to warm caches and frameworks"\ - "so rest endpoints report properly - see OOM-447" - -echo "curl with aai cert to cloud-region PUT" -curl -X PUT https://127.0.0.1:30233/aai/v11/cloud-infrastructure/\ -cloud-regions/cloud-region/CloudOwner/RegionOne \ ---data "@aai-cloud-region-put.json" \ --H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" \ --H "X-TransactionId:jimmy-postman" \ --H "X-FromAppId:AAI" \ --H "Content-Type:application/json" \ --H "Accept:application/json" \ ---cacert aaiapisimpledemoopenecomporg_20171003.crt -k - -echo "get the cloud region back" -curl -X GET https://127.0.0.1:30233/aai/v11/cloud-infrastructure/\ -cloud-regions/ \ --H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" \ --H "X-TransactionId:jimmy-postman" \ --H "X-FromAppId:AAI" \ --H "Content-Type:application/json" \ --H "Accept:application/json" \ ---cacert aaiapisimpledemoopenecomporg_20171003.crt -k - -# OOM-484 - robot scripts moved +get_onap_pods +cp $TMP_POD_LIST ~/onap_all_pods.txt +echo +echo "========================" +echo "ONAP INSTALLATION REPORT" +echo "========================" +echo +echo "List of Failed PODs" +echo "-------------------" +grep -E '0/|1/2' $TMP_POD_LIST | tee ~/onap_failed_pods.txt +echo +echo "Summary:" +echo "--------" +echo " PODs 
Failed: \$(cat ~/onap_failed_pods.txt | wc -l)"
+echo " PODs Total: \$(cat ~/onap_all_pods.txt | wc -l)"
+echo
+echo "ONAP health TC results"
+echo "----------------------"
cd oom/kubernetes/robot
-echo "run healthcheck prep 1"
-# OOM-722 adds namespace parameter
-if [ "$BRANCH" == "amsterdam" ]; then
-    ./ete-k8s.sh health > ~/health1.out
-else
-    ./ete-k8s.sh $ENVIRON health > ~/health1.out
-fi
-echo "sleep 5 min at \$(date)"
-sleep 300
-echo "run healthcheck prep 2"
-if [ "$BRANCH" == "amsterdam" ]; then
-    ./ete-k8s.sh health > ~/health2.out
-else
-    ./ete-k8s.sh $ENVIRON health > ~/health2.out
-fi
-echo "run healthcheck for real - wait a further 5 min at \$(date)"
-sleep 300
-if [ "$BRANCH" == "amsterdam" ]; then
-    ./ete-k8s.sh health
-else
-    ./ete-k8s.sh $ENVIRON health
-fi
+./ete-k8s.sh $ENVIRON health | tee ~/onap_health.txt
+echo "==============================="
+echo "END OF ONAP INSTALLATION REPORT"
+echo "==============================="
OOMDEPLOY
-wait
-echo "Finished install, ruturned from Master"
+
+echo "Finished install, returned from Master at $(date)"
exit 0
diff --git a/ci/plot-results.sh b/ci/plot-results.sh
new file mode 100755
index 0000000..22ab1d6
--- /dev/null
+++ b/ci/plot-results.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright 2017-2018 Intel Corporation., Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. 
+ +# Script for graphical representation of AUTO result summaries +# +# Usage: +# ./create_graph [directory] +# +# where: +# "directory" is an optional directory name, where summary of auto +# installation report is stored +# Default value: "$HOME/auto_ci_daily_logs" + +NUMBER_OF_RESULTS=50 # max number of recent results to be compared in graph +DIR="$HOME/auto_ci_daily_logs" + +function clean_data() { + rm -rf summary.csv + rm -rf graph*plot + rm -rf graph*txt + rm -rf graph*png +} + +function prepare_data() { + FIRST=1 + CSV_LIST=$(ls -1 ${DIR}/deploy_summary*csv | tail -n ${NUMBER_OF_RESULTS}) + for result_file in $CSV_LIST ; do + tmp_dir=`dirname $result_file` + TIMESTAMP=`basename $tmp_dir | cut -d'_' -f2-` + if [ $FIRST -eq 1 ] ; then + head -n1 $result_file > summary.csv + FIRST=0 + fi + tail -n+2 ${result_file} >> summary.csv + done +} + +function plot_data() { + echo "Created graphs:" + for TYPE in png txt; do + for GRAPH in "graph_pods" "graph_tcs" ; do + OUTPUT="$GRAPH.plot" + GRAPH_NAME="${GRAPH}.${TYPE}" + cat > $OUTPUT <<- EOM +set datafile separator "," +set xdata time +set timefmt "%Y%m%d_%H%M%S" +set format x "%m-%d" +set xlabel "date" +set format y "%8.0f" +EOM + if [ "$TYPE" == "png" ] ; then + echo 'set term png size 1024,768' >> $OUTPUT + else + echo 'set term dumb 100,30' >> $OUTPUT + fi + + if [ "$GRAPH" == "graph_pods" ] ; then + echo 'set ylabel "PODs"' >> $OUTPUT + echo 'set yrange [0:]' >> $OUTPUT + echo "set title \"ONAP K8S PODs\"" >> $OUTPUT + COL1=3 + COL2=4 + else + echo 'set ylabel "testcases"' >> $OUTPUT + echo 'set yrange [0:]' >> $OUTPUT + echo "set title \"ONAP Health TestCases\"" >> $OUTPUT + COL1=5 + COL2=6 + fi + + iter=0 + echo "set output \"$GRAPH_NAME\"" >> $OUTPUT + echo -n "plot " >> $OUTPUT + echo $"'summary.csv' using 1:$COL1 with linespoints title columnheader($COL1) \\" >> $OUTPUT + echo $", 'summary.csv' using 1:$COL2 with linespoints title columnheader($COL2) \\" >> $OUTPUT + gnuplot $OUTPUT + echo -e 
"\t$GRAPH_NAME" + done + done +} + +# +# Main body +# +clean_data +prepare_data +plot_data diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..9e0614b --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,18 @@ +.. _auto: + +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. SPDX-License-Identifier CC-BY-4.0 +.. (c) Open Platform for NFV Project, Inc. and its contributors + +********************************* +OPNFV Auto (ONAP-Automated OPNFV) +********************************* + +.. toctree:: + :numbered: + :maxdepth: 3 + + release/configguide/index + release/userguide/index + release/release-notes/index diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst index ba1a3da..07b7ab6 100644 --- a/docs/release/configguide/index.rst +++ b/docs/release/configguide/index.rst @@ -10,7 +10,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Configuration Guide ***************************************************** .. toctree:: - :numbered: :maxdepth: 3 Auto-featureconfig.rst diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst index 264f21c..4c879f7 100644 --- a/docs/release/release-notes/index.rst +++ b/docs/release/release-notes/index.rst @@ -9,7 +9,6 @@ OPNFV Auto (ONAP-Automated OPNFV) Release Notes =============================================== .. toctree:: - :numbered: :maxdepth: 3 Auto-release-notes.rst diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst index dd308dc..099622c 100644 --- a/docs/release/userguide/index.rst +++ b/docs/release/userguide/index.rst @@ -15,7 +15,6 @@ OPNFV Auto (ONAP-Automated OPNFV) User Guide .. by the installer project. .. toctree:: - :numbered: :maxdepth: 3 UC01-feature.userguide.rst |