author     Bryan Sullivan <bryan.sullivan@att.com>  2017-03-06 17:18:44 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>    2017-03-06 17:18:44 +0000
commit     146b361b37bb0433f0fdb5c8ef3bcb1ab9580266 (patch)
tree       94983a5a9452374dbee09114cf671e161266f5ee
parent     0655e7e09e0e4368c962c96b0db537b4c7a7cd37 (diff)
parent     c5fe902ac6e773ec7232ccca67a64856e1b508d0 (diff)
Merge "Update for Danube (work in progress)"
-rw-r--r--   tests/VES_Reference.sh                                 411
-rwxr-xr-x   tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml   146
-rwxr-xr-x   tests/blueprints/tosca-vnfd-hello-ves/start.sh         101
-rw-r--r--   tests/vHello_VES.sh                                    597
-rw-r--r--   tests/vHello_VES_3Node.sh                              430
-rw-r--r--   tests/vLamp_Ansible_VES.sh                              46
6 files changed, 952 insertions, 779 deletions
diff --git a/tests/VES_Reference.sh b/tests/VES_Reference.sh
deleted file mode 100644
index fa5bde1..0000000
--- a/tests/VES_Reference.sh
+++ /dev/null
@@ -1,411 +0,0 @@
-#!/bin/bash
-# Copyright 2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: Deployment script for the VNF Event Stream (VES) Reference VNF
-# and Test Collector. Runs the VES Collector in a docker container on the
-# OPNFV jumphost, and the VES Reference VNF as an OpenStack VM.
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# $ git clone https://gerrit.opnfv.org/gerrit/ves
-# $ cd ves/tests
-# $ bash VES_Reference.sh [setup|start|run|stop|clean]
-# setup: setup test environment
-# start: install blueprint and run test
-# run: setup test environment and run test
-# stop: stop test and uninstall blueprint
-# clean: cleanup after test
-
-trap 'fail' ERR
-
-pass() {
- echo "$0: Hooray!"
- set +x #echo off
- exit 0
-}
-
-fail() {
- echo "$0: Test Failed!"
- set +x
- exit 1
-}
-
-function setenv () {
- echo "$0: Setup OpenStack environment variables"
- source utils/setenv.sh /tmp/VES
-}
-
-get_floating_net () {
- network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
- for id in ${network_ids[@]}; do
- [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
- done
- if [[ $FLOATING_NETWORK_ID ]]; then
- FLOATING_NETWORK_NAME=$(openstack network show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
- else
- echo "$0: Floating network not found"
- exit 1
- fi
-}
-
-try () {
- count=$1
- $3
- while [[ $? -eq 1 && $count -gt 0 ]]
- do
- sleep $2
- let count=$count-1
- $3
- done
- if [[ $count -eq 0 ]]; then echo "$0: Command \"$3\" was not successful after $1 tries"; fi
-}
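-
-# Usage sketch for try (illustrative only; any idempotent command string
-# works as the third argument) - retry up to 10 times, 5 seconds apart:
-#   try 10 5 "openstack security group delete VES_Reference"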
-
-function create_container () {
- echo "$0: Creating docker container"
- echo "$0: Copy this script to /tmp/VES"
- mkdir /tmp/VES
- cp $0 /tmp/VES/.
- chmod 755 /tmp/VES/*.sh
-
- echo "$0: reset blueprints folder"
- if [[ -d /tmp/VES/blueprints/ ]]; then rm -rf /tmp/VES/blueprints/; fi
- mkdir -p /tmp/VES/blueprints/
-
- echo "$0: Setup admin-openrc.sh"
- setenv
-
- echo "$0: Setup container"
- if [ "$dist" == "Ubuntu" ]; then
- # xenial is needed for python 3.5
- sudo docker pull ubuntu:xenial
- sudo service docker start
- # Port 30000 is the default for the VES Collector
- sudo docker run -it -d -p 30000:30000 -v /tmp/VES/:/tmp/VES \
- --name VES ubuntu:xenial /bin/bash
- else
- # Centos
- echo "Centos-based install"
- sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
-[dockerrepo]
-name=Docker Repository
-baseurl=https://yum.dockerproject.org/repo/main/centos/7/
-enabled=1
-gpgcheck=1
-gpgkey=https://yum.dockerproject.org/gpg
-EOF
- sudo yum install -y docker-engine
- # xenial is needed for python 3.5
- sudo service docker start
- sudo docker pull ubuntu:xenial
- # Port 30000 is the default for the VES Collector
- sudo docker run -i -t -d -p 30000:30000 -v /tmp/VES/:/tmp/VES \
- --name VES ubuntu:xenial /bin/bash
- fi
-}
-
-setup_Openstack () {
- echo "$0: install OpenStack clients"
- pip install --upgrade python-openstackclient
- pip install --upgrade python-glanceclient
- pip install --upgrade python-neutronclient
- pip install --upgrade python-heatclient
-# pip install --upgrade keystonemiddleware
-
- echo "$0: setup OpenStack environment"
- source /tmp/VES/admin-openrc.sh
-
-  echo "$0: determine external (public) network as the floating ip network"
- get_floating_net
-
- echo "$0: Setup centos7-server glance image if needed"
- if [[ -z $(openstack image list | awk "/ centos7-server / { print \$2 }") ]]; \
- then glance --os-image-api-version 1 image-create \
- --name centos7-server \
- --disk-format qcow2 \
- --location http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1607.qcow2 \
- --container-format bare; fi
-
- if [[ -z $(neutron net-list | awk "/ internal / { print \$2 }") ]]; then
- echo "$0: Create internal network"
- neutron net-create internal
-
- echo "$0: Create internal subnet"
- neutron subnet-create internal 10.0.0.0/24 --name internal \
- --gateway 10.0.0.1 --enable-dhcp \
- --allocation-pool start=10.0.0.2,end=10.0.0.254 \
- --dns-nameserver 8.8.8.8
- fi
-
- if [[ -z $(neutron router-list | awk "/ public_router / { print \$2 }") ]]; then
- echo "$0: Create router"
- neutron router-create public_router
-
- echo "$0: Create router gateway"
- neutron router-gateway-set public_router $FLOATING_NETWORK_NAME
-
- echo "$0: Add router interface for internal network"
- neutron router-interface-add public_router subnet=internal
- fi
-}
-
-setup_Collector () {
- echo "$0: Install dependencies - OS specific"
- if [ "$dist" == "Ubuntu" ]; then
- apt-get update
- apt-get install -y python
- apt-get install -y python-pip
- apt-get install -y git
- else
- yum install -y python
- yum install -y python-pip
- yum install -y git
- fi
- pip install --upgrade pip
-
- echo "$0: clone VES Collector repo"
- cd /tmp/VES/blueprints/
- git clone https://github.com/att/evel-test-collector.git
- echo "$0: update collector.conf"
- cd /tmp/VES/blueprints/evel-test-collector
- sed -i -- 's~/var/log/att/~/tmp/VES/~g' config/collector.conf
-}
-
-start_Collector () {
- echo "$0: start the VES Collector"
- cd /tmp/VES/blueprints/evel-test-collector
- python code/collector/collector.py \
- --config config/collector.conf \
- --section default \
- --verbose
-}
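-
-# Collector sanity check (a hypothetical probe, not part of this script):
-# with collector.py listening on its default port 30000, a POST to the VES
-# event listener API should draw a response (credentials per collector.conf):
-#   curl -u user:pass -H "Content-Type: application/json" \
-#     -d '{"event": {}}' http://localhost:30000/eventListener/v1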
-
-setup_Reference_VNF_VM () {
- echo "$0: Create Nova key pair"
- nova keypair-add VES > /tmp/VES/VES-key
- chmod 600 /tmp/VES/VES-key
-
- echo "$0: Add ssh key"
- eval $(ssh-agent -s)
- ssh-add /tmp/VES/VES-key
-
- echo "$0: clone VES Reference VNF repo"
- cd /tmp/VES/blueprints/
- git clone https://github.com/att/evel-reporting-reference-vnf.git
-
- echo "$0: customize VES Reference VNF Heat template"
- cd evel-reporting-reference-vnf/hot
- ID=$(openstack image list | awk "/ centos7-server / { print \$2 }")
- sed -i -- "s/40299aa3-2921-43b0-86b9-56c28a2b5232/$ID/g" event_reporting_vnf.env.yaml
- ID=$(neutron net-list | awk "/ internal / { print \$2 }")
- sed -i -- "s/84985f60-fbba-4a78-ba83-2815ff620dbc/$ID/g" event_reporting_vnf.env.yaml
- sed -i -- "s/127.0.0.1/$JUMPHOST/g" event_reporting_vnf.env.yaml
- sed -i -- "s/my-keyname/VES/g" event_reporting_vnf.env.yaml
-
- echo "$0: Create VES Reference VNF via Heat"
- heat stack-create -e event_reporting_vnf.env.yaml \
- -f event_reporting_vnf.template.yaml VES
-
- echo "$0: Wait for VES Reference VNF to go Active"
- COUNTER=0
- until [[ $(heat stack-list | awk "/ VES / { print \$6 }") == "CREATE_COMPLETE" ]]; do
- sleep 5
- let COUNTER+=1
- if [[ $COUNTER > "20" ]]; then fail; fi
- done
-
- echo "$0: Get Server ID"
- SID=$(heat resource-list VES | awk "/ OS::Nova::Server / { print \$4 }")
-
- echo "$0: associate SSH security group"
- # TODO: Update Heat template to include security group
- if [[ $(openstack security group list | awk "/ vHello / { print \$2 }") ]]; then neutron security-group-delete vHello; fi
- openstack security group create VES_Reference
- openstack security group rule create --ingress --protocol TCP --dst-port 22:22 VES_Reference
- openstack security group rule create --ingress --protocol TCP --dst-port 80:80 VES_Reference
- openstack server add security group $SID VES_Reference
-
- echo "$0: associate floating IP"
- # TODO: Update Heat template to include floating IP (if supported)
- FIP=$(openstack floating ip create $FLOATING_NETWORK_NAME | awk "/floating_ip_address/ { print \$4 }")
- nova floating-ip-associate $SID $FIP
-
-# scp -i /tmp/VES/VES-key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/VES/VES_Reference.sh centos@$FIP:/home/centos
- scp -i /tmp/VES/VES-key -o UserKnownHostsFile=/dev/null \
- -o StrictHostKeyChecking=no \
- $0 centos@$FIP:/home/centos
-# run thru setup_Reference_VNF manually to verify
-# ssh -i /tmp/VES/VES-key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no centos@$FIP
-# ssh -i /tmp/VES/VES-key -x -o UserKnownHostsFile=/dev/null
-# -o StrictHostKeyChecking=no
-# centos@$FIP \
-# "nohup source $0 setup_VNF &"
-}
-
-setup_Reference_VNF () {
- echo "$0: Install dependencies"
- sudo yum update -y
- sudo yum install -y wget
- sudo yum install -y gcc
- sudo yum install -y openssl-devel
- sudo yum install -y epel-release
- sudo yum install -y python-pip
- sudo pip install --upgrade pip
- sudo yum install -y git
-
- echo "$0: Install Django"
- sudo pip install django
-
- echo "$0: Install Apache"
- sudo yum install -y httpd httpd-devel
-
- echo "$0: Install mod_python"
- sudo yum install -y python-devel
- mkdir ~/mod_python-3.4.1
- cd ~/mod_python-3.4.1
- wget http://dist.modpython.org/dist/mod_python-3.4.1.tgz
- tar xvf mod_python-3.4.1.tgz
- cd mod_python-3.4.1
-
- # Edit .../dist/version.sh to remove the dependency on Git as described at
- # http://stackoverflow.com/questions/20022952/fatal-not-a-git-repository-when-installing-mod-python
- sed \
- -e 's/(git describe --always)/(git describe --always 2>\/dev\/null)/g' \
- -e 's/`git describe --always`/`git describe --always 2>\/dev\/null`/g' \
- -i $( find . -type f -name Makefile\* -o -name version.sh )
-
- ./configure
- make
- sudo make install
- make test
-
- echo "$0: Install mod_wsgi"
- sudo yum install -y mod_wsgi
-
- echo "$0: clone VES Reference VNF repo"
- cd ~
- git clone https://github.com/att/evel-reporting-reference-vnf.git
-
- echo "$0: Setup collector"
-
- sudo mkdir -p /opt/att/collector
- sudo install -m=644 -t /opt/att/collector ~/evel-reporting-reference-vnf/code/collector/*
-
- echo "$0: Setup Reference VNF website"
- sudo mkdir -p /opt/att/website/
- sudo cp -r ~/evel-reporting-reference-vnf/code/webserver/django/* /opt/att/website/
- sudo chown -R root:root /opt/att/website/
- sudo mkdir -p /var/log/att/
- echo "eh?" | sudo tee /var/log/att/django.log
-
- echo "$0: Create database"
-
- cd /opt/att/website
- sudo python manage.py migrate
- sudo python manage.py createsuperuser
- sudo rm -f /var/log/att/django.log
-
- sudo systemctl daemon-reload
- sudo systemctl enable httpd
- sudo systemctl restart httpd
-
- echo "$0: Setup website backend"
- sudo mkdir -p /opt/att/backend/
- sudo install -m=644 -t /opt/att/backend ~/evel-reporting-reference-vnf/code/backend/*
- sudo install -m=644 ~/evel-reporting-reference-vnf/config/backend.service /etc/systemd/system
- sudo systemctl daemon-reload
- sudo systemctl enable backend
- sudo systemctl restart backend
-
-
- echo "$0: Change security context for database"
- chcon -t httpd_sys_content_t db.sqlite3
- chcon -t httpd_sys_content_t .
- setsebool -P httpd_unified 1
- setsebool -P httpd_can_network_connect=1
-
- echo "$0: Gather static files"
- sudo python manage.py collectstatic
-
- echo "$0: Install jsonschema"
- sudo pip install jsonschema
-
- echo "$0: Put backend.service into /etc/systemd/system"
- sudo systemctl daemon-reload
- sudo systemctl start backend
- sudo systemctl status backend
- sudo systemctl enable backend
-
- # from initialize-event-database.sh
- cd /opt/att/website
- sudo python manage.py migrate
- sudo python manage.py createsuperuser
-
- # from go-webserver.sh
- sudo python /opt/att/website/manage.py runserver &
-
- # from go-backend.sh
- sudo python /opt/att/backend/backend.py --config ~/evel-reporting-reference-vnf/config/backend.conf --section default --verbose &
-}
-
-clean () {
- echo "$0: delete container"
- CONTAINER=$(sudo docker ps -a | awk "/VES/ { print \$1 }")
- sudo docker stop $CONTAINER
- sudo docker rm -v $CONTAINER
-}
-
-forward_to_container () {
- echo "$0: pass $1 command to VES_Reference.sh in container"
- CONTAINER=$(sudo docker ps -a | awk "/VES/ { print \$1 }")
- sudo docker exec $CONTAINER /bin/bash /tmp/VES/VES_Reference.sh $1 $1
- if [ $? -eq 1 ]; then fail; fi
-}
-
-dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
-case "$1" in
- setup)
- if [[ $# -eq 1 ]]; then
- create_container
- echo "$0: Execute VES_Reference.sh in the container"
- CONTAINER=$(sudo docker ps -l | awk "/VES/ { print \$1 }")
- if [ "$dist" == "Ubuntu" ]; then
- sudo docker exec -it $CONTAINER /bin/bash /tmp/VES/VES_Reference.sh setup setup
- else
- sudo docker exec -i -t $CONTAINER /bin/bash /tmp/VES/VES_Reference.sh setup setup
- fi
- else
- # Running in the container, continue VES setup
- setup_Collector
- setup_Openstack
- setup_Reference_VNF_VM
- start_Collector
- fi
- pass
- ;;
- setup_VNF)
- setup_Reference_VNF
- ;;
- clean)
- echo "$0: Uninstall"
- clean
- pass
- ;;
- *)
- echo "usage: bash VES_Reference.sh [setup|clean]"
- echo "setup: setup test environment"
- echo "clean: cleanup after test"
- fail
-esac
diff --git a/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml b/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
index d17e300..c652423 100755
--- a/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
+++ b/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
@@ -19,6 +19,61 @@ topology_template:
image: models-xenial-server
availability_zone: nova
mgmt_driver: noop
+ config_drive: true
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ sudo mount /dev/sr0 /mnt/
+ mkdir /tmp/www
+ cd /tmp/www
+ mkdir html
+ cat >Dockerfile <<EOM
+ FROM nginx
+ COPY html /usr/share/nginx/html
+ EOM
+ host=$(hostname)
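+        # pull the instance ID from the config-drive metadata; the cut below
+        # is a naive comma-field parse of meta_data.json, not a JSON parser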
+ id=$(cut -d ',' -f 3 /mnt/openstack/latest/meta_data.json)
+ cat <<EOM >html/index.html
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <title>Hello World!</title>
+ <meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1"/>
+ <style>
+ body { width: 100%; background-color: white; color: black; padding: 0px; margin: 0px; font-family: sans-serif; font-size:100%; }
+ </style>
+ </head>
+ <body>
+ <large>Welcome to OPNFV @ $host!</large><br/>
+ <a href="http://wiki.opnfv.org"><img src="https://www.opnfv.org/sites/all/themes/opnfv/logo.png"></a>
+ <div>
+        <p>Instance ID from config drive file /mnt/openstack/latest/meta_data.json</p>
+ <pre>
+ $id
+ </pre>
+ <p>Server setup completed at $(date)</p>
+ </div>
+ </body></html>
+ EOM
+ wget -O /tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
+ sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
+ curl -fsSL https://apt.dockerproject.org/gpg | sudo apt-key add -
+ sudo apt-key update
+ echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" >~/dockerrepo
+        cat ~/dockerrepo | sudo tee -a /etc/apt/sources.list.d/docker.list
+ sudo add-apt-repository "deb https://apt.dockerproject.org/repo/ ubuntu-xenial main"
+ sudo apt-get update
+ sudo apt-get install -y docker-engine
+ sudo docker pull nginx
+ sudo docker build -t vhello .
+ sudo docker run --name vHello -d -p 80:80 vhello
config: |
param0: key1
param1: key2
@@ -56,6 +111,61 @@ topology_template:
image: models-xenial-server
availability_zone: nova
mgmt_driver: noop
+ config_drive: true
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ sudo mount /dev/sr0 /mnt/
+ mkdir /tmp/www
+ cd /tmp/www
+ mkdir html
+ cat >Dockerfile <<EOM
+ FROM nginx
+ COPY html /usr/share/nginx/html
+ EOM
+ host=$(hostname)
+ id=$(cut -d ',' -f 3 /mnt/openstack/latest/meta_data.json)
+ cat <<EOM >html/index.html
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <title>Hello World!</title>
+ <meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1"/>
+ <style>
+ body { width: 100%; background-color: white; color: black; padding: 0px; margin: 0px; font-family: sans-serif; font-size:100%; }
+ </style>
+ </head>
+ <body>
+ <large>Welcome to OPNFV @ $host!</large><br/>
+ <a href="http://wiki.opnfv.org"><img src="https://www.opnfv.org/sites/all/themes/opnfv/logo.png"></a>
+ <div>
+        <p>Instance ID from config drive file /mnt/openstack/latest/meta_data.json</p>
+ <pre>
+ $id
+ </pre>
+ <p>Server setup completed at $(date)</p>
+ </div>
+ </body></html>
+ EOM
+ wget -O /tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
+ sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
+ curl -fsSL https://apt.dockerproject.org/gpg | sudo apt-key add -
+ sudo apt-key update
+ echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" >~/dockerrepo
+        cat ~/dockerrepo | sudo tee -a /etc/apt/sources.list.d/docker.list
+ sudo add-apt-repository "deb https://apt.dockerproject.org/repo/ ubuntu-xenial main"
+ sudo apt-get update
+ sudo apt-get install -y docker-engine
+ sudo docker pull nginx
+ sudo docker build -t vhello .
+ sudo docker run --name vHello -d -p 80:80 vhello
config: |
param0: key1
param1: key2
@@ -93,6 +203,32 @@ topology_template:
image: models-xenial-server
availability_zone: nova
mgmt_driver: noop
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ cat << EOF >/tmp/setup.sh
+ echo "1" | sudo tee /proc/sys/net/ipv4/ip_forward
+ sudo sysctl net.ipv4.ip_forward=1
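+        # intended round-robin: the iptables statistic match (nth mode) DNATs
+        # alternating new port-80 connections to the two web server VDUs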
+ sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state \\
+ --state NEW -m statistic --mode nth --every 2 --packet 0 \\
+ -j DNAT --to-destination <vdu1_ip>:80
+ sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state \\
+ --state NEW -m statistic --mode nth --every 2 --packet 0 \\
+ -j DNAT --to-destination <vdu2_ip>:80
+ sudo iptables -t nat -A POSTROUTING -j MASQUERADE
+ EOF
+ bash /tmp/setup.sh
+ config: |
+ param0: key1
+ param1: key2
+
config: |
param0: key1
param1: key2
@@ -130,6 +266,16 @@ topology_template:
image: models-xenial-server
availability_zone: nova
mgmt_driver: noop
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
config: |
param0: key1
param1: key2
diff --git a/tests/blueprints/tosca-vnfd-hello-ves/start.sh b/tests/blueprints/tosca-vnfd-hello-ves/start.sh
index 26fb667..daaaf80 100755
--- a/tests/blueprints/tosca-vnfd-hello-ves/start.sh
+++ b/tests/blueprints/tosca-vnfd-hello-ves/start.sh
@@ -21,15 +21,13 @@
# How to use:
# Intended to be invoked from vHello_VES.sh
# $ bash start.sh type params
-# type: type of VNF component [webserver|lb|monitor|collectd]
-# webserver params: ID CollectorIP username password
-# lb params: ID CollectorIP username password app1_ip app2_ip
+# type: type of VNF component [monitor|collectd]
+#     collectd params: ID CollectorIP username password
+#   monitor params: VDU1_ID VDU2_ID VDU3_ID username password
# ID: VM ID
# CollectorIP: IP address of the collector
# username: Username for Collector RESTful API authentication
# password: Password for Collector RESTful API authentication
-# app1_ip app2_ip: address of the web servers
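+#
+# Example (values illustrative): vHello_VES.sh invokes this on VDU4 as
+#   bash start.sh monitor <vdu1_id> <vdu2_id> <vdu3_id> hello world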
setup_collectd () {
guest=$1
@@ -53,13 +51,13 @@ setup_collectd () {
cd ~
echo "$0: Install VES collectd plugin"
- git clone https://github.com/maryamtahhan/OpenStackBarcelonaDemo.git
+ git clone https://git.opnfv.org/barometer
- sudo sed -i -- "s/FQDNLookup true/FQDNLookup false/" /etc/collectd/collectd.conf
- sudo sed -i -- "s/#LoadPlugin cpu/LoadPlugin cpu/" /etc/collectd/collectd.conf
- sudo sed -i -- "s/#LoadPlugin disk/LoadPlugin disk/" /etc/collectd/collectd.conf
- sudo sed -i -- "s/#LoadPlugin interface/LoadPlugin interface/" /etc/collectd/collectd.conf
- sudo sed -i -- "s/#LoadPlugin memory/LoadPlugin memory/" /etc/collectd/collectd.conf
+ sudo sed -i -- "s/FQDNLookup true/FQDNLookup false/" $conf
+ sudo sed -i -- "s/#LoadPlugin cpu/LoadPlugin cpu/" $conf
+ sudo sed -i -- "s/#LoadPlugin disk/LoadPlugin disk/" $conf
+ sudo sed -i -- "s/#LoadPlugin interface/LoadPlugin interface/" $conf
+ sudo sed -i -- "s/#LoadPlugin memory/LoadPlugin memory/" $conf
if [[ "$guest" == true ]]; then
cat <<EOF | sudo tee -a $conf
@@ -67,7 +65,7 @@ setup_collectd () {
Globals true
</LoadPlugin>
<Plugin python>
- ModulePath "/home/ubuntu/OpenStackBarcelonaDemo/ves_plugin/"
+ ModulePath "/home/$USER/barometer/3rd_party/collectd-ves-plugin/ves_plugin/"
LogTraces true
Interactive false
Import "ves_plugin"
@@ -106,7 +104,7 @@ EOF
Globals true
</LoadPlugin>
<Plugin python>
- ModulePath "/home/$USER/OpenStackBarcelonaDemo/ves_plugin/"
+ ModulePath "/home/$USER/barometer/3rd_party/collectd-ves-plugin/ves_plugin/"
LogTraces true
Interactive false
Import "ves_plugin"
@@ -143,6 +141,7 @@ LoadPlugin aggregation
CalculateAverage true
</Aggregation>
</Plugin>
+LoadPlugin uuid
EOF
fi
sudo service collectd restart
@@ -182,80 +181,6 @@ setup_agent () {
setup_collectd true
}
-setup_webserver () {
- echo "$0: Setup website and dockerfile"
- mkdir ~/www
- mkdir ~/www/html
-
- # ref: https://hub.docker.com/_/nginx/
- cat > ~/www/Dockerfile <<EOM
-FROM nginx
-COPY html /usr/share/nginx/html
-EOM
-
- host=$(hostname)
- cat > ~/www/html/index.html <<EOM
-<!DOCTYPE html>
-<html>
-<head>
-<title>Hello World!</title>
-<meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1"/>
-<style>
-body { width: 100%; background-color: white; color: black; padding: 0px; margin: 0px; font-family: sans-serif; font-size:100%; }
-</style>
-</head>
-<body>
-Hello World!<br>
-Welcome to OPNFV @ $host!</large><br/>
-<a href="http://wiki.opnfv.org"><img src="https://www.opnfv.org/sites/all/themes/opnfv/logo.png"></a>
-</body></html>
-EOM
-
- wget https://git.opnfv.org/cgit/ves/plain/tests/blueprints/tosca-vnfd-hello-ves/favicon.ico -O ~/www/html/favicon.ico
-
- echo "$0: Install docker"
- # Per https://docs.docker.com/engine/installation/linux/ubuntulinux/
- # Per https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-16-04
- sudo apt-get install apt-transport-https ca-certificates
- sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
- echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get purge lxc-docker
- sudo apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual
- sudo apt-get install -y docker-engine
-
- echo "$0: Get nginx container and start website in docker"
- # Per https://hub.docker.com/_/nginx/
- sudo docker pull nginx
- cd ~/www
- sudo docker build -t vhello .
- sudo docker run --name vHello -d -p 80:80 vhello
-
- echo "$0: setup VES agents"
- setup_agent
-
- # Debug hints
- # id=$(sudo ls /var/lib/docker/containers)
- # sudo tail -f /var/lib/docker/containers/$id/$id-json.log \
- }
-
-setup_lb () {
- echo "$0: setup VES load balancer"
- echo "$0: install dependencies"
- sudo apt-get update
-
- echo "$0: Setup iptables rules"
- echo "1" | sudo tee /proc/sys/net/ipv4/ip_forward
- sudo sysctl net.ipv4.ip_forward=1
- sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state --state NEW -m statistic --mode nth --every 2 --packet 0 -j DNAT --to-destination $app1_ip:80
- sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state --state NEW -m statistic --mode nth --every 2 --packet 0 -j DNAT --to-destination $app2_ip:80
- sudo iptables -t nat -A POSTROUTING -j MASQUERADE
- # debug hints: list rules (sudo iptables -S -t nat), flush (sudo iptables -F -t nat)
-
- echo "$0: setup VES agents"
- setup_agent
-}
-
setup_monitor () {
echo "$0: setup VES Monitor"
echo "$0: install dependencies"
@@ -277,7 +202,7 @@ setup_monitor () {
sed -i -- "/vel_topic_name = /a vdu2_id = $vdu2_id" evel-test-collector/config/collector.conf
sed -i -- "/vel_topic_name = /a vdu1_id = $vdu1_id" evel-test-collector/config/collector.conf
- python monitor.py --config evel-test-collector/config/collector.conf --section default
+# python monitor.py --config evel-test-collector/config/collector.conf --section default
}
type=$1
@@ -293,8 +218,6 @@ else
collector_ip=$3
username=$4
password=$5
- app1_ip=$6
- app2_ip=$7
fi
setup_$type $1
diff --git a/tests/vHello_VES.sh b/tests/vHello_VES.sh
index cc85ebc..bbddfa5 100644
--- a/tests/vHello_VES.sh
+++ b/tests/vHello_VES.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2016 AT&T Intellectual Property, Inc
+# Copyright 2016-2017 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,27 +14,54 @@
# limitations under the License.
#
# What this is: Deployment test for the VES agent and collector based
-# upon the Tacker Hello World blueprint
+# upon the Tacker Hello World blueprint, designed as a manual demo of the VES
+# concept and integration with the Barometer project collectd agent. Typical
+# demo procedure is to execute the following actions from the OPNFV jumphost
+# or some host with access to the OpenStack controller (see below for details):
+# setup: install Tacker in a docker container. Note: only needs to be done
+# once per session, and can be reused across OPNFV VES and Models tests,
+# i.e. you can start another test at the "start" step below.
+# start: install blueprint and start the VNF, including the app (load-balanced
+# web server) and VES agents running on the VMs. Installs the VES
+# monitor code but does not start the monitor (see below).
+# start_collectd: start the collectd daemon on bare metal hypervisor hosts
+# monitor: start the VES monitor, typically run in a second shell session.
+# pause: pause the app at one of the web server VDUs (VDU1 or VDU2)
+# stop: stop the VNF and uninstall the blueprint
+#   stop_collectd: stop the collectd daemon on bare metal hypervisor hosts
+# clean: remove the tacker container and service (if desired, when done)
#
# Status: this is a work in progress, under test.
#
# How to use:
# $ git clone https://gerrit.opnfv.org/gerrit/ves
# $ cd ves/tests
-# $ bash vHello_VES.sh [setup|start|run|test|stop|clean] [VDU1|VDU2|VDU3]
-# [monitor|traffic|pause|nic]
-# setup: setup test environment
-# start: install blueprint and run test
-# run: setup test environment and run test
-# test: run test tools/scenario - see below
-# stop: stop test and uninstall blueprint
-# clean: cleanup after test
-# Test:
+# $ bash vHello_VES.sh setup <openrc> [branch]
+# setup: setup test environment
+# <openrc>: location of OpenStack openrc file
+# branch: OpenStack branch to install (default: master)
+# $ bash vHello_VES.sh start
+# start: install blueprint and run test
+# $ bash vHello_VES.sh start_collectd|stop_collectd <ip> <user> <monitor_ip>
+# start_collectd: install and start collectd daemon on hypervisor
+# stop_collectd: stop and uninstall collectd daemon on hypervisor
+# <ip>: hypervisor ip
+# <user>: username on hypervisor hosts, for ssh (user must be setup for
+# key-based auth on the hosts)
+# $ bash vHello_VES.sh monitor
# monitor: attach to the collector VM and run the VES Monitor
+# $ bash vHello_VES.sh traffic
# traffic: generate some traffic
+# $ bash vHello_VES.sh pause VDU1|VDU2
# pause: pause the VNF (web server) for a minute to generate a state change
-# VDU1|VDU2
-# nic: timed ifdown/ifup to generate a NIC fault report (not yet implemented)
+# VDU1: Pause VDU1
+# VDU2: Pause VDU2
+# $ bash vHello_VES.sh stop
+# stop: stop test and uninstall blueprint
+# $ bash vHello_VES.sh clean <hpvuser> <hpvpw>
+# clean: cleanup after test
+# <hpvuser>: username on hypervisor
+# <hpvpw>: password on hypervisor
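+#
+# Example demo session (illustrative; assumes an OPNFV jumphost with a working
+# openrc at ~/admin-openrc.sh and key-based ssh to the hypervisor):
+#   $ bash vHello_VES.sh setup ~/admin-openrc.sh
+#   $ bash vHello_VES.sh start
+#   $ bash vHello_VES.sh start_collectd <hypervisor-ip> <user> <monitor-ip>
+#   $ bash vHello_VES.sh monitor        # in a second shell session
+#   $ bash vHello_VES.sh traffic
+#   $ bash vHello_VES.sh pause VDU1
+#   $ bash vHello_VES.sh stop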
trap 'fail' ERR
@@ -48,13 +75,21 @@ fail() {
exit 1
}
+assert() {
+ if [[ $2 == true ]]; then echo "$0 test assertion passed: $1"
+ else
+ echo "$0 test assertion failed: $1"
+ fail
+ fi
+}
+
get_floating_net () {
network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
for id in ${network_ids[@]}; do
[[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
done
if [[ $FLOATING_NETWORK_ID ]]; then
- FLOATING_NETWORK_NAME=$(openstack network show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
+ FLOATING_NETWORK_NAME=$(neutron net-show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
else
echo "$0: $(date) Floating network not found"
exit 1
@@ -64,8 +99,7 @@ get_floating_net () {
try () {
count=$1
$3
- while [[ $? -eq 1 && $count -gt 0 ]]
- do
+ while [[ $? == 1 && $count > 0 ]]; do
sleep $2
let count=$count-1
$3
@@ -74,255 +108,289 @@ try () {
}
setup () {
- echo "$0: $(date) Started"
- echo "$0: $(date) Setup temp test folder /tmp/tacker and copy this script there"
- mkdir -p /tmp/tacker
- chmod 777 /tmp/tacker/
- cp $0 /tmp/tacker/.
- chmod 755 /tmp/tacker/*.sh
+ trap 'fail' ERR
+
+ echo "$0: $(date) Setup shared test folder /opt/tacker"
+ if [ -d /opt/tacker ]; then sudo rm -rf /opt/tacker; fi
+ sudo mkdir -p /opt/tacker
+ sudo chown $USER /opt/tacker
+ chmod 777 /opt/tacker/
+
+ echo "$0: $(date) copy test script and openrc to /opt/tacker"
+ cp $0 /opt/tacker/.
+ cp $1 /opt/tacker/admin-openrc.sh
+
+ source /opt/tacker/admin-openrc.sh
+ chmod 755 /opt/tacker/*.sh
echo "$0: $(date) tacker-setup part 1"
- wget https://git.opnfv.org/cgit/models/plain/tests/utils/tacker-setup.sh -O /tmp/tacker/tacker-setup.sh
- bash /tmp/tacker/tacker-setup.sh tacker-cli init
+ bash utils/tacker-setup.sh init
+ if [ $? -eq 1 ]; then fail; fi
echo "$0: $(date) tacker-setup part 2"
- CONTAINER=$(sudo docker ps -l | awk "/tacker/ { print \$1 }")
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
if [ "$dist" == "Ubuntu" ]; then
- echo "$0: $(date) JOID workaround for Colorado - enable ML2 port security"
- juju set neutron-api enable-ml2-port-security=true
-
echo "$0: $(date) Execute tacker-setup.sh in the container"
- sudo docker exec -it $CONTAINER /bin/bash /tmp/tacker/tacker-setup.sh tacker-cli setup
+ sudo docker exec -it tacker /bin/bash /opt/tacker/tacker-setup.sh setup $2
else
- echo "$0: $(date) Copy private key to the container (needed for later install steps)"
- cp ~/.ssh/id_rsa /tmp/tacker/id_rsa
echo "$0: $(date) Execute tacker-setup.sh in the container"
- sudo docker exec -i -t $CONTAINER /bin/bash /tmp/tacker/tacker-setup.sh tacker-cli setup
+ sudo docker exec -i -t tacker /bin/bash /opt/tacker/tacker-setup.sh setup $2
fi
- echo "$0: $(date) reset blueprints folder"
- if [[ -d /tmp/tacker/blueprints/tosca-vnfd-hello-ves ]]; then rm -rf /tmp/tacker/blueprints/tosca-vnfd-hello-ves; fi
- mkdir -p /tmp/tacker/blueprints/tosca-vnfd-hello-ves
+ assert "models-tacker-001 (Tacker installation in a docker container on the jumphost)" true
+}
- echo "$0: $(date) copy tosca-vnfd-hello-ves to blueprints folder"
- cp -r blueprints/tosca-vnfd-hello-ves /tmp/tacker/blueprints
-
- # Following two steps are in testing still. The guestfish step needs work.
-
- # echo "$0: $(date) Create Nova key pair"
- # mkdir -p ~/.ssh
- # nova keypair-delete vHello
- # nova keypair-add vHello > /tmp/tacker/vHello.pem
- # chmod 600 /tmp/tacker/vHello.pem
- # pubkey=$(nova keypair-show vHello | grep "Public key:" | sed -- 's/Public key: //g')
- # nova keypair-show vHello | grep "Public key:" | sed -- 's/Public key: //g' >/tmp/tacker/vHello.pub
-
- echo "$0: $(date) Inject key into xenial server image"
- # wget http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- # sudo yum install -y libguestfs-tools
- # guestfish <<EOF
-#add xenial-server-cloudimg-amd64-disk1.img
-#run
-#mount /dev/sda1 /
-#mkdir /home/ubuntu
-#mkdir /home/ubuntu/.ssh
-#cat <<EOM >/home/ubuntu/.ssh/authorized_keys
-#$pubkey
-#EOM
-#exit
-#chown -R ubuntu /home/ubuntu
-#EOF
-
- # Using pre-key-injected image for now, vHello.pem as provided in the blueprint
- if [ ! -f /tmp/xenial-server-cloudimg-amd64-disk1.img ]; then
- wget -O /tmp/xenial-server-cloudimg-amd64-disk1.img http://artifacts.opnfv.org/models/images/xenial-server-cloudimg-amd64-disk1.img
- fi
- cp blueprints/tosca-vnfd-hello-ves/vHello.pem /tmp/tacker
- chmod 600 /tmp/tacker/vHello.pem
+say_hello() {
+ echo "$0: $(date) Testing $1"
+ pass=false
+ count=10
+ while [[ $count > 0 && $pass != true ]]
+ do
+ sleep 30
+ let count=$count-1
+ if [[ $(curl $1 | grep -c "Hello World") > 0 ]]; then
+ echo "$0: $(date) Hello World found at $1"
+ pass=true
+ fi
+ done
+ if [[ $pass != true ]]; then fail; fi
+}
- echo "$0: $(date) setup OpenStack CLI environment"
- source /tmp/tacker/admin-openrc.sh
+copy_blueprint() {
+ echo "$0: $(date) copy test script to /opt/tacker"
+ cp $0 /opt/tacker/.
- echo "$0: $(date) Setup image_id"
- image_id=$(openstack image list | awk "/ models-xenial-server / { print \$2 }")
- if [[ -z "$image_id" ]]; then glance --os-image-api-version 1 image-create --name models-xenial-server --disk-format qcow2 --file /tmp/xenial-server-cloudimg-amd64-disk1.img --container-format bare; fi
+ echo "$0: $(date) reset blueprints folder"
+ if [[ -d /opt/tacker/blueprints/tosca-vnfd-hello-ves ]]; then
+ rm -rf /opt/tacker/blueprints/tosca-vnfd-hello-ves
+ fi
- echo "$0: $(date) Completed"
+ echo "$0: $(date) copy tosca-vnfd-hello-ves to blueprints folder"
+ cp -r blueprints/tosca-vnfd-hello-ves /opt/tacker/blueprints/tosca-vnfd-hello-ves
}
start() {
- echo "$0: $(date) Started"
+# Disable trap for now, need to test to ensure premature fail does not occur
+# trap 'fail' ERR
+
echo "$0: $(date) setup OpenStack CLI environment"
- source /tmp/tacker/admin-openrc.sh
+ source /opt/tacker/admin-openrc.sh
+
+ echo "$0: $(date) Create Nova key pair"
+ if [[ -f /opt/tacker/vHello ]]; then rm /opt/tacker/vHello; fi
+ ssh-keygen -t rsa -N "" -f /opt/tacker/vHello -C ubuntu@vHello
+ chmod 600 /opt/tacker/vHello
+ openstack keypair create --public-key /opt/tacker/vHello.pub vHello
+ assert "models-nova-001 (Keypair creation)" true
+
+ echo "$0: $(date) Inject public key into blueprint"
+ pubkey=$(cat /opt/tacker/vHello.pub)
+ sed -i -- "s~<pubkey>~$pubkey~" /opt/tacker/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
+
+ vdus="VDU1 VDU2 VDU3 VDU4"
+ vdui="1 2 3 4"
+ vnf_vdui="1 2 3"
+ declare -a vdu_id=()
+ declare -a vdu_ip=()
+ declare -a vdu_url=()
+
+ # Setup for workarounds
+ echo "$0: $(date) allocate floating IPs"
+ get_floating_net
+ for i in $vdui; do
+ vdu_ip[$i]=$(nova floating-ip-create $FLOATING_NETWORK_NAME | awk "/$FLOATING_NETWORK_NAME/ { print \$4 }")
+ echo "$0: $(date) Pre-allocated ${vdu_ip[$i]} to VDU$i"
+ done
+
+ echo "$0: $(date) Inject web server floating IPs into LB code in blueprint"
+ sed -i -- "s/<vdu1_ip>/${vdu_ip[1]}/" /opt/tacker/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
+ sed -i -- "s/<vdu2_ip>/${vdu_ip[2]}/" /opt/tacker/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
+ # End setup for workarounds
echo "$0: $(date) create VNFD"
- cd /tmp/tacker/blueprints/tosca-vnfd-hello-ves
- tacker vnfd-create --vnfd-file blueprint.yaml --name hello-ves
- if [ $? -eq 1 ]; then fail; fi
+ cd /opt/tacker/blueprints/tosca-vnfd-hello-ves
+ # newton: NAME (was "--name") is now a positional parameter
+ tacker vnfd-create --vnfd-file blueprint.yaml hello-ves
+ if [[ $? -eq 0 ]]; then
+ assert "models-tacker-002 (VNFD creation)" true
+ else
+ assert "models-tacker-002 (VNFD creation)" false
+ fi
echo "$0: $(date) create VNF"
- tacker vnf-create --vnfd-name hello-ves --name hello-ves
+ # newton: NAME (was "--name") is now a positional parameter
+ tacker vnf-create --vnfd-name hello-ves hello-ves
if [ $? -eq 1 ]; then fail; fi
echo "$0: $(date) wait for hello-ves to go ACTIVE"
active=""
- while [[ -z $active ]]
+ count=24
+ while [[ -z $active && $count -gt 0 ]]
do
active=$(tacker vnf-show hello-ves | grep ACTIVE)
- if [ "$(tacker vnf-show hello-ves | grep -c ERROR)" == "1" ]; then
+ if [[ $(tacker vnf-show hello-ves | grep -c ERROR) > 0 ]]; then
echo "$0: $(date) hello-ves VNF creation failed with state ERROR"
- fail
+ assert "models-tacker-002 (VNF creation)" false
fi
- sleep 10
+ let count=$count-1
+ sleep 30
+ echo "$0: $(date) wait for hello-ves to go ACTIVE"
done
+ if [[ $count == 0 ]]; then
+ echo "$0: $(date) hello-ves VNF creation failed - timed out"
+ assert "models-tacker-002 (VNF creation)" false
+ fi
- echo "$0: $(date) directly set port security on ports (bug/unsupported in Mitaka Tacker?)"
- vdus="VDU1 VDU2 VDU3 VDU4"
- vdui="1 2 3 4"
- declare -a vdu_id=()
- declare -a vdu_ip=()
- declare -a vdu_url=()
- HEAT_ID=$(tacker vnf-show hello-ves | awk "/instance_id/ { print \$4 }")
- vdu_id[1]=$(openstack stack resource list $HEAT_ID | awk "/VDU1 / { print \$4 }")
- vdu_id[2]=$(openstack stack resource list $HEAT_ID | awk "/VDU2 / { print \$4 }")
- vdu_id[3]=$(openstack stack resource list $HEAT_ID | awk "/VDU3 / { print \$4 }")
- vdu_id[4]=$(openstack stack resource list $HEAT_ID | awk "/VDU4 / { print \$4 }")
-
-cat >/tmp/grep <<EOF
-${vdu_id[1]}
-${vdu_id[2]}
-${vdu_id[3]}
-${vdu_id[4]}
-EOF
- id=($(neutron port-list|grep -v "+"|grep -v name|awk '{print $2}'))
- for id in ${id[@]}; do
- if [[ $(neutron port-show $id | grep -f /tmp/grep) ]]; then
- neutron port-update ${id} --port-security-enabled=True
- fi
+ # Setup for workarounds
+ echo "$0: $(date) directly set port security on ports (unsupported in Mitaka Tacker)"
+ # Alternate method
+ # HEAT_ID=$(tacker vnf-show hello-ves | awk "/instance_id/ { print \$4 }")
+ # SERVER_ID=$(openstack stack resource list $HEAT_ID | awk "/VDU1 / { print \$4 }")
+ for vdu in $vdus; do
+ echo "$0: $(date) Setting port security on $vdu"
+ SERVER_ID=$(openstack server list | awk "/$vdu/ { print \$2 }")
+ id=($(neutron port-list|grep -v "+"|grep -v name|awk '{print $2}'))
+ for id in ${id[@]}; do
+ if [[ $(neutron port-show $id|grep $SERVER_ID) ]]; then neutron port-update ${id} --port-security-enabled=True; fi
+ done
done
echo "$0: $(date) directly assign security group (unsupported in Mitaka Tacker)"
- if [[ $(openstack security group list | awk "/ vHello / { print \$2 }") ]]; then openstack security group delete vHello; fi
- openstack security group create vHello
- openstack security group rule create --ingress --protocol TCP --dst-port 22:22 vHello
- openstack security group rule create --ingress --protocol TCP --dst-port 80:80 vHello
+ if [[ $(neutron security-group-list | awk "/ vHello / { print \$2 }") ]]; then neutron security-group-delete vHello; fi
+ neutron security-group-create vHello
+ neutron security-group-rule-create --direction ingress --protocol TCP --port-range-min 22 --port-range-max 22 vHello
+ neutron security-group-rule-create --direction ingress --protocol TCP --port-range-min 80 --port-range-max 80 vHello
+ neutron security-group-rule-create --direction ingress --protocol TCP --port-range-min 30000 --port-range-max 30000 vHello
for i in $vdui; do
+ vdu_id[$i]=$(openstack server list | awk "/VDU$i/ { print \$2 }")
+ echo "$0: $(date) Assigning security groups to VDU$i (${vdu_id[$i]})"
openstack server add security group ${vdu_id[$i]} vHello
openstack server add security group ${vdu_id[$i]} default
done
echo "$0: $(date) associate floating IPs"
- get_floating_net
for i in $vdui; do
- vdu_ip[$i]=$(openstack floating ip create $FLOATING_NETWORK_NAME | awk "/floating_ip_address/ { print \$4 }")
nova floating-ip-associate ${vdu_id[$i]} ${vdu_ip[$i]}
done
echo "$0: $(date) get web server addresses"
vdu_url[1]="http://${vdu_ip[1]}"
- vdu_url[2]="http://${vdu_ip[3]}"
+ vdu_url[2]="http://${vdu_ip[2]}"
vdu_url[3]="http://${vdu_ip[3]}"
vdu_url[4]="http://${vdu_ip[4]}:30000/eventListener/v1"
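  # VDU4 runs the VES monitor; port 30000 with path /eventListener/v1 is the
  # collector's default VES event listener API endpoint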
- if [[ -f /tmp/tacker/id_rsa ]]; then
- echo "$0: $(date) setup private key for ssh to hypervisors"
- cp -p /tmp/tacker/id_rsa ~/.ssh/id_rsa
- chown root ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- fi
-
- echo "$0: $(date) start collectd agent on bare metal hypervisor hosts"
- hosts=($(openstack hypervisor list | grep -v Hostname | grep -v "+" | awk '{print $4}'))
- for host in ${hosts[@]}; do
- ip=$(openstack hypervisor show $host | grep host_ip | awk '{print $4}')
- if [[ "$OS_CLOUDNAME" == "overcloud" ]]; then
- u="heat-admin"
- p=""
- else
- u="ubuntu"
- p=":ubuntu"
- fi
- scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/start.sh $u@$ip:/home/$u/start.sh
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $u$p@$ip \
- "nohup bash /home/$u/start.sh collectd $ip ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
+ apt-get install -y curl
+
+ echo "$0: $(date) wait for VNF web service to be ready"
+ count=0
+ resp=$(curl http://${vdu_ip[1]})
+ echo $resp
+ while [[ $count < 10 && "$resp" == "" ]]; do
+ echo "$0: $(date) waiting for HTTP response from LB"
+ sleep 60
+ let count=$count+1
+ resp=$(curl http://${vdu_ip[3]})
+ echo $resp
done
-
- echo "$0: $(date) wait 30 seconds for server SSH to be available"
- sleep 30
-
- echo "$0: $(date) Copy startup script to the VMs"
- for i in $vdui; do
- ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[$i]} "sudo chown ubuntu /home/ubuntu"
- scp -i /tmp/tacker/vHello.pem -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/start.sh ubuntu@${vdu_ip[$i]}:/home/ubuntu/start.sh
+
+ echo "$0: $(date) verify vHello server is running at each web server and via the LB"
+ say_hello http://${vdu_ip[1]}
+ say_hello http://${vdu_ip[2]}
+ say_hello http://${vdu_ip[3]}
+
+ assert "models-vhello-001 (vHello VNF creation)" true
+ assert "models-tacker-003 (VNF creation)" true
+ assert "models-tacker-vnfd-002 (artifacts creation)" true
+ assert "models-tacker-vnfd-003 (user_data creation)" true
+
+ echo "$0: $(date) Execute agent startup script in the VNF VMs"
+ for i in $vnf_vdui; do
+ ssh -i /opt/tacker/vHello -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[$i]} "sudo chown ubuntu /home/ubuntu"
+ scp -i /opt/tacker/vHello -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-hello-ves/start.sh ubuntu@${vdu_ip[$i]}:/home/ubuntu/start.sh
+# ssh -i /opt/tacker/vHello -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+# ubuntu@${vdu_ip[$i]} "nohup bash /home/ubuntu/start.sh agent ${vdu_id[$i]} ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
done
- echo "$0: $(date) start vHello webserver in VDU1 at ${vdu_ip[1]}"
- ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@${vdu_ip[1]} "nohup bash /home/ubuntu/start.sh webserver ${vdu_id[1]} ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
-
- echo "$0: $(date) start vHello webserver in VDU2 at ${vdu_ip[2]}"
- ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@${vdu_ip[2]} "nohup bash /home/ubuntu/start.sh webserver ${vdu_id[2]} ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
-
- echo "$0: $(date) start LB in VDU3 at ${vdu_ip[3]}"
- ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@${vdu_ip[3]} "nohup bash /home/ubuntu/start.sh lb ${vdu_id[3]} ${vdu_ip[4]} hello world ${vdu_ip[1]} ${vdu_ip[2]} > /dev/null 2>&1 &"
-
- echo "$0: $(date) start Monitor in VDU4 at ${vdu_ip[4]}"
- # Replacing the default collector with monitor.py which has processing logic as well
- scp -i /tmp/tacker/vHello.pem -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/monitor.py ubuntu@${vdu_ip[4]}:/home/ubuntu/monitor.py
- ssh -i /tmp/tacker/vHello.pem -t -t -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[4]} "bash /home/ubuntu/start.sh monitor ${vdu_id[1]} ${vdu_id[2]} ${vdu_id[3]} hello world"
-
-# echo "$0: $(date) verify vHello server is running at http://${vdu_ip[3]}"
-# apt-get install -y curl
-# count=10
-# while [[ $count -gt 0 ]]
-# do
-# sleep 60
-# let count=$count-1
-# if [[ $(curl http://${vdu_ip[3]} | grep -c "Hello World") > 0 ]]; then pass; fi
-# done
-# fail
+ echo "$0: $(date) setup Monitor in VDU4 at ${vdu_ip[4]}"
+ scp -i /opt/tacker/vHello -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-hello-ves/start.sh ubuntu@${vdu_ip[4]}:/home/ubuntu/start.sh
+ scp -i /opt/tacker/vHello -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-hello-ves/monitor.py ubuntu@${vdu_ip[4]}:/home/ubuntu/monitor.py
+ ssh -i /opt/tacker/vHello -t -t -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[4]} "bash /home/ubuntu/start.sh monitor ${vdu_id[1]} ${vdu_id[2]} ${vdu_id[3]} hello world"
}
stop() {
+ trap 'fail' ERR
+
echo "$0: $(date) setup OpenStack CLI environment"
- source /tmp/tacker/admin-openrc.sh
+ source /opt/tacker/admin-openrc.sh
+
+ if [[ "$(tacker vnf-list|grep hello-ves|awk '{print $2}')" != '' ]]; then
+ echo "$0: $(date) uninstall vHello blueprint via CLI"
+ try 12 10 "tacker vnf-delete hello-ves"
+ # It can take some time to delete a VNF - thus wait 2 minutes
+ count=12
+ while [[ $count > 0 && "$(tacker vnf-list|grep hello-ves|awk '{print $2}')" != '' ]]; do
+ echo "$0: $(date) waiting for hello-ves VNF delete to complete"
+ sleep 10
+ let count=$count-1
+ done
+ if [[ "$(tacker vnf-list|grep hello-ves|awk '{print $2}')" == '' ]]; then
+ assert "models-tacker-004 (VNF deletion)" true
+ else
+ assert "models-tacker-004 (VNF deletion)" false
+ fi
+ fi
+
+ # It can take some time to delete a VNFD - thus wait 2 minutes
+ if [[ "$(tacker vnfd-list|grep hello-ves|awk '{print $2}')" != '' ]]; then
+ echo "$0: $(date) trying to delete the hello-ves VNFD"
+ try 12 10 "tacker vnfd-delete hello-ves"
+ if [[ "$(tacker vnfd-list|grep hello-ves|awk '{print $2}')" == '' ]]; then
+ assert "models-tacker-005 (VNFD deletion)" true
+ else
+ assert "models-tacker-005 (VNFD deletion)" false
+ fi
+ fi
+
+# This part will apply for tests that dynamically create the VDU base image
+# iid=($(openstack image list|grep VNFImage|awk '{print $2}')); for id in ${iid[@]}; do openstack image delete ${id}; done
+# if [[ "$(openstack image list|grep VNFImage|awk '{print $2}')" == '' ]]; then
+# assert "models-tacker-vnfd-004 (artifacts deletion)" true
+# else
+# assert "models-tacker-vnfd-004 (artifacts deletion)" false
+# fi
- echo "$0: $(date) uninstall vHello blueprint via CLI"
- vid=($(tacker vnf-list|grep hello-ves|awk '{print $2}')); for id in ${vid[@]}; do tacker vnf-delete ${id}; done
- vid=($(tacker vnfd-list|grep hello-ves|awk '{print $2}')); for id in ${vid[@]}; do tacker vnfd-delete ${id}; done
-# Need to remove the floatingip deletion or make it specific to the vHello VM
+ # Cleanup for workarounds
fip=($(neutron floatingip-list|grep -v "+"|grep -v id|awk '{print $2}')); for id in ${fip[@]}; do neutron floatingip-delete ${id}; done
sg=($(openstack security group list|grep vHello|awk '{print $2}'))
- for id in ${sg[@]}; do try 10 5 "openstack security group delete ${id}"; done
-
+ for id in ${sg[@]}; do try 5 5 "openstack security group delete ${id}"; done
+ kid=($(openstack keypair list|grep vHello|awk '{print $2}')); for id in ${kid[@]}; do openstack keypair delete ${id}; done
+}
+
+start_collectd() {
+ # NOTE: ensure hypervisor hostname is resolvable e.g. thru /etc/hosts
+ echo "$0: $(date) update start.sh script in case it changed"
+ cp -r blueprints/tosca-vnfd-hello-ves/start.sh /opt/tacker/blueprints/tosca-vnfd-hello-ves
+ echo "$0: $(date) start collectd agent on bare metal hypervisor host"
+ get_vdu_ip VDU4
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-hello-ves/start.sh $2@$1:/home/$2/start.sh
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $2@$1 \
+ "nohup bash /home/$2/start.sh collectd $1 $ip hello world > /dev/null 2>&1 &"
+}
+
+stop_collectd() {
echo "$0: $(date) remove collectd agent on bare metal hypervisor hosts"
- hosts=($(openstack hypervisor list | grep -v Hostname | grep -v "+" | awk '{print $4}'))
- for host in ${hosts[@]}; do
- ip=$(openstack hypervisor show $host | grep host_ip | awk '{print $4}')
- if [[ "$OS_CLOUDNAME" == "overcloud" ]]; then
- u="heat-admin"
- p=""
- else
- u="ubuntu"
- p=":ubuntu"
- fi
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $u$p@$ip <<'EOF'
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $2@$1 <<'EOF'
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
if [ "$dist" == "Ubuntu" ]; then
sudo service collectd stop
sudo apt-get remove -y collectd
- sudo rm /etc/collectd/collectd.conf
else
sudo service collectd stop
- sudo yum remove -y collectd
- sudo rm /etc/collectd.conf
+ sudo yum remove -y collectd collectd-virt
fi
-rm -rf $HOME/OpenStackBarcelonaDemo
+rm -rf $HOME/barometer
EOF
- done
}
#
@@ -330,7 +398,7 @@ EOF
#
get_vdu_ip () {
- source /tmp/tacker/admin-openrc.sh
+ source /opt/tacker/admin-openrc.sh
echo "$0: $(date) find VM IP for $1"
ip=$(openstack server list | awk "/$1/ { print \$10 }")
@@ -339,10 +407,10 @@ get_vdu_ip () {
monitor () {
echo "$0: $(date) Start the VES Monitor in VDU4 - Stop first if running"
get_vdu_ip VDU4
- sudo cp /tmp/tacker/vHello.pem /tmp/vHello.pem
- sudo chown $USER:$USER /tmp/vHello.pem
- chmod 600 /tmp/vHello.pem
- ssh -t -t -i /tmp/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip << 'EOF'
+ sudo cp /opt/tacker/vHello /tmp/vHello
+ sudo chown $USER:$USER /tmp/vHello
+ chmod 600 /tmp/vHello
+ ssh -t -t -i /tmp/vHello -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip << 'EOF'
sudo kill $(ps -ef | grep evel-test-collector | awk '{print $2}')
python monitor.py --config evel-test-collector/config/collector.conf --section default
EOF
@@ -364,67 +432,130 @@ traffic () {
pause () {
echo "$0: $(date) Pause the VNF (web server) in $1 for 30 seconds to generate a state change fault report (Stopped)"
get_vdu_ip $1
- ssh -i /tmp/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip "sudo docker pause vHello"
+ ssh -i /tmp/vHello -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip "sudo docker pause vHello"
sleep 20
echo "$0: $(date) Unpausing the VNF to generate a state change fault report (Started)"
- ssh -i /tmp/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip "sudo docker unpause vHello"
+ ssh -i /tmp/vHello -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip "sudo docker unpause vHello"
}
forward_to_container () {
- echo "$0: $(date) pass $1 command to this script in the tacker container"
- CONTAINER=$(sudo docker ps -a | awk "/tacker/ { print \$1 }")
- sudo docker exec $CONTAINER /bin/bash /tmp/tacker/vHello_VES.sh $1 $1
+ echo "$0: $(date) pass $1 command to vHello.sh in tacker container"
+ sudo docker exec tacker /bin/bash /opt/tacker/vHello_VES.sh $1
if [ $? -eq 1 ]; then fail; fi
}
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
case "$1" in
setup)
- setup
+ setup $2 $3
+ if [ $? -eq 1 ]; then fail; fi
pass
;;
run)
- setup
+ setup $2 $3
+ copy_blueprint
forward_to_container start
+ if [ $? -eq 1 ]; then fail; fi
pass
;;
- start|stop)
- if [[ $# -eq 1 ]]; then forward_to_container $1
+ start)
+ if [[ -f /.dockerenv ]]; then
+ start
else
- # running inside the tacker container, ready to go
- $1
+ copy_blueprint
+ forward_to_container start
fi
pass
;;
+ start_collectd)
+ start_collectd $2 $3 $4
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ stop_collectd)
+ stop_collectd $2 $3
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ monitor)
+ monitor
+ pass
+ ;;
traffic)
- $1
+ traffic
pass
;;
- test)
- $2 $3
+ pause)
+ pause $2
+ ;;
+ stop)
+ if [[ -f /.dockerenv ]]; then
+ stop
+ else
+ forward_to_container stop
+ fi
+ if [ $? -eq 1 ]; then fail; fi
+ pass
;;
clean)
+    stop_collectd $2 $3
echo "$0: $(date) Uninstall Tacker and test environment"
- bash /tmp/tacker/tacker-setup.sh $1 clean
+ sudo docker exec -it tacker /bin/bash /opt/tacker/tacker-setup.sh clean
+ sudo docker stop tacker
+ sudo docker rm -v tacker
+ sudo rm -rf /opt/tacker
pass
;;
*)
- echo "usage: bash vHello_VES.sh [setup|start|run|clean]"
- echo "setup: setup test environment"
- echo "start: install blueprint and run test"
- echo "run: setup test environment and run test"
- echo "stop: stop test and uninstall blueprint"
- echo "clean: cleanup after test"
- echo "usage: bash vHello_VES.sh [setup|start|run|test|stop|clean] [monitor|traffic|pause|nic]"
- echo "setup: setup test environment"
- echo "start: install blueprint and run test"
- echo "run: setup test environment and run test"
- echo "test: run test tools/scenario - see below"
- echo "stop: stop test and uninstall blueprint"
- echo "clean: cleanup after test"
- echo "Test:"
- echo " monitor: attach to the collector VM and run the VES Monitor"
- echo " traffic: generate some traffic"
- echo " pause: pause the VNF (web server) for a minute to generate a state change"
- fail
+ cat <<EOF
+ What this is: Deployment test for the VES agent and collector based
+ upon the Tacker Hello World blueprint, designed as a manual demo of the VES
+ concept and integration with the Barometer project collectd agent. Typical
+ demo procedure is to execute the following actions from the OPNFV jumphost
+   or some host with access to the OpenStack controller (see below for details):
+ setup: install Tacker in a docker container. Note: only needs to be done
+ once per session, and can be reused across OPNFV VES and Models tests,
+ i.e. you can start another test at the "start" step below.
+ start: install blueprint and start the VNF, including the app (load-balanced
+ web server) and VES agents running on the VMs. Installs the VES
+ monitor code but does not start the monitor (see below).
+ start_collectd: start the collectd daemon on bare metal hypervisor hosts
+ monitor: start the VES monitor, typically run in a second shell session.
+ pause: pause the app at one of the web server VDUs (VDU1 or VDU2)
+ stop: stop the VNF and uninstall the blueprint
+   stop_collectd: stop the collectd daemon on bare metal hypervisor hosts
+ clean: remove the tacker container and service (if desired, when done)
+
+ How to use:
+ $ git clone https://gerrit.opnfv.org/gerrit/ves
+ $ cd ves/tests
+   $ bash vHello_VES.sh setup <openrc> [branch]
+ setup: setup test environment
+ <openrc>: location of OpenStack openrc file
+ branch: OpenStack branch to install (default: master)
+ $ bash vHello_VES.sh start
+ start: install blueprint and run test
+ $ bash vHello_VES.sh start_collectd|stop_collectd <ip> <user> <monitor_ip>
+ start_collectd: install and start collectd daemon on hypervisor
+ stop_collectd: stop and uninstall collectd daemon on hypervisor
+ <ip>: hypervisor ip
+ <user>: username on hypervisor hosts, for ssh (user must be setup for
+ key-based auth on the hosts)
+ $ bash vHello_VES.sh monitor
+ monitor: attach to the collector VM and run the VES Monitor
+ $ bash vHello_VES.sh traffic
+ traffic: generate some traffic
+ $ bash vHello_VES.sh pause VDU1|VDU2
+ pause: pause the VNF (web server) for a minute to generate a state change
+ VDU1: Pause VDU1
+ VDU2: Pause VDU2
+ $ bash vHello_VES.sh stop
+ stop: stop test and uninstall blueprint
+ $ bash vHello_VES.sh clean <user>
+ clean: cleanup after test
+ <user>: username on hypervisor hosts, for ssh (user must be setup for
+ key-based auth on the hosts)
+EOF
esac
diff --git a/tests/vHello_VES_3Node.sh b/tests/vHello_VES_3Node.sh
new file mode 100644
index 0000000..cc85ebc
--- /dev/null
+++ b/tests/vHello_VES_3Node.sh
@@ -0,0 +1,430 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Deployment test for the VES agent and collector based
+# upon the Tacker Hello World blueprint
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ git clone https://gerrit.opnfv.org/gerrit/ves
+# $ cd ves/tests
+# $ bash vHello_VES_3Node.sh [setup|start|run|test|stop|clean]
+#   [monitor|traffic|pause|nic] [VDU1|VDU2|VDU3]
+# setup: setup test environment
+# start: install blueprint and run test
+# run: setup test environment and run test
+# test: run test tools/scenario - see below
+# stop: stop test and uninstall blueprint
+# clean: cleanup after test
+# Test:
+# monitor: attach to the collector VM and run the VES Monitor
+# traffic: generate some traffic
+# pause: pause the VNF (web server) for a minute to generate a state change
+# VDU1|VDU2
+# nic: timed ifdown/ifup to generate a NIC fault report (not yet implemented)
+
+trap 'fail' ERR
+
+pass() {
+ echo "$0: $(date) Hooray!"
+ exit 0
+}
+
+fail() {
+ echo "$0: $(date) Test Failed!"
+ exit 1
+}
+
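+# Sets FLOATING_NETWORK_ID and FLOATING_NETWORK_NAME to the external
+# (router:external=true) network; used below when allocating floating IPs
+# for the VDUs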
+get_floating_net () {
+ network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
+ for id in ${network_ids[@]}; do
+ [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
+ done
+ if [[ $FLOATING_NETWORK_ID ]]; then
+ FLOATING_NETWORK_NAME=$(openstack network show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
+ else
+ echo "$0: $(date) Floating network not found"
+ exit 1
+ fi
+}
+
+try () {
+ count=$1
+ $3
+ while [[ $? -eq 1 && $count -gt 0 ]]
+ do
+ sleep $2
+ let count=$count-1
+ $3
+ done
+ if [[ $count -eq 0 ]]; then echo "$0: $(date) Command \"$3\" was not successful after $1 tries"; fi
+}
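+# Usage example (as invoked in stop() below): retry a security group delete
+# every 5 seconds, up to 10 times:
+#   try 10 5 "openstack security group delete ${id}"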
+
+setup () {
+ echo "$0: $(date) Started"
+ echo "$0: $(date) Setup temp test folder /tmp/tacker and copy this script there"
+ mkdir -p /tmp/tacker
+ chmod 777 /tmp/tacker/
+ cp $0 /tmp/tacker/.
+ chmod 755 /tmp/tacker/*.sh
+
+ echo "$0: $(date) tacker-setup part 1"
+ wget https://git.opnfv.org/cgit/models/plain/tests/utils/tacker-setup.sh -O /tmp/tacker/tacker-setup.sh
+ bash /tmp/tacker/tacker-setup.sh tacker-cli init
+
+ echo "$0: $(date) tacker-setup part 2"
+ CONTAINER=$(sudo docker ps -l | awk "/tacker/ { print \$1 }")
+ dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+ if [ "$dist" == "Ubuntu" ]; then
+ echo "$0: $(date) JOID workaround for Colorado - enable ML2 port security"
+ juju set neutron-api enable-ml2-port-security=true
+
+ echo "$0: $(date) Execute tacker-setup.sh in the container"
+ sudo docker exec -it $CONTAINER /bin/bash /tmp/tacker/tacker-setup.sh tacker-cli setup
+ else
+ echo "$0: $(date) Copy private key to the container (needed for later install steps)"
+ cp ~/.ssh/id_rsa /tmp/tacker/id_rsa
+ echo "$0: $(date) Execute tacker-setup.sh in the container"
+ sudo docker exec -i -t $CONTAINER /bin/bash /tmp/tacker/tacker-setup.sh tacker-cli setup
+ fi
+
+ echo "$0: $(date) reset blueprints folder"
+ if [[ -d /tmp/tacker/blueprints/tosca-vnfd-hello-ves ]]; then rm -rf /tmp/tacker/blueprints/tosca-vnfd-hello-ves; fi
+ mkdir -p /tmp/tacker/blueprints/tosca-vnfd-hello-ves
+
+ echo "$0: $(date) copy tosca-vnfd-hello-ves to blueprints folder"
+ cp -r blueprints/tosca-vnfd-hello-ves /tmp/tacker/blueprints
+
+ # Following two steps are in testing still. The guestfish step needs work.
+
+ # echo "$0: $(date) Create Nova key pair"
+ # mkdir -p ~/.ssh
+ # nova keypair-delete vHello
+ # nova keypair-add vHello > /tmp/tacker/vHello.pem
+ # chmod 600 /tmp/tacker/vHello.pem
+ # pubkey=$(nova keypair-show vHello | grep "Public key:" | sed -- 's/Public key: //g')
+ # nova keypair-show vHello | grep "Public key:" | sed -- 's/Public key: //g' >/tmp/tacker/vHello.pub
+
+ echo "$0: $(date) Inject key into xenial server image"
+ # wget http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ # sudo yum install -y libguestfs-tools
+ # guestfish <<EOF
+#add xenial-server-cloudimg-amd64-disk1.img
+#run
+#mount /dev/sda1 /
+#mkdir /home/ubuntu
+#mkdir /home/ubuntu/.ssh
+#cat <<EOM >/home/ubuntu/.ssh/authorized_keys
+#$pubkey
+#EOM
+#exit
+#chown -R ubuntu /home/ubuntu
+#EOF
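+  # An untested alternative sketch: libguestfs' virt-customize could inject
+  # the key in one step (assumes libguestfs-tools is installed):
+  #   sudo virt-customize -a xenial-server-cloudimg-amd64-disk1.img \
+  #     --ssh-inject ubuntu:string:"$pubkey"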
+
+ # Using pre-key-injected image for now, vHello.pem as provided in the blueprint
+ if [ ! -f /tmp/xenial-server-cloudimg-amd64-disk1.img ]; then
+ wget -O /tmp/xenial-server-cloudimg-amd64-disk1.img http://artifacts.opnfv.org/models/images/xenial-server-cloudimg-amd64-disk1.img
+ fi
+ cp blueprints/tosca-vnfd-hello-ves/vHello.pem /tmp/tacker
+ chmod 600 /tmp/tacker/vHello.pem
+
+ echo "$0: $(date) setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: $(date) Setup image_id"
+ image_id=$(openstack image list | awk "/ models-xenial-server / { print \$2 }")
+ if [[ -z "$image_id" ]]; then glance --os-image-api-version 1 image-create --name models-xenial-server --disk-format qcow2 --file /tmp/xenial-server-cloudimg-amd64-disk1.img --container-format bare; fi
+
+ echo "$0: $(date) Completed"
+}
+
+start() {
+ echo "$0: $(date) Started"
+ echo "$0: $(date) setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: $(date) create VNFD"
+ cd /tmp/tacker/blueprints/tosca-vnfd-hello-ves
+ tacker vnfd-create --vnfd-file blueprint.yaml --name hello-ves
+ if [ $? -eq 1 ]; then fail; fi
+
+ echo "$0: $(date) create VNF"
+ tacker vnf-create --vnfd-name hello-ves --name hello-ves
+ if [ $? -eq 1 ]; then fail; fi
+
+ echo "$0: $(date) wait for hello-ves to go ACTIVE"
+ active=""
+ while [[ -z $active ]]
+ do
+ active=$(tacker vnf-show hello-ves | grep ACTIVE)
+ if [ "$(tacker vnf-show hello-ves | grep -c ERROR)" == "1" ]; then
+ echo "$0: $(date) hello-ves VNF creation failed with state ERROR"
+ fail
+ fi
+ sleep 10
+ done
+
+ echo "$0: $(date) directly set port security on ports (bug/unsupported in Mitaka Tacker?)"
+ vdus="VDU1 VDU2 VDU3 VDU4"
+ vdui="1 2 3 4"
+ declare -a vdu_id=()
+ declare -a vdu_ip=()
+ declare -a vdu_url=()
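+  # tacker vnf-show exposes the underlying Heat stack id (instance_id); the
+  # stack's resource list then maps each VDU name to its Nova server id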
+ HEAT_ID=$(tacker vnf-show hello-ves | awk "/instance_id/ { print \$4 }")
+ vdu_id[1]=$(openstack stack resource list $HEAT_ID | awk "/VDU1 / { print \$4 }")
+ vdu_id[2]=$(openstack stack resource list $HEAT_ID | awk "/VDU2 / { print \$4 }")
+ vdu_id[3]=$(openstack stack resource list $HEAT_ID | awk "/VDU3 / { print \$4 }")
+ vdu_id[4]=$(openstack stack resource list $HEAT_ID | awk "/VDU4 / { print \$4 }")
+
+cat >/tmp/grep <<EOF
+${vdu_id[1]}
+${vdu_id[2]}
+${vdu_id[3]}
+${vdu_id[4]}
+EOF
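+  # /tmp/grep now lists the four VDU server IDs; grep -f below matches any
+  # neutron port whose details (e.g. device_id) reference one of those servers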
+  ids=($(neutron port-list|grep -v "+"|grep -v name|awk '{print $2}'))
+  for id in ${ids[@]}; do
+ if [[ $(neutron port-show $id | grep -f /tmp/grep) ]]; then
+ neutron port-update ${id} --port-security-enabled=True
+ fi
+ done
+
+ echo "$0: $(date) directly assign security group (unsupported in Mitaka Tacker)"
+ if [[ $(openstack security group list | awk "/ vHello / { print \$2 }") ]]; then openstack security group delete vHello; fi
+ openstack security group create vHello
+ openstack security group rule create --ingress --protocol TCP --dst-port 22:22 vHello
+ openstack security group rule create --ingress --protocol TCP --dst-port 80:80 vHello
+ for i in $vdui; do
+ openstack server add security group ${vdu_id[$i]} vHello
+ openstack server add security group ${vdu_id[$i]} default
+ done
+
+ echo "$0: $(date) associate floating IPs"
+ get_floating_net
+ for i in $vdui; do
+ vdu_ip[$i]=$(openstack floating ip create $FLOATING_NETWORK_NAME | awk "/floating_ip_address/ { print \$4 }")
+ nova floating-ip-associate ${vdu_id[$i]} ${vdu_ip[$i]}
+ done
+
+ echo "$0: $(date) get web server addresses"
+ vdu_url[1]="http://${vdu_ip[1]}"
+  vdu_url[2]="http://${vdu_ip[2]}"
+ vdu_url[3]="http://${vdu_ip[3]}"
+ vdu_url[4]="http://${vdu_ip[4]}:30000/eventListener/v1"
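+  # vdu_url[4] is the VES collector's eventListener endpoint on VDU4 (port
+  # 30000), to which the VES agents and collectd instances post events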
+
+ if [[ -f /tmp/tacker/id_rsa ]]; then
+ echo "$0: $(date) setup private key for ssh to hypervisors"
+ cp -p /tmp/tacker/id_rsa ~/.ssh/id_rsa
+ chown root ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa
+ fi
+
+ echo "$0: $(date) start collectd agent on bare metal hypervisor hosts"
+ hosts=($(openstack hypervisor list | grep -v Hostname | grep -v "+" | awk '{print $4}'))
+ for host in ${hosts[@]}; do
+ ip=$(openstack hypervisor show $host | grep host_ip | awk '{print $4}')
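+    # OS_CLOUDNAME is "overcloud" on TripleO-based installs (e.g. Apex), where
+    # compute hosts are reached as heat-admin; otherwise assume JOID/ubuntu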
+ if [[ "$OS_CLOUDNAME" == "overcloud" ]]; then
+ u="heat-admin"
+ p=""
+ else
+ u="ubuntu"
+ p=":ubuntu"
+ fi
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/start.sh $u@$ip:/home/$u/start.sh
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $u$p@$ip \
+ "nohup bash /home/$u/start.sh collectd $ip ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
+ done
+
+ echo "$0: $(date) wait 30 seconds for server SSH to be available"
+ sleep 30
+
+ echo "$0: $(date) Copy startup script to the VMs"
+ for i in $vdui; do
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[$i]} "sudo chown ubuntu /home/ubuntu"
+ scp -i /tmp/tacker/vHello.pem -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/start.sh ubuntu@${vdu_ip[$i]}:/home/ubuntu/start.sh
+ done
+
+ echo "$0: $(date) start vHello webserver in VDU1 at ${vdu_ip[1]}"
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@${vdu_ip[1]} "nohup bash /home/ubuntu/start.sh webserver ${vdu_id[1]} ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
+
+ echo "$0: $(date) start vHello webserver in VDU2 at ${vdu_ip[2]}"
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@${vdu_ip[2]} "nohup bash /home/ubuntu/start.sh webserver ${vdu_id[2]} ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
+
+ echo "$0: $(date) start LB in VDU3 at ${vdu_ip[3]}"
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@${vdu_ip[3]} "nohup bash /home/ubuntu/start.sh lb ${vdu_id[3]} ${vdu_ip[4]} hello world ${vdu_ip[1]} ${vdu_ip[2]} > /dev/null 2>&1 &"
+
+ echo "$0: $(date) start Monitor in VDU4 at ${vdu_ip[4]}"
+ # Replacing the default collector with monitor.py which has processing logic as well
+ scp -i /tmp/tacker/vHello.pem -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/monitor.py ubuntu@${vdu_ip[4]}:/home/ubuntu/monitor.py
+ ssh -i /tmp/tacker/vHello.pem -t -t -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[4]} "bash /home/ubuntu/start.sh monitor ${vdu_id[1]} ${vdu_id[2]} ${vdu_id[3]} hello world"
+
+# echo "$0: $(date) verify vHello server is running at http://${vdu_ip[3]}"
+# apt-get install -y curl
+# count=10
+# while [[ $count -gt 0 ]]
+# do
+# sleep 60
+# let count=$count-1
+# if [[ $(curl http://${vdu_ip[3]} | grep -c "Hello World") > 0 ]]; then pass; fi
+# done
+# fail
+}
+
+stop() {
+ echo "$0: $(date) setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: $(date) uninstall vHello blueprint via CLI"
+ vid=($(tacker vnf-list|grep hello-ves|awk '{print $2}')); for id in ${vid[@]}; do tacker vnf-delete ${id}; done
+ vid=($(tacker vnfd-list|grep hello-ves|awk '{print $2}')); for id in ${vid[@]}; do tacker vnfd-delete ${id}; done
+# Need to remove the floatingip deletion or make it specific to the vHello VM
+ fip=($(neutron floatingip-list|grep -v "+"|grep -v id|awk '{print $2}')); for id in ${fip[@]}; do neutron floatingip-delete ${id}; done
+ sg=($(openstack security group list|grep vHello|awk '{print $2}'))
+ for id in ${sg[@]}; do try 10 5 "openstack security group delete ${id}"; done
+
+ echo "$0: $(date) remove collectd agent on bare metal hypervisor hosts"
+ hosts=($(openstack hypervisor list | grep -v Hostname | grep -v "+" | awk '{print $4}'))
+ for host in ${hosts[@]}; do
+ ip=$(openstack hypervisor show $host | grep host_ip | awk '{print $4}')
+ if [[ "$OS_CLOUDNAME" == "overcloud" ]]; then
+ u="heat-admin"
+ p=""
+ else
+ u="ubuntu"
+ p=":ubuntu"
+ fi
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $u$p@$ip <<'EOF'
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+if [ "$dist" == "Ubuntu" ]; then
+ sudo service collectd stop
+ sudo apt-get remove -y collectd
+ sudo rm /etc/collectd/collectd.conf
+else
+ sudo service collectd stop
+ sudo yum remove -y collectd
+ sudo rm /etc/collectd.conf
+fi
+rm -rf $HOME/OpenStackBarcelonaDemo
+EOF
+ done
+}
+
+#
+# Test tools and scenarios
+#
+
+get_vdu_ip () {
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: $(date) find VM IP for $1"
+ ip=$(openstack server list | awk "/$1/ { print \$10 }")
+}
+
+monitor () {
+ echo "$0: $(date) Start the VES Monitor in VDU4 - Stop first if running"
+ get_vdu_ip VDU4
+ sudo cp /tmp/tacker/vHello.pem /tmp/vHello.pem
+ sudo chown $USER:$USER /tmp/vHello.pem
+ chmod 600 /tmp/vHello.pem
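+  # first kill any already-running collector on VDU4, then run the monitor
+  # interactively in the foreground (Ctrl-C to stop)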
+ ssh -t -t -i /tmp/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip << 'EOF'
+sudo kill $(ps -ef | grep "[e]vel-test-collector" | awk '{print $2}')
+python monitor.py --config evel-test-collector/config/collector.conf --section default
+EOF
+}
+
+traffic () {
+ echo "$0: $(date) Generate some traffic, somewhat randomly"
+ get_vdu_ip VDU3
+ ns="0 00 000"
+ while true
+ do
+ for n in $ns; do
+      sleep .$n$(( ( RANDOM % 10 ) + 1 ))s
+ curl -s http://$ip > /dev/null
+ done
+ done
+}
+
+pause () {
+ echo "$0: $(date) Pause the VNF (web server) in $1 for 30 seconds to generate a state change fault report (Stopped)"
+ get_vdu_ip $1
+ ssh -i /tmp/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip "sudo docker pause vHello"
+ sleep 20
+ echo "$0: $(date) Unpausing the VNF to generate a state change fault report (Started)"
+ ssh -i /tmp/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$ip "sudo docker unpause vHello"
+}
+
+forward_to_container () {
+ echo "$0: $(date) pass $1 command to this script in the tacker container"
+ CONTAINER=$(sudo docker ps -a | awk "/tacker/ { print \$1 }")
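+  # $1 is passed twice so that inside the container $# is 2, making the
+  # start|stop case below run the function directly instead of re-forwarding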
+  sudo docker exec $CONTAINER /bin/bash /tmp/tacker/vHello_VES_3Node.sh $1 $1
+ if [ $? -eq 1 ]; then fail; fi
+}
+
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+case "$1" in
+ setup)
+ setup
+ pass
+ ;;
+ run)
+ setup
+ forward_to_container start
+ pass
+ ;;
+ start|stop)
+ if [[ $# -eq 1 ]]; then forward_to_container $1
+ else
+ # running inside the tacker container, ready to go
+ $1
+ fi
+ pass
+ ;;
+ traffic)
+ $1
+ pass
+ ;;
+ test)
+ $2 $3
+ ;;
+ clean)
+ echo "$0: $(date) Uninstall Tacker and test environment"
+    bash /tmp/tacker/tacker-setup.sh tacker-cli clean
+ pass
+ ;;
+ *)
+ echo "usage: bash vHello_VES.sh [setup|start|run|clean]"
+ echo "setup: setup test environment"
+ echo "start: install blueprint and run test"
+ echo "run: setup test environment and run test"
+ echo "stop: stop test and uninstall blueprint"
+ echo "clean: cleanup after test"
+ echo "usage: bash vHello_VES.sh [setup|start|run|test|stop|clean] [monitor|traffic|pause|nic]"
+ echo "setup: setup test environment"
+ echo "start: install blueprint and run test"
+ echo "run: setup test environment and run test"
+ echo "test: run test tools/scenario - see below"
+ echo "stop: stop test and uninstall blueprint"
+ echo "clean: cleanup after test"
+ echo "Test:"
+ echo " monitor: attach to the collector VM and run the VES Monitor"
+ echo " traffic: generate some traffic"
+ echo " pause: pause the VNF (web server) for a minute to generate a state change"
+ fail
+esac
diff --git a/tests/vLamp_Ansible_VES.sh b/tests/vLamp_Ansible_VES.sh
deleted file mode 100644
index 1ae6fdc..0000000
--- a/tests/vLamp_Ansible_VES.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-# Copyright 2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: Enhancements to the OpenStack Interop Challenge "Lampstack"
-# blueprint to add OPNFV VES event capture.
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# $ bash vLamp_Ansible_VES.sh
-
-echo "$0: Add ssh key"
-eval $(ssh-agent -s)
-ssh-add /tmp/ansible/ansible
-
-echo "$0: setup OpenStack environment"
-source /tmp/ansible/admin-openrc.sh
-
-$BALANCER=$(openstack server show balancer | awk "/ addresses / { print \$6 }")
-sudo cp /tmp/ansible/ansible /tmp/ansible/lampstack
-sudo chown $USER /tmp/ansible/lampstack
-ssh -i /tmp/ansible/lampstack ubuntu@$BALANCER
-
-# scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/congress/env.sh $CTLUSER@$CONTROLLER_HOST1:/home/$CTLUSER/congress
-
-echo "$0: Enable haproxy logging"
-# Example /var/log/haproxy.log entries after logging enabled
-# Oct 6 20:03:34 balancer haproxy[2075]: 192.168.37.199:36193 [06/Oct/2016:20:03:34.349] webfarm webfarm/ws10.0.0.9 107/0/1/1/274 304 144 - - ---- 1/1/1/0/0 0/0 "GET /wp-content/themes/iribbon/elements/lib/images/boxes/slidericon.png HTTP/1.1"
-# Oct 6 20:03:34 balancer haproxy[2075]: 192.168.37.199:36194 [06/Oct/2016:20:03:34.365] webfarm webfarm/ws10.0.0.10 95/0/0/1/258 304 144 - - ---- 0/0/0/0/0 0/0 "GET /wp-content/themes/iribbon/elements/lib/images/boxes/blueprint.png HTTP/1.1"
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$BALANCER <<EOF
-sudo sed -i -- 's/#$ModLoad imudp/$ModLoad imudp/g' /etc/rsyslog.conf
-sudo sed -i -- 's/#$UDPServerRun 514/$UDPServerRun 514\n$UDPServerAddress 127.0.0.1/g' /etc/rsyslog.conf
-sudo service rsyslog restart
-EOF