author    Bryan Sullivan <bryan.sullivan@att.com>  2016-10-11 22:42:35 -0700
committer Bryan Sullivan <bryan.sullivan@att.com>  2016-10-11 22:42:35 -0700
commit    b2e526f7469e3f7214442a1cc8d4cbcfea6901ca (patch)
tree      585f01cf43e5d20c8cd48f4f77622fd30a2ae5ae
parent    b192ba22c01390703d6cdce3c701df9ae5ef8ca4 (diff)
Baseline of tests
JIRA: VES-1
Change-Id: I1a78a179414f6bccc93934639554c4fb7d65c64a
Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
-rw-r--r--  INFO                                                    18
-rw-r--r--  LICENSE                                                 13
-rw-r--r--  tests/VES_Reference.sh                                 411
-rwxr-xr-x  tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml  107
-rwxr-xr-x  tests/blueprints/tosca-vnfd-hello-ves/start.sh          84
-rw-r--r--  tests/utils/setenv.sh                                   82
-rw-r--r--  tests/vHello_VES.sh                                    322
-rw-r--r--  tests/vLamp_Ansible_VES.sh                              46
8 files changed, 1083 insertions, 0 deletions
diff --git a/INFO b/INFO
new file mode 100644
index 0000000..a0584d8
--- /dev/null
+++ b/INFO
@@ -0,0 +1,18 @@
+VNF Event Stream (VES)
+Creation Date: May 31, 2016
+Lifecycle State: Incubation
+Primary Contact: ag1367@att.com
+Project Lead: ag1367@att.com
+Jira Name: VNF Event Stream
+Jira Prefix: VES
+Mailing list tag: [ves]
+Repo: ves
+
+Committers:
+ag1367@att.com
+bryan.sullivan@att.com
+feng.liu1@huawei.com
+li.yuanzhen@zte.com.cn
+
+Link to TSC approval: https://wiki.opnfv.org/wiki/tsc#february_16_2016
+Link to approval of additional submitters:
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..89701fd
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2016 Open Platform for NFV Project, Inc. and its contributors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/tests/VES_Reference.sh b/tests/VES_Reference.sh
new file mode 100644
index 0000000..fa5bde1
--- /dev/null
+++ b/tests/VES_Reference.sh
@@ -0,0 +1,411 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Deployment script for the VNF Event Stream (VES) Reference VNF
+# and Test Collector. Runs the VES Collector in a docker container on the
+# OPNFV jumphost, and the VES Reference VNF as an OpenStack VM.
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ git clone https://gerrit.opnfv.org/gerrit/ves
+# $ cd ves/tests
+# $ bash VES_Reference.sh [setup|setup_VNF|clean]
+# setup: setup test environment and start the collector and Reference VNF
+# setup_VNF: install the Reference VNF components (run on the VNF VM)
+# clean: cleanup after test
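+# Example:
+# $ bash VES_Reference.sh setup
+# then, on the Reference VNF VM (the script is copied there by setup):
+# $ bash VES_Reference.sh setup_VNF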
+
+trap 'fail' ERR
+
+pass() {
+ echo "$0: Hooray!"
+ set +x #echo off
+ exit 0
+}
+
+fail() {
+ echo "$0: Test Failed!"
+ set +x
+ exit 1
+}
+
+function setenv () {
+ echo "$0: Setup OpenStack environment variables"
+ source utils/setenv.sh /tmp/VES
+}
+
+get_floating_net () {
+ network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
+ for id in ${network_ids[@]}; do
+ [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
+ done
+ if [[ $FLOATING_NETWORK_ID ]]; then
+ FLOATING_NETWORK_NAME=$(openstack network show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
+ else
+ echo "$0: Floating network not found"
+ exit 1
+ fi
+}
+
+try () {
+ count=$1
+ $3 && return
+ while [[ $count -gt 0 ]]
+ do
+  sleep $2
+  count=$((count-1))
+  $3 && return
+ done
+ echo "$0: Command \"$3\" was not successful after $1 tries"
+}
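+# Example: retry a security group delete up to 5 times, 5 seconds apart:
+#   try 5 5 "openstack security group delete VES_Reference"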
+
+function create_container () {
+ echo "$0: Creating docker container"
+ echo "$0: Copy this script to /tmp/VES"
+ mkdir -p /tmp/VES
+ cp $0 /tmp/VES/.
+ chmod 755 /tmp/VES/*.sh
+
+ echo "$0: reset blueprints folder"
+ if [[ -d /tmp/VES/blueprints/ ]]; then rm -rf /tmp/VES/blueprints/; fi
+ mkdir -p /tmp/VES/blueprints/
+
+ echo "$0: Setup admin-openrc.sh"
+ setenv
+
+ echo "$0: Setup container"
+ if [ "$dist" == "Ubuntu" ]; then
+ # xenial is needed for python 3.5
+ sudo docker pull ubuntu:xenial
+ sudo service docker start
+ # Port 30000 is the default for the VES Collector
+ sudo docker run -it -d -p 30000:30000 -v /tmp/VES/:/tmp/VES \
+ --name VES ubuntu:xenial /bin/bash
+ else
+ # Centos
+ echo "Centos-based install"
+ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
+[dockerrepo]
+name=Docker Repository
+baseurl=https://yum.dockerproject.org/repo/main/centos/7/
+enabled=1
+gpgcheck=1
+gpgkey=https://yum.dockerproject.org/gpg
+EOF
+ sudo yum install -y docker-engine
+ # xenial is needed for python 3.5
+ sudo service docker start
+ sudo docker pull ubuntu:xenial
+ # Port 30000 is the default for the VES Collector
+ sudo docker run -i -t -d -p 30000:30000 -v /tmp/VES/:/tmp/VES \
+ --name VES ubuntu:xenial /bin/bash
+ fi
+}
+
+setup_Openstack () {
+ echo "$0: install OpenStack clients"
+ pip install --upgrade python-openstackclient
+ pip install --upgrade python-glanceclient
+ pip install --upgrade python-neutronclient
+ pip install --upgrade python-heatclient
+# pip install --upgrade keystonemiddleware
+
+ echo "$0: setup OpenStack environment"
+ source /tmp/VES/admin-openrc.sh
+
+ echo "$0: determine external (public) network as the floating ip network" echo "$0: setup OpenStack environment"
+ get_floating_net
+
+ echo "$0: Setup centos7-server glance image if needed"
+ if [[ -z $(openstack image list | awk "/ centos7-server / { print \$2 }") ]]; \
+ then glance --os-image-api-version 1 image-create \
+ --name centos7-server \
+ --disk-format qcow2 \
+ --location http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1607.qcow2 \
+ --container-format bare; fi
+
+ if [[ -z $(neutron net-list | awk "/ internal / { print \$2 }") ]]; then
+ echo "$0: Create internal network"
+ neutron net-create internal
+
+ echo "$0: Create internal subnet"
+ neutron subnet-create internal 10.0.0.0/24 --name internal \
+ --gateway 10.0.0.1 --enable-dhcp \
+ --allocation-pool start=10.0.0.2,end=10.0.0.254 \
+ --dns-nameserver 8.8.8.8
+ fi
+
+ if [[ -z $(neutron router-list | awk "/ public_router / { print \$2 }") ]]; then
+ echo "$0: Create router"
+ neutron router-create public_router
+
+ echo "$0: Create router gateway"
+ neutron router-gateway-set public_router $FLOATING_NETWORK_NAME
+
+ echo "$0: Add router interface for internal network"
+ neutron router-interface-add public_router subnet=internal
+ fi
+}
+
+setup_Collector () {
+ echo "$0: Install dependencies - OS specific"
+ if [ "$dist" == "Ubuntu" ]; then
+ apt-get update
+ apt-get install -y python
+ apt-get install -y python-pip
+ apt-get install -y git
+ else
+ yum install -y python
+ yum install -y python-pip
+ yum install -y git
+ fi
+ pip install --upgrade pip
+
+ echo "$0: clone VES Collector repo"
+ cd /tmp/VES/blueprints/
+ git clone https://github.com/att/evel-test-collector.git
+ echo "$0: update collector.conf"
+ cd /tmp/VES/blueprints/evel-test-collector
+ sed -i -- 's~/var/log/att/~/tmp/VES/~g' config/collector.conf
+}
+
+start_Collector () {
+ echo "$0: start the VES Collector"
+ cd /tmp/VES/blueprints/evel-test-collector
+ python code/collector/collector.py \
+ --config config/collector.conf \
+ --section default \
+ --verbose
+}
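+# A quick smoke test for a running collector (assumes the default port 30000
+# and the credentials configured in collector.conf; even an invalid body
+# should draw an error response, confirming the listener is up):
+#   curl --user <user>:<password> -H "Content-Type: application/json" \
+#     -X POST -d '{}' http://localhost:30000/eventListener/v1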
+
+setup_Reference_VNF_VM () {
+ echo "$0: Create Nova key pair"
+ nova keypair-add VES > /tmp/VES/VES-key
+ chmod 600 /tmp/VES/VES-key
+
+ echo "$0: Add ssh key"
+ eval $(ssh-agent -s)
+ ssh-add /tmp/VES/VES-key
+
+ echo "$0: clone VES Reference VNF repo"
+ cd /tmp/VES/blueprints/
+ git clone https://github.com/att/evel-reporting-reference-vnf.git
+
+ echo "$0: customize VES Reference VNF Heat template"
+ cd evel-reporting-reference-vnf/hot
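+ # The sed commands below replace the stock env file's placeholder image and
+ # network UUIDs, the collector address, and the key name with this
+ # deployment's values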
+ ID=$(openstack image list | awk "/ centos7-server / { print \$2 }")
+ sed -i -- "s/40299aa3-2921-43b0-86b9-56c28a2b5232/$ID/g" event_reporting_vnf.env.yaml
+ ID=$(neutron net-list | awk "/ internal / { print \$2 }")
+ sed -i -- "s/84985f60-fbba-4a78-ba83-2815ff620dbc/$ID/g" event_reporting_vnf.env.yaml
+ sed -i -- "s/127.0.0.1/$JUMPHOST/g" event_reporting_vnf.env.yaml
+ sed -i -- "s/my-keyname/VES/g" event_reporting_vnf.env.yaml
+
+ echo "$0: Create VES Reference VNF via Heat"
+ heat stack-create -e event_reporting_vnf.env.yaml \
+ -f event_reporting_vnf.template.yaml VES
+
+ echo "$0: Wait for VES Reference VNF to go Active"
+ COUNTER=0
+ until [[ $(heat stack-list | awk "/ VES / { print \$6 }") == "CREATE_COMPLETE" ]]; do
+ sleep 5
+ let COUNTER+=1
+  if [[ $COUNTER -gt 20 ]]; then fail; fi
+ done
+
+ echo "$0: Get Server ID"
+ SID=$(heat resource-list VES | awk "/ OS::Nova::Server / { print \$4 }")
+
+ echo "$0: associate SSH security group"
+ # TODO: Update Heat template to include security group
+ if [[ $(openstack security group list | awk "/ VES_Reference / { print \$2 }") ]]; then openstack security group delete VES_Reference; fi
+ openstack security group create VES_Reference
+ openstack security group rule create --ingress --protocol TCP --dst-port 22:22 VES_Reference
+ openstack security group rule create --ingress --protocol TCP --dst-port 80:80 VES_Reference
+ openstack server add security group $SID VES_Reference
+
+ echo "$0: associate floating IP"
+ # TODO: Update Heat template to include floating IP (if supported)
+ FIP=$(openstack floating ip create $FLOATING_NETWORK_NAME | awk "/floating_ip_address/ { print \$4 }")
+ nova floating-ip-associate $SID $FIP
+
+# scp -i /tmp/VES/VES-key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/VES/VES_Reference.sh centos@$FIP:/home/centos
+ scp -i /tmp/VES/VES-key -o UserKnownHostsFile=/dev/null \
+ -o StrictHostKeyChecking=no \
+ $0 centos@$FIP:/home/centos
+# run thru setup_Reference_VNF manually to verify
+# ssh -i /tmp/VES/VES-key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no centos@$FIP
+# ssh -i /tmp/VES/VES-key -x -o UserKnownHostsFile=/dev/null
+# -o StrictHostKeyChecking=no
+# centos@$FIP \
+# "nohup source $0 setup_VNF &"
+}
+
+setup_Reference_VNF () {
+ echo "$0: Install dependencies"
+ sudo yum update -y
+ sudo yum install -y wget
+ sudo yum install -y gcc
+ sudo yum install -y openssl-devel
+ sudo yum install -y epel-release
+ sudo yum install -y python-pip
+ sudo pip install --upgrade pip
+ sudo yum install -y git
+
+ echo "$0: Install Django"
+ sudo pip install django
+
+ echo "$0: Install Apache"
+ sudo yum install -y httpd httpd-devel
+
+ echo "$0: Install mod_python"
+ sudo yum install -y python-devel
+ mkdir ~/mod_python-3.4.1
+ cd ~/mod_python-3.4.1
+ wget http://dist.modpython.org/dist/mod_python-3.4.1.tgz
+ tar xvf mod_python-3.4.1.tgz
+ cd mod_python-3.4.1
+
+ # Edit .../dist/version.sh to remove the dependency on Git as described at
+ # http://stackoverflow.com/questions/20022952/fatal-not-a-git-repository-when-installing-mod-python
+ sed \
+ -e 's/(git describe --always)/(git describe --always 2>\/dev\/null)/g' \
+ -e 's/`git describe --always`/`git describe --always 2>\/dev\/null`/g' \
+ -i $( find . -type f -name Makefile\* -o -name version.sh )
+
+ ./configure
+ make
+ sudo make install
+ make test
+
+ echo "$0: Install mod_wsgi"
+ sudo yum install -y mod_wsgi
+
+ echo "$0: clone VES Reference VNF repo"
+ cd ~
+ git clone https://github.com/att/evel-reporting-reference-vnf.git
+
+ echo "$0: Setup collector"
+
+ sudo mkdir -p /opt/att/collector
+ sudo install -m=644 -t /opt/att/collector ~/evel-reporting-reference-vnf/code/collector/*
+
+ echo "$0: Setup Reference VNF website"
+ sudo mkdir -p /opt/att/website/
+ sudo cp -r ~/evel-reporting-reference-vnf/code/webserver/django/* /opt/att/website/
+ sudo chown -R root:root /opt/att/website/
+ sudo mkdir -p /var/log/att/
+ echo "eh?" | sudo tee /var/log/att/django.log
+
+ echo "$0: Create database"
+
+ cd /opt/att/website
+ sudo python manage.py migrate
+ sudo python manage.py createsuperuser
+ sudo rm -f /var/log/att/django.log
+
+ sudo systemctl daemon-reload
+ sudo systemctl enable httpd
+ sudo systemctl restart httpd
+
+ echo "$0: Setup website backend"
+ sudo mkdir -p /opt/att/backend/
+ sudo install -m=644 -t /opt/att/backend ~/evel-reporting-reference-vnf/code/backend/*
+ sudo install -m=644 ~/evel-reporting-reference-vnf/config/backend.service /etc/systemd/system
+ sudo systemctl daemon-reload
+ sudo systemctl enable backend
+ sudo systemctl restart backend
+
+
+ echo "$0: Change security context for database"
+ chcon -t httpd_sys_content_t db.sqlite3
+ chcon -t httpd_sys_content_t .
+ setsebool -P httpd_unified 1
+ setsebool -P httpd_can_network_connect=1
+
+ echo "$0: Gather static files"
+ sudo python manage.py collectstatic
+
+ echo "$0: Install jsonschema"
+ sudo pip install jsonschema
+
+ echo "$0: Put backend.service into /etc/systemd/system"
+ sudo systemctl daemon-reload
+ sudo systemctl start backend
+ sudo systemctl status backend
+ sudo systemctl enable backend
+
+ # from initialize-event-database.sh
+ cd /opt/att/website
+ sudo python manage.py migrate
+ sudo python manage.py createsuperuser
+
+ # from go-webserver.sh
+ sudo python /opt/att/website/manage.py runserver &
+
+ # from go-backend.sh
+ sudo python /opt/att/backend/backend.py --config ~/evel-reporting-reference-vnf/config/backend.conf --section default --verbose &
+}
+
+clean () {
+ echo "$0: delete container"
+ CONTAINER=$(sudo docker ps -a | awk "/VES/ { print \$1 }")
+ sudo docker stop $CONTAINER
+ sudo docker rm -v $CONTAINER
+}
+
+forward_to_container () {
+ echo "$0: pass $1 command to VES_Reference.sh in container"
+ CONTAINER=$(sudo docker ps -a | awk "/VES/ { print \$1 }")
+ sudo docker exec $CONTAINER /bin/bash /tmp/VES/VES_Reference.sh $1 $1
+ if [ $? -eq 1 ]; then fail; fi
+}
+
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+case "$1" in
+ setup)
+ if [[ $# -eq 1 ]]; then
+ create_container
+ echo "$0: Execute VES_Reference.sh in the container"
+ CONTAINER=$(sudo docker ps -l | awk "/VES/ { print \$1 }")
+ if [ "$dist" == "Ubuntu" ]; then
+ sudo docker exec -it $CONTAINER /bin/bash /tmp/VES/VES_Reference.sh setup setup
+ else
+ sudo docker exec -i -t $CONTAINER /bin/bash /tmp/VES/VES_Reference.sh setup setup
+ fi
+ else
+ # Running in the container, continue VES setup
+ setup_Collector
+ setup_Openstack
+ setup_Reference_VNF_VM
+ start_Collector
+ fi
+ pass
+ ;;
+ setup_VNF)
+ setup_Reference_VNF
+ ;;
+ clean)
+ echo "$0: Uninstall"
+ clean
+ pass
+ ;;
+ *)
+ echo "usage: bash VES_Reference.sh [setup|clean]"
+ echo "setup: setup test environment"
+ echo "clean: cleanup after test"
+ fail
+esac
diff --git a/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml b/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
new file mode 100755
index 0000000..fc9e1b8
--- /dev/null
+++ b/tests/blueprints/tosca-vnfd-hello-ves/blueprint.yaml
@@ -0,0 +1,107 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: Hello VES
+
+metadata:
+ template_name: tosca-vnfd-hello-ves
+
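+# Two VDUs are defined on shared management/private networks: VDU1 hosts the
+# vHello web server and VDU2 the VES test collector (see tests/vHello_VES.sh,
+# which installs this blueprint)
+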
+topology_template:
+ node_templates:
+ VDU1:
+ type: tosca.nodes.nfv.VDU.Tacker
+ capabilities:
+ nfv_compute:
+ properties:
+ num_cpus: 1
+ mem_size: 1024 MB
+ disk_size: 4 GB
+ properties:
+ image: models-xenial-server
+ availability_zone: nova
+ mgmt_driver: noop
+ config: |
+ param0: key1
+ param1: key2
+
+ CP1:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: true
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU1
+
+ CP2:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL2
+ - virtualBinding:
+ node: VDU1
+
+ VL1:
+ type: tosca.nodes.nfv.VL
+ properties:
+ network_name: vnf_mgmt
+ vendor: Tacker
+
+ VL2:
+ type: tosca.nodes.nfv.VL
+ properties:
+ network_name: vnf_private
+ vendor: Tacker
+
+ VDU2:
+ type: tosca.nodes.nfv.VDU.Tacker
+ capabilities:
+ nfv_compute:
+ properties:
+ num_cpus: 1
+ mem_size: 1024 MB
+ disk_size: 4 GB
+ properties:
+ image: models-xenial-server
+ availability_zone: nova
+ mgmt_driver: noop
+ config: |
+ param0: key1
+ param1: key2
+
+ CP3:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: true
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL3
+ - virtualBinding:
+ node: VDU2
+
+ CP4:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL4
+ - virtualBinding:
+ node: VDU2
+
+ VL3:
+ type: tosca.nodes.nfv.VL
+ properties:
+ network_name: vnf_mgmt
+ vendor: Tacker
+
+ VL4:
+ type: tosca.nodes.nfv.VL
+ properties:
+ network_name: vnf_private
+ vendor: Tacker
+
diff --git a/tests/blueprints/tosca-vnfd-hello-ves/start.sh b/tests/blueprints/tosca-vnfd-hello-ves/start.sh
new file mode 100755
index 0000000..6c8fbeb
--- /dev/null
+++ b/tests/blueprints/tosca-vnfd-hello-ves/start.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Startup script for a simple web server as part of the
+# vHello_VES test of the OPNFV VES project.
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ bash start.sh IP ID
+# IP: IP address of the collector
+# ID: username:password to use in REST
+#
+
+echo "$0: Setup website and dockerfile"
+mkdir ~/www
+mkdir ~/www/html
+
+# ref: https://hub.docker.com/_/nginx/
+cat > ~/www/Dockerfile <<EOM
+FROM nginx
+COPY html /usr/share/nginx/html
+EOM
+
+cat > ~/www/html/index.html <<EOM
+<!DOCTYPE html>
+<html>
+<head>
+<title>Hello World!</title>
+<meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1"/>
+<style>
+body { width: 100%; background-color: white; color: black; padding: 0px; margin: 0px; font-family: sans-serif; font-size:100%; }
+</style>
+</head>
+<body>
+Hello World!<br>
+<a href="http://wiki.opnfv.org"><img src="https://www.opnfv.org/sites/all/themes/opnfv/logo.png"></a>
+</body></html>
+EOM
+
+echo "$0: Install docker"
+# Per https://docs.docker.com/engine/installation/linux/ubuntulinux/
+# Per https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-16-04
+sudo apt-get install -y apt-transport-https ca-certificates
+sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" | sudo tee /etc/apt/sources.list.d/docker.list
+sudo apt-get update
+sudo apt-get purge -y lxc-docker
+sudo apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual
+sudo apt-get install -y docker-engine
+
+echo "$0: Get nginx container and start website in docker"
+# Per https://hub.docker.com/_/nginx/
+sudo docker pull nginx
+cd ~/www
+sudo docker build -t vhello .
+sudo docker run --name vHello -d -p 80:80 vhello
+
+echo "$0: setup VES event delivery for the nginx server"
+
+# id=$(sudo ls /var/lib/docker/containers)
+# sudo tail -f /var/lib/docker/containers/$id/$id-json.log
+
+export COL_IP=$1
+export COL_ID=$2
+
+while true
+do
+ sleep 30
+ curl --user $COL_ID -H "Content-Type: application/json" -X POST -d '{
+   "event": {
+     "commonEventHeader": {
+       "domain": "fault",
+       "eventType": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+       "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
+       "sequence": "0",
+       "priority": "High",
+       "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+       "sourceName": "EricssonECE",
+       "functionalRole": "SCF",
+       "startEpochMicrosec": "1413378172000000",
+       "lastEpochMicrosec": "1413378172000000",
+       "reportingEntityId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+       "reportingEntityName": "EricssonECE"
+     },
+     "faultFields": {
+       "alarmCondition": "PilotNumberPoolExhaustion",
+       "eventSourceType": "other(0)",
+       "specificProblem": "Calls cannot complete because pilot numbers are unavailable",
+       "eventSeverity": "CRITICAL",
+       "vfStatus": "Active"
+     }
+   }
+ }' http://$COL_IP:30000/eventListener/v1
+done
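+# Note: the event body above is a static example fault event; a real agent
+# would generate unique eventId, sequence, and timestamp values per event.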
+
diff --git a/tests/utils/setenv.sh b/tests/utils/setenv.sh
new file mode 100644
index 0000000..94c0b0b
--- /dev/null
+++ b/tests/utils/setenv.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: OpenStack environment file setup for OPNFV deployments. Sets up
+# the environment parameters allowing use of OpenStack CLI commands, and as needed
+# for OPNFV test scripts.
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ wget https://git.opnfv.org/cgit/ves/plain/tests/utils/setenv.sh -O [folder]/setenv.sh
+# folder: folder in which to place the script
+# $ source [folder]/setenv.sh [target]
+# target: folder in which to put the created admin-openrc.sh file
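+# Example:
+# $ mkdir -p /tmp/VES
+# $ wget https://git.opnfv.org/cgit/ves/plain/tests/utils/setenv.sh -O /tmp/VES/setenv.sh
+# $ source /tmp/VES/setenv.sh /tmp/VES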
+
+# TODO: Find a more precise way to determine the OPNFV install... currently
+# this assumes that the script is running on the OPNFV jumphost, and
+# Ubuntu=JOID, Centos=Apex
+
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+
+if [ "$dist" == "Ubuntu" ]; then
+ # Ubuntu: assumes JOID-based install, and that this script is being run on the jumphost.
+ echo "$0: Ubuntu-based install"
+ echo "$0: Create the environment file"
+ KEYSTONE_HOST=$(juju status --format=short | awk "/keystone\/0/ { print \$3 }")
+ cat <<EOF >$1/admin-openrc.sh
+export CONGRESS_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
+export HORIZON_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
+export KEYSTONE_HOST=$KEYSTONE_HOST
+export CEILOMETER_HOST=$(juju status --format=short | awk "/ceilometer\/0/ { print \$3 }")
+export CINDER_HOST=$(juju status --format=short | awk "/cinder\/0/ { print \$3 }")
+export GLANCE_HOST=$(juju status --format=short | awk "/glance\/0/ { print \$3 }")
+export NEUTRON_HOST=$(juju status --format=short | awk "/neutron-api\/0/ { print \$3 }")
+export NOVA_HOST=$(juju status --format=short | awk "/nova-cloud-controller\/0/ { print \$3 }")
+export JUMPHOST=$(ifconfig brAdm | awk "/inet addr/ { print \$2 }" | sed 's/addr://g')
+export OS_USERNAME=admin
+export OS_PASSWORD=openstack
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://$KEYSTONE_HOST:5000/v2.0
+export OS_REGION_NAME=RegionOne
+EOF
+else
+ # Centos: assumes Apex-based install, and that this script is being run on the Undercloud controller VM.
+ echo "$0: Centos-based install"
+ echo "$0: Setup undercloud environment so we can get overcloud Controller server address"
+ source ~/stackrc
+ echo "$0: Get address of Controller node"
+ export CONTROLLER_HOST1=$(openstack server list | awk "/overcloud-controller-0/ { print \$8 }" | sed 's/ctlplane=//g')
+ echo "$0: Create the environment file"
+ cat <<EOF >$1/admin-openrc.sh
+export HORIZON_HOST=$CONTROLLER_HOST1
+export CONGRESS_HOST=$CONTROLLER_HOST1
+export KEYSTONE_HOST=$CONTROLLER_HOST1
+export CEILOMETER_HOST=$CONTROLLER_HOST1
+export CINDER_HOST=$CONTROLLER_HOST1
+export GLANCE_HOST=$CONTROLLER_HOST1
+export NEUTRON_HOST=$CONTROLLER_HOST1
+export NOVA_HOST=$CONTROLLER_HOST1
+export JUMPHOST=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
+EOF
+ cat ~/overcloudrc >>$1/admin-openrc.sh
+ source ~/overcloudrc
+ export OS_REGION_NAME=$(openstack endpoint list | awk "/ nova / { print \$4 }")
+ # sed command below works around a bug - region shows up twice for some
+ # reason; drop the duplicate last line, then append the region once
+ sed -i '$d' $1/admin-openrc.sh
+ cat <<EOF >>$1/admin-openrc.sh
+export OS_REGION_NAME=$OS_REGION_NAME
+EOF
+fi
+source $1/admin-openrc.sh
diff --git a/tests/vHello_VES.sh b/tests/vHello_VES.sh
new file mode 100644
index 0000000..85120d2
--- /dev/null
+++ b/tests/vHello_VES.sh
@@ -0,0 +1,322 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Deployment test for the VES agent and collector based
+# upon the Tacker Hello World blueprint
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ git clone https://gerrit.opnfv.org/gerrit/ves
+# $ cd ves/tests
+# $ bash vHello_VES.sh [setup|start|run|stop|clean|collector]
+# setup: setup test environment
+# start: install blueprint and run test
+# run: setup test environment and run test
+# stop: stop test and uninstall blueprint
+# clean: cleanup after test
+# collector: attach to the collector VM and run the collector
+
+set -x
+
+trap 'fail' ERR
+
+pass() {
+ echo "$0: Hooray!"
+ set +x #echo off
+ exit 0
+}
+
+fail() {
+ echo "$0: Test Failed!"
+ set +x
+ exit 1
+}
+
+get_floating_net () {
+ network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
+ for id in ${network_ids[@]}; do
+ [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
+ done
+ if [[ $FLOATING_NETWORK_ID ]]; then
+ FLOATING_NETWORK_NAME=$(openstack network show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
+ else
+ echo "$0: Floating network not found"
+ exit 1
+ fi
+}
+
+try () {
+ count=$1
+ $3 && return
+ while [[ $count -gt 0 ]]
+ do
+  sleep $2
+  count=$((count-1))
+  $3 && return
+ done
+ echo "$0: Command \"$3\" was not successful after $1 tries"
+}
+
+setup () {
+ echo "$0: Setup temp test folder /tmp/tacker and copy this script there"
+ mkdir -p /tmp/tacker
+ chmod 777 /tmp/tacker/
+ cp $0 /tmp/tacker/.
+ chmod 755 /tmp/tacker/*.sh
+
+ echo "$0: tacker-setup part 1"
+ wget https://git.opnfv.org/cgit/models/plain/tests/utils/tacker-setup.sh -O /tmp/tacker/tacker-setup.sh
+ bash /tmp/tacker/tacker-setup.sh tacker-cli init
+
+ echo "$0: tacker-setup part 2"
+ CONTAINER=$(sudo docker ps -l | awk "/tacker/ { print \$1 }")
+ dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+ if [ "$dist" == "Ubuntu" ]; then
+ echo "$0: JOID workaround for Colorado - enable ML2 port security"
+ juju set neutron-api enable-ml2-port-security=true
+
+ echo "$0: Execute tacker-setup.sh in the container"
+ sudo docker exec -it $CONTAINER /bin/bash /tmp/tacker/tacker-setup.sh tacker-cli setup
+ else
+ echo "$0: Execute tacker-setup.sh in the container"
+ sudo docker exec -i -t $CONTAINER /bin/bash /tmp/tacker/tacker-setup.sh tacker-cli setup
+ fi
+
+ echo "$0: reset blueprints folder"
+ if [[ -d /tmp/tacker/blueprints/tosca-vnfd-hello-ves ]]; then rm -rf /tmp/tacker/blueprints/tosca-vnfd-hello-ves; fi
+ mkdir -p /tmp/tacker/blueprints/tosca-vnfd-hello-ves
+
+ echo "$0: copy tosca-vnfd-hello-ves to blueprints folder"
+ cp -r blueprints/tosca-vnfd-hello-ves /tmp/tacker/blueprints
+
+ # The following two steps are still being tested. The guestfish step needs work.
+
+ # echo "$0: Create Nova key pair"
+ # mkdir -p ~/.ssh
+ # nova keypair-delete vHello
+ # nova keypair-add vHello > /tmp/tacker/vHello.pem
+ # chmod 600 /tmp/tacker/vHello.pem
+ # pubkey=$(nova keypair-show vHello | grep "Public key:" | sed -- 's/Public key: //g')
+ # nova keypair-show vHello | grep "Public key:" | sed -- 's/Public key: //g' >/tmp/tacker/vHello.pub
+
+ echo "$0: Inject key into xenial server image"
+ # wget http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ # sudo yum install -y libguestfs-tools
+ # guestfish <<EOF
+#add xenial-server-cloudimg-amd64-disk1.img
+#run
+#mount /dev/sda1 /
+#mkdir /home/ubuntu
+#mkdir /home/ubuntu/.ssh
+#cat <<EOM >/home/ubuntu/.ssh/authorized_keys
+#$pubkey
+#EOM
+#exit
+#chown -R ubuntu /home/ubuntu
+#EOF
+
+ # Using pre-key-injected image for now, vHello.pem as provided in the blueprint
+ if [ ! -f /tmp/xenial-server-cloudimg-amd64-disk1.img ]; then
+ wget -O /tmp/xenial-server-cloudimg-amd64-disk1.img http://artifacts.opnfv.org/models/images/xenial-server-cloudimg-amd64-disk1.img
+ fi
+ cp blueprints/tosca-vnfd-hello-ves/vHello.pem /tmp/tacker
+ chmod 600 /tmp/tacker/vHello.pem
+
+ echo "$0: setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: Setup image_id"
+ image_id=$(openstack image list | awk "/ models-xenial-server / { print \$2 }")
+ if [[ -z "$image_id" ]]; then glance --os-image-api-version 1 image-create --name models-xenial-server --disk-format qcow2 --file /tmp/xenial-server-cloudimg-amd64-disk1.img --container-format bare; fi
+}
+
+start() {
+ echo "$0: setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: create VNFD"
+ cd /tmp/tacker/blueprints/tosca-vnfd-hello-ves
+ tacker vnfd-create --vnfd-file blueprint.yaml --name hello-ves
+ if [ $? -eq 1 ]; then fail; fi
+
+ echo "$0: create VNF"
+ tacker vnf-create --vnfd-name hello-ves --name hello-ves
+ if [ $? -eq 1 ]; then fail; fi
+
+ echo "$0: wait for hello-ves to go ACTIVE"
+ active=""
+ while [[ -z $active ]]
+ do
+ active=$(tacker vnf-show hello-ves | grep ACTIVE)
+ if [ "$(tacker vnf-show hello-ves | grep -c ERROR)" == "1" ]; then
+ echo "$0: hello-ves VNF creation failed with state ERROR"
+ fail
+ fi
+ sleep 10
+ done
+
+ echo "$0: directly set port security on ports (bug/unsupported in Mitaka Tacker?)"
+ HEAT_ID=$(tacker vnf-show hello-ves | awk "/instance_id/ { print \$4 }")
+ VDU1_ID=$(openstack stack resource list $HEAT_ID | awk "/VDU1 / { print \$4 }")
+ id=($(neutron port-list|grep -v "+"|grep -v name|awk '{print $2}'))
+ for id in ${id[@]}; do
+ if [[ $(neutron port-show $id|grep $VDU1_ID) ]]; then neutron port-update ${id} --port-security-enabled=True; fi
+ done
+
+ VDU2_ID=$(openstack stack resource list $HEAT_ID | awk "/VDU2 / { print \$4 }")
+ id=($(neutron port-list|grep -v "+"|grep -v name|awk '{print $2}'))
+ for id in ${id[@]}; do
+ if [[ $(neutron port-show $id|grep $VDU2_ID) ]]; then neutron port-update ${id} --port-security-enabled=True; fi
+ done
+
+ echo "$0: directly assign security group (unsupported in Mitaka Tacker)"
+ if [[ $(openstack security group list | awk "/ vHello / { print \$2 }") ]]; then openstack security group delete vHello; fi
+ openstack security group create vHello
+ openstack security group rule create --ingress --protocol TCP --dst-port 22:22 vHello
+ openstack security group rule create --ingress --protocol TCP --dst-port 80:80 vHello
+ openstack server add security group $VDU1_ID vHello
+ openstack server add security group $VDU1_ID default
+ openstack server add security group $VDU2_ID vHello
+ openstack server add security group $VDU2_ID default
+
+ echo "$0: associate floating IPs"
+ get_floating_net
+ FIP=$(openstack floating ip create $FLOATING_NETWORK_NAME | awk "/floating_ip_address/ { print \$4 }")
+ nova floating-ip-associate $VDU1_ID $FIP
+ FIP=$(openstack floating ip create $FLOATING_NETWORK_NAME | awk "/floating_ip_address/ { print \$4 }")
+ nova floating-ip-associate $VDU2_ID $FIP
+
+ echo "$0: get web server addresses"
+ VDU1_IP=$(openstack server show $VDU1_ID | awk "/ addresses / { print \$6 }")
+ VDU1_URL="http://$VDU1_IP"
+ VDU2_IP=$(openstack server show $VDU2_ID | awk "/ addresses / { print \$6 }")
+ VDU2_URL="http://$VDU2_IP:30000"
+
+ echo "$0: wait 30 seconds for server SSH to be available"
+ sleep 30
+
+ echo "$0: Setup the VES Collector in VDU2"
+ chown root /tmp/tacker/vHello.pem
+ # Note below: python (2.7) is required due to dependency on module 'ConfigParser'
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$VDU2_IP << EOF
+sudo apt-get update
+sudo apt-get upgrade -y
+sudo apt-get install -y python python-jsonschema
+sudo mkdir /var/log/att
+sudo chown ubuntu /var/log/att
+touch /var/log/att/collector.log
+sudo chown ubuntu /home/ubuntu/
+cd /home/ubuntu/
+git clone https://github.com/att/evel-test-collector.git
+sed -i -- 's/vel_username = /vel_username = hello/' evel-test-collector/config/collector.conf
+sed -i -- 's/vel_password = /vel_password = world/' evel-test-collector/config/collector.conf
+nohup python evel-test-collector/code/collector/collector.py \
+ --config evel-test-collector/config/collector.conf \
+ --section default \
+ --verbose > ~/collector.log &
+exit
+EOF
+
+ echo "$0: start vHello web server in VDU1"
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$VDU1_IP "sudo chown ubuntu /home/ubuntu"
+ scp -i /tmp/tacker/vHello.pem -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/tacker/blueprints/tosca-vnfd-hello-ves/start.sh ubuntu@$VDU1_IP:/home/ubuntu/start.sh
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$VDU1_IP "bash /home/ubuntu/start.sh $VDU2_IP hello:world"
+
+ echo "$0: verify vHello server is running"
+ apt-get install -y curl
+ count=10
+ while [[ $count -gt 0 ]]
+ do
+ sleep 60
+ let count=$count-1
+  if [[ $(curl http://$VDU1_IP | grep -c "Hello World") -gt 0 ]]; then pass; fi
+ done
+ fail
+}
+
+collector () {
+ echo "$0: setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: find Collector VM IP"
+ HEAT_ID=$(tacker vnf-show hello-ves | awk "/instance_id/ { print \$4 }")
+ VDU2_ID=$(openstack stack resource list $HEAT_ID | awk "/VDU2 / { print \$4 }")
+ VDU2_IP=$(openstack server show $VDU2_ID | awk "/ addresses / { print \$6 }")
+ VDU2_URL="http://$VDU2_IP:30000"
+
+ echo "$0: Start the VES Collector in VDU2"
+ ssh -i /tmp/tacker/vHello.pem -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$VDU2_IP << EOF
+cd /home/ubuntu/
+python evel-test-collector/code/collector/collector.py \
+ --config evel-test-collector/config/collector.conf \
+ --section default \
+ --verbose
+EOF
+}
+
+stop() {
+ echo "$0: setup OpenStack CLI environment"
+ source /tmp/tacker/admin-openrc.sh
+
+ echo "$0: uninstall vHello blueprint via CLI"
+ vid=($(tacker vnf-list|grep hello-ves|awk '{print $2}')); for id in ${vid[@]}; do tacker vnf-delete ${id}; done
+ vid=($(tacker vnfd-list|grep hello-ves|awk '{print $2}')); for id in ${vid[@]}; do tacker vnfd-delete ${id}; done
+# Need to remove the floatingip deletion or make it specific to the vHello VM
+# fip=($(neutron floatingip-list|grep -v "+"|grep -v id|awk '{print $2}')); for id in ${fip[@]}; do neutron floatingip-delete ${id}; done
+ sg=($(openstack security group list|grep vHello|awk '{print $2}'))
+ for id in ${sg[@]}; do try 5 5 "openstack security group delete ${id}"; done
+}
+
+forward_to_container () {
+ echo "$0: pass $1 command to this script in the tacker container"
+ CONTAINER=$(sudo docker ps -a | awk "/tacker/ { print \$1 }")
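+ # $1 is passed twice: the second copy signals the script that it is now
+ # running inside the container (see the argument-count checks in the case
+ # statement below)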
+ sudo docker exec $CONTAINER /bin/bash /tmp/tacker/vHello_VES.sh $1 $1
+ if [ $? -eq 1 ]; then fail; fi
+}
+
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+case "$1" in
+ setup)
+ setup
+ pass
+ ;;
+ run)
+ setup
+ forward_to_container start
+ pass
+ ;;
+ start|stop|collector)
+ if [[ $# -eq 1 ]]; then forward_to_container $1
+ else
+ # running inside the tacker container, ready to go
+ $1
+ fi
+ pass
+ ;;
+ clean)
+ echo "$0: Uninstall Tacker and test environment"
+ bash /tmp/tacker/tacker-setup.sh tacker-cli clean
+ pass
+ ;;
+ *)
+ echo "usage: bash vHello_VES.sh [setup|start|run|clean]"
+ echo "setup: setup test environment"
+ echo "start: install blueprint and run test"
+ echo "run: setup test environment and run test"
+ echo "stop: stop test and uninstall blueprint"
+ echo "clean: cleanup after test"
+ fail
+esac
diff --git a/tests/vLamp_Ansible_VES.sh b/tests/vLamp_Ansible_VES.sh
new file mode 100644
index 0000000..1ae6fdc
--- /dev/null
+++ b/tests/vLamp_Ansible_VES.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Enhancements to the OpenStack Interop Challenge "Lampstack"
+# blueprint to add OPNFV VES event capture.
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ bash vLamp_Ansible_VES.sh
+
+echo "$0: Add ssh key"
+eval $(ssh-agent -s)
+ssh-add /tmp/ansible/ansible
+
+echo "$0: setup OpenStack environment"
+source /tmp/ansible/admin-openrc.sh
+
+BALANCER=$(openstack server show balancer | awk "/ addresses / { print \$6 }")
+sudo cp /tmp/ansible/ansible /tmp/ansible/lampstack
+sudo chown $USER /tmp/ansible/lampstack
+ssh -i /tmp/ansible/lampstack ubuntu@$BALANCER
+
+# scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/congress/env.sh $CTLUSER@$CONTROLLER_HOST1:/home/$CTLUSER/congress
+
+echo "$0: Enable haproxy logging"
+# Example /var/log/haproxy.log entries after logging enabled
+# Oct 6 20:03:34 balancer haproxy[2075]: 192.168.37.199:36193 [06/Oct/2016:20:03:34.349] webfarm webfarm/ws10.0.0.9 107/0/1/1/274 304 144 - - ---- 1/1/1/0/0 0/0 "GET /wp-content/themes/iribbon/elements/lib/images/boxes/slidericon.png HTTP/1.1"
+# Oct 6 20:03:34 balancer haproxy[2075]: 192.168.37.199:36194 [06/Oct/2016:20:03:34.365] webfarm webfarm/ws10.0.0.10 95/0/0/1/258 304 144 - - ---- 0/0/0/0/0 0/0 "GET /wp-content/themes/iribbon/elements/lib/images/boxes/blueprint.png HTTP/1.1"
+ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$BALANCER <<'EOF'
+sudo sed -i -- 's/#$ModLoad imudp/$ModLoad imudp/g' /etc/rsyslog.conf
+sudo sed -i -- 's/#$UDPServerRun 514/$UDPServerRun 514\n$UDPServerAddress 127.0.0.1/g' /etc/rsyslog.conf
+sudo service rsyslog restart
+EOF
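+# Note: this only enables rsyslog UDP reception; it assumes haproxy.cfg already
+# directs its log output to 127.0.0.1 (e.g. "log 127.0.0.1 local0") and that an
+# rsyslog rule writes those messages to /var/log/haproxy.log, as in the example
+# entries above.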