author     Aimee Ukasick <aimeeu.opensource@gmail.com>    2017-05-17 20:20:24 -0500
committer  Aimee Ukasick <aimeeu.opensource@gmail.com>    2017-05-17 20:20:24 -0500
commit     0a7e112bb543e73dadb1b78a2ad9d1850c9c9d2e (patch)
tree       ef469259632ee051d9c83fdd723521caf848cfaf /tests
parent     2cb6b9722e7d7e038b7d2846bd56186d94dcc4c1 (diff)
WIP

Script and blueprint for demo that uses ONAP VNFs.

JIRA: VES-11
Change-Id: I1f5d2c2bb87a1e24f5cc2eb788bfb3f1efed9e46
Signed-off-by: Aimee Ukasick <aimeeu.opensource@gmail.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/onap-demo/blueprints/tosca-vnfd-onap-demo/blueprint.yaml  |  292
-rw-r--r--  tests/onap-demo/blueprints/tosca-vnfd-onap-demo/evel_demo.c     | 1516
-rw-r--r--  tests/onap-demo/blueprints/tosca-vnfd-onap-demo/favicon.ico     |  bin 0 -> 1150 bytes
-rw-r--r--  tests/onap-demo/blueprints/tosca-vnfd-onap-demo/logo.png        |  bin 0 -> 4212 bytes
-rw-r--r--  tests/onap-demo/blueprints/tosca-vnfd-onap-demo/monitor.py      |  713
-rw-r--r--  tests/onap-demo/blueprints/tosca-vnfd-onap-demo/start.sh        |  224
-rw-r--r--  tests/onap-demo/ves_onap_demo.sh                                |  599
7 files changed, 3344 insertions, 0 deletions
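The blueprint below leaves <pubkey>, <vdu1_ip> and <vdu2_ip> as placeholders that the accompanying automation (ves_onap_demo.sh and start.sh, added in this change) is expected to fill in before and after deployment. As a rough sketch of the onboarding flow, assuming the OpenStack Tacker client is installed and an admin openrc has been sourced (the command forms below are illustrative, vary by Tacker release, and are not taken from this commit):

    # Substitute the SSH public key placeholder, then onboard and launch the VNFD
    sed -i -- "s~<pubkey>~$(cat ~/.ssh/id_rsa.pub)~" blueprint.yaml
    tacker vnfd-create --vnfd-file blueprint.yaml onap-demo-vnfd
    tacker vnf-create --vnfd-name onap-demo-vnfd onap-demo-vnf
    # <vdu1_ip>/<vdu2_ip> in the VDU3 setup script would be patched in once the
    # web VDUs have addresses (e.g. taken from 'tacker vnf-show').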
diff --git a/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/blueprint.yaml b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/blueprint.yaml
new file mode 100644
index 0000000..6d9abf9
--- /dev/null
+++ b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/blueprint.yaml
@@ -0,0 +1,292 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: Hello VES ONAP
+
+metadata:
+ template_name: tosca-vnfd-onap-demo
+
+topology_template:
+ node_templates:
+ VDU1:
+ type: tosca.nodes.nfv.VDU.Tacker
+ properties:
+ image: models-xenial-server
+ flavor: onap.demo
+ availability_zone: nova
+ config_drive: true
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ sudo mount /dev/sr0 /mnt/
+ mkdir /tmp/www
+ cd /tmp/www
+ mkdir html
+ cat >Dockerfile <<EOM
+ FROM nginx
+ COPY html /usr/share/nginx/html
+ EOM
+ host=$(hostname)
+ id=$(cut -d ',' -f 3 /mnt/openstack/latest/meta_data.json)
+ cat <<EOM >html/index.html
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <title>Hello World!</title>
+ <meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1"/>
+ <style>
+ body { width: 100%; background-color: white; color: black; padding: 0px; margin: 0px; font-family: sans-serif; font-size:100%; }
+ </style>
+ </head>
+ <body>
+ <large>Welcome to OPNFV @ $host!</large><br/>
+ <a href="http://wiki.opnfv.org"><img src="https://www.opnfv.org/sites/all/themes/opnfv/logo.png"></a>
+ <div>
+ <p>Instance ID from config drive file /mnt/openstack/latest/meta_data.json</p>
+ <pre>
+ $id
+ </pre>
+ <p>Server setup completed at $(date)</p>
+ </div>
+ </body></html>
+ EOM
+ wget -O /tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
+ sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
+ curl -fsSL https://apt.dockerproject.org/gpg | sudo apt-key add -
+ sudo apt-key update
+ echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" >~/dockerrepo
+ sudo tee -a /etc/apt/sources.list.d/docker.list <~/dockerrepo
+ sudo add-apt-repository "deb https://apt.dockerproject.org/repo/ ubuntu-xenial main"
+ sudo apt-get update
+ sudo apt-get install -y docker-engine
+ sudo docker pull nginx
+ sudo docker build -t vhello .
+ sudo docker run --name vHello -d -p 80:80 vhello
+ config: |
+ param0: key1
+ param1: key2
+
+ CP11:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: true
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU1
+
+ CP12:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL2
+ - virtualBinding:
+ node: VDU1
+
+ VDU2:
+ type: tosca.nodes.nfv.VDU.Tacker
+ properties:
+ image: models-xenial-server
+ flavor: onap.demo
+ availability_zone: nova
+ config_drive: true
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ sudo mount /dev/sr0 /mnt/
+ mkdir /tmp/www
+ cd /tmp/www
+ mkdir html
+ cat >Dockerfile <<EOM
+ FROM nginx
+ COPY html /usr/share/nginx/html
+ EOM
+ host=$(hostname)
+ id=$(cut -d ',' -f 3 /mnt/openstack/latest/meta_data.json)
+ cat <<EOM >html/index.html
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <title>Hello World!</title>
+ <meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1"/>
+ <style>
+ body { width: 100%; background-color: white; color: black; padding: 0px; margin: 0px; font-family: sans-serif; font-size:100%; }
+ </style>
+ </head>
+ <body>
+ <large>Welcome to OPNFV @ $host!</large><br/>
+ <a href="http://wiki.opnfv.org"><img src="https://www.opnfv.org/sites/all/themes/opnfv/logo.png"></a>
+ <div>
+ <p>Instance ID from config drive file /mnt/openstack/latest/meta_data.json</p>
+ <pre>
+ $id
+ </pre>
+ <p>Server setup completed at $(date)</p>
+ </div>
+ </body></html>
+ EOM
+ wget -O /tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
+ sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
+ curl -fsSL https://apt.dockerproject.org/gpg | sudo apt-key add -
+ sudo apt-key update
+ echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" >~/dockerrepo
+ sudo tee -a /etc/apt/sources.list.d/docker.list <~/dockerrepo
+ sudo add-apt-repository "deb https://apt.dockerproject.org/repo/ ubuntu-xenial main"
+ sudo apt-get update
+ sudo apt-get install -y docker-engine
+ sudo docker pull nginx
+ sudo docker build -t vhello .
+ sudo docker run --name vHello -d -p 80:80 vhello
+ config: |
+ param0: key1
+ param1: key2
+
+ CP21:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: true
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU2
+
+ CP22:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL2
+ - virtualBinding:
+ node: VDU2
+
+ VDU3:
+ type: tosca.nodes.nfv.VDU.Tacker
+ properties:
+ image: models-xenial-server
+ flavor: onap.demo
+ availability_zone: nova
+ config_drive: true
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ sudo mount /dev/sr0 /mnt/
+ cat << EOF >/tmp/setup.sh
+ echo "1" | sudo tee /proc/sys/net/ipv4/ip_forward
+ sudo sysctl net.ipv4.ip_forward=1
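+ # Spread new inbound port-80 connections across the two web-server VDUs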
+ sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state \\
+ --state NEW -m statistic --mode nth --every 2 --packet 0 \\
+ -j DNAT --to-destination <vdu1_ip>:80
+ sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state \\
+ --state NEW \\
+ -j DNAT --to-destination <vdu2_ip>:80
+ sudo iptables -t nat -A POSTROUTING -j MASQUERADE
+ EOF
+ bash /tmp/setup.sh
+ config: |
+ param0: key1
+ param1: key2
+
+ CP31:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: true
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU3
+
+ CP32:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL2
+ - virtualBinding:
+ node: VDU3
+
+ VDU4:
+ type: tosca.nodes.nfv.VDU.Tacker
+ properties:
+ image: models-xenial-server
+ flavor: onap.demo
+ availability_zone: nova
+ user_data_format: RAW
+ user_data: |
+ #!/bin/bash
+ set -x
+ mkdir /home/ubuntu
+ chown -R ubuntu /home/ubuntu
+ mkdir /home/ubuntu/.ssh
+ cat << EOM >/home/ubuntu/.ssh/authorized_keys
+ <pubkey>
+ EOM
+ config: |
+ param0: key1
+ param1: key2
+
+ CP41:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: true
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU4
+
+ CP42:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL2
+ - virtualBinding:
+ node: VDU4
+
+ VL1:
+ type: tosca.nodes.nfv.VL
+ properties:
+ network_name: vnf_mgmt
+ vendor: Tacker
+
+ VL2:
+ type: tosca.nodes.nfv.VL
+ properties:
+ network_name: vnf_private
+ vendor: Tacker
diff --git a/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/evel_demo.c b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/evel_demo.c
new file mode 100644
index 0000000..fc244d8
--- /dev/null
+++ b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/evel_demo.c
@@ -0,0 +1,1516 @@
+/**************************************************************************//**
+ * @file
+ * Agent for the OPNFV VNF Event Stream (VES) vHello_VES test
+ *
+ * Copyright 2016-2017 AT&T Intellectual Property, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <sys/signal.h>
+#include <pthread.h>
+#include <mcheck.h>
+#include <sys/time.h>
+
+#include "evel.h"
+#include "evel_demo.h"
+#include "evel_test_control.h"
+
+/**************************************************************************//**
+ * Definition of long options to the program.
+ *
+ * See the documentation for getopt_long() for details of the structure's use.
+ *****************************************************************************/
+static const struct option long_options[] = {
+ {"help", no_argument, 0, 'h'},
+ {"id", required_argument, 0, 'i'},
+ {"fqdn", required_argument, 0, 'f'},
+ {"port", required_argument, 0, 'n'},
+ {"path", required_argument, 0, 'p'},
+ {"topic", required_argument, 0, 't'},
+ {"https", no_argument, 0, 's'},
+ {"verbose", no_argument, 0, 'v'},
+ {"cycles", required_argument, 0, 'c'},
+ {"username", required_argument, 0, 'u'},
+ {"password", required_argument, 0, 'w'},
+ {"nothrott", no_argument, 0, 'x'},
+ {0, 0, 0, 0}
+ };
+
+/**************************************************************************//**
+ * Definition of short options to the program.
+ *****************************************************************************/
+static const char* short_options = "hi:f:n:p:t:sc:u:w:vx";
+
+/**************************************************************************//**
+ * Basic user help text describing the usage of the application.
+ *****************************************************************************/
+static const char* usage_text =
+"evel_demo [--help]\n"
+" --id <Agent host ID>\n"
+" --fqdn <domain>\n"
+" --port <port_number>\n"
+" [--path <path>]\n"
+" [--topic <topic>]\n"
+" [--username <username>]\n"
+" [--password <password>]\n"
+" [--https]\n"
+" [--cycles <cycles>]\n"
+" [--nothrott]\n"
+"\n"
+"Demonstrate use of the ECOMP Vendor Event Listener API.\n"
+"\n"
+" -h Display this usage message.\n"
+" --help\n"
+"\n"
+" -i The ID of the agent host.\n"
+" --id\n"
+"\n"
+" -f The FQDN or IP address to the RESTful API.\n"
+" --fqdn\n"
+"\n"
+" -n The port number the RESTful API.\n"
+" --port\n"
+"\n"
+" -p The optional path prefix to the RESTful API.\n"
+" --path\n"
+"\n"
+" -t The optional topic part of the RESTful API.\n"
+" --topic\n"
+"\n"
+" -u The optional username for basic authentication of requests.\n"
+" --username\n"
+"\n"
+" -w The optional password for basic authentication of requests.\n"
+" --password\n"
+"\n"
+" -s Use HTTPS rather than HTTP for the transport.\n"
+" --https\n"
+"\n"
+" -c Loop <cycles> times round the main loop. Default = 1.\n"
+" --cycles\n"
+"\n"
+" -v Generate much chattier logs.\n"
+" --verbose\n"
+"\n"
+" -x Exclude throttling commands from demonstration.\n"
+" --nothrott\n";
+
+#define DEFAULT_SLEEP_SECONDS 3
+#define MINIMUM_SLEEP_SECONDS 1
+
+unsigned long long epoch_start = 0;
+
+typedef enum {
+ SERVICE_CODEC,
+ SERVICE_TRANSCODING,
+ SERVICE_RTCP,
+ SERVICE_EOC_VQM,
+ SERVICE_MARKER
+} SERVICE_EVENT;
+
+/*****************************************************************************/
+/* Local prototypes. */
+/*****************************************************************************/
+static void demo_heartbeat(void);
+static void demo_fault(void);
+static void demo_measurement(const int interval);
+static void demo_mobile_flow(void);
+static void demo_service(void);
+static void demo_service_event(const SERVICE_EVENT service_event);
+static void demo_signaling(void);
+static void demo_state_change(void);
+static void demo_syslog(void);
+static void demo_other(void);
+
+/**************************************************************************//**
+ * Global flags related to the application.
+ *****************************************************************************/
+
+char *app_prevstate = "Stopped";
+
+/**************************************************************************//**
+ * Report app state change fault.
+ *
+ * Reports the change in app state.
+ *
+ * param[in] char *change The type of change ("Started", "Stopped")
+ *****************************************************************************/
+void report_app_statechange(char *change)
+{
+ printf("report_app_statechange(%s)\n", change);
+ EVENT_FAULT * fault = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ fault = evel_new_fault("App state change",
+ change,
+ EVEL_PRIORITY_HIGH,
+ EVEL_SEVERITY_MAJOR);
+
+ if (fault != NULL) {
+ evel_fault_type_set(fault, "App state change");
+ evel_fault_addl_info_add(fault, "change", change);
+ evel_rc = evel_post_event((EVENT_HEADER *)fault);
+ if (evel_rc != EVEL_SUCCESS) {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else {
+ EVEL_ERROR("Unable to send new fault report");
+ }
+}
+
+/**************************************************************************//**
+ * Check status of the app container.
+ *
+ * Checks and reports any change in app state.
+ *
+ * param[in] none
+ *****************************************************************************/
+void check_app_container_state() {
+ printf("Checking status of app container\n");
+ FILE *fp;
+ int status;
+ char state[100];
+
+ fp = popen("sudo docker inspect vHello | grep Status | sed -- 's/,//g' | sed -- 's/\"//g' | sed -- 's/ Status: //g'", "r");
+ if (fp == NULL) {
+ EVEL_ERROR("popen failed to execute command");
+ return;
+ }
+
+ fgets(state, 100, fp);
+ if (strstr(state, "running") != NULL) {
+ if (strcmp(app_prevstate,"Stopped") == 0) {
+ printf("App state change detected: Started\n");
+ report_app_statechange("Started");
+ app_prevstate = "Running";
+ }
+ }
+ else {
+ if (strcmp(app_prevstate, "Running") == 0) {
+ printf("App state change detected: Stopped\n");
+ report_app_statechange("Stopped");
+ app_prevstate = "Stopped";
+ }
+ }
+ status = pclose(fp);
+ if (status == -1) {
+ EVEL_ERROR("pclose returned an error");
+ }
+}
+
+/**************************************************************************//**
+ * Measure app traffic
+ *
+ * Reports transactions per second in the last second.
+ *
+ * param[in] none
+ *****************************************************************************/
+
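+/**************************************************************************//**
+ * Sample /proc/stat twice, one second apart, and return the fraction of
+ * non-idle CPU time (user+nice+system over total) across that interval.
+ *****************************************************************************/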
+double cpu() {
+ double a[4], b[4], loadavg;
+ FILE *fp;
+ int status;
+
+ fp = fopen("/proc/stat","r");
+ fscanf(fp,"%*s %lf %lf %lf %lf",&a[0],&a[1],&a[2],&a[3]);
+ fclose(fp);
+ sleep(1);
+
+ fp = fopen("/proc/stat","r");
+ fscanf(fp,"%*s %lf %lf %lf %lf",&b[0],&b[1],&b[2],&b[3]);
+ fclose(fp);
+
+ loadavg = ((b[0]+b[1]+b[2]) - (a[0]+a[1]+a[2])) / ((b[0]+b[1]+b[2]+b[3]) - (a[0]+a[1]+a[2]+a[3]));
+
+ return(loadavg);
+}
+
+void measure_traffic() {
+
+ printf("Checking app traffic\n");
+ EVENT_FAULT * fault = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+ EVENT_MEASUREMENT * measurement = NULL;
+ FILE *fp;
+ int status;
+ char count[10];
+ time_t rawtime;
+ struct tm * timeinfo;
+ char period [21];
+ char cmd [100];
+ int concurrent_sessions = 0;
+ int configured_entities = 0;
+ double mean_request_latency = 0;
+ double measurement_interval = 1;
+ double memory_configured = 0;
+ double memory_used = 0;
+ int request_rate;
+ char secs [3];
+ int sec;
+ double loadavg;
+
+ time (&rawtime);
+ timeinfo = localtime (&rawtime);
+ strftime(period,21,"%d/%b/%Y:%H:%M:",timeinfo);
+ strftime(secs,3,"%S",timeinfo);
+ sec = atoi(secs);
+ if (sec == 0) sec = 59;
+ sprintf(secs, "%02d", sec);
+ strncat(period, secs, 21);
+ // ....x....1....x....2.
+ // 15/Oct/2016:17:51:19
+ strcpy(cmd, "sudo docker logs vHello | grep -c ");
+ strncat(cmd, period, 100);
+
+ fp = popen(cmd, "r");
+ if (fp == NULL) {
+ EVEL_ERROR("popen failed to execute command");
+ return;
+ }
+
+ if (fgets(count, 10, fp) != NULL) {
+ request_rate = atoi(count);
+ printf("Reporting request rate for second: %s as %d\n", period, request_rate);
+ measurement = evel_new_measurement(measurement_interval);
+
+ if (measurement != NULL) {
+ cpu();
+ evel_measurement_type_set(measurement, "HTTP request rate");
+ evel_measurement_request_rate_set(measurement, request_rate);
+// evel_measurement_agg_cpu_use_set(measurement, loadavg);
+// evel_measurement_cpu_use_add(measurement, "cpu0", loadavg);
+
+ evel_rc = evel_post_event((EVENT_HEADER *)measurement);
+ if (evel_rc != EVEL_SUCCESS) {
+ EVEL_ERROR("Post Measurement failed %d (%s)",
+ evel_rc,
+ evel_error_string());
+ }
+ }
+ else {
+ EVEL_ERROR("New Measurement failed");
+ }
+ printf("Processed measurement\n");
+ }
+ status = pclose(fp);
+ if (status == -1) {
+ EVEL_ERROR("pclose returned an error");
+ }
+}
+
+/**************************************************************************//**
+ * Global flag to initiate shutdown.
+ *****************************************************************************/
+static int glob_exit_now = 0;
+
+static char * api_fqdn = NULL;
+static int api_port = 0;
+static int api_secure = 0;
+
+static void show_usage(FILE* fp)
+{
+ fputs(usage_text, fp);
+}
+
+/**************************************************************************//**
+ * Main function.
+ *
+ * Parses the command-line then ...
+ *
+ * @param[in] argc Argument count.
+ * @param[in] argv Argument vector - for usage see usage_text.
+ *****************************************************************************/
+int main(int argc, char ** argv)
+{
+ sigset_t sig_set;
+ pthread_t thread_id;
+ int option_index = 0;
+ int param = 0;
+ char * api_vmid = NULL;
+ char * api_path = NULL;
+ char * api_topic = NULL;
+ char * api_username = "";
+ char * api_password = "";
+ int verbose_mode = 0;
+ int exclude_throttling = 0;
+ int cycles = 2147483647;
+ int cycle;
+ int measurement_interval = EVEL_MEASUREMENT_INTERVAL_UKNOWN;
+ EVENT_HEADER * heartbeat = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* We're very interested in memory management problems so check behavior. */
+ /***************************************************************************/
+ mcheck(NULL);
+
+ if (argc < 2)
+ {
+ show_usage(stderr);
+ exit(-1);
+ }
+ param = getopt_long(argc, argv,
+ short_options,
+ long_options,
+ &option_index);
+ while (param != -1)
+ {
+ switch (param)
+ {
+ case 'h':
+ show_usage(stdout);
+ exit(0);
+ break;
+
+ case 'i':
+ api_vmid = optarg;
+ break;
+
+ case 'f':
+ api_fqdn = optarg;
+ break;
+
+ case 'n':
+ api_port = atoi(optarg);
+ break;
+
+ case 'p':
+ api_path = optarg;
+ break;
+
+ case 't':
+ api_topic = optarg;
+ break;
+
+ case 'u':
+ api_username = optarg;
+ break;
+
+ case 'w':
+ api_password = optarg;
+ break;
+
+ case 's':
+ api_secure = 1;
+ break;
+
+ case 'c':
+ cycles = atoi(optarg);
+ break;
+
+ case 'v':
+ verbose_mode = 1;
+ break;
+
+ case 'x':
+ exclude_throttling = 1;
+ break;
+
+ case '?':
+ /*********************************************************************/
+ /* Unrecognized parameter - getopt_long already printed an error */
+ /* message. */
+ /*********************************************************************/
+ break;
+
+ default:
+ fprintf(stderr, "Code error: recognized but missing option (%d)!\n",
+ param);
+ exit(-1);
+ }
+
+ /*************************************************************************/
+ /* Extract next parameter. */
+ /*************************************************************************/
+ param = getopt_long(argc, argv,
+ short_options,
+ long_options,
+ &option_index);
+ }
+
+ /***************************************************************************/
+ /* All the command-line has parsed cleanly, so now check that the options */
+ /* are meaningful. */
+ /***************************************************************************/
+ if (api_fqdn == NULL)
+ {
+ fprintf(stderr, "FQDN of the Vendor Event Listener API server must be "
+ "specified.\n");
+ exit(1);
+ }
+ if (api_port <= 0 || api_port > 65535)
+ {
+ fprintf(stderr, "Port for the Vendor Event Listener API server must be "
+ "specified between 1 and 65535.\n");
+ exit(1);
+ }
+ if (cycles <= 0)
+ {
+ fprintf(stderr, "Number of cycles around the main loop must be an"
+ "integer greater than zero.\n");
+ exit(1);
+ }
+
+ /***************************************************************************/
+ /* Set up default signal behaviour. Block all signals we trap explicitly */
+ /* on the signal_watcher thread. */
+ /***************************************************************************/
+ sigemptyset(&sig_set);
+ sigaddset(&sig_set, SIGALRM);
+ sigaddset(&sig_set, SIGINT);
+ pthread_sigmask(SIG_BLOCK, &sig_set, NULL);
+
+ /***************************************************************************/
+ /* Start the signal watcher thread. */
+ /***************************************************************************/
+ if (pthread_create(&thread_id, NULL, signal_watcher, &sig_set) != 0)
+ {
+ fprintf(stderr, "Failed to start signal watcher thread.");
+ exit(1);
+ }
+ pthread_detach(thread_id);
+
+ /***************************************************************************/
+ /* Version info */
+ /***************************************************************************/
+ printf("%s built %s %s\n", argv[0], __DATE__, __TIME__);
+
+ /***************************************************************************/
+ /* Initialize the EVEL interface. */
+ /***************************************************************************/
+ if (evel_initialize(api_fqdn,
+ api_port,
+ api_path,
+ api_topic,
+ api_secure,
+ api_username,
+ api_password,
+ EVEL_SOURCE_VIRTUAL_MACHINE,
+ "EVEL demo client",
+ verbose_mode))
+ {
+ fprintf(stderr, "Failed to initialize the EVEL library!!!");
+ exit(-1);
+ }
+ else
+ {
+ EVEL_INFO("Initialization completed");
+ }
+
+ /***************************************************************************/
+ /* Work out a start time for measurements, and sleep for initial period. */
+ /***************************************************************************/
+ struct timeval tv_start;
+ gettimeofday(&tv_start, NULL);
+ epoch_start = tv_start.tv_usec + 1000000 * tv_start.tv_sec;
+ sleep(DEFAULT_SLEEP_SECONDS);
+
+ /***************************************************************************/
+ /* MAIN LOOP */
+ /***************************************************************************/
+ printf("Starting %d loops...\n", cycles);
+ cycle = 0;
+ while (cycle++ < cycles)
+ {
+ EVEL_INFO("MAI: Starting main loop");
+ printf("\nStarting main loop %d\n", cycle);
+
+ /*************************************************************************/
+ /* A repeating cycle of behaviour, keyed off the loop counter modulo 20. */
+ /*************************************************************************/
+ if (exclude_throttling == 0)
+ {
+ switch (cycle % 20)
+ {
+ case 1:
+ printf(" 1 - Resetting throttle specification for all domains\n");
+ evel_test_control_scenario(TC_RESET_ALL_DOMAINS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 2:
+ printf(" 2 - Switching measurement interval to 2s\n");
+ evel_test_control_meas_interval(2,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 3:
+ printf(" 3 - Suppressing fault domain\n");
+ evel_test_control_scenario(TC_FAULT_SUPPRESS_FIELDS_AND_PAIRS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 4:
+ printf(" 4 - Suppressing measurement domain\n");
+ evel_test_control_scenario(TC_MEAS_SUPPRESS_FIELDS_AND_PAIRS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 5:
+ printf(" 5 - Switching measurement interval to 5s\n");
+ evel_test_control_meas_interval(5,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 6:
+ printf(" 6 - Suppressing mobile flow domain\n");
+ evel_test_control_scenario(TC_MOBILE_SUPPRESS_FIELDS_AND_PAIRS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 7:
+ printf(" 7 - Suppressing state change domain\n");
+ evel_test_control_scenario(TC_STATE_SUPPRESS_FIELDS_AND_PAIRS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 8:
+ printf(" 8 - Suppressing signaling domain\n");
+ evel_test_control_scenario(TC_SIGNALING_SUPPRESS_FIELDS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 9:
+ printf(" 9 - Suppressing service event domain\n");
+ evel_test_control_scenario(TC_SERVICE_SUPPRESS_FIELDS_AND_PAIRS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 10:
+ printf(" 10 - Switching measurement interval to 20s\n");
+ evel_test_control_meas_interval(20,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 11:
+ printf(" 11 - Suppressing syslog domain\n");
+ evel_test_control_scenario(TC_SYSLOG_SUPPRESS_FIELDS_AND_PAIRS,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 12:
+ printf(" 12 - Switching measurement interval to 10s\n");
+ evel_test_control_meas_interval(10,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+
+ case 15:
+ printf(" Requesting provide throttling spec\n");
+ evel_test_control_scenario(TC_PROVIDE_THROTTLING_SPEC,
+ api_secure,
+ api_fqdn,
+ api_port);
+ break;
+ }
+ }
+ fflush(stdout);
+
+ /*************************************************************************/
+ /* Send a bunch of events. */
+ /*************************************************************************/
+
+ printf("Sending heartbeat\n");
+ heartbeat = evel_new_heartbeat();
+ if (heartbeat != NULL)
+ {
+ evel_rc = evel_post_event(heartbeat);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New heartbeat failed");
+ }
+
+ check_app_container_state();
+ measure_traffic();
+
+// demo_heartbeat();
+// demo_fault();
+// demo_measurement((measurement_interval ==
+// EVEL_MEASUREMENT_INTERVAL_UKNOWN) ?
+// DEFAULT_SLEEP_SECONDS : measurement_interval);
+// demo_mobile_flow();
+// demo_service();
+// demo_signaling();
+// demo_state_change();
+// demo_syslog();
+// demo_other();
+
+ /*************************************************************************/
+ /* MAIN RETRY LOOP. Check and implement the measurement interval. */
+ /*************************************************************************/
+ if (cycle <= cycles)
+ {
+ int sleep_time;
+
+ /***********************************************************************/
+ /* We have a minimum loop time. */
+ /***********************************************************************/
+ sleep(MINIMUM_SLEEP_SECONDS);
+
+ /***********************************************************************/
+ /* Get the latest measurement interval and sleep for the remainder. */
+ /***********************************************************************/
+ measurement_interval = evel_get_measurement_interval();
+ printf("Measurement Interval = %d\n", measurement_interval);
+
+ if (measurement_interval == EVEL_MEASUREMENT_INTERVAL_UKNOWN)
+ {
+ sleep_time = DEFAULT_SLEEP_SECONDS - MINIMUM_SLEEP_SECONDS;
+ }
+ else
+ {
+ sleep_time = measurement_interval - MINIMUM_SLEEP_SECONDS;
+ }
+ sleep(sleep_time);
+ }
+ }
+
+ /***************************************************************************/
+ /* We are exiting, but allow the final set of events to be dispatched */
+ /* properly first. */
+ /***************************************************************************/
+ sleep(2);
+ printf("All done - exiting!\n");
+ return 0;
+}
+
+/**************************************************************************//**
+ * Signal watcher.
+ *
+ * Signal catcher for incoming signal processing. Work out which signal has
+ * been received and process it accordingly.
+ *
+ * param[in] void_sig_set The signal mask to listen for.
+ *****************************************************************************/
+void *signal_watcher(void *void_sig_set)
+{
+ sigset_t *sig_set = (sigset_t *)void_sig_set;
+ int sig = 0;
+ int old_type = 0;
+ siginfo_t sig_info;
+
+ /***************************************************************************/
+ /* Set this thread to be cancellable immediately. */
+ /***************************************************************************/
+ pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
+
+ while (!glob_exit_now)
+ {
+ /*************************************************************************/
+ /* Wait for a signal to be received. */
+ /*************************************************************************/
+ sig = sigwaitinfo(sig_set, &sig_info);
+ switch (sig)
+ {
+ case SIGALRM:
+ /*********************************************************************/
+ /* Failed to do something in the given amount of time. Exit. */
+ /*********************************************************************/
+ EVEL_ERROR( "Timeout alarm");
+ fprintf(stderr,"Timeout alarm - quitting!\n");
+ exit(2);
+ break;
+
+ case SIGINT:
+ EVEL_INFO( "Interrupted - quitting");
+ printf("\n\nInterrupted - quitting!\n");
+ glob_exit_now = 1;
+ break;
+ }
+ }
+
+ evel_terminate();
+ exit(0);
+ return(NULL);
+}
+
+/**************************************************************************//**
+ * Create and send a heartbeat event.
+ *****************************************************************************/
+void demo_heartbeat(void)
+{
+ EVENT_HEADER * heartbeat = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* Heartbeat */
+ /***************************************************************************/
+ heartbeat = evel_new_heartbeat();
+ if (heartbeat != NULL)
+ {
+ evel_rc = evel_post_event(heartbeat);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Heartbeat failed");
+ }
+ printf(" Processed Heartbeat\n");
+}
+
+/**************************************************************************//**
+ * Create and send three fault events.
+ *****************************************************************************/
+void demo_fault(void)
+{
+ EVENT_FAULT * fault = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* Fault */
+ /***************************************************************************/
+ fault = evel_new_fault("An alarm condition",
+ "Things are broken",
+ EVEL_PRIORITY_NORMAL,
+ EVEL_SEVERITY_MAJOR);
+ if (fault != NULL)
+ {
+ evel_rc = evel_post_event((EVENT_HEADER *)fault);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Fault failed");
+ }
+ printf(" Processed empty Fault\n");
+
+ fault = evel_new_fault("Another alarm condition",
+ "It broke badly",
+ EVEL_PRIORITY_NORMAL,
+ EVEL_SEVERITY_MAJOR);
+ if (fault != NULL)
+ {
+ evel_fault_type_set(fault, "Bad things happening");
+ evel_fault_interface_set(fault, "An Interface Card");
+ evel_rc = evel_post_event((EVENT_HEADER *)fault);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Fault failed");
+ }
+ printf(" Processed partial Fault\n");
+
+ fault = evel_new_fault("My alarm condition",
+ "It broke very badly",
+ EVEL_PRIORITY_NORMAL,
+ EVEL_SEVERITY_MAJOR);
+ if (fault != NULL)
+ {
+ evel_fault_type_set(fault, "Bad things happen...");
+ evel_fault_interface_set(fault, "My Interface Card");
+ evel_fault_addl_info_add(fault, "name1", "value1");
+ evel_fault_addl_info_add(fault, "name2", "value2");
+ evel_rc = evel_post_event((EVENT_HEADER *)fault);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Fault failed");
+ }
+ printf(" Processed full Fault\n");
+}
+
+/**************************************************************************//**
+ * Create and send a measurement event.
+ *****************************************************************************/
+void demo_measurement(const int interval)
+{
+ EVENT_MEASUREMENT * measurement = NULL;
+ MEASUREMENT_LATENCY_BUCKET * bucket = NULL;
+ MEASUREMENT_VNIC_USE * vnic_use = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* Measurement */
+ /***************************************************************************/
+ measurement = evel_new_measurement(interval);
+ if (measurement != NULL)
+ {
+ evel_measurement_type_set(measurement, "Perf management...");
+ evel_measurement_conc_sess_set(measurement, 1);
+ evel_measurement_cfg_ents_set(measurement, 2);
+ evel_measurement_mean_req_lat_set(measurement, 4.4);
+ evel_measurement_mem_cfg_set(measurement, 6.6);
+ evel_measurement_mem_used_set(measurement, 3.3);
+ evel_measurement_request_rate_set(measurement, 6);
+ evel_measurement_agg_cpu_use_set(measurement, 8.8);
+ evel_measurement_cpu_use_add(measurement, "cpu1", 11.11);
+ evel_measurement_cpu_use_add(measurement, "cpu2", 22.22);
+ evel_measurement_fsys_use_add(measurement,"00-11-22",100.11, 100.22, 33,
+ 200.11, 200.22, 44);
+ evel_measurement_fsys_use_add(measurement,"33-44-55",300.11, 300.22, 55,
+ 400.11, 400.22, 66);
+
+ bucket = evel_new_meas_latency_bucket(20);
+ evel_meas_latency_bucket_low_end_set(bucket, 0.0);
+ evel_meas_latency_bucket_high_end_set(bucket, 10.0);
+ evel_meas_latency_bucket_add(measurement, bucket);
+
+ bucket = evel_new_meas_latency_bucket(30);
+ evel_meas_latency_bucket_low_end_set(bucket, 10.0);
+ evel_meas_latency_bucket_high_end_set(bucket, 20.0);
+ evel_meas_latency_bucket_add(measurement, bucket);
+
+ vnic_use = evel_new_measurement_vnic_use("eth0", 100, 200, 3, 4);
+ evel_vnic_use_bcast_pkt_in_set(vnic_use, 1);
+ evel_vnic_use_bcast_pkt_out_set(vnic_use, 2);
+ evel_vnic_use_mcast_pkt_in_set(vnic_use, 5);
+ evel_vnic_use_mcast_pkt_out_set(vnic_use, 6);
+ evel_vnic_use_ucast_pkt_in_set(vnic_use, 7);
+ evel_vnic_use_ucast_pkt_out_set(vnic_use, 8);
+ evel_meas_vnic_use_add(measurement, vnic_use);
+
+ vnic_use = evel_new_measurement_vnic_use("eth1", 110, 240, 13, 14);
+ evel_vnic_use_bcast_pkt_in_set(vnic_use, 11);
+ evel_vnic_use_bcast_pkt_out_set(vnic_use, 12);
+ evel_vnic_use_mcast_pkt_in_set(vnic_use, 15);
+ evel_vnic_use_mcast_pkt_out_set(vnic_use, 16);
+ evel_vnic_use_ucast_pkt_in_set(vnic_use, 17);
+ evel_vnic_use_ucast_pkt_out_set(vnic_use, 18);
+ evel_meas_vnic_use_add(measurement, vnic_use);
+
+ evel_measurement_errors_set(measurement, 1, 0, 2, 1);
+
+ evel_measurement_feature_use_add(measurement, "FeatureA", 123);
+ evel_measurement_feature_use_add(measurement, "FeatureB", 567);
+
+ evel_measurement_codec_use_add(measurement, "G711a", 91);
+ evel_measurement_codec_use_add(measurement, "G729ab", 92);
+
+ evel_measurement_media_port_use_set(measurement, 1234);
+
+ evel_measurement_vnfc_scaling_metric_set(measurement, 1234.5678);
+
+ evel_measurement_custom_measurement_add(measurement,
+ "Group1", "Name1", "Value1");
+ evel_measurement_custom_measurement_add(measurement,
+ "Group2", "Name1", "Value1");
+ evel_measurement_custom_measurement_add(measurement,
+ "Group2", "Name2", "Value2");
+
+ /*************************************************************************/
+ /* Work out the time, to use as end of measurement period. */
+ /*************************************************************************/
+ struct timeval tv_now;
+ gettimeofday(&tv_now, NULL);
+ unsigned long long epoch_now = tv_now.tv_usec + 1000000 * tv_now.tv_sec;
+ evel_start_epoch_set(&measurement->header, epoch_start);
+ evel_last_epoch_set(&measurement->header, epoch_now);
+ epoch_start = epoch_now;
+ evel_reporting_entity_name_set(&measurement->header, "measurer");
+ evel_reporting_entity_id_set(&measurement->header, "measurer_id");
+
+ evel_rc = evel_post_event((EVENT_HEADER *)measurement);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post Measurement failed %d (%s)",
+ evel_rc,
+ evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Measurement failed");
+ }
+ printf(" Processed Measurement\n");
+}
+
+/**************************************************************************//**
+ * Create and send three mobile flow events.
+ *****************************************************************************/
+void demo_mobile_flow(void)
+{
+ MOBILE_GTP_PER_FLOW_METRICS * metrics = NULL;
+ EVENT_MOBILE_FLOW * mobile_flow = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* Mobile Flow */
+ /***************************************************************************/
+ metrics = evel_new_mobile_gtp_flow_metrics(12.3,
+ 3.12,
+ 100,
+ 2100,
+ 500,
+ 1470409421,
+ 987,
+ 1470409431,
+ 11,
+ (time_t)1470409431,
+ "Working",
+ 87,
+ 3,
+ 17,
+ 123654,
+ 4561,
+ 0,
+ 12,
+ 10,
+ 1,
+ 3,
+ 7,
+ 899,
+ 901,
+ 302,
+ 6,
+ 2,
+ 0,
+ 110,
+ 225);
+ if (metrics != NULL)
+ {
+ mobile_flow = evel_new_mobile_flow("Outbound",
+ metrics,
+ "TCP",
+ "IPv4",
+ "2.3.4.1",
+ 2341,
+ "4.2.3.1",
+ 4321);
+ if (mobile_flow != NULL)
+ {
+ evel_rc = evel_post_event((EVENT_HEADER *)mobile_flow);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post Mobile Flow failed %d (%s)",
+ evel_rc,
+ evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Mobile Flow failed");
+ }
+ printf(" Processed empty Mobile Flow\n");
+ }
+ else
+ {
+ EVEL_ERROR("New GTP Per Flow Metrics failed - skipping Mobile Flow");
+ printf(" Skipped empty Mobile Flow\n");
+ }
+
+ metrics = evel_new_mobile_gtp_flow_metrics(132.0001,
+ 31.2,
+ 101,
+ 2101,
+ 501,
+ 1470409422,
+ 988,
+ 1470409432,
+ 12,
+ (time_t)1470409432,
+ "Inactive",
+ 88,
+ 4,
+ 18,
+ 123655,
+ 4562,
+ 1,
+ 13,
+ 11,
+ 2,
+ 4,
+ 8,
+ 900,
+ 902,
+ 303,
+ 7,
+ 3,
+ 1,
+ 111,
+ 226);
+ if (metrics != NULL)
+ {
+ mobile_flow = evel_new_mobile_flow("Inbound",
+ metrics,
+ "UDP",
+ "IPv6",
+ "2.3.4.2",
+ 2342,
+ "4.2.3.2",
+ 4322);
+ if (mobile_flow != NULL)
+ {
+ evel_mobile_flow_app_type_set(mobile_flow, "Demo application");
+ evel_mobile_flow_app_prot_type_set(mobile_flow, "GSM");
+ evel_mobile_flow_app_prot_ver_set(mobile_flow, "1");
+ evel_mobile_flow_cid_set(mobile_flow, "65535");
+ evel_mobile_flow_con_type_set(mobile_flow, "S1-U");
+ evel_mobile_flow_ecgi_set(mobile_flow, "e65535");
+ evel_mobile_flow_gtp_prot_type_set(mobile_flow, "GTP-U");
+ evel_mobile_flow_gtp_prot_ver_set(mobile_flow, "1");
+ evel_mobile_flow_http_header_set(mobile_flow,
+ "http://www.something.com");
+ evel_mobile_flow_imei_set(mobile_flow, "209917614823");
+ evel_mobile_flow_imsi_set(mobile_flow, "355251/05/850925/8");
+ evel_mobile_flow_lac_set(mobile_flow, "1");
+ evel_mobile_flow_mcc_set(mobile_flow, "410");
+ evel_mobile_flow_mnc_set(mobile_flow, "04");
+ evel_mobile_flow_msisdn_set(mobile_flow, "6017123456789");
+ evel_mobile_flow_other_func_role_set(mobile_flow, "MME");
+ evel_mobile_flow_rac_set(mobile_flow, "514");
+ evel_mobile_flow_radio_acc_tech_set(mobile_flow, "LTE");
+ evel_mobile_flow_sac_set(mobile_flow, "1");
+ evel_mobile_flow_samp_alg_set(mobile_flow, 1);
+ evel_mobile_flow_tac_set(mobile_flow, "2099");
+ evel_mobile_flow_tunnel_id_set(mobile_flow, "Tunnel 1");
+ evel_mobile_flow_vlan_id_set(mobile_flow, "15");
+
+ evel_rc = evel_post_event((EVENT_HEADER *)mobile_flow);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post Mobile Flow failed %d (%s)",
+ evel_rc,
+ evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Mobile Flow failed");
+ }
+ printf(" Processed partial Mobile Flow\n");
+ }
+ else
+ {
+ EVEL_ERROR("New GTP Per Flow Metrics failed - skipping Mobile Flow");
+ printf(" Skipped partial Mobile Flow\n");
+ }
+
+ metrics = evel_new_mobile_gtp_flow_metrics(12.32,
+ 3.122,
+ 1002,
+ 21002,
+ 5002,
+ 1470409423,
+ 9872,
+ 1470409433,
+ 112,
+ (time_t)1470409433,
+ "Failed",
+ 872,
+ 32,
+ 172,
+ 1236542,
+ 45612,
+ 2,
+ 122,
+ 102,
+ 12,
+ 32,
+ 72,
+ 8992,
+ 9012,
+ 3022,
+ 62,
+ 22,
+ 2,
+ 1102,
+ 2252);
+ if (metrics != NULL)
+ {
+ evel_mobile_gtp_metrics_dur_con_fail_set(metrics, 12);
+ evel_mobile_gtp_metrics_dur_tun_fail_set(metrics, 13);
+ evel_mobile_gtp_metrics_act_by_set(metrics, "Remote");
+ evel_mobile_gtp_metrics_act_time_set(metrics, (time_t)1470409423);
+ evel_mobile_gtp_metrics_deact_by_set(metrics, "Remote");
+ evel_mobile_gtp_metrics_con_status_set(metrics, "Connected");
+ evel_mobile_gtp_metrics_tun_status_set(metrics, "Not tunneling");
+ evel_mobile_gtp_metrics_iptos_set(metrics, 1, 13);
+ evel_mobile_gtp_metrics_iptos_set(metrics, 17, 1);
+ evel_mobile_gtp_metrics_iptos_set(metrics, 4, 99);
+ evel_mobile_gtp_metrics_large_pkt_rtt_set(metrics, 80);
+ evel_mobile_gtp_metrics_large_pkt_thresh_set(metrics, 600.0);
+ evel_mobile_gtp_metrics_max_rcv_bit_rate_set(metrics, 1357924680);
+ evel_mobile_gtp_metrics_max_trx_bit_rate_set(metrics, 235711);
+ evel_mobile_gtp_metrics_num_echo_fail_set(metrics, 1);
+ evel_mobile_gtp_metrics_num_tun_fail_set(metrics, 4);
+ evel_mobile_gtp_metrics_num_http_errors_set(metrics, 2);
+ evel_mobile_gtp_metrics_tcp_flag_count_add(metrics, EVEL_TCP_CWR, 10);
+ evel_mobile_gtp_metrics_tcp_flag_count_add(metrics, EVEL_TCP_URG, 121);
+ evel_mobile_gtp_metrics_qci_cos_count_add(
+ metrics, EVEL_QCI_COS_UMTS_CONVERSATIONAL, 11);
+ evel_mobile_gtp_metrics_qci_cos_count_add(
+ metrics, EVEL_QCI_COS_LTE_65, 122);
+
+ mobile_flow = evel_new_mobile_flow("Outbound",
+ metrics,
+ "RTP",
+ "IPv8",
+ "2.3.4.3",
+ 2343,
+ "4.2.3.3",
+ 4323);
+ if (mobile_flow != NULL)
+ {
+ evel_mobile_flow_app_type_set(mobile_flow, "Demo application 2");
+ evel_mobile_flow_app_prot_type_set(mobile_flow, "GSM");
+ evel_mobile_flow_app_prot_ver_set(mobile_flow, "2");
+ evel_mobile_flow_cid_set(mobile_flow, "1");
+ evel_mobile_flow_con_type_set(mobile_flow, "S1-U");
+ evel_mobile_flow_ecgi_set(mobile_flow, "e1");
+ evel_mobile_flow_gtp_prot_type_set(mobile_flow, "GTP-U");
+ evel_mobile_flow_gtp_prot_ver_set(mobile_flow, "1");
+ evel_mobile_flow_http_header_set(mobile_flow, "http://www.google.com");
+ evel_mobile_flow_imei_set(mobile_flow, "209917614823");
+ evel_mobile_flow_imsi_set(mobile_flow, "355251/05/850925/8");
+ evel_mobile_flow_lac_set(mobile_flow, "1");
+ evel_mobile_flow_mcc_set(mobile_flow, "410");
+ evel_mobile_flow_mnc_set(mobile_flow, "04");
+ evel_mobile_flow_msisdn_set(mobile_flow, "6017123456789");
+ evel_mobile_flow_other_func_role_set(mobile_flow, "MMF");
+ evel_mobile_flow_rac_set(mobile_flow, "514");
+ evel_mobile_flow_radio_acc_tech_set(mobile_flow, "3G");
+ evel_mobile_flow_sac_set(mobile_flow, "1");
+ evel_mobile_flow_samp_alg_set(mobile_flow, 2);
+ evel_mobile_flow_tac_set(mobile_flow, "2099");
+ evel_mobile_flow_tunnel_id_set(mobile_flow, "Tunnel 2");
+ evel_mobile_flow_vlan_id_set(mobile_flow, "4096");
+
+ evel_rc = evel_post_event((EVENT_HEADER *)mobile_flow);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post Mobile Flow failed %d (%s)",
+ evel_rc,
+ evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Mobile Flow failed");
+ }
+ printf(" Processed full Mobile Flow\n");
+ }
+ else
+ {
+ EVEL_ERROR("New GTP Per Flow Metrics failed - skipping Mobile Flow");
+ printf(" Skipped full Mobile Flow\n");
+ }
+}
+
+/**************************************************************************//**
+ * Create and send a Service event.
+ *****************************************************************************/
+void demo_service()
+{
+ demo_service_event(SERVICE_CODEC);
+ demo_service_event(SERVICE_TRANSCODING);
+ demo_service_event(SERVICE_RTCP);
+ demo_service_event(SERVICE_EOC_VQM);
+ demo_service_event(SERVICE_MARKER);
+}
+
+void demo_service_event(const SERVICE_EVENT service_event)
+{
+ EVENT_SERVICE * event = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ event = evel_new_service("vendor_x_id", "vendor_x_event_id");
+ if (event != NULL)
+ {
+ evel_service_type_set(event, "Service Event");
+ evel_service_product_id_set(event, "vendor_x_product_id");
+ evel_service_subsystem_id_set(event, "vendor_x_subsystem_id");
+ evel_service_friendly_name_set(event, "vendor_x_friendly_name");
+ evel_service_correlator_set(event, "vendor_x_correlator");
+ evel_service_addl_field_add(event, "Name1", "Value1");
+ evel_service_addl_field_add(event, "Name2", "Value2");
+
+ switch (service_event)
+ {
+ case SERVICE_CODEC:
+ evel_service_codec_set(event, "PCMA");
+ break;
+ case SERVICE_TRANSCODING:
+ evel_service_callee_codec_set(event, "PCMA");
+ evel_service_caller_codec_set(event, "G729A");
+ break;
+ case SERVICE_RTCP:
+ evel_service_rtcp_data_set(event, "some_rtcp_data");
+ break;
+ case SERVICE_EOC_VQM:
+ evel_service_adjacency_name_set(event, "vendor_x_adjacency");
+ evel_service_endpoint_desc_set(event, EVEL_SERVICE_ENDPOINT_CALLER);
+ evel_service_endpoint_jitter_set(event, 66);
+ evel_service_endpoint_rtp_oct_disc_set(event, 100);
+ evel_service_endpoint_rtp_oct_recv_set(event, 200);
+ evel_service_endpoint_rtp_oct_sent_set(event, 300);
+ evel_service_endpoint_rtp_pkt_disc_set(event, 400);
+ evel_service_endpoint_rtp_pkt_recv_set(event, 500);
+ evel_service_endpoint_rtp_pkt_sent_set(event, 600);
+ evel_service_local_jitter_set(event, 99);
+ evel_service_local_rtp_oct_disc_set(event, 150);
+ evel_service_local_rtp_oct_recv_set(event, 250);
+ evel_service_local_rtp_oct_sent_set(event, 350);
+ evel_service_local_rtp_pkt_disc_set(event, 450);
+ evel_service_local_rtp_pkt_recv_set(event, 550);
+ evel_service_local_rtp_pkt_sent_set(event, 650);
+ evel_service_mos_cqe_set(event, 12.255);
+ evel_service_packets_lost_set(event, 157);
+ evel_service_packet_loss_percent_set(event, 0.232);
+ evel_service_r_factor_set(event, 11);
+ evel_service_round_trip_delay_set(event, 15);
+ break;
+ case SERVICE_MARKER:
+ evel_service_phone_number_set(event, "0888888888");
+ break;
+ }
+
+ evel_rc = evel_post_event((EVENT_HEADER *) event);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Service failed");
+ }
+ printf(" Processed Service Events\n");
+}
+
+/**************************************************************************//**
+ * Create and send a Signaling event.
+ *****************************************************************************/
+void demo_signaling(void)
+{
+ EVENT_SIGNALING * event = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ event = evel_new_signaling("vendor_x_id", "vendor_x_event_id");
+ if (event != NULL)
+ {
+ evel_signaling_type_set(event, "Signaling");
+ evel_signaling_product_id_set(event, "vendor_x_product_id");
+ evel_signaling_subsystem_id_set(event, "vendor_x_subsystem_id");
+ evel_signaling_friendly_name_set(event, "vendor_x_friendly_name");
+ evel_signaling_correlator_set(event, "vendor_x_correlator");
+ evel_signaling_local_ip_address_set(event, "1.0.3.1");
+ evel_signaling_local_port_set(event, "1031");
+ evel_signaling_remote_ip_address_set(event, "5.3.3.0");
+ evel_signaling_remote_port_set(event, "5330");
+ evel_signaling_compressed_sip_set(event, "compressed_sip");
+ evel_signaling_summary_sip_set(event, "summary_sip");
+ evel_rc = evel_post_event((EVENT_HEADER *) event);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Signaling failed");
+ }
+ printf(" Processed Signaling\n");
+}
+
+/**************************************************************************//**
+ * Create and send a state change event.
+ *****************************************************************************/
+void demo_state_change(void)
+{
+ EVENT_STATE_CHANGE * state_change = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* State Change */
+ /***************************************************************************/
+ state_change = evel_new_state_change(EVEL_ENTITY_STATE_IN_SERVICE,
+ EVEL_ENTITY_STATE_OUT_OF_SERVICE,
+ "Interface");
+ if (state_change != NULL)
+ {
+ evel_state_change_type_set(state_change, "State Change");
+ evel_state_change_addl_field_add(state_change, "Name1", "Value1");
+ evel_state_change_addl_field_add(state_change, "Name2", "Value2");
+ evel_rc = evel_post_event((EVENT_HEADER *)state_change);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New State Change failed");
+ }
+ printf(" Processed State Change\n");
+}
+
+/**************************************************************************//**
+ * Create and send two syslog events.
+ *****************************************************************************/
+void demo_syslog(void)
+{
+ EVENT_SYSLOG * syslog = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* Syslog */
+ /***************************************************************************/
+ syslog = evel_new_syslog(EVEL_SOURCE_VIRTUAL_NETWORK_FUNCTION,
+ "EVEL library message",
+ "EVEL");
+ if (syslog != NULL)
+ {
+ evel_rc = evel_post_event((EVENT_HEADER *)syslog);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Syslog failed");
+ }
+ printf(" Processed empty Syslog\n");
+
+ syslog = evel_new_syslog(EVEL_SOURCE_VIRTUAL_MACHINE,
+ "EVEL library message",
+ "EVEL");
+ if (syslog != NULL)
+ {
+ evel_syslog_event_source_host_set(syslog, "Virtual host");
+ evel_syslog_facility_set(syslog, EVEL_SYSLOG_FACILITY_LOCAL0);
+ evel_syslog_proc_set(syslog, "vnf_process");
+ evel_syslog_proc_id_set(syslog, 1423);
+ evel_syslog_version_set(syslog, 1);
+ evel_syslog_addl_field_add(syslog, "Name1", "Value1");
+ evel_syslog_addl_field_add(syslog, "Name2", "Value2");
+ evel_syslog_addl_field_add(syslog, "Name3", "Value3");
+ evel_syslog_addl_field_add(syslog, "Name4", "Value4");
+ evel_rc = evel_post_event((EVENT_HEADER *)syslog);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Syslog failed");
+ }
+ printf(" Processed full Syslog\n");
+}
+
+/**************************************************************************//**
+ * Create and send two other events.
+ *****************************************************************************/
+void demo_other(void)
+{
+ EVENT_OTHER * other = NULL;
+ EVEL_ERR_CODES evel_rc = EVEL_SUCCESS;
+
+ /***************************************************************************/
+ /* Other */
+ /***************************************************************************/
+ other = evel_new_other();
+ if (other != NULL)
+ {
+ evel_rc = evel_post_event((EVENT_HEADER *)other);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Other failed");
+ }
+ printf(" Processed empty Other\n");
+
+ other = evel_new_other();
+ if (other != NULL)
+ {
+ evel_other_field_add(other,
+ "Other field 1",
+ "Other value 1");
+ evel_rc = evel_post_event((EVENT_HEADER *)other);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Other failed");
+ }
+ printf(" Processed small Other\n");
+
+ other = evel_new_other();
+ if (other != NULL)
+ {
+ evel_other_field_add(other,
+ "Other field A",
+ "Other value A");
+ evel_other_field_add(other,
+ "Other field B",
+ "Other value B");
+ evel_other_field_add(other,
+ "Other field C",
+ "Other value C");
+ evel_rc = evel_post_event((EVENT_HEADER *)other);
+ if (evel_rc != EVEL_SUCCESS)
+ {
+ EVEL_ERROR("Post failed %d (%s)", evel_rc, evel_error_string());
+ }
+ }
+ else
+ {
+ EVEL_ERROR("New Other failed");
+ }
+ printf(" Processed large Other\n");
+}
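For reference, the headers pulled in at the top of evel_demo.c (evel.h, evel_demo.h, evel_test_control.h) are not part of this change; they come with the VES/EVEL agent library. A build-and-run sketch, assuming that library has already been built - the include/library paths, the -level library name and the collector address are assumptions, and the authoritative build steps live in the VES project's own Makefile:

    # Illustrative only; adjust paths and extra objects to the local EVEL build
    gcc -o evel_demo evel_demo.c -I<evel_include_dir> -L<evel_lib_dir> \
        -level -lcurl -lpthread
    ./evel_demo --id "$(hostname)" --fqdn <collector_ip> --port 30000 --verbose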
diff --git a/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/favicon.ico b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/favicon.ico
new file mode 100644
index 0000000..cd95189
--- /dev/null
+++ b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/favicon.ico
Binary files differ
diff --git a/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/logo.png b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/logo.png
new file mode 100644
index 0000000..a4bf310
--- /dev/null
+++ b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/logo.png
Binary files differ
diff --git a/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/monitor.py b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/monitor.py
new file mode 100644
index 0000000..9b16af3
--- /dev/null
+++ b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/monitor.py
@@ -0,0 +1,713 @@
+#!/usr/bin/env python
+#
+# Copyright 2016-2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Monitor and closed-loop policy agent as part of the OPNFV VES
+# vHello_VES demo.
+#
+# Status: this is a work in progress, under test.
+
+from rest_dispatcher import PathDispatcher, set_404_content
+from wsgiref.simple_server import make_server
+import sys
+import os
+import platform
+import traceback
+import time
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+import ConfigParser
+import logging.handlers
+from base64 import b64decode
+import string
+import json
+import jsonschema
+from functools import partial
+
+monitor_mode = "f"
+vdu_id = ['','','','']
+summary_e = ['***** Summary of key stats *****','','','']
+summary_c = ['Collectd agents:']
+status = ['','Started','Started','Started']
+base_url = ''
+template_404 = b'''POST {0}'''
+columns = 0
+rows = 0
+
+class JSONObject:
+ def __init__(self, d):
+ self.__dict__ = d
+
+_hello_resp = '''\
+<html>
+ <head>
+ <title>Hello {name}</title>
+ </head>
+ <body>
+ <h1>Hello {name}!</h1>
+ </body>
+</html>'''
+
+_localtime_resp = '''\
+<?xml version="1.0"?>
+<time>
+ <year>{t.tm_year}</year>
+ <month>{t.tm_mon}</month>
+ <day>{t.tm_mday}</day>
+ <hour>{t.tm_hour}</hour>
+ <minute>{t.tm_min}</minute>
+ <second>{t.tm_sec}</second>
+</time>'''
+
+__all__ = []
+__version__ = 0.1
+__date__ = '2015-12-04'
+__updated__ = '2015-12-04'
+
+TESTRUN = False
+DEBUG = False
+PROFILE = False
+
+#------------------------------------------------------------------------------
+# Credentials we expect clients to authenticate themselves with.
+#------------------------------------------------------------------------------
+vel_username = ''
+vel_password = ''
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to validate events.
+#------------------------------------------------------------------------------
+vel_schema = None
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to validate client throttle state.
+#------------------------------------------------------------------------------
+throttle_schema = None
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to provoke throttling commands for testing.
+#------------------------------------------------------------------------------
+test_control_schema = None
+
+#------------------------------------------------------------------------------
+# Pending command list from the testControl API
+# This is sent as a response commandList to the next received event.
+#------------------------------------------------------------------------------
+pending_command_list = None
+
+#------------------------------------------------------------------------------
+# Logger for this module.
+#------------------------------------------------------------------------------
+logger = None
+
+def listener(environ, start_response, schema):
+ '''
+ Handler for the Vendor Event Listener REST API.
+
+ Extract headers and the body and check that:
+
+ 1) The client authenticated themselves correctly.
+ 2) The body validates against the provided schema for the API.
+
+ '''
+ logger.info('Got a Vendor Event request')
+ print('==== ' + time.asctime() + ' ' + '=' * 49)
+
+ #--------------------------------------------------------------------------
+ # Extract the content from the request.
+ #--------------------------------------------------------------------------
+ length = int(environ.get('CONTENT_LENGTH', '0'))
+ logger.debug('Content Length: {0}'.format(length))
+ body = environ['wsgi.input'].read(length)
+ logger.debug('Content Body: {0}'.format(body))
+
+ mode, b64_credentials = string.split(environ.get('HTTP_AUTHORIZATION',
+ 'None None'))
+ # logger.debug('Auth. Mode: {0} Credentials: {1}'.format(mode,
+ # b64_credentials))
+ logger.debug('Auth. Mode: {0} Credentials: ****'.format(mode))
+ if (b64_credentials != 'None'):
+ credentials = b64decode(b64_credentials)
+ else:
+ credentials = None
+
+ # logger.debug('Credentials: {0}'.format(credentials))
+ logger.debug('Credentials: ****')
+
+ #--------------------------------------------------------------------------
+ # If we have a schema file then check that the event matches that expected.
+ #--------------------------------------------------------------------------
+ if (schema is not None):
+ logger.debug('Attempting to validate data: {0}\n'
+ 'Against schema: {1}'.format(body, schema))
+ try:
+ decoded_body = json.loads(body)
+ jsonschema.validate(decoded_body, schema)
+ logger.info('Event is valid!')
+ print('Valid body decoded & checked against schema OK:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+
+ except jsonschema.SchemaError as e:
+ logger.error('Schema is not valid! {0}'.format(e))
+ print('Schema is not valid! {0}'.format(e))
+
+ except jsonschema.ValidationError as e:
+ logger.warn('Event is not valid against schema! {0}'.format(e))
+ print('Event is not valid against schema! {0}'.format(e))
+ print('Bad JSON body decoded:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+
+ except Exception as e:
+ logger.error('Event invalid for unexpected reason! {0}'.format(e))
+ print('Schema is not valid for unexpected reason! {0}'.format(e))
+ else:
+ logger.debug('No schema so just decode JSON: {0}'.format(body))
+ try:
+ decoded_body = json.loads(body)
+ print('Valid JSON body (no schema checking) decoded:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+ logger.info('Event is valid JSON but not checked against schema!')
+
+ except Exception as e:
+ logger.error('Event invalid for unexpected reason! {0}'.format(e))
+ print('JSON body not valid for unexpected reason! {0}'.format(e))
+
+ #--------------------------------------------------------------------------
+ # See whether the user authenticated themselves correctly.
+ #--------------------------------------------------------------------------
+ if (credentials == (vel_username + ':' + vel_password)):
+ logger.debug('Authenticated OK')
+ print('Authenticated OK')
+
+ #----------------------------------------------------------------------
+ # Respond to the caller. If we have a pending commandList from the
+ # testControl API, send it in response.
+ #----------------------------------------------------------------------
+ global pending_command_list
+ if pending_command_list is not None:
+ start_response('202 Accepted',
+ [('Content-type', 'application/json')])
+ response = pending_command_list
+ pending_command_list = None
+
+ print('\n'+ '='*80)
+ print('Sending pending commandList in the response:\n'
+ '{0}'.format(json.dumps(response,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+ print('='*80 + '\n')
+ yield json.dumps(response)
+ else:
+ start_response('202 Accepted', [])
+ yield ''
+ else:
+ logger.warn('Failed to authenticate OK')
+ print('Failed to authenticate agent credentials: ', credentials)
+
+ #----------------------------------------------------------------------
+ # Respond to the caller.
+ #----------------------------------------------------------------------
+ start_response('401 Unauthorized', [ ('Content-type',
+ 'application/json')])
+ req_error = { 'requestError': {
+ 'policyException': {
+ 'messageId': 'POL0001',
+ 'text': 'Failed to authenticate'
+ }
+ }
+ }
+ yield json.dumps(req_error)
+
+ process_event(body)
+
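+#------------------------------------------------------------------------------
+# Illustrative sketch only (not called anywhere): one way an agent could POST a
+# minimal event to this listener with HTTP basic auth. The URL, port and
+# credentials are assumptions matching the demo defaults (port 30000,
+# hello/world); the body is a cut-down example, not the full VES v3 schema.
+#------------------------------------------------------------------------------
+def example_post_event(url='http://127.0.0.1:30000/eventListener/v3',
+                       credentials='hello:world'):
+    import urllib2
+    from base64 import b64encode
+    # Minimal measurement event, shaped so process_event() below can read it.
+    body = json.dumps({'event': {
+        'commonEventHeader': {
+            'domain': 'measurementsForVfScaling',
+            'reportingEntityName': 'vdu1',
+            'sourceId': 'example-vm-id',
+            'lastEpochMicrosec': int(time.time() * 1000000)},
+        'measurementsForVfScalingFields': {'requestRate': 1}}})
+    request = urllib2.Request(url, body,
+                              {'Content-Type': 'application/json',
+                               'Authorization': 'Basic ' + b64encode(credentials)})
+    # The listener answers 202 Accepted; the body carries any pending commandList.
+    return urllib2.urlopen(request).read()
+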
+#--------------------------------------------------------------------------
+# Event processing
+#--------------------------------------------------------------------------
+def process_event(body):
+ global status
+ global summary_e
+ global summary_c
+ global vdu_id
+ vdu = 0
+
+ e = json.loads(body, object_hook=JSONObject)
+
+ epoch = e.event.commonEventHeader.lastEpochMicrosec
+ sourceId = e.event.commonEventHeader.sourceId
+
+ report_time = time.strftime('%Y-%m-%d %H:%M:%S',
+ time.localtime(int(epoch)/1000000))
+
+ host = e.event.commonEventHeader.reportingEntityName
+ if 'VDU1' in host or 'vdu1' in host: vdu = 1
+ if 'VDU2' in host or 'vdu2' in host: vdu = 2
+ if 'VDU3' in host or 'vdu3' in host: vdu = 3
+
+ domain = e.event.commonEventHeader.domain
+
+ if domain == 'measurementsForVfScaling':
+ if vdu >= 1:
+ requestRate = e.event.measurementsForVfScalingFields.requestRate
+ summary_e[vdu] = host + ": state=" + status[vdu] + ", tps=" + str(requestRate)
+ else:
+ aggregateCpuUsage = e.event.measurementsForVfScalingFields.aggregateCpuUsage
+ vNicUsageArray = e.event.measurementsForVfScalingFields.vNicUsageArray
+ s = ""
+ for i in range(1,len(vdu_id)):
+ if sourceId.upper() in vdu_id[i].upper():
+ s = "(VDU"+ str(i) + ") "
+ if s:
+ s += host + ": cpu=" + str(aggregateCpuUsage)
+ found = False
+ for i in range(1,len(summary_c)):
+ if host in summary_c[i]:
+ summary_c[i] = s
+ found = True
+ break
+ if not found:
+ summary_c.extend([s])
+
+ for s in summary_e:
+ print '{0}'.format(s)
+ for s in summary_c:
+ print '{0}'.format(s)
+
+ if domain == 'fault' and vdu >= 1:
+ alarmCondition = e.event.faultFields.alarmCondition
+ specificProblem = e.event.faultFields.specificProblem
+# status[vdu] = e.event.faultFields.vfStatus
+ status[vdu] = e.event.faultFields.specificProblem
+
+def test_listener(environ, start_response, schema):
+ '''
+ Handler for the Test Collector Test Control API.
+
+ There is no authentication on this interface.
+
+ This simply stores a commandList which will be sent in response to the next
+ incoming event on the EVEL interface.
+ '''
+ global pending_command_list
+ logger.info('Got a Test Control input')
+ print('============================')
+ print('==== TEST CONTROL INPUT ====')
+
+ #--------------------------------------------------------------------------
+ # GET allows us to get the current pending request.
+ #--------------------------------------------------------------------------
+ if environ.get('REQUEST_METHOD') == 'GET':
+ start_response('200 OK', [('Content-type', 'application/json')])
+ yield json.dumps(pending_command_list)
+ return
+
+ #--------------------------------------------------------------------------
+ # Extract the content from the request.
+ #--------------------------------------------------------------------------
+ length = int(environ.get('CONTENT_LENGTH', '0'))
+ logger.debug('TestControl Content Length: {0}'.format(length))
+ body = environ['wsgi.input'].read(length)
+ logger.debug('TestControl Content Body: {0}'.format(body))
+
+ #--------------------------------------------------------------------------
+ # If we have a schema file then check that the event matches that expected.
+ #--------------------------------------------------------------------------
+ if (schema is not None):
+ logger.debug('Attempting to validate data: {0}\n'
+ 'Against schema: {1}'.format(body, schema))
+ try:
+ decoded_body = json.loads(body)
+ jsonschema.validate(decoded_body, schema)
+ logger.info('TestControl is valid!')
+ print('TestControl:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+
+ except jsonschema.SchemaError as e:
+ logger.error('TestControl Schema is not valid: {0}'.format(e))
+ print('TestControl Schema is not valid: {0}'.format(e))
+
+ except jsonschema.ValidationError as e:
+ logger.warn('TestControl input not valid: {0}'.format(e))
+ print('TestControl input not valid: {0}'.format(e))
+ print('Bad JSON body decoded:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+
+ except Exception as e:
+ logger.error('TestControl input not valid: {0}'.format(e))
+ print('TestControl Schema not valid: {0}'.format(e))
+ else:
+ logger.debug('Missing schema just decode JSON: {0}'.format(body))
+ try:
+ decoded_body = json.loads(body)
+ print('Valid JSON body (no schema checking) decoded:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+ logger.info('TestControl input not checked against schema!')
+
+ except Exception as e:
+ logger.error('TestControl input not valid: {0}'.format(e))
+ print('TestControl input not valid: {0}'.format(e))
+
+ #--------------------------------------------------------------------------
+ # Respond to the caller. If we received otherField 'ThrottleRequest',
+ # generate the appropriate canned response.
+ #--------------------------------------------------------------------------
+ pending_command_list = decoded_body
+ print('===== TEST CONTROL END =====')
+ print('============================')
+ start_response('202 Accepted', [])
+ yield ''
+
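+#------------------------------------------------------------------------------
+# Illustrative sketch only (not called anywhere): one way a test client could
+# stage a commandList through the Test Control API above. The URL assumes the
+# demo port (30000) and API version 3, and the payload is only an example; the
+# collector stores whatever JSON it is given and returns it with the response
+# to the next event received on the eventListener interface.
+#------------------------------------------------------------------------------
+def example_stage_command_list(
+        url='http://127.0.0.1:30000/testControl/v3/commandList'):
+    import urllib2
+    # Example payload; the content is an assumption, not a definitive command set.
+    body = json.dumps({'commandList': [
+        {'command': {'commandType': 'measurementIntervalChange',
+                     'measurementInterval': 60}}]})
+    request = urllib2.Request(url, body, {'Content-Type': 'application/json'})
+    # test_listener() replies 202 Accepted and holds the commandList as pending.
+    return urllib2.urlopen(request).read()
+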
+def main(argv=None):
+ '''
+ Main function for the collector start-up.
+
+ Called with command-line arguments:
+ * --config *<file>*
+ * --section *<section>*
+ * --verbose
+
+ Where:
+
+ *<file>* specifies the path to the configuration file.
+
+ *<section>* specifies the section within that config file.
+
+ *verbose* generates more information in the log files.
+
+ The process listens for REST API invocations and checks them. Errors are
+ displayed to stdout and logged.
+ '''
+
+ if argv is None:
+ argv = sys.argv
+ else:
+ sys.argv.extend(argv)
+
+ program_name = os.path.basename(sys.argv[0])
+ program_version = 'v{0}'.format(__version__)
+ program_build_date = str(__updated__)
+ program_version_message = '%%(prog)s {0} ({1})'.format(program_version,
+ program_build_date)
+ if (__import__('__main__').__doc__ is not None):
+ program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
+ else:
+ program_shortdesc = 'Running in test harness'
+ program_license = '''{0}
+
+ Created on {1}.
+ Copyright 2015 Metaswitch Networks Ltd. All rights reserved.
+
+ Distributed on an "AS IS" basis without warranties
+ or conditions of any kind, either express or implied.
+
+USAGE
+'''.format(program_shortdesc, str(__date__))
+
+ try:
+ #----------------------------------------------------------------------
+ # Setup argument parser so we can parse the command-line.
+ #----------------------------------------------------------------------
+ parser = ArgumentParser(description=program_license,
+ formatter_class=ArgumentDefaultsHelpFormatter)
+ parser.add_argument('-v', '--verbose',
+ dest='verbose',
+ action='count',
+ help='set verbosity level')
+ parser.add_argument('-V', '--version',
+ action='version',
+ version=program_version_message,
+ help='Display version information')
+ parser.add_argument('-a', '--api-version',
+ dest='api_version',
+ default='3',
+ help='set API version')
+ parser.add_argument('-c', '--config',
+ dest='config',
+ default='/etc/opt/att/collector.conf',
+ help='Use this config file.',
+ metavar='<file>')
+ parser.add_argument('-s', '--section',
+ dest='section',
+ default='default',
+ metavar='<section>',
+ help='section to use in the config file')
+
+ #----------------------------------------------------------------------
+ # Process arguments received.
+ #----------------------------------------------------------------------
+ args = parser.parse_args()
+ verbose = args.verbose
+ api_version = args.api_version
+ config_file = args.config
+ config_section = args.section
+
+ #----------------------------------------------------------------------
+ # Now read the config file, using command-line supplied values as
+ # overrides.
+ #----------------------------------------------------------------------
+ defaults = {'log_file': 'collector.log',
+ 'vel_port': '12233',
+ 'vel_path': '',
+ 'vel_topic_name': ''
+ }
+ overrides = {}
+ config = ConfigParser.SafeConfigParser(defaults)
+ config.read(config_file)
+
+ #----------------------------------------------------------------------
+ # extract the values we want.
+ #----------------------------------------------------------------------
+ log_file = config.get(config_section, 'log_file', vars=overrides)
+ vel_port = config.get(config_section, 'vel_port', vars=overrides)
+ vel_path = config.get(config_section, 'vel_path', vars=overrides)
+ vel_topic_name = config.get(config_section,
+ 'vel_topic_name',
+ vars=overrides)
+ global vel_username
+ global vel_password
+ vel_username = config.get(config_section,
+ 'vel_username',
+ vars=overrides)
+ vel_password = config.get(config_section,
+ 'vel_password',
+ vars=overrides)
+ vel_schema_file = config.get(config_section,
+ 'schema_file',
+ vars=overrides)
+ base_schema_file = config.get(config_section,
+ 'base_schema_file',
+ vars=overrides)
+ throttle_schema_file = config.get(config_section,
+ 'throttle_schema_file',
+ vars=overrides)
+ test_control_schema_file = config.get(config_section,
+ 'test_control_schema_file',
+ vars=overrides)
+
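+        #----------------------------------------------------------------------
+        # For reference, a config file covering the options read above might
+        # look like the sketch below. The values are illustrative assumptions
+        # (the port and credentials match those used elsewhere in this demo);
+        # the schema entries are placeholders for real file paths.
+        #
+        #   [default]
+        #   log_file = /var/log/att/collector.log
+        #   vel_port = 30000
+        #   vel_path =
+        #   vel_topic_name =
+        #   vel_username = hello
+        #   vel_password = world
+        #   schema_file = <path to VES event schema JSON>
+        #   base_schema_file = <path to base schema JSON>
+        #   throttle_schema_file = <path to throttle schema JSON>
+        #   test_control_schema_file = <path to test control schema JSON>
+        #----------------------------------------------------------------------
+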
+ #----------------------------------------------------------------------
+ # Finally we have enough info to start a proper flow trace.
+ #----------------------------------------------------------------------
+ global logger
+ print('Logfile: {0}'.format(log_file))
+ logger = logging.getLogger('collector')
+ if verbose > 0:
+ print('Verbose mode on')
+ logger.setLevel(logging.DEBUG)
+ else:
+ logger.setLevel(logging.INFO)
+ handler = logging.handlers.RotatingFileHandler(log_file,
+ maxBytes=1000000,
+ backupCount=10)
+ if (platform.system() == 'Windows'):
+ date_format = '%Y-%m-%d %H:%M:%S'
+ else:
+ date_format = '%Y-%m-%d %H:%M:%S.%f %z'
+ formatter = logging.Formatter('%(asctime)s %(name)s - '
+ '%(levelname)s - %(message)s',
+ date_format)
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+ logger.info('Started')
+
+ #----------------------------------------------------------------------
+ # Log the details of the configuration.
+ #----------------------------------------------------------------------
+ logger.debug('Log file = {0}'.format(log_file))
+ logger.debug('Event Listener Port = {0}'.format(vel_port))
+ logger.debug('Event Listener Path = {0}'.format(vel_path))
+ logger.debug('Event Listener Topic = {0}'.format(vel_topic_name))
+ logger.debug('Event Listener Username = {0}'.format(vel_username))
+ # logger.debug('Event Listener Password = {0}'.format(vel_password))
+ logger.debug('Event Listener JSON Schema File = {0}'.format(
+ vel_schema_file))
+ logger.debug('Base JSON Schema File = {0}'.format(base_schema_file))
+ logger.debug('Throttle JSON Schema File = {0}'.format(
+ throttle_schema_file))
+ logger.debug('Test Control JSON Schema File = {0}'.format(
+ test_control_schema_file))
+
+ #----------------------------------------------------------------------
+ # Perform some basic error checking on the config.
+ #----------------------------------------------------------------------
+ if (int(vel_port) < 1024 or int(vel_port) > 65535):
+ logger.error('Invalid Vendor Event Listener port ({0}) '
+ 'specified'.format(vel_port))
+ raise RuntimeError('Invalid Vendor Event Listener port ({0}) '
+ 'specified'.format(vel_port))
+
+ if (len(vel_path) > 0 and vel_path[-1] != '/'):
+ logger.warning('Event Listener Path ({0}) should have terminating '
+ '"/"! Adding one on to configured string.'.format(
+ vel_path))
+ vel_path += '/'
+
+ #----------------------------------------------------------------------
+ # Load up the vel_schema, if it exists.
+ #----------------------------------------------------------------------
+ if not os.path.exists(vel_schema_file):
+ logger.warning('Event Listener Schema File ({0}) not found. '
+ 'No validation will be undertaken.'.format(
+ vel_schema_file))
+ else:
+ global vel_schema
+ global throttle_schema
+ global test_control_schema
+ vel_schema = json.load(open(vel_schema_file, 'r'))
+ logger.debug('Loaded the JSON schema file')
+
+ #------------------------------------------------------------------
+ # Load up the throttle_schema, if it exists.
+ #------------------------------------------------------------------
+ if (os.path.exists(throttle_schema_file)):
+ logger.debug('Loading throttle schema')
+ throttle_fragment = json.load(open(throttle_schema_file, 'r'))
+ throttle_schema = {}
+ throttle_schema.update(vel_schema)
+ throttle_schema.update(throttle_fragment)
+ logger.debug('Loaded the throttle schema')
+
+ #------------------------------------------------------------------
+ # Load up the test control _schema, if it exists.
+ #------------------------------------------------------------------
+ if (os.path.exists(test_control_schema_file)):
+ logger.debug('Loading test control schema')
+ test_control_fragment = json.load(
+ open(test_control_schema_file, 'r'))
+ test_control_schema = {}
+ test_control_schema.update(vel_schema)
+ test_control_schema.update(test_control_fragment)
+ logger.debug('Loaded the test control schema')
+
+ #------------------------------------------------------------------
+ # Load up the base_schema, if it exists.
+ #------------------------------------------------------------------
+ if (os.path.exists(base_schema_file)):
+ logger.debug('Updating the schema with base definition')
+ base_schema = json.load(open(base_schema_file, 'r'))
+ vel_schema.update(base_schema)
+ logger.debug('Updated the JSON schema file')
+
+ #----------------------------------------------------------------------
+ # We are now ready to get started with processing. Start-up the various
+ # components of the system in order:
+ #
+ # 1) Create the dispatcher.
+ # 2) Register the functions for the URLs of interest.
+ # 3) Run the webserver.
+ #----------------------------------------------------------------------
+ root_url = '/{0}eventListener/v{1}{2}'.\
+ format(vel_path,
+ api_version,
+ '/' + vel_topic_name
+ if len(vel_topic_name) > 0
+ else '')
+ throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\
+ format(vel_path, api_version)
+ set_404_content(root_url)
+ dispatcher = PathDispatcher()
+ vendor_event_listener = partial(listener, schema = vel_schema)
+ dispatcher.register('GET', root_url, vendor_event_listener)
+ dispatcher.register('POST', root_url, vendor_event_listener)
+ vendor_throttle_listener = partial(listener, schema = throttle_schema)
+ dispatcher.register('GET', throttle_url, vendor_throttle_listener)
+ dispatcher.register('POST', throttle_url, vendor_throttle_listener)
+
+ #----------------------------------------------------------------------
+ # We also add a POST-only mechanism for test control, so that we can
+ # send commands to a single attached client.
+ #----------------------------------------------------------------------
+ test_control_url = '/testControl/v{0}/commandList'.format(api_version)
+ test_control_listener = partial(test_listener,
+ schema = test_control_schema)
+ dispatcher.register('POST', test_control_url, test_control_listener)
+ dispatcher.register('GET', test_control_url, test_control_listener)
+
+ httpd = make_server('', int(vel_port), dispatcher)
+ print('Serving on port {0}...'.format(vel_port))
+ httpd.serve_forever()
+
+ logger.error('Main loop exited unexpectedly!')
+ return 0
+
+ except KeyboardInterrupt:
+ #----------------------------------------------------------------------
+ # handle keyboard interrupt
+ #----------------------------------------------------------------------
+ logger.info('Exiting on keyboard interrupt!')
+ return 0
+
+ except Exception as e:
+ #----------------------------------------------------------------------
+ # Handle unexpected exceptions.
+ #----------------------------------------------------------------------
+ if DEBUG or TESTRUN:
+ raise(e)
+ indent = len(program_name) * ' '
+ sys.stderr.write(program_name + ': ' + repr(e) + '\n')
+ sys.stderr.write(indent + ' for help use --help\n')
+ sys.stderr.write(traceback.format_exc())
+ logger.critical('Exiting because of exception: {0}'.format(e))
+ logger.critical(traceback.format_exc())
+ return 2
+
+#------------------------------------------------------------------------------
+# MAIN SCRIPT ENTRY POINT.
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ if TESTRUN:
+ #----------------------------------------------------------------------
+ # Running tests - note that doctest comments haven't been included so
+ # this is a hook for future improvements.
+ #----------------------------------------------------------------------
+ import doctest
+ doctest.testmod()
+
+ if PROFILE:
+ #----------------------------------------------------------------------
+ # Profiling performance. Performance isn't expected to be a major
+ # issue, but this should all work as expected.
+ #----------------------------------------------------------------------
+ import cProfile
+ import pstats
+ profile_filename = 'collector_profile.txt'
+ cProfile.run('main()', profile_filename)
+ statsfile = open('collector_profile_stats.txt', 'wb')
+ p = pstats.Stats(profile_filename, stream=statsfile)
+ stats = p.strip_dirs().sort_stats('cumulative')
+ stats.print_stats()
+ statsfile.close()
+ sys.exit(0)
+
+ #--------------------------------------------------------------------------
+ # Normal operation - call through to the main function.
+ #--------------------------------------------------------------------------
+ sys.exit(main())
diff --git a/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/start.sh b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/start.sh
new file mode 100644
index 0000000..0f27dd3
--- /dev/null
+++ b/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/start.sh
@@ -0,0 +1,224 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Startup script for a simple web server as part of the
+# ves_onap_demo test of the OPNFV VES project.
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# Intended to be invoked from ves_onap_demo.sh
+# $ bash start.sh type params
+# type: type of VNF component [monitor|agent|collectd]
+# agent|collectd params: ID CollectorIP username password
+# monitor params: VDU1_ID VDU2_ID VDU3_ID username password
+# ID: VM ID
+# CollectorIP: IP address of the collector
+# username: Username for Collector RESTful API authentication
+# password: Password for Collector RESTful API authentication
+
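+# Example invocations (illustrative; the IDs and addresses are placeholders,
+# and the "hello world" credentials match those passed in by ves_onap_demo.sh):
+#   bash start.sh monitor <vdu1_id> <vdu2_id> <vdu3_id> hello world
+#   bash start.sh agent <vm_id> <collector_ip> hello world
+#   bash start.sh collectd <hypervisor_ip> <collector_ip> hello world
+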
+setup_collectd () {
+ guest=$1
+ echo "$0: Install prerequisites"
+ dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+ if [ "$dist" == "Ubuntu" ]; then
+ conf="/etc/collectd/collectd.conf"
+ else
+ conf="/etc/collectd.conf"
+ fi
+
+ if [ "$dist" == "Ubuntu" ]; then
+ sudo apt-get update
+ sudo apt-get install -y collectd
+ else
+ sudo yum update -y
+ sudo yum install -y epel-release
+ sudo yum install -y collectd
+ sudo yum install -y collectd-virt
+ fi
+ cd ~
+
+ echo "$0: Install VES collectd plugin"
+ git clone https://git.opnfv.org/barometer
+ sudo sed -i -- "s/v1/v3/" barometer/3rd_party/collectd-ves-plugin/ves_plugin/ves_plugin.py
+
+ sudo sed -i -- "s/FQDNLookup true/FQDNLookup false/" $conf
+ sudo sed -i -- "s/#LoadPlugin cpu/LoadPlugin cpu/" $conf
+ sudo sed -i -- "s/#LoadPlugin disk/LoadPlugin disk/" $conf
+ sudo sed -i -- "s/#LoadPlugin interface/LoadPlugin interface/" $conf
+ sudo sed -i -- "s/#LoadPlugin memory/LoadPlugin memory/" $conf
+
+ if [[ "$guest" == true ]]; then
+ cat <<EOF | sudo tee -a $conf
+<LoadPlugin python>
+ Globals true
+</LoadPlugin>
+<Plugin python>
+ ModulePath "/home/$USER/barometer/3rd_party/collectd-ves-plugin/ves_plugin/"
+ LogTraces true
+ Interactive false
+ Import "ves_plugin"
+<Module ves_plugin>
+ Domain "$collector_ip"
+ Port 30000
+ Path ""
+ Topic ""
+ UseHttps false
+ Username "hello"
+ Password "world"
+ FunctionalRole "Collectd VES Agent"
+ GuestRunning true
+</Module>
+</Plugin>
+<Plugin cpu>
+ ReportByCpu false
+ ValuesPercentage true
+</Plugin>
+LoadPlugin aggregation
+<Plugin aggregation>
+ <Aggregation>
+ Plugin "cpu"
+ Type "percent"
+ GroupBy "Host"
+ GroupBy "TypeInstance"
+ SetPlugin "cpu-aggregation"
+ CalculateAverage true
+ </Aggregation>
+</Plugin>
+LoadPlugin uuid
+EOF
+ else
+ cat <<EOF | sudo tee -a $conf
+<LoadPlugin python>
+ Globals true
+</LoadPlugin>
+<Plugin python>
+ ModulePath "/home/$USER/barometer/3rd_party/collectd-ves-plugin/ves_plugin/"
+ LogTraces true
+ Interactive false
+ Import "ves_plugin"
+<Module ves_plugin>
+ Domain "$collector_ip"
+ Port 30000
+ Path ""
+ Topic ""
+ UseHttps false
+ Username "hello"
+ Password "world"
+ FunctionalRole "Collectd VES Agent"
+ GuestRunning false
+</Module>
+</Plugin>
+LoadPlugin virt
+<Plugin virt>
+ Connection "qemu:///system"
+ RefreshInterval 60
+ HostnameFormat uuid
+</Plugin>
+<Plugin cpu>
+ ReportByCpu false
+ ValuesPercentage true
+</Plugin>
+LoadPlugin aggregation
+<Plugin aggregation>
+ <Aggregation>
+ Plugin "cpu"
+ Type "percent"
+ GroupBy "Host"
+ GroupBy "TypeInstance"
+ SetPlugin "cpu-aggregation"
+ CalculateAverage true
+ </Aggregation>
+</Plugin>
+LoadPlugin uuid
+EOF
+ fi
+ sudo service collectd restart
+}
+
+setup_agent () {
+ echo "$0: Install prerequisites"
+ sudo apt-get install -y gcc
+ # NOTE: force is required as some packages can't be authenticated...
+ sudo apt-get install -y --force-yes libcurl4-openssl-dev
+ sudo apt-get install -y make
+
+ echo "$0: Clone agent library"
+ cd /home/ubuntu
+ git clone https://github.com/att/evel-library.git
+
+ echo "$0: Clone VES repo"
+ git clone https://gerrit.opnfv.org/gerrit/ves
+
+  echo "$0: Use ves_onap_demo blueprint version of evel_demo.c"
+  cp ves/tests/onap-demo/blueprints/tosca-vnfd-onap-demo/evel_demo.c evel-library/code/evel_demo/evel_demo.c
+
+ echo "$0: Build evel_demo agent"
+ cd evel-library/bldjobs
+ make
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ubuntu/evel-library/libs/x86_64
+
+ echo "$0: Start evel_demo agent"
+ id=$(cut -d ',' -f 3 /mnt/openstack/latest/meta_data.json | cut -d '"' -f 4)
+ nohup ../output/x86_64/evel_demo --id $id --fqdn $collector_ip --port 30000 --username $username --password $password -x > /dev/null 2>&1 &
+
+ echo "$0: Start collectd agent running in the VM"
+ setup_collectd true
+}
+
+setup_monitor () {
+ echo "$0: setup VES Monitor"
+ echo "$0: install dependencies"
+ # Note below: python (2.7) is required due to dependency on module 'ConfigParser'
+ sudo apt-get update
+ sudo apt-get upgrade -y
+ sudo apt-get install -y python python-jsonschema
+
+ echo "$0: setup VES Monitor config"
+ sudo mkdir /var/log/att
+ sudo chown ubuntu /var/log/att
+ touch /var/log/att/collector.log
+ sudo chown ubuntu /home/ubuntu/
+ cd /home/ubuntu/
+ git clone https://github.com/att/evel-test-collector.git
+ sed -i -- "s/vel_username = /vel_username = $username/" evel-test-collector/config/collector.conf
+ sed -i -- "s/vel_password = /vel_password = $password/" evel-test-collector/config/collector.conf
+ sed -i -- "s~vel_path = vendor_event_listener/~vel_path = ~" evel-test-collector/config/collector.conf
+ sed -i -- "s/vel_topic_name = example_vnf/vel_topic_name = /" evel-test-collector/config/collector.conf
+ sed -i -- "/vel_topic_name = /a vdu3_id = $vdu3_id" evel-test-collector/config/collector.conf
+ sed -i -- "/vel_topic_name = /a vdu2_id = $vdu2_id" evel-test-collector/config/collector.conf
+ sed -i -- "/vel_topic_name = /a vdu1_id = $vdu1_id" evel-test-collector/config/collector.conf
+
+ cp monitor.py evel-test-collector/code/collector/monitor.py
+ nohup python evel-test-collector/code/collector/monitor.py --config evel-test-collector/config/collector.conf --section default > /home/ubuntu/monitor.log
+}
+
+type=$1
+
+if [[ "$type" == "monitor" ]]; then
+ vdu1_id=$2
+ vdu2_id=$3
+ vdu3_id=$4
+ username=$5
+ password=$6
+else
+ vm_id=$2
+ collector_ip=$3
+ username=$4
+ password=$5
+fi
+
+setup_$type $1
+exit 0
diff --git a/tests/onap-demo/ves_onap_demo.sh b/tests/onap-demo/ves_onap_demo.sh
new file mode 100644
index 0000000..c4ee2e4
--- /dev/null
+++ b/tests/onap-demo/ves_onap_demo.sh
@@ -0,0 +1,599 @@
+#!/bin/bash
+# Copyright 2016-2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Deployment test for the VES agent and collector based
+# upon the Tacker Hello World blueprint, designed as a manual demo of the VES
+# concept using ONAP VNFs and integrating with the Barometer project collectd
+# agent.
+# Typical demo procedure is to execute the following actions from the OPNFV
+# jumphost or some host with access to the OpenStack controller
+# (see below for details):
+# setup: install Tacker in a Docker container. Note: only needs to be done
+# once per session and can be reused across OPNFV VES and Models tests,
+# i.e. you can start another test at the "start" step below.
+# start: install blueprint and start the VNF, including the app (load-balanced
+# web server) and VES agents running on the VMs. Installs the VES
+# monitor code but does not start the monitor (see below).
+# start_collectd: start the collectd daemon on bare metal hypervisor hosts
+# monitor: start the VES monitor, typically run in a second shell session.
+# pause: pause the app at one of the web server VDUs (VDU1 or VDU2)
+# stop: stop the VNF and uninstall the blueprint
+# stop_collectd: stop the collectd daemon on bare metal hypervisor hosts
+# clean: remove the tacker container and service (if desired, when done)
+#
+#
+# Note: if you want to try this on DevStack, your DevStack VM needs at minimum
+# 20 GB RAM and 20 GB hard drive.
+#
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+# $ git clone https://gerrit.opnfv.org/gerrit/ves
+# $ cd ves/tests/onap-demo
+# $ bash ves_onap_demo.sh setup <openrc> [branch]
+# setup: setup test environment
+# <openrc>: location of OpenStack openrc file
+# branch: OpenStack branch of Tacker to install (default: master)
+# $ bash ves_onap_demo.sh start
+# start: install blueprint and run test
+# $ bash ves_onap_demo.sh start_collectd|stop_collectd <hpv_ip> <user> <mon_ip>
+# start_collectd: install and start collectd daemon on hypervisor
+# stop_collectd: stop and uninstall collectd daemon on hypervisor
+# <hpv_ip>: hypervisor ip
+# <user>: username on hypervisor hosts, for ssh (user must be setup for
+# key-based auth on the hosts)
+# <mon_ip>: IP address of VES monitor
+# $ bash ves_onap_demo.sh monitor <mon_ip>
+# monitor: attach to the collector VM and run the VES Monitor
+# <mon_ip>: IP address of VDU4 (monitor VM)
+# $ bash ves_onap_demo.sh traffic <ip>
+# traffic: generate some traffic
+# <ip>: address of server
+# $ bash ves_onap_demo.sh pause <ip>
+# pause: pause the VNF (web server) for a minute to generate a state change
+# <ip>: address of server
+# $ bash ves_onap_demo.sh stop
+# stop: stop test and uninstall blueprint
+# $ bash ves_onap_demo.sh clean <hpvuser> <hpvpw>
+# clean: cleanup after test
+# <hpvuser>: username on hypervisor
+# <hpvpw>: password on hypervisor
+
+trap 'fail' ERR
+
+pass() {
+ echo "$0: $(date) Hooray!"
+ end=`date +%s`
+ runtime=$((end-test_start))
+ echo "$0: $(date) Test Duration = $runtime seconds"
+ exit 0
+}
+
+fail() {
+ echo "$0: $(date) Test Failed!"
+ end=`date +%s`
+ runtime=$((end-test_start))
+  echo "$0: $(date) Test Duration = $runtime seconds"
+ exit 1
+}
+
+assert() {
+ if [[ $2 == true ]]; then echo "$0 test assertion passed: $1"
+ else
+ echo "$0 test assertion failed: $1"
+ fail
+ fi
+}
+
+get_floating_net () {
+ echo "$0: $(date) get_floating_net start"
+ network_ids=($(neutron net-list | grep -v "+" | grep -v name | awk '{print $2}'))
+ for id in "${network_ids[@]}"; do
+ [[ $(neutron net-show ${id} | grep 'router:external' | grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
+ done
+ if [[ $FLOATING_NETWORK_ID ]]; then
+ FLOATING_NETWORK_NAME=$(neutron net-show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
+ echo "$0: $(date) floating network name is $FLOATING_NETWORK_NAME"
+ else
+ echo "$0: $(date) Floating network not found"
+ exit 1
+ fi
+ echo "$0: $(date) get_floating_net end"
+}
+
+try () {
+ count=$1
+ $3
+ while [[ $? == 1 && $count -gt 0 ]]; do
+ sleep $2
+ let count=$count-1
+ $3
+ done
+ if [[ $count -eq 0 ]]; then echo "$0: $(date) Command \"$3\" was not successful after $1 tries"; fi
+}
+
+setup () {
+ trap 'fail' ERR
+
+ echo "$0: $(date) Setup shared test folder /opt/tacker"
+ if [ -d /opt/tacker ]; then sudo rm -rf /opt/tacker; fi
+ sudo mkdir -p /opt/tacker
+ sudo chown $USER /opt/tacker
+ chmod 777 /opt/tacker/
+
+ echo "$0: $(date) copy test script and openrc to /opt/tacker"
+ cp $0 /opt/tacker/.
+ cp $1 /opt/tacker/admin-openrc.sh
+
+ source /opt/tacker/admin-openrc.sh
+ chmod 755 /opt/tacker/*.sh
+
+ echo "$0: $(date) tacker-setup part 1 fetching script from Models"
+ wget https://git.opnfv.org/models/plain/tests/utils/tacker-setup.sh -O /tmp/tacker-setup.sh
+ bash /tmp/tacker-setup.sh init
+ if [ $? -eq 1 ]; then fail; fi
+
+ echo "$0: $(date) tacker-setup part 2"
+ dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+ if [ "$dist" == "Ubuntu" ]; then
+ echo "$0: $(date) Execute tacker-setup.sh in the container"
+ sudo docker exec -it tacker /bin/bash /opt/tacker/tacker-setup.sh setup $2
+ if [ $? -eq 1 ]; then fail; fi
+ else
+ echo "$0: $(date) Execute tacker-setup.sh in the container"
+ sudo docker exec -i -t tacker /bin/bash /opt/tacker/tacker-setup.sh setup $2
+ if [ $? -eq 1 ]; then fail; fi
+ fi
+
+ assert "ves-onap-demo-tacker-001 (Tacker installation in a Docker container on the jumphost)" true
+}
+
+say_hello() {
+ echo "$0: $(date) Testing $1"
+ pass=false
+ count=10
+ while [[ $count -gt 0 && $pass != true ]]
+ do
+ sleep 30
+ let count=$count-1
+ if [[ $(curl $1 | grep -c "Hello World") -gt 0 ]]; then
+ echo "$0: $(date) Hello World found at $1"
+ pass=true
+ fi
+ done
+ if [[ $pass != true ]]; then fail; fi
+}
+
+copy_blueprint() {
+ echo "$0: $(date) copy test script to /opt/tacker"
+ cp $0 /opt/tacker/.
+
+ echo "$0: $(date) reset blueprints folder"
+ if [[ -d /opt/tacker/blueprints/tosca-vnfd-onap-demo ]]; then
+ rm -rf /opt/tacker/blueprints/tosca-vnfd-onap-demo
+ fi
+
+ echo "$0: $(date) copy tosca-vnfd-onap-demo to blueprints folder"
+ if [[ ! -d /opt/tacker/blueprints ]]; then mkdir /opt/tacker/blueprints; fi
+ cp -r blueprints/tosca-vnfd-onap-demo /opt/tacker/blueprints/tosca-vnfd-onap-demo
+}
+
+start() {
+# Disable trap for now, need to test to ensure premature fail does not occur
+# trap 'fail' ERR
+
+ echo "$0: $(date) setup OpenStack CLI environment"
+ source /opt/tacker/admin-openrc.sh
+
+ echo "$0: $(date) create flavor to use in blueprint"
+ openstack flavor create onap.demo --id auto --ram 1024 --disk 3 --vcpus 1
+
+ echo "$0: $(date) Create Nova key pair"
+ if [[ -f /opt/tacker/onap-demo ]]; then rm /opt/tacker/onap-demo; fi
+ ssh-keygen -t rsa -N "" -f /opt/tacker/onap-demo -C ubuntu@onap-demo
+ chmod 600 /opt/tacker/onap-demo
+ openstack keypair create --public-key /opt/tacker/onap-demo.pub onap-demo
+ assert "onap-demo-nova-001 (Keypair creation)" true
+
+ echo "$0: $(date) Inject public key into blueprint"
+ pubkey=$(cat /opt/tacker/onap-demo.pub)
+ sed -i -- "s~<pubkey>~$pubkey~" /opt/tacker/blueprints/tosca-vnfd-onap-demo/blueprint.yaml
+
+ vdus="VDU1 VDU2 VDU3 VDU4"
+ vdui="1 2 3 4"
+ vnf_vdui="1 2 3"
+ declare -a vdu_id=()
+ declare -a vdu_ip=()
+ declare -a vdu_url=()
+
+ # Setup for workarounds
+  echo "$0: $(date) pre-allocate floating IPs for the VDUs"
+ get_floating_net
+
+ # stack@us16-newton:~$ (openstack floating ip create public | awk "NR==7 { print \$4 }")
+ # 172.24.4.11
+ # stack@us16-newton:~$ (openstack floating ip create public | awk "/floating_ip_address/ { print \$4 }")
+ # 172.24.4.7
+
+ for i in $vdui; do
+ vdu_ip[$i]=$(nova floating-ip-create $FLOATING_NETWORK_NAME | awk "/$FLOATING_NETWORK_NAME/ { print \$4 }")
+ echo "$0: $(date) Pre-allocated ${vdu_ip[$i]} to VDU$i"
+ done
+
+
+ echo "$0: $(date) Inject web server floating IPs into LB code in blueprint"
+ sed -i -- "s/<vdu1_ip>/${vdu_ip[1]}/" /opt/tacker/blueprints/tosca-vnfd-onap-demo/blueprint.yaml
+  sed -i -- "s/<vdu2_ip>/${vdu_ip[2]}/" /opt/tacker/blueprints/tosca-vnfd-onap-demo/blueprint.yaml
+ # End setup for workarounds
+
+ echo "$0: $(date) create VNFD named onap-demo-vnfd"
+ cd /opt/tacker/blueprints/tosca-vnfd-onap-demo
+ # newton: NAME (was "--name") is now a positional parameter
+ tacker vnfd-create --vnfd-file blueprint.yaml onap-demo-vnfd
+ if [[ $? -eq 0 ]]; then
+ assert "onap-demo-tacker-002 (VNFD creation onap-demo-vnfd)" true
+ else
+ assert "onap-demo-tacker-002 (VNFD creation onap-demo-vnfd)" false
+ fi
+
+ echo "$0: $(date) create VNF named onap-demo-vnf"
+ # newton: NAME (was "--name") is now a positional parameter
+ tacker vnf-create --vnfd-name onap-demo-vnfd onap-demo-vnf
+ if [ $? -eq 1 ]; then fail; fi
+
+ echo "$0: $(date) wait for onap-demo-vnf to go ACTIVE"
+ active=""
+ count=24
+ while [[ -z $active && $count -gt 0 ]]
+ do
+ active=$(tacker vnf-show onap-demo-vnf | grep ACTIVE)
+ if [[ $(tacker vnf-show onap-demo-vnf | grep -c ERROR) -gt 0 ]]; then
+ echo "$0: $(date) onap-demo-vnf VNF creation failed with state ERROR"
+ assert "onap-demo-tacker-002 (onap-demo-vnf creation)" false
+ fi
+ let count=$count-1
+ sleep 30
+ echo "$0: $(date) wait for onap-demo-vnf to go ACTIVE"
+ done
+ if [[ $count == 0 ]]; then
+ echo "$0: $(date) onap-demo-vnf VNF creation failed - timed out"
+ assert "onap-demo-tacker-002 (VNF creation)" false
+ fi
+
+ # Setup for workarounds
+ echo "$0: $(date) directly set port security on ports (unsupported in Mitaka Tacker)"
+ # Alternate method
+ # HEAT_ID=$(tacker vnf-show onap-demo-vnfd | awk "/instance_id/ { print \$4 }")
+ # SERVER_ID=$(openstack stack resource list $HEAT_ID | awk "/VDU1 / { print \$4 }")
+ for vdu in $vdus; do
+ echo "$0: $(date) Setting port security on $vdu"
+ SERVER_ID=$(openstack server list | awk "/$vdu/ { print \$2 }")
+ id=($(neutron port-list -F id -f value))
+ for id in "${id[@]}"; do
+ if [[ $(neutron port-show $id | grep $SERVER_ID) ]]; then neutron port-update ${id} --port-security-enabled=True; fi
+ done
+ done
+
+ echo "$0: $(date) directly assign security group (unsupported in Mitaka Tacker)"
+ if [[ $(neutron security-group-list | awk "/ onap-demo / { print \$2 }") ]]; then neutron security-group-delete onap-demo; fi
+ neutron security-group-create onap-demo
+ neutron security-group-rule-create --direction ingress --protocol TCP --port-range-min 22 --port-range-max 22 onap-demo
+ neutron security-group-rule-create --direction ingress --protocol TCP --port-range-min 80 --port-range-max 80 onap-demo
+ neutron security-group-rule-create --direction ingress --protocol TCP --port-range-min 30000 --port-range-max 30000 onap-demo
+ for i in $vdui; do
+ vdu_id[$i]=$(openstack server list | awk "/VDU$i/ { print \$2 }")
+ echo "$0: $(date) Assigning security groups to VDU$i (${vdu_id[$i]})"
+ openstack server add security group ${vdu_id[$i]} onap-demo
+ openstack server add security group ${vdu_id[$i]} default
+ done
+
+ echo "$0: $(date) associate floating IPs"
+ # openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS
+ for i in $vdui; do
+ openstack server add floating ip ${vdu_id[$i]} ${vdu_ip[$i]}
+ done
+
+ echo "$0: $(date) get web server addresses"
+ vdu_url[1]="http://${vdu_ip[1]}"
+ vdu_url[2]="http://${vdu_ip[2]}"
+ vdu_url[3]="http://${vdu_ip[3]}"
+ vdu_url[4]="http://${vdu_ip[4]}:30000/eventListener/v3"
+
+ apt-get install -y curl
+
+ echo "$0: $(date) wait for VNF web service to be ready"
+ count=0
+ resp=$(curl http://${vdu_ip[1]})
+ echo $resp
+  while [[ $count -lt 10 && "$resp" == "" ]]; do
+ echo "$0: $(date) waiting for HTTP response from LB"
+ sleep 60
+ let count=$count+1
+ resp=$(curl http://${vdu_ip[3]})
+ echo $resp
+ done
+
+ echo "$0: $(date) verify onap-demo server is running at each web server and via the LB"
+ say_hello http://${vdu_ip[1]}
+ say_hello http://${vdu_ip[2]}
+ say_hello http://${vdu_ip[3]}
+
+ assert "onap-demo-onap-demo-vnf-001 (onap-demo VNF creation)" true
+ assert "onap-demo-tacker-003 (VNF creation)" true
+ assert "onap-demo-tacker-vnfd-002 (artifacts creation)" true
+ assert "onap-demo-tacker-vnfd-003 (user_data creation)" true
+
+ echo "$0: $(date) setup Monitor in VDU4 at ${vdu_ip[4]}"
+ scp -i /opt/tacker/onap-demo -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-onap-demo/start.sh ubuntu@${vdu_ip[4]}:/home/ubuntu/start.sh
+ scp -i /opt/tacker/onap-demo -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-onap-demo/monitor.py ubuntu@${vdu_ip[4]}:/home/ubuntu/monitor.py
+ ssh -i /opt/tacker/onap-demo -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[4]} "nohup bash /home/ubuntu/start.sh monitor ${vdu_id[1]} ${vdu_id[2]} ${vdu_id[3]} hello world > /dev/null 2>&1 &"
+
+ echo "$0: $(date) Execute agent startup script in the VNF VMs"
+ for i in $vnf_vdui; do
+ ssh -i /opt/tacker/onap-demo -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@${vdu_ip[$i]} "sudo chown ubuntu /home/ubuntu"
+ scp -i /opt/tacker/onap-demo -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-onap-demo/start.sh ubuntu@${vdu_ip[$i]}:/home/ubuntu/start.sh
+ ssh -i /opt/tacker/onap-demo -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@${vdu_ip[$i]} "nohup bash /home/ubuntu/start.sh agent ${vdu_id[$i]} ${vdu_ip[4]} hello world > /dev/null 2>&1 &"
+ done
+
+ echo "$0: $(date) Startup complete. VDU addresses:"
+ echo "web server 1: ${vdu_ip[1]}"
+ echo "web server 2: ${vdu_ip[2]}"
+ echo "load balancer: ${vdu_ip[3]}"
+ echo "monitor : ${vdu_ip[4]}"
+}
+
+stop() {
+ trap 'fail' ERR
+
+ echo "$0: $(date) setup OpenStack CLI environment"
+ source /opt/tacker/admin-openrc.sh
+
+ if [[ "$(tacker vnf-list | grep onap-demo-vnf | awk '{print $2}')" != '' ]]; then
+ echo "$0: $(date) uninstall onap-demo-vnf blueprint via CLI"
+ try 12 10 "tacker vnf-delete onap-demo-vnf"
+ # It can take some time to delete a VNF - thus wait 2 minutes
+ count=12
+ while [[ $count -gt 0 && "$(tacker vnf-list | grep onap-demo-vnfd | awk '{print $2}')" != '' ]]; do
+ echo "$0: $(date) waiting for onap-demo-vnf VNF delete to complete"
+ sleep 10
+ let count=$count-1
+ done
+ if [[ "$(tacker vnf-list | grep onap-demo-vnf | awk '{print $2}')" == '' ]]; then
+ assert "onap-demo-tacker-004 (VNF onap-demo-vnf deletion)" true
+ else
+ assert "onap-demo-tacker-004 (VNF onap-demo-vnf deletion)" false
+ fi
+ fi
+
+ # It can take some time to delete a VNFD - thus wait 2 minutes
+ if [[ "$(tacker vnfd-list | grep onap-demo-vnfd | awk '{print $2}')" != '' ]]; then
+ echo "$0: $(date) trying to delete the onap-demo-vnfd VNFD"
+ try 12 10 "tacker vnfd-delete onap-demo-vnfd"
+ if [[ "$(tacker vnfd-list | grep onap-demo-vnfd | awk '{print $2}')" == '' ]]; then
+ assert "onap-demo-tacker-005 (VNFD deletion onap-demo-vnfd)" true
+ else
+ assert "onap-demo-tacker-005 (VNFD deletion onap-demo-vnfd)" false
+ fi
+ fi
+
+# This part will apply for tests that dynamically create the VDU base image
+# iid=($(openstack image list|grep VNFImage|awk '{print $2}')); for id in ${iid[@]}; do openstack image delete ${id}; done
+# if [[ "$(openstack image list|grep VNFImage|awk '{print $2}')" == '' ]]; then
+# assert "models-tacker-vnfd-004 (artifacts deletion)" true
+# else
+# assert "models-tacker-vnfd-004 (artifacts deletion)" false
+# fi
+
+ # Cleanup for workarounds
+ fip=($(neutron floatingip-list | grep -v "+" | grep -v id | awk '{print $2}')); for id in "${fip[@]}"; do neutron floatingip-delete ${id}; done
+ sg=($(openstack security group list | grep onap-demo |awk '{print $2}'))
+ for id in "${sg[@]}"; do try 5 5 "openstack security group delete ${id}"; done
+ kid=($(openstack keypair list | grep onap-demo | awk '{print $2}')); for id in "${kid[@]}"; do openstack keypair delete ${id}; done
+
+ openstack flavor delete onap.demo
+}
+
+start_collectd() {
+ # NOTE: ensure hypervisor hostname is resolvable e.g. thru /etc/hosts
+ echo "$0: $(date) update start.sh script in case it changed"
+ cp -r blueprints/tosca-vnfd-onap-demo/start.sh /opt/tacker/blueprints/tosca-vnfd-onap-demo
+ echo "$0: $(date) start collectd agent on bare metal hypervisor host"
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /opt/tacker/blueprints/tosca-vnfd-onap-demo/start.sh $2@$1:/home/$2/start.sh
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $2@$1 \
+ "nohup bash /home/$2/start.sh collectd $1 $3 hello world > /dev/null 2>&1 &"
+}
+
+stop_collectd() {
+ echo "$0: $(date) remove collectd agent on bare metal hypervisor hosts"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $2@$1 <<'EOF'
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+if [ "$dist" == "Ubuntu" ]; then
+ sudo service collectd stop
+ sudo apt-get remove -y collectd
+else
+ sudo service collectd stop
+ sudo yum remove -y collectd collectd-virt
+fi
+rm -rf $HOME/barometer
+EOF
+}
+
+#
+# Test tools and scenarios
+#
+
+get_vdu_ip () {
+ source /opt/tacker/admin-openrc.sh
+
+ echo "$0: $(date) find VM IP for $1"
+ ip=$(openstack server list | awk "/$1/ { print \$10 }")
+}
+
+monitor () {
+ echo "$0: $(date) Start the VES Monitor in VDU4 - Stop first if running"
+ sudo ssh -t -t -i /opt/tacker/onap-demo -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$1 << 'EOF'
+sudo kill $(ps -ef | grep evel-test-collector | awk '{print $2}')
+nohup python evel-test-collector/code/collector/monitor.py --config evel-test-collector/config/collector.conf --section default > /home/ubuntu/monitor.log &
+tail -f monitor.log
+EOF
+}
+
+traffic () {
+ echo "$0: $(date) Generate some traffic, somewhat randomly"
+ ns="0 00 000"
+ while true
+ do
+ for n in $ns; do
+ sleep .$n$[ ( $RANDOM % 10 ) + 1 ]s
+ curl -s http://$1 > /dev/null
+ done
+ done
+}
+
+pause () {
+  echo "$0: $(date) Pause the VNF (web server) in $1 for 20 seconds to generate a state change fault report (Stopped)"
+ sudo ssh -t -t -i /opt/tacker/onap-demo -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$1 "sudo docker pause onap-demo"
+ sleep 20
+ echo "$0: $(date) Unpausing the VNF to generate a state change fault report (Started)"
+ sudo ssh -t -t -i /opt/tacker/onap-demo -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$1 "sudo docker unpause onap-demo"
+}
+
+forward_to_container () {
+ echo "$0: $(date) pass $1 command to ves_onap_demo in tacker container"
+ sudo docker exec tacker /bin/bash /opt/tacker/ves_onap_demo.sh $1
+ if [ $? -eq 1 ]; then fail; fi
+}
+
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+case "$1" in
+ setup)
+ setup $2 $3
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ run)
+ setup $2 $3
+ copy_blueprint
+ forward_to_container start
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ start)
+ if [[ -f /.dockerenv ]]; then
+ start
+ else
+ copy_blueprint
+ forward_to_container start
+ fi
+ pass
+ ;;
+ start_collectd)
+ start_collectd $2 $3 $4
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ stop_collectd)
+ stop_collectd $2 $3
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ monitor)
+ monitor $2
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ traffic)
+ traffic $2
+ pass
+ ;;
+ pause)
+ pause $2
+ ;;
+ stop)
+ if [[ -f /.dockerenv ]]; then
+ stop
+ else
+ forward_to_container stop
+ fi
+ if [ $? -eq 1 ]; then fail; fi
+ pass
+ ;;
+ clean)
+ echo "$0: $(date) Uninstall Tacker and test environment"
+ sudo docker exec -it tacker /bin/bash /opt/tacker/tacker-setup.sh clean
+ sudo docker stop tacker
+ sudo docker rm -v tacker
+ sudo rm -rf /opt/tacker
+ pass
+ ;;
+ *)
+ cat <<EOF
+ What this is: Deployment test for the VES agent and collector based
+ upon the Tacker Hello World blueprint, designed as a manual demo of the VES
+ concept and integration with the Barometer project collectd agent. Typical
+ demo procedure is to execute the following actions from the OPNFV jumphost
+  or some host with access to the OpenStack controller (see below for details):
+ setup: install Tacker in a docker container. Note: only needs to be done
+ once per session, and can be reused across OPNFV VES and Models tests,
+ i.e. you can start another test at the "start" step below.
+ start: install blueprint and start the VNF, including the app (load-balanced
+ web server) and VES agents running on the VMs. Installs the VES
+ monitor code but does not start the monitor (see below).
+ start_collectd: start the collectd daemon on bare metal hypervisor hosts
+ monitor: start the VES monitor, typically run in a second shell session.
+ pause: pause the app at one of the web server VDUs (VDU1 or VDU2)
+ stop: stop the VNF and uninstall the blueprint
+  stop_collectd: stop the collectd daemon on bare metal hypervisor hosts
+ clean: remove the tacker container and service (if desired, when done)
+
+ How to use:
+ $ git clone https://gerrit.opnfv.org/gerrit/ves
+  $ cd ves/tests/onap-demo
+  $ bash ves_onap_demo.sh setup <openrc> [branch]
+ setup: setup test environment
+ <openrc>: location of OpenStack openrc file
+ branch: OpenStack branch to install (default: master)
+ $ bash ves_onap_demo.sh start
+ start: install blueprint and run test
+ $ bash ves_onap_demo.sh start_collectd|stop_collectd <hpv_ip> <user> <mon_ip>
+ start_collectd: install and start collectd daemon on hypervisor
+ stop_collectd: stop and uninstall collectd daemon on hypervisor
+ <hpv_ip>: hypervisor ip
+ <user>: username on hypervisor hosts, for ssh (user must be setup for
+ key-based auth on the hosts)
+ <mon_ip>: IP address of VES monitor
+ $ bash ves_onap_demo.sh monitor <mon_ip>
+ monitor: attach to the collector VM and run the VES Monitor
+ <mon_ip>: IP address of VDU4 (monitor VM)
+ $ bash ves_onap_demo.sh traffic <ip>
+ traffic: generate some traffic
+ <ip>: address of server
+ $ bash ves_onap_demo.sh pause <ip>
+ pause: pause the VNF (web server) for a minute to generate a state change
+ <ip>: address of server
+ $ bash ves_onap_demo.sh stop
+ stop: stop test and uninstall blueprint
+ $ bash ves_onap_demo.sh clean <user>
+ clean: cleanup after test
+ <user>: username on hypervisor hosts, for ssh (user must be setup for
+ key-based auth on the hosts)
+EOF
+esac