From d18cb7f951829c18834e3e8905f810cbe97b538a Mon Sep 17 00:00:00 2001
From: Bryan Sullivan
Date: Sun, 26 Nov 2017 20:45:21 -0800
Subject: Factor out agent and collector container build

JIRA: VES-2

Change-Id: Idd9655b3233937edeea00a38ef983026e1486284
Signed-off-by: Bryan Sullivan
---
 .gitignore                     |   1 -
 build/ves-agent.sh             |  45 +++
 build/ves-agent/Dockerfile     |  59 +++
 build/ves-agent/start.sh       |  44 +++
 build/ves-collector.sh         |  47 +++
 build/ves-collector/Dockerfile |  41 +++
 build/ves-collector/monitor.py | 819 +++++++++++++++++++++++++++++++++++++++++
 build/ves-collector/start.sh   |  40 ++
 tools/demo_deploy.sh           | 110 ++++--
 tools/monitor.py               | 819 -----------------------------------------
 tools/ves-agent/Dockerfile     |  59 ---
 tools/ves-agent/start.sh       |  43 ---
 tools/ves-setup.sh             | 365 ++++++++++--------
 13 files changed, 1376 insertions(+), 1116 deletions(-)
 create mode 100644 build/ves-agent.sh
 create mode 100644 build/ves-agent/Dockerfile
 create mode 100644 build/ves-agent/start.sh
 create mode 100644 build/ves-collector.sh
 create mode 100644 build/ves-collector/Dockerfile
 create mode 100644 build/ves-collector/monitor.py
 create mode 100644 build/ves-collector/start.sh
 delete mode 100644 tools/monitor.py
 delete mode 100644 tools/ves-agent/Dockerfile
 delete mode 100644 tools/ves-agent/start.sh

diff --git a/.gitignore b/.gitignore
index 2f5ddf9..33a0451 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
 *~
 .*.sw?
-/build/
 /docs_build/
 /docs_output/
 /releng/
diff --git a/build/ves-agent.sh b/build/ves-agent.sh
new file mode 100644
index 0000000..b3d3616
--- /dev/null
+++ b/build/ves-agent.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Build script for the VES Agent docker image on Ubuntu.
+#.
+#. Usage:
+#.   bash ves-agent.sh <hub-user> <hub-pass>
+#.     <hub-user>: username for dockerhub
+#.     <hub-pass>: password for dockerhub
+#.
+#. Status: this is a work in progress, under test.

+echo; echo "$0 $(date): Update package repos"
+sudo apt-get update
+
+echo; echo "$0 $(date): Starting VES agent build process"
+if [[ -d /tmp/ves ]]; then rm -rf /tmp/ves; fi
+
+echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
+git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+
+echo; echo "$0 $(date): Building the image"
+cd /tmp/ves/build/ves-agent
+sudo docker build -t ves-agent .
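+
+# Note: the image is re-tagged below for the dockerhub namespace before it is
+# pushed. A sketch of an equivalent one-step alternative would be to build
+# with the namespaced tag directly, avoiding the separate tag step:
+#   sudo docker build -t $1/ves-agent:latest .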
+sudo docker login -u $1 -p $2
+
+echo; echo "$0 $(date): Tagging the image"
+id=$(sudo docker images | grep ves-agent | awk '{print $3}')
+id=$(echo $id | cut -d ' ' -f 1)
+sudo docker tag $id $1/ves-agent:latest
+
+echo; echo "$0 $(date): Pushing the image to dockerhub as $1/ves-agent"
+sudo docker push $1/ves-agent
diff --git a/build/ves-agent/Dockerfile b/build/ves-agent/Dockerfile
new file mode 100644
index 0000000..4c37197
--- /dev/null
+++ b/build/ves-agent/Dockerfile
@@ -0,0 +1,59 @@
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building an OPNFV VES Agent container image.
+#
+# Status: this is a work in progress, under test.
+#
+
+FROM ubuntu:xenial
+
+MAINTAINER Bryan Sullivan
+
+RUN apt-get update
+RUN apt-get install -y apt-utils
+RUN apt-get -y upgrade
+RUN apt-get install -y git
+# Required for kafka
+RUN apt-get install -y default-jre
+RUN apt-get install -y zookeeperd
+RUN apt-get install -y python-pip
+RUN pip install kafka-python
+# Required for building collectd
+RUN apt-get install -y pkg-config
+
+RUN mkdir /opt/ves
+
+# Build Kafka client
+RUN apt-get install -y build-essential
+RUN apt-get install -y libpthread-stubs0-dev
+RUN apt-get install -y libssl-dev
+RUN apt-get install -y libsasl2-dev
+RUN apt-get install -y liblz4-dev
+RUN /bin/bash -c 'git clone --branch v0.9.5 \
+https://github.com/edenhill/librdkafka.git /opt/ves/librdkafka; \
+cd /opt/ves/librdkafka; ./configure --prefix=/usr; \
+make; make install'
+
+# Install VES Agent
+RUN pip install pyaml
+
+RUN git clone https://gerrit.opnfv.org/gerrit/barometer /opt/ves/barometer
+# Test patch
+RUN /bin/bash -c 'cd /opt/ves/barometer; \
+git fetch https://gerrit.opnfv.org/gerrit/barometer \
+refs/changes/27/47427/1 && git checkout FETCH_HEAD'
+
+COPY start.sh /opt/ves/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/build/ves-agent/start.sh b/build/ves-agent/start.sh
new file mode 100644
index 0000000..0476316
--- /dev/null
+++ b/build/ves-agent/start.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Startup script for the OPNFV VES Agent running under docker.
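+#.
+#. A minimal sketch of how this container might be started (illustrative
+#. only; the env variable names are the ones consumed by this script):
+#.   sudo docker run -it -d \
+#.     -e ves_host=<collector-host> -e ves_port=<collector-port> \
+#.     -e ves_path= -e ves_topic= -e ves_https=False \
+#.     -e ves_user=<user> -e ves_pass=<pass> -e ves_interval=20 \
+#.     -e ves_version=5.1 -e ves_mode=<mode> \
+#.     -e ves_kafka_host=<kafka-ip> -e ves_kafka_hostname=<kafka-hostname> \
+#.     -e ves_kafka_port=9092 --name ves-agent <hub-user>/ves-agent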
+
+echo "$ves_kafka_host $ves_kafka_hostname" >>/etc/hosts
+echo "ves_kafka_hostname=$ves_kafka_hostname"
+echo "*** /etc/hosts ***"
+cat /etc/hosts
+
+cd /opt/ves/barometer/3rd_party/collectd-ves-app/ves_app
+cat <<EOF >ves_app_config.conf
+[config]
+Domain = $ves_host
+Port = $ves_port
+Path = $ves_path
+Topic = $ves_topic
+UseHttps = $ves_https
+Username = $ves_user
+Password = $ves_pass
+SendEventInterval = $ves_interval
+ApiVersion = $ves_version
+KafkaPort = $ves_kafka_port
+KafkaBroker = $ves_kafka_host
+EOF
+
+cat ves_app_config.conf
+echo "ves_mode=$ves_mode"
+python ves_app.py --events-schema=$ves_mode.yaml --loglevel DEBUG \
+  --config=ves_app_config.conf
+echo "*** ves_app.log ***"
+cat ves_app.log
diff --git a/build/ves-collector.sh b/build/ves-collector.sh
new file mode 100644
index 0000000..4b33f24
--- /dev/null
+++ b/build/ves-collector.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Build script for the VES Collector docker image on Ubuntu.
+#.
+#. Prerequisites:
+#.   Docker hub user logged in e.g. via "sudo docker login"
+#.
+#. Usage:
+#.   bash ves-collector.sh <hub-user> <hub-pass>
+#.     <hub-user>: username for dockerhub
+#.     <hub-pass>: password for dockerhub
+#.
+#. Status: this is a work in progress, under test.
+
+echo; echo "$0 $(date): Update package repos"
+sudo apt-get update
+
+echo; echo "$0 $(date): Starting VES collector build process"
+if [[ -d /tmp/ves ]]; then rm -rf /tmp/ves; fi
+
+echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
+git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+
+echo; echo "$0 $(date): Building the image"
+cd /tmp/ves/build/ves-collector
+sudo docker build -t ves-collector .
+
+echo; echo "$0 $(date): Tagging the image"
+id=$(sudo docker images | grep ves-collector | awk '{print $3}')
+id=$(echo $id | cut -d ' ' -f 1)
+sudo docker tag $id $1/ves-collector:latest
+
+echo; echo "$0 $(date): Pushing the image to dockerhub as $1/ves-collector"
+sudo docker push $1/ves-collector
diff --git a/build/ves-collector/Dockerfile b/build/ves-collector/Dockerfile
new file mode 100644
index 0000000..9161871
--- /dev/null
+++ b/build/ves-collector/Dockerfile
@@ -0,0 +1,41 @@
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building an OPNFV VES Collector container image.
+#
+# Status: this is a work in progress, under test.
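+#
+# Note: the local monitor.py copied in near the end of this file replaces
+# the stock collector code cloned from att/evel-test-collector, adding
+# InfluxDB output for received events.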
+# + +FROM ubuntu:xenial + +MAINTAINER Bryan Sullivan + +RUN apt-get update +RUN apt-get install -y apt-utils +RUN apt-get -y upgrade +RUN apt-get update +RUN apt-get install -y git + +# Required for VES collector +RUN apt-get install -y python-pip python-jsonschema +RUN pip install requests + +RUN mkdir /opt/ves + +# Clone VES Collector +RUN git clone https://github.com/att/evel-test-collector.git /opt/ves/evel-test-collector + +COPY monitor.py /opt/ves/evel-test-collector/code/collector/monitor.py +COPY start.sh /opt/ves/start.sh +ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"] diff --git a/build/ves-collector/monitor.py b/build/ves-collector/monitor.py new file mode 100644 index 0000000..b5cc697 --- /dev/null +++ b/build/ves-collector/monitor.py @@ -0,0 +1,819 @@ +#!/usr/bin/env python +# +# Copyright 2016-2017 AT&T Intellectual Property, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# What this is: Monitor and closed-loop policy agent as part of the OPNFV VES +# ves_onap_demo. +# +# Status: this is a work in progress, under test. + +from rest_dispatcher import PathDispatcher, set_404_content +from wsgiref.simple_server import make_server +import sys +import os +import platform +import traceback +import time +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +import ConfigParser +import logging.handlers +from base64 import b64decode +import string +import json +import jsonschema +from functools import partial +import requests + +monitor_mode = "f" +vdu_id = ['','','','','',''] +summary_e = ['***** Summary of key stats *****','','',''] +summary_c = ['Collectd agents:'] +status = ['','Started','Started','Started'] +base_url = '' +template_404 = b'''POST {0}''' +columns = 0 +rows = 0 + +class JSONObject: + def __init__(self, d): + self.__dict__ = d + +__all__ = [] +__version__ = 0.1 +__date__ = '2015-12-04' +__updated__ = '2015-12-04' + +TESTRUN = False +DEBUG = False +PROFILE = False + +#------------------------------------------------------------------------------ +# Address of influxdb server. +#------------------------------------------------------------------------------ + +influxdb = '127.0.0.1' + +#------------------------------------------------------------------------------ +# Credentials we expect clients to authenticate themselves with. +#------------------------------------------------------------------------------ +vel_username = '' +vel_password = '' + +#------------------------------------------------------------------------------ +# The JSON schema which we will use to validate events. +#------------------------------------------------------------------------------ +vel_schema = None + +#------------------------------------------------------------------------------ +# The JSON schema which we will use to validate client throttle state. 
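+# (populated in main() by overlaying the throttle schema fragment onto the
+# event schema, when the fragment file exists)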
+#------------------------------------------------------------------------------ +throttle_schema = None + +#------------------------------------------------------------------------------ +# The JSON schema which we will use to provoke throttling commands for testing. +#------------------------------------------------------------------------------ +test_control_schema = None + +#------------------------------------------------------------------------------ +# Pending command list from the testControl API +# This is sent as a response commandList to the next received event. +#------------------------------------------------------------------------------ +pending_command_list = None + +#------------------------------------------------------------------------------ +# Logger for this module. +#------------------------------------------------------------------------------ +logger = None + +def listener(environ, start_response, schema): + ''' + Handler for the Vendor Event Listener REST API. + + Extract headers and the body and check that: + + 1) The client authenticated themselves correctly. + 2) The body validates against the provided schema for the API. + + ''' + logger.info('Got a Vendor Event request') + print('==== ' + time.asctime() + ' ' + '=' * 49) + + #-------------------------------------------------------------------------- + # Extract the content from the request. + #-------------------------------------------------------------------------- + length = int(environ.get('CONTENT_LENGTH', '0')) + logger.debug('Content Length: {0}'.format(length)) + body = environ['wsgi.input'].read(length) + logger.debug('Content Body: {0}'.format(body)) + + mode, b64_credentials = string.split(environ.get('HTTP_AUTHORIZATION', + 'None None')) + # logger.debug('Auth. Mode: {0} Credentials: {1}'.format(mode, + # b64_credentials)) + logger.debug('Auth. Mode: {0} Credentials: ****'.format(mode)) + if (b64_credentials != 'None'): + credentials = b64decode(b64_credentials) + else: + credentials = None + + # logger.debug('Credentials: {0}'.format(credentials)) + logger.debug('Credentials: ****') + + #-------------------------------------------------------------------------- + # If we have a schema file then check that the event matches that expected. + #-------------------------------------------------------------------------- + if (schema is not None): + logger.debug('Attempting to validate data: {0}\n' + 'Against schema: {1}'.format(body, schema)) + try: + decoded_body = json.loads(body) + jsonschema.validate(decoded_body, schema) + logger.info('Event is valid!') + print('Valid body decoded & checked against schema OK:\n' + '{0}'.format(json.dumps(decoded_body, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + + except jsonschema.SchemaError as e: + logger.error('Schema is not valid! {0}'.format(e)) + print('Schema is not valid! {0}'.format(e)) + + except jsonschema.ValidationError as e: + logger.warn('Event is not valid against schema! {0}'.format(e)) + print('Event is not valid against schema! {0}'.format(e)) + print('Bad JSON body decoded:\n' + '{0}'.format(json.dumps(decoded_body, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + + except Exception as e: + logger.error('Event invalid for unexpected reason! {0}'.format(e)) + print('Schema is not valid for unexpected reason! 
{0}'.format(e)) + else: + logger.debug('No schema so just decode JSON: {0}'.format(body)) + try: + decoded_body = json.loads(body) + print('Valid JSON body (no schema checking) decoded:\n' + '{0}'.format(json.dumps(decoded_body, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + logger.info('Event is valid JSON but not checked against schema!') + + except Exception as e: + logger.error('Event invalid for unexpected reason! {0}'.format(e)) + print('JSON body not valid for unexpected reason! {0}'.format(e)) + + #-------------------------------------------------------------------------- + # See whether the user authenticated themselves correctly. + #-------------------------------------------------------------------------- + if (credentials == (vel_username + ':' + vel_password)): + logger.debug('Authenticated OK') +# print('Authenticated OK') + + #---------------------------------------------------------------------- + # Respond to the caller. If we have a pending commandList from the + # testControl API, send it in response. + #---------------------------------------------------------------------- + global pending_command_list + if pending_command_list is not None: + start_response('202 Accepted', + [('Content-type', 'application/json')]) + response = pending_command_list + pending_command_list = None + + print('\n'+ '='*80) + print('Sending pending commandList in the response:\n' + '{0}'.format(json.dumps(response, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + print('='*80 + '\n') + yield json.dumps(response) + else: + start_response('202 Accepted', []) + yield '' + else: + logger.warn('Failed to authenticate OK; creds: ' + credentials) + print('Failed to authenticate agent credentials: ', credentials, + 'against expected ', vel_username, ':', vel_password) + + #---------------------------------------------------------------------- + # Respond to the caller. 
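+        # (the error body below follows the VES requestError/policyException
+        # convention of the Vendor Event Listener API)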
+        #----------------------------------------------------------------------
+        start_response('401 Unauthorized', [ ('Content-type',
+                                              'application/json')])
+        req_error = { 'requestError': {
+                          'policyException': {
+                              'messageId': 'POL0001',
+                              'text': 'Failed to authenticate'
+                          }
+                      }
+                    }
+        yield json.dumps(req_error)
+
+    save_event(body)
+
+#--------------------------------------------------------------------------
+# Send event to influxdb
+#--------------------------------------------------------------------------
+def send_to_influxdb(event, pdata):
+    url = 'http://{}/write?db=veseventsdb'.format(influxdb)
+    print('Send {} to influxdb at {}: {}'.format(event, influxdb, pdata))
+    r = requests.post(url, data=pdata, headers={'Content-Type': 'text/plain'})
+    print('influxdb return code {}'.format(r.status_code))
+    if r.status_code != 204:
+        print('*** Influxdb save failed, return code {} ***'.format(r.status_code))
+
+#--------------------------------------------------------------------------
+# Save event data
+#--------------------------------------------------------------------------
+def save_event(body):
+    jobj = json.loads(body)
+    e = json.loads(body, object_hook=JSONObject)
+
+    domain = jobj['event']['commonEventHeader']['domain']
+    timestamp = jobj['event']['commonEventHeader']['lastEpochMicrosec']
+    agent = jobj['event']['commonEventHeader']['reportingEntityName'].upper()
+    if "LOCALHOST" in agent:
+        agent = "computehost"
+    source = jobj['event']['commonEventHeader']['sourceId'].upper()
+
+    if e.event.commonEventHeader.domain == "heartbeat":
+        print('Found Heartbeat')
+        send_to_influxdb('heartbeat',
+                         'heartbeat,system={},sequence={}'.format(
+                             agent, e.event.commonEventHeader.sequence))
+
+    if 'measurementsForVfScalingFields' in jobj['event']:
+        print('Found measurementsForVfScalingFields')
+
+#        "measurementsForVfScalingFields": {
+#            "additionalMeasurements": [
+#                {
+#                    "arrayOfFields": [
+#                        {
+#                            "name": "load-longterm",
+#                            "value": "0.34"
+#                        },
+#                        {
+#                            "name": "load-shortterm",
+#                            "value": "0.32"
+#                        },
+#                        {
+#                            "name": "load-midterm",
+#                            "value": "0.34"
+#                        }
+#                    ],
+#                    "name": "load"
+#                }
+#            ],
+
+        if 'additionalMeasurements' in jobj['event']['measurementsForVfScalingFields']:
+            for meas in jobj['event']['measurementsForVfScalingFields']['additionalMeasurements']:
+                name = meas['name']
+                pdata = '{},system={}'.format(name, source)
+                for field in meas['arrayOfFields']:
+                    pdata = pdata + ",{}={}".format(field['name'], field['value'])
+                # Per the InfluxDB line protocol, the comma after the tag set
+                # is replaced with a space to separate tags from fields
+                i = pdata.find(',', pdata.find('system'))
+                pdata = pdata[:i] + ' ' + pdata[i+1:]
+                send_to_influxdb("systemLoad", pdata)
+
+#            "cpuUsageArray": [
+#                {
+#                    "cpuIdentifier": "15",
+#                    "cpuIdle": 99.8998998999,
+#                    "cpuUsageInterrupt": 0,
+#                    "cpuUsageNice": 0,
+#                    "cpuUsageSoftIrq": 0,
+#                    "cpuUsageSteal": 0,
+#                    "cpuUsageSystem": 0,
+#                    "cpuUsageUser": 0.1001001001,
+#                    "cpuWait": 0,
+#                    "percentUsage": 0.0
+#                },
+
+        if 'cpuUsageArray' in jobj['event']['measurementsForVfScalingFields']:
+            print('Found cpuUsageArray')
+            for cpu in jobj['event']['measurementsForVfScalingFields']['cpuUsageArray']:
+                id = cpu['cpuIdentifier']
+                pdata = 'cpuUsage,system={},cpu={}'.format(source, id)
+                d = cpu.items()
+                for key, val in d:
+                    if key != 'cpuIdentifier':
+                        pdata = pdata + ',{}={}'.format(key, val)
+                i = pdata.find(',', pdata.find('cpu='))
+                pdata = pdata[:i] + ' ' + pdata[i+1:]
+                send_to_influxdb("cpuUsage", pdata)
+
+#            "diskUsageArray": [
+#                {
+#                    "diskIdentifier": "sda",
+#                    "diskIoTimeLast": 0.3996139893,
+#                    "diskMergedReadLast": 0,
+#                    "diskMergedWriteLast": 26.1747155344,
+#                    "diskOctetsReadLast": 0,
+#                    "diskOctetsWriteLast": 309767.93302,
+#                    "diskOpsReadLast": 0,
+#                    "diskOpsWriteLast": 10.9893839563,
+#                    "diskTimeReadLast": 0,
+#                    "diskTimeWriteLast": 0.699324445683
+#                },
+
+        if 'diskUsageArray' in jobj['event']['measurementsForVfScalingFields']:
+            print('Found diskUsageArray')
+            for disk in jobj['event']['measurementsForVfScalingFields']['diskUsageArray']:
+                id = disk['diskIdentifier']
+                pdata = 'diskUsage,system={},disk={}'.format(source, id)
+                d = disk.items()
+                for key, val in d:
+                    if key != 'diskIdentifier':
+                        pdata = pdata + ',{}={}'.format(key, val)
+                i = pdata.find(',', pdata.find('disk='))
+                pdata = pdata[:i] + ' ' + pdata[i+1:]
+                send_to_influxdb("diskUsage", pdata)
+
+#            "memoryUsageArray": [
+#                {
+#                    "memoryBuffered": 269056.0,
+#                    "memoryCached": 17636956.0,
+#                    "memoryFree": 244731658240,
+#                    "memorySlabRecl": 753160.0,
+#                    "memorySlabUnrecl": 210800.0,
+#                    "memoryUsed": 6240064.0,
+#                    "vmIdentifier": "opnfv01"
+#                }
+#            ],
+
+        if 'memoryUsageArray' in jobj['event']['measurementsForVfScalingFields']:
+            print('Found memoryUsageArray')
+            pdata = 'memoryUsage,system={}'.format(source)
+            vmid = e.event.measurementsForVfScalingFields.memoryUsageArray[0].vmIdentifier
+            d = jobj['event']['measurementsForVfScalingFields']['memoryUsageArray'][0].items()
+            for key, val in d:
+                if key != 'vmIdentifier':
+                    pdata = pdata + ',{}={}'.format(key, val)
+            i = pdata.find(',', pdata.find('system'))
+            pdata = pdata[:i] + ' ' + pdata[i+1:]
+            send_to_influxdb("memoryUsage", pdata)
+
+#            "vNicPerformanceArray": [
+#                {
+#                    "receivedDiscardedPacketsAccumulated": 0,
+#                    "receivedErrorPacketsAccumulated": 0,
+#                    "receivedOctetsAccumulated": 476.801524578,
+#                    "receivedTotalPacketsAccumulated": 2.90000899705,
+#                    "transmittedDiscardedPacketsAccumulated": 0,
+#                    "transmittedErrorPacketsAccumulated": 0,
+#                    "transmittedOctetsAccumulated": 230.100735749,
+#                    "transmittedTotalPacketsAccumulated": 1.20000372292,
+#                    "vNicIdentifier": "eno4",
+#                    "valuesAreSuspect": "true"
+#                },
+
+        if 'vNicPerformanceArray' in jobj['event']['measurementsForVfScalingFields']:
+            print('Found vNicPerformanceArray')
+            for vnic in jobj['event']['measurementsForVfScalingFields']['vNicPerformanceArray']:
+                vnid = vnic['vNicIdentifier']
+                # Tag with the event source; vmid is only defined when a
+                # memoryUsageArray was present in the same event
+                pdata = 'vNicPerformance,system={},vnic={}'.format(source, vnid)
+                d = vnic.items()
+                for key, val in d:
+                    if key != 'vNicIdentifier':
+                        pdata = pdata + ',{}={}'.format(key, val)
+                i = pdata.find(',', pdata.find('vnic'))
+                pdata = pdata[:i] + ' ' + pdata[i+1:]
+                send_to_influxdb("vNicPerformance", pdata)
+
+def test_listener(environ, start_response, schema):
+    '''
+    Handler for the Test Collector Test Control API.
+
+    There is no authentication on this interface.
+
+    This simply stores a commandList which will be sent in response to the next
+    incoming event on the EVEL interface.
+    '''
+    global pending_command_list
+    logger.info('Got a Test Control input')
+    print('============================')
+    print('==== TEST CONTROL INPUT ====')
+
+    #--------------------------------------------------------------------------
+    # GET allows us to get the current pending request.
+    #--------------------------------------------------------------------------
+    if environ.get('REQUEST_METHOD') == 'GET':
+        start_response('200 OK', [('Content-type', 'application/json')])
+        yield json.dumps(pending_command_list)
+        return
+
+    #--------------------------------------------------------------------------
+    # Extract the content from the request.
+ #-------------------------------------------------------------------------- + length = int(environ.get('CONTENT_LENGTH', '0')) + logger.debug('TestControl Content Length: {0}'.format(length)) + body = environ['wsgi.input'].read(length) + logger.debug('TestControl Content Body: {0}'.format(body)) + + #-------------------------------------------------------------------------- + # If we have a schema file then check that the event matches that expected. + #-------------------------------------------------------------------------- + if (schema is not None): + logger.debug('Attempting to validate data: {0}\n' + 'Against schema: {1}'.format(body, schema)) + try: + decoded_body = json.loads(body) + jsonschema.validate(decoded_body, schema) + logger.info('TestControl is valid!') + print('TestControl:\n' + '{0}'.format(json.dumps(decoded_body, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + + except jsonschema.SchemaError as e: + logger.error('TestControl Schema is not valid: {0}'.format(e)) + print('TestControl Schema is not valid: {0}'.format(e)) + + except jsonschema.ValidationError as e: + logger.warn('TestControl input not valid: {0}'.format(e)) + print('TestControl input not valid: {0}'.format(e)) + print('Bad JSON body decoded:\n' + '{0}'.format(json.dumps(decoded_body, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + + except Exception as e: + logger.error('TestControl input not valid: {0}'.format(e)) + print('TestControl Schema not valid: {0}'.format(e)) + else: + logger.debug('Missing schema just decode JSON: {0}'.format(body)) + try: + decoded_body = json.loads(body) + print('Valid JSON body (no schema checking) decoded:\n' + '{0}'.format(json.dumps(decoded_body, + sort_keys=True, + indent=4, + separators=(',', ': ')))) + logger.info('TestControl input not checked against schema!') + + except Exception as e: + logger.error('TestControl input not valid: {0}'.format(e)) + print('TestControl input not valid: {0}'.format(e)) + + #-------------------------------------------------------------------------- + # Respond to the caller. If we received otherField 'ThrottleRequest', + # generate the appropriate canned response. + #-------------------------------------------------------------------------- + pending_command_list = decoded_body + print('===== TEST CONTROL END =====') + print('============================') + start_response('202 Accepted', []) + yield '' + +def main(argv=None): + ''' + Main function for the collector start-up. + + Called with command-line arguments: + * --config ** + * --section *
<section>*
+    * --verbose
+
+    Where:
+
+    *<config file>* specifies the path to the configuration file.
+
+    *<section>* specifies the section within that config file.
+
+    *verbose* generates more information in the log files.
+
+    The process listens for REST API invocations and checks them. Errors are
+    displayed to stdout and logged.
+    '''
+
+    if argv is None:
+        argv = sys.argv
+    else:
+        sys.argv.extend(argv)
+
+    program_name = os.path.basename(sys.argv[0])
+    program_version = 'v{0}'.format(__version__)
+    program_build_date = str(__updated__)
+    program_version_message = '%%(prog)s {0} ({1})'.format(program_version,
+                                                           program_build_date)
+    if (__import__('__main__').__doc__ is not None):
+        program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
+    else:
+        program_shortdesc = 'Running in test harness'
+    program_license = '''{0}
+
+  Created on {1}.
+  Copyright 2015 Metaswitch Networks Ltd. All rights reserved.
+
+  Distributed on an "AS IS" basis without warranties
+  or conditions of any kind, either express or implied.
+
+USAGE
+'''.format(program_shortdesc, str(__date__))
+
+    try:
+        #----------------------------------------------------------------------
+        # Setup argument parser so we can parse the command-line.
+        #----------------------------------------------------------------------
+        parser = ArgumentParser(description=program_license,
+                                formatter_class=ArgumentDefaultsHelpFormatter)
+        parser.add_argument('-i', '--influxdb',
+                            dest='influxdb',
+                            default='localhost',
+                            help='InfluxDB server address')
+        parser.add_argument('-v', '--verbose',
+                            dest='verbose',
+                            action='count',
+                            help='set verbosity level')
+        parser.add_argument('-V', '--version',
+                            action='version',
+                            version=program_version_message,
+                            help='Display version information')
+        parser.add_argument('-a', '--api-version',
+                            dest='api_version',
+                            default='5',
+                            help='set API version')
+        parser.add_argument('-c', '--config',
+                            dest='config',
+                            default='/etc/opt/att/collector.conf',
+                            help='Use this config file.',
+                            metavar='<file>')
+        parser.add_argument('-s', '--section',
+                            dest='section',
+                            default='default',
+                            metavar='<section>
', + help='section to use in the config file') + + #---------------------------------------------------------------------- + # Process arguments received. + #---------------------------------------------------------------------- + args = parser.parse_args() + verbose = args.verbose + api_version = args.api_version + config_file = args.config + config_section = args.section + + #---------------------------------------------------------------------- + # Now read the config file, using command-line supplied values as + # overrides. + #---------------------------------------------------------------------- + defaults = {'log_file': 'collector.log', + 'vel_port': '12233', + 'vel_path': '', + 'vel_topic_name': '' + } + overrides = {} + config = ConfigParser.SafeConfigParser(defaults) + config.read(config_file) + + #---------------------------------------------------------------------- + # extract the values we want. + #---------------------------------------------------------------------- + global influxdb + global vel_username + global vel_password + global vel_topic_name + influxdb = config.get(config_section, 'influxdb', vars=overrides) + log_file = config.get(config_section, 'log_file', vars=overrides) + vel_port = config.get(config_section, 'vel_port', vars=overrides) + vel_path = config.get(config_section, 'vel_path', vars=overrides) + + vel_topic_name = config.get(config_section, + 'vel_topic_name', + vars=overrides) + vel_username = config.get(config_section, + 'vel_username', + vars=overrides) + vel_password = config.get(config_section, + 'vel_password', + vars=overrides) + vel_schema_file = config.get(config_section, + 'schema_file', + vars=overrides) + base_schema_file = config.get(config_section, + 'base_schema_file', + vars=overrides) + throttle_schema_file = config.get(config_section, + 'throttle_schema_file', + vars=overrides) + test_control_schema_file = config.get(config_section, + 'test_control_schema_file', + vars=overrides) + + #---------------------------------------------------------------------- + # Finally we have enough info to start a proper flow trace. + #---------------------------------------------------------------------- + global logger + print('Logfile: {0}'.format(log_file)) + logger = logging.getLogger('monitor') + if verbose > 0: + print('Verbose mode on') + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(logging.INFO) + handler = logging.handlers.RotatingFileHandler(log_file, + maxBytes=1000000, + backupCount=10) + if (platform.system() == 'Windows'): + date_format = '%Y-%m-%d %H:%M:%S' + else: + date_format = '%Y-%m-%d %H:%M:%S.%f %z' + formatter = logging.Formatter('%(asctime)s %(name)s - ' + '%(levelname)s - %(message)s', + date_format) + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.info('Started') + + #---------------------------------------------------------------------- + # Log the details of the configuration. 
+ #---------------------------------------------------------------------- + logger.debug('Log file = {0}'.format(log_file)) + logger.debug('Influxdb server = {0}'.format(influxdb)) + logger.debug('Event Listener Port = {0}'.format(vel_port)) + logger.debug('Event Listener Path = {0}'.format(vel_path)) + logger.debug('Event Listener Topic = {0}'.format(vel_topic_name)) + logger.debug('Event Listener Username = {0}'.format(vel_username)) + # logger.debug('Event Listener Password = {0}'.format(vel_password)) + logger.debug('Event Listener JSON Schema File = {0}'.format( + vel_schema_file)) + logger.debug('Base JSON Schema File = {0}'.format(base_schema_file)) + logger.debug('Throttle JSON Schema File = {0}'.format( + throttle_schema_file)) + logger.debug('Test Control JSON Schema File = {0}'.format( + test_control_schema_file)) + + #---------------------------------------------------------------------- + # Perform some basic error checking on the config. + #---------------------------------------------------------------------- + if (int(vel_port) < 1024 or int(vel_port) > 65535): + logger.error('Invalid Vendor Event Listener port ({0}) ' + 'specified'.format(vel_port)) + raise RuntimeError('Invalid Vendor Event Listener port ({0}) ' + 'specified'.format(vel_port)) + + if (len(vel_path) > 0 and vel_path[-1] != '/'): + logger.warning('Event Listener Path ({0}) should have terminating ' + '"/"! Adding one on to configured string.'.format( + vel_path)) + vel_path += '/' + + #---------------------------------------------------------------------- + # Load up the vel_schema, if it exists. + #---------------------------------------------------------------------- + if not os.path.exists(vel_schema_file): + logger.warning('Event Listener Schema File ({0}) not found. ' + 'No validation will be undertaken.'.format( + vel_schema_file)) + else: + global vel_schema + global throttle_schema + global test_control_schema + vel_schema = json.load(open(vel_schema_file, 'r')) + logger.debug('Loaded the JSON schema file') + + #------------------------------------------------------------------ + # Load up the throttle_schema, if it exists. + #------------------------------------------------------------------ + if (os.path.exists(throttle_schema_file)): + logger.debug('Loading throttle schema') + throttle_fragment = json.load(open(throttle_schema_file, 'r')) + throttle_schema = {} + throttle_schema.update(vel_schema) + throttle_schema.update(throttle_fragment) + logger.debug('Loaded the throttle schema') + + #------------------------------------------------------------------ + # Load up the test control _schema, if it exists. + #------------------------------------------------------------------ + if (os.path.exists(test_control_schema_file)): + logger.debug('Loading test control schema') + test_control_fragment = json.load( + open(test_control_schema_file, 'r')) + test_control_schema = {} + test_control_schema.update(vel_schema) + test_control_schema.update(test_control_fragment) + logger.debug('Loaded the test control schema') + + #------------------------------------------------------------------ + # Load up the base_schema, if it exists. 
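+            # (assumed to provide shared definitions referenced by the event
+            # schema; it is merged into vel_schema below so that validation
+            # can resolve them)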
+ #------------------------------------------------------------------ + if (os.path.exists(base_schema_file)): + logger.debug('Updating the schema with base definition') + base_schema = json.load(open(base_schema_file, 'r')) + vel_schema.update(base_schema) + logger.debug('Updated the JSON schema file') + + #---------------------------------------------------------------------- + # We are now ready to get started with processing. Start-up the various + # components of the system in order: + # + # 1) Create the dispatcher. + # 2) Register the functions for the URLs of interest. + # 3) Run the webserver. + #---------------------------------------------------------------------- + root_url = '/{0}eventListener/v{1}{2}'.\ + format(vel_path, + api_version, + '/' + vel_topic_name + if len(vel_topic_name) > 0 + else '') + throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\ + format(vel_path, api_version) + set_404_content(root_url) + dispatcher = PathDispatcher() + vendor_event_listener = partial(listener, schema = vel_schema) + dispatcher.register('GET', root_url, vendor_event_listener) + dispatcher.register('POST', root_url, vendor_event_listener) + vendor_throttle_listener = partial(listener, schema = throttle_schema) + dispatcher.register('GET', throttle_url, vendor_throttle_listener) + dispatcher.register('POST', throttle_url, vendor_throttle_listener) + + #---------------------------------------------------------------------- + # We also add a POST-only mechanism for test control, so that we can + # send commands to a single attached client. + #---------------------------------------------------------------------- + test_control_url = '/testControl/v{0}/commandList'.format(api_version) + test_control_listener = partial(test_listener, + schema = test_control_schema) + dispatcher.register('POST', test_control_url, test_control_listener) + dispatcher.register('GET', test_control_url, test_control_listener) + + httpd = make_server('', int(vel_port), dispatcher) + print('Serving on port {0}...'.format(vel_port)) + httpd.serve_forever() + + logger.error('Main loop exited unexpectedly!') + return 0 + + except KeyboardInterrupt: + #---------------------------------------------------------------------- + # handle keyboard interrupt + #---------------------------------------------------------------------- + logger.info('Exiting on keyboard interrupt!') + return 0 + + except Exception as e: + #---------------------------------------------------------------------- + # Handle unexpected exceptions. + #---------------------------------------------------------------------- + if DEBUG or TESTRUN: + raise(e) + indent = len(program_name) * ' ' + sys.stderr.write(program_name + ': ' + repr(e) + '\n') + sys.stderr.write(indent + ' for help use --help\n') + sys.stderr.write(traceback.format_exc()) + logger.critical('Exiting because of exception: {0}'.format(e)) + logger.critical(traceback.format_exc()) + return 2 + +#------------------------------------------------------------------------------ +# MAIN SCRIPT ENTRY POINT. +#------------------------------------------------------------------------------ +if __name__ == '__main__': + if TESTRUN: + #---------------------------------------------------------------------- + # Running tests - note that doctest comments haven't been included so + # this is a hook for future improvements. 
+ #---------------------------------------------------------------------- + import doctest + doctest.testmod() + + if PROFILE: + #---------------------------------------------------------------------- + # Profiling performance. Performance isn't expected to be a major + # issue, but this should all work as expected. + #---------------------------------------------------------------------- + import cProfile + import pstats + profile_filename = 'collector_profile.txt' + cProfile.run('main()', profile_filename) + statsfile = open('collector_profile_stats.txt', 'wb') + p = pstats.Stats(profile_filename, stream=statsfile) + stats = p.strip_dirs().sort_stats('cumulative') + stats.print_stats() + statsfile.close() + sys.exit(0) + + #-------------------------------------------------------------------------- + # Normal operation - call through to the main function. + #-------------------------------------------------------------------------- + sys.exit(main()) diff --git a/build/ves-collector/start.sh b/build/ves-collector/start.sh new file mode 100644 index 0000000..1c41778 --- /dev/null +++ b/build/ves-collector/start.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright 2017 AT&T Intellectual Property, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#. What this is: Startup script for the OPNFV VES Collector running under docker. + +cd /opt/ves +touch monitor.log + +sed -i -- \ + "s~log_file = /var/log/att/collector.log~log_file = /opt/ves/collector.log~" \ + evel-test-collector/config/collector.conf +sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $ves_host/g" \ + evel-test-collector/config/collector.conf +sed -i -- "s/vel_username =/vel_username = $ves_user/g" \ + evel-test-collector/config/collector.conf +sed -i -- "s/vel_password =/vel_password = $ves_pass/g" \ + evel-test-collector/config/collector.conf +sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $ves_path~g" \ + evel-test-collector/config/collector.conf +sed -i -- "s~vel_topic_name = example_vnf~vel_topic_name = $ves_topic~g" \ + evel-test-collector/config/collector.conf +sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host" \ + evel-test-collector/config/collector.conf + +python /opt/ves/evel-test-collector/code/collector/monitor.py \ + --config /opt/ves/evel-test-collector/config/collector.conf \ + --influxdb $ves_influxdb_host \ + --section default > /opt/ves/monitor.log 2>&1 diff --git a/tools/demo_deploy.sh b/tools/demo_deploy.sh index ab21bd6..973b381 100644 --- a/tools/demo_deploy.sh +++ b/tools/demo_deploy.sh @@ -13,48 +13,73 @@ # See the License for the specific language governing permissions and # limitations under the License. # -#. What this is: Complete scripted deployment of the VES monitoring framework -# When complete, the following will be installed: -#. - On the specified master node, a Kafka server and containers running the -# OPNFV Barometer VES agent, OPNFV VES collector, InfluxDB, and Grafana +#. What this is: Complete scripted deployment of the VES monitoring framework. +#. 
Intended to be invoked from a server used to manage the nodes where the VES +#. framework is to be installed, referred to here as the "admin server". When +#. complete, the following will be installed: +#. - On the specified master node, a Kafka server and containers running the +#. VES "core components" (OPNFV Barometer VES agent, OPNFV VES collector, +#. and optionally InfluxDB and Grafana if pre-existing instances of those +#. components are not accessible at the default or provided hosts as +#. described below). +#. "master" as used here refers to the node where these common VES framework +#. elements are deployed. It may typically be a master/control plane node +#. for a set of nodes, but can also be any other node. #. - On each specified worker node, collectd configured per OPNFV Barometer #. #. Prerequisites: -#. - Ubuntu server for kubernetes cluster nodes (master and worker nodes) -#. - MAAS server as cluster admin for kubernetes master/worker nodes +#. - Ubuntu Xenial host for the admin server +#. - Ubuntu Xenial server for master and worker nodes #. - Password-less ssh key provided for node setup -#. - hostname of kubernetes master setup in DNS or /etc/hosts -#. Usage: on the MAAS server +#. - hostname of selected master node in DNS or /etc/hosts +#. - env variables set prior to running this script, as per ves-setup.sh +#. ves_kafka_hostname: hostname of the node where the kafka server runs +#. - optional env varibles set prior to running this script, as per ves-setup.sh +#. ves_influxdb_host: ip:port of the influxdb service +#. ves_influxdb_auth: authentication for the influxdb service +#. ves_grafana_host: ip:port of the grafana service +#. ves_grafana_auth: authentication for the grafana service +#. +#. For deployment in a kubernetes cluster as setup by OPNFV Models scripts: +#. - k8s cluster setup as in OPNFV Models repo tools/kubernetes/demo_deploy.sh +#. which also allows use of Cloudify to deploy VES core services as +#. k8s services. +#. +#. Usage: on the admin server #. $ git clone https://gerrit.opnfv.org/gerrit/ves ~/ves -#. $ bash ~/ves/tools/demo_deploy.sh master -#. master: setup VES on k8s master -#. : IP of cluster master node -#. : SSH key enabling password-less SSH to nodes -#. $ bash ~/ves/tools/demo_deploy.sh worker -#. worker: setup VES on k8s worker -#. : IP of worker node +#. $ bash ~/ves/tools/demo_deploy.sh [cloudify] #. : SSH key enabling password-less SSH to nodes +#. : master node where core components will be installed +#. : list of worker nodes where collectd will be installed +#. cloudify: flag indicating to deploy VES core services via Cloudify -node=$2 -key=$3 +key=$1 +master=$2 +workers="$3" +cloudify=$4 eval `ssh-agent` ssh-add $key -if [[ "$1" == "master" ]]; then - echo; echo "$0 $(date): Setting up master node" - scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ - ~/ves ubuntu@$node:/tmp - ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$node <* - * --section *
<section>*
-    * --verbose
-
-    Where:
-
-    *<config file>* specifies the path to the configuration file.
-
-    *<section>
* specifies the section within that config file. - - *verbose* generates more information in the log files. - - The process listens for REST API invocations and checks them. Errors are - displayed to stdout and logged. - ''' - - if argv is None: - argv = sys.argv - else: - sys.argv.extend(argv) - - program_name = os.path.basename(sys.argv[0]) - program_version = 'v{0}'.format(__version__) - program_build_date = str(__updated__) - program_version_message = '%%(prog)s {0} ({1})'.format(program_version, - program_build_date) - if (__import__('__main__').__doc__ is not None): - program_shortdesc = __import__('__main__').__doc__.split('\n')[1] - else: - program_shortdesc = 'Running in test harness' - program_license = '''{0} - - Created on {1}. - Copyright 2015 Metaswitch Networks Ltd. All rights reserved. - - Distributed on an "AS IS" basis without warranties - or conditions of any kind, either express or implied. - -USAGE -'''.format(program_shortdesc, str(__date__)) - - try: - #---------------------------------------------------------------------- - # Setup argument parser so we can parse the command-line. - #---------------------------------------------------------------------- - parser = ArgumentParser(description=program_license, - formatter_class=ArgumentDefaultsHelpFormatter) - parser.add_argument('-i', '--influxdb', - dest='influxdb', - default='localhost', - help='InfluxDB server addresss') - parser.add_argument('-v', '--verbose', - dest='verbose', - action='count', - help='set verbosity level') - parser.add_argument('-V', '--version', - action='version', - version=program_version_message, - help='Display version information') - parser.add_argument('-a', '--api-version', - dest='api_version', - default='5', - help='set API version') - parser.add_argument('-c', '--config', - dest='config', - default='/etc/opt/att/collector.conf', - help='Use this config file.', - metavar='') - parser.add_argument('-s', '--section', - dest='section', - default='default', - metavar='
', - help='section to use in the config file') - - #---------------------------------------------------------------------- - # Process arguments received. - #---------------------------------------------------------------------- - args = parser.parse_args() - verbose = args.verbose - api_version = args.api_version - config_file = args.config - config_section = args.section - - #---------------------------------------------------------------------- - # Now read the config file, using command-line supplied values as - # overrides. - #---------------------------------------------------------------------- - defaults = {'log_file': 'collector.log', - 'vel_port': '12233', - 'vel_path': '', - 'vel_topic_name': '' - } - overrides = {} - config = ConfigParser.SafeConfigParser(defaults) - config.read(config_file) - - #---------------------------------------------------------------------- - # extract the values we want. - #---------------------------------------------------------------------- - global influxdb - global vel_username - global vel_password - global vel_topic_name - influxdb = config.get(config_section, 'influxdb', vars=overrides) - log_file = config.get(config_section, 'log_file', vars=overrides) - vel_port = config.get(config_section, 'vel_port', vars=overrides) - vel_path = config.get(config_section, 'vel_path', vars=overrides) - - vel_topic_name = config.get(config_section, - 'vel_topic_name', - vars=overrides) - vel_username = config.get(config_section, - 'vel_username', - vars=overrides) - vel_password = config.get(config_section, - 'vel_password', - vars=overrides) - vel_schema_file = config.get(config_section, - 'schema_file', - vars=overrides) - base_schema_file = config.get(config_section, - 'base_schema_file', - vars=overrides) - throttle_schema_file = config.get(config_section, - 'throttle_schema_file', - vars=overrides) - test_control_schema_file = config.get(config_section, - 'test_control_schema_file', - vars=overrides) - - #---------------------------------------------------------------------- - # Finally we have enough info to start a proper flow trace. - #---------------------------------------------------------------------- - global logger - print('Logfile: {0}'.format(log_file)) - logger = logging.getLogger('monitor') - if verbose > 0: - print('Verbose mode on') - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) - handler = logging.handlers.RotatingFileHandler(log_file, - maxBytes=1000000, - backupCount=10) - if (platform.system() == 'Windows'): - date_format = '%Y-%m-%d %H:%M:%S' - else: - date_format = '%Y-%m-%d %H:%M:%S.%f %z' - formatter = logging.Formatter('%(asctime)s %(name)s - ' - '%(levelname)s - %(message)s', - date_format) - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.info('Started') - - #---------------------------------------------------------------------- - # Log the details of the configuration. 
- #---------------------------------------------------------------------- - logger.debug('Log file = {0}'.format(log_file)) - logger.debug('Influxdb server = {0}'.format(influxdb)) - logger.debug('Event Listener Port = {0}'.format(vel_port)) - logger.debug('Event Listener Path = {0}'.format(vel_path)) - logger.debug('Event Listener Topic = {0}'.format(vel_topic_name)) - logger.debug('Event Listener Username = {0}'.format(vel_username)) - # logger.debug('Event Listener Password = {0}'.format(vel_password)) - logger.debug('Event Listener JSON Schema File = {0}'.format( - vel_schema_file)) - logger.debug('Base JSON Schema File = {0}'.format(base_schema_file)) - logger.debug('Throttle JSON Schema File = {0}'.format( - throttle_schema_file)) - logger.debug('Test Control JSON Schema File = {0}'.format( - test_control_schema_file)) - - #---------------------------------------------------------------------- - # Perform some basic error checking on the config. - #---------------------------------------------------------------------- - if (int(vel_port) < 1024 or int(vel_port) > 65535): - logger.error('Invalid Vendor Event Listener port ({0}) ' - 'specified'.format(vel_port)) - raise RuntimeError('Invalid Vendor Event Listener port ({0}) ' - 'specified'.format(vel_port)) - - if (len(vel_path) > 0 and vel_path[-1] != '/'): - logger.warning('Event Listener Path ({0}) should have terminating ' - '"/"! Adding one on to configured string.'.format( - vel_path)) - vel_path += '/' - - #---------------------------------------------------------------------- - # Load up the vel_schema, if it exists. - #---------------------------------------------------------------------- - if not os.path.exists(vel_schema_file): - logger.warning('Event Listener Schema File ({0}) not found. ' - 'No validation will be undertaken.'.format( - vel_schema_file)) - else: - global vel_schema - global throttle_schema - global test_control_schema - vel_schema = json.load(open(vel_schema_file, 'r')) - logger.debug('Loaded the JSON schema file') - - #------------------------------------------------------------------ - # Load up the throttle_schema, if it exists. - #------------------------------------------------------------------ - if (os.path.exists(throttle_schema_file)): - logger.debug('Loading throttle schema') - throttle_fragment = json.load(open(throttle_schema_file, 'r')) - throttle_schema = {} - throttle_schema.update(vel_schema) - throttle_schema.update(throttle_fragment) - logger.debug('Loaded the throttle schema') - - #------------------------------------------------------------------ - # Load up the test control _schema, if it exists. - #------------------------------------------------------------------ - if (os.path.exists(test_control_schema_file)): - logger.debug('Loading test control schema') - test_control_fragment = json.load( - open(test_control_schema_file, 'r')) - test_control_schema = {} - test_control_schema.update(vel_schema) - test_control_schema.update(test_control_fragment) - logger.debug('Loaded the test control schema') - - #------------------------------------------------------------------ - # Load up the base_schema, if it exists. 
- #------------------------------------------------------------------ - if (os.path.exists(base_schema_file)): - logger.debug('Updating the schema with base definition') - base_schema = json.load(open(base_schema_file, 'r')) - vel_schema.update(base_schema) - logger.debug('Updated the JSON schema file') - - #---------------------------------------------------------------------- - # We are now ready to get started with processing. Start-up the various - # components of the system in order: - # - # 1) Create the dispatcher. - # 2) Register the functions for the URLs of interest. - # 3) Run the webserver. - #---------------------------------------------------------------------- - root_url = '/{0}eventListener/v{1}{2}'.\ - format(vel_path, - api_version, - '/' + vel_topic_name - if len(vel_topic_name) > 0 - else '') - throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\ - format(vel_path, api_version) - set_404_content(root_url) - dispatcher = PathDispatcher() - vendor_event_listener = partial(listener, schema = vel_schema) - dispatcher.register('GET', root_url, vendor_event_listener) - dispatcher.register('POST', root_url, vendor_event_listener) - vendor_throttle_listener = partial(listener, schema = throttle_schema) - dispatcher.register('GET', throttle_url, vendor_throttle_listener) - dispatcher.register('POST', throttle_url, vendor_throttle_listener) - - #---------------------------------------------------------------------- - # We also add a POST-only mechanism for test control, so that we can - # send commands to a single attached client. - #---------------------------------------------------------------------- - test_control_url = '/testControl/v{0}/commandList'.format(api_version) - test_control_listener = partial(test_listener, - schema = test_control_schema) - dispatcher.register('POST', test_control_url, test_control_listener) - dispatcher.register('GET', test_control_url, test_control_listener) - - httpd = make_server('', int(vel_port), dispatcher) - print('Serving on port {0}...'.format(vel_port)) - httpd.serve_forever() - - logger.error('Main loop exited unexpectedly!') - return 0 - - except KeyboardInterrupt: - #---------------------------------------------------------------------- - # handle keyboard interrupt - #---------------------------------------------------------------------- - logger.info('Exiting on keyboard interrupt!') - return 0 - - except Exception as e: - #---------------------------------------------------------------------- - # Handle unexpected exceptions. - #---------------------------------------------------------------------- - if DEBUG or TESTRUN: - raise(e) - indent = len(program_name) * ' ' - sys.stderr.write(program_name + ': ' + repr(e) + '\n') - sys.stderr.write(indent + ' for help use --help\n') - sys.stderr.write(traceback.format_exc()) - logger.critical('Exiting because of exception: {0}'.format(e)) - logger.critical(traceback.format_exc()) - return 2 - -#------------------------------------------------------------------------------ -# MAIN SCRIPT ENTRY POINT. -#------------------------------------------------------------------------------ -if __name__ == '__main__': - if TESTRUN: - #---------------------------------------------------------------------- - # Running tests - note that doctest comments haven't been included so - # this is a hook for future improvements. 
-        #----------------------------------------------------------------------
-        import doctest
-        doctest.testmod()
-
-    if PROFILE:
-        #----------------------------------------------------------------------
-        # Profiling performance. Performance isn't expected to be a major
-        # issue, but this should all work as expected.
-        #----------------------------------------------------------------------
-        import cProfile
-        import pstats
-        profile_filename = 'collector_profile.txt'
-        cProfile.run('main()', profile_filename)
-        statsfile = open('collector_profile_stats.txt', 'wb')
-        p = pstats.Stats(profile_filename, stream=statsfile)
-        stats = p.strip_dirs().sort_stats('cumulative')
-        stats.print_stats()
-        statsfile.close()
-        sys.exit(0)
-
-    #--------------------------------------------------------------------------
-    # Normal operation - call through to the main function.
-    #--------------------------------------------------------------------------
-    sys.exit(main())
diff --git a/tools/ves-agent/Dockerfile b/tools/ves-agent/Dockerfile
deleted file mode 100644
index 4c37197..0000000
--- a/tools/ves-agent/Dockerfile
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2017 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: A Dockerfile for building an OPFNV VES Agent container image.
-#
-# Status: this is a work in progress, under test.
-#
-
-FROM ubuntu:xenial
-
-MAINTAINER Bryan Sullivan
-
-RUN apt-get update
-RUN apt-get install -y apt-utils
-RUN apt-get -y upgrade
-RUN apt-get install -y git
-# Required for kafka
-RUN apt-get install -y default-jre
-RUN apt-get install -y zookeeperd
-RUN apt-get install -y python-pip
-RUN pip install kafka-python
-# Required for building collectd
-RUN apt-get install -y pkg-config
-
-RUN mkdir /opt/ves
-
-# Build Kafka client
-RUN apt-get install -y build-essential
-RUN apt-get install -y libpthread-stubs0-dev
-RUN apt-get install -y libssl-dev
-RUN apt-get install -y libsasl2-dev
-RUN apt-get install -y liblz4-dev
-RUN /bin/bash -c 'git clone --branch v0.9.5 \
-https://github.com/edenhill/librdkafka.git /opt/ves/librdkafka; \
-cd /opt/ves/librdkafka; ./configure --prefix=/usr; \
-make; make install'
-
-# Install VES Agent
-RUN pip install pyaml
-
-RUN git clone https://gerrit.opnfv.org/gerrit/barometer /opt/ves/barometer
-# Test patch
-RUN /bin/bash -c 'cd /opt/ves/barometer; \
-git fetch https://gerrit.opnfv.org/gerrit/barometer \
-refs/changes/27/47427/1 && git checkout FETCH_HEAD'
-
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/tools/ves-agent/start.sh b/tools/ves-agent/start.sh
deleted file mode 100644
index 1153ab1..0000000
--- a/tools/ves-agent/start.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#. What this is: Startup script for the OPNFV VES Agent running under docker.
-
-echo "$ves_kafka_host $ves_hostname" >>/etc/hosts
-
-cd /opt/ves/barometer/3rd_party/collectd-ves-app/ves_app
-cat <<EOF >ves_app_config.conf
-[config]
-Domain = $ves_host
-Port = $ves_port
-Path = $ves_path
-Topic = $ves_topic
-UseHttps = $ves_https
-Username = $ves_user
-Password = $ves_pass
-SendEventInterval = $ves_interval
-ApiVersion = $ves_version
-KafkaPort = $ves_kafka_port
-KafkaBroker = $ves_kafka_host
-EOF
-
-cat ves_app_config.conf
-echo "ves_mode=$ves_mode"
-python ves_app.py --events-schema=$ves_mode.yaml --loglevel DEBUG \
-  --config=ves_app_config.conf
-echo "*** ves_app.log ***"
-cat ves_app.log
-echo "*** /opt/ves/kafka_2.11-0.11.0.2/kafka.log ***"
-cat /opt/ves/kafka_2.11-0.11.0.2/kafka.log
diff --git a/tools/ves-setup.sh b/tools/ves-setup.sh
index 9b37740..027b440 100644
--- a/tools/ves-setup.sh
+++ b/tools/ves-setup.sh
@@ -27,11 +27,7 @@
 #. - Install Kafka server on one of the hosts, or use a pre-installed server
 #.   accessible from the agent hosts.
 #. - Install collectd on each host.
-#. - Install the VES agent on each host.
-#. - As needed, install the VES agent on each virtual host. This could include
-#.   pre-installed VES agents in VM or container images, which are configured
-#.   upon virtual host deployment, or agent install/config as part of virtual
-#.   host deploy. NOTE: support for pre-installed VES agents is a WIP.
+#. - Install the VES agent on one of the hosts.
 #.
 #. Prerequisites:
 #. - Ubuntu Xenial (Centos support to be provided)
@@ -47,8 +43,9 @@
 #.   ves_pass: password for basic auth with collector (default: empty)
 #.   ves_interval: frequency in sec for collectd data reports (default: 20)
 #.   ves_version: VES API version (default: 5.1)
+#.   ves_kafka_host: kafka host IP (default: 127.0.0.1)
+#.   ves_kafka_hostname: kafka host hostname (default: localhost)
 #.   ves_kafka_port: kafka port (default: 9092)
-#.   ves_kafka_host: kafka host IP or hostname (default: 127.0.0.1)
 #.   ves_influxdb_host: influxdb host:port (default: none)
 #.   ves_influxdb_auth: credentials in form "user/pass" (default: none)
 #.   ves_grafana_host: grafana host:port (default: none)
@@ -56,11 +53,17 @@
 #.
 #. Usage:
 #.   git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
-#.   bash /tmp/ves/ves-setup.sh <collector|kafka|collectd|agent>
+#.   bash /tmp/ves/ves-setup.sh <collector|kafka|collectd|agent> [cloudify]
 #.     collector: setup VES collector (test collector)
 #.     kafka: setup kafka server for VES events from collect agent(s)
 #.     collectd: setup collectd with libvirt plugin, as a kafka publisher
 #.     agent: setup VES agent in host or guest mode, as a kafka consumer
+#.     cloudify: (optional) use cloudify to deploy the component, as set up by
+#.       tools/cloudify/k8s-cloudify.sh in the OPNFV Models repo.
+#.   bash /tmp/ves/ves-setup.sh clean <master> <workers>
+#.     master: VES master node IP
+#.     workers: quoted, space-separated list of worker node IPs
+#.
 #. See demo_deploy.sh in this repo for a recommended sequence of the above.
 #.
 #. Status: this is a work in progress, under test.
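For orientation, this is the kind of end-to-end sequence the revised usage block implies; a sketch only, with hypothetical host IPs (demo_deploy.sh in this repo remains the reference flow):

  # on the collector/kafka host, then each collectd host, then the agent host
  git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
  bash /tmp/ves/ves-setup.sh collector
  bash /tmp/ves/ves-setup.sh kafka
  bash /tmp/ves/ves-setup.sh collectd
  bash /tmp/ves/ves-setup.sh agent cloudify
  # teardown; note the quoted, space-separated worker list
  bash /tmp/ves/ves-setup.sh clean 10.0.0.1 "10.0.0.2 10.0.0.3"
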
@@ -96,12 +99,10 @@ function common_prereqs() {
 function setup_env() {
   if [[ ! -d /tmp/ves ]]; then mkdir /tmp/ves; fi
   cp $0 /tmp/ves
-  if [[ ! -f /tmp/ves/ves_env.sh ]]; then
-    cat <<EOF >/tmp/ves/ves_env.sh
+  cat <<'EOF' >/tmp/ves/ves_env.sh
 #!/bin/bash
 ves_mode="${ves_mode:=node}"
 ves_host="${ves_host:=127.0.0.1}"
-ves_hostname="${ves_hostname:=localhost}"
 ves_port="${ves_port:=30000}"
 ves_path="${ves_path:=}"
 ves_topic="${ves_topic:=}"
@@ -111,14 +112,14 @@ ves_pass="${ves_pass:=}"
 ves_interval="${ves_interval:=20}"
 ves_version="${ves_version:=5.1}"
 ves_kafka_host="${ves_kafka_host:=127.0.0.1}"
+ves_kafka_hostname="${ves_kafka_hostname:=localhost}"
 ves_kafka_port="${ves_kafka_port:=9092}"
-ves_influxdb_host="${ves_influxdb_host:=}"
+ves_influxdb_host="${ves_influxdb_host:=localhost:8086}"
 ves_influxdb_auth="${ves_influxdb_auth:=}"
-ves_grafana_host="${ves_grafana_host:=}"
-ves_grafana_auth="${ves_grafana_auth:=admin/admin}"
+ves_grafana_host="${ves_grafana_host:=localhost:3000}"
+ves_grafana_auth="${ves_grafana_auth:=admin:admin}"
 export ves_mode
 export ves_host
-export ves_hostname
 export ves_port
 export ves_path
 export ves_topic
@@ -126,14 +127,15 @@ export ves_https
 export ves_user
 export ves_pass
 export ves_interval
-export ves_kafka_hostame
+export ves_kafka_host
+export ves_kafka_hostname
 export ves_kafka_port
 export ves_influxdb_host
 export ves_influxdb_auth
 export ves_grafana_host
 export ves_grafana_auth
 EOF
-  fi
+  source /tmp/ves/ves_env.sh
   echo /tmp/ves/ves_env.sh
 }
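The rewritten ves_env.sh above relies on bash ":=" default expansion inside a quoted heredoc: the defaults are evaluated only when the file is sourced, so any variable the caller has already exported wins. A minimal sketch of the intended override pattern (the values are hypothetical):

  export ves_kafka_host=172.16.0.5      # agent hosts must reach kafka here
  export ves_kafka_hostname=kafka-node  # hostname added to /etc/hosts for collectd
  source /tmp/ves/ves_env.sh            # all unset variables fall back to defaults
  echo "kafka target: $ves_kafka_hostname ($ves_kafka_host:$ves_kafka_port)"
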
@@ -159,90 +161,83 @@ function setup_kafka() {
     > kafka_2.11-$ver/kafka.log 2>&1 &
 }
 
-function setup_kafka_client() {
-  log "Install Apache Kafka C/C++ client library"
-  if [[ ! -f /.dockerenv ]]; then dosudo="sudo"; fi
-  $dosudo apt-get install -y build-essential
-  git clone https://github.com/edenhill/librdkafka.git ~/librdkafka
-  cd ~/librdkafka
-  git checkout -b v0.9.5 v0.9.5
-  # TODO: Barometer VES guide to clarify specific prerequisites for Ubuntu
-  $dosudo apt-get install -y libpthread-stubs0-dev
-  $dosudo apt-get install -y libssl-dev
-  $dosudo apt-get install -y libsasl2-dev
-  $dosudo apt-get install -y liblz4-dev
-  ./configure --prefix=/usr
-  make
-  $dosudo make install
-}
-
 function setup_collectd() {
   log "setup collectd"
   common_prereqs
-  setup_env
+  source /tmp/ves/ves_env.sh
 
   log "cleanup any previous failed install"
   sudo rm -rf ~/collectd-virt
   sudo rm -rf ~/librdkafka
   sudo rm -rf ~/collectd
-  setup_kafka_client
-
-  log "Build collectd with Kafka support"
-  git clone https://github.com/collectd/collectd.git ~/collectd
-  cd ~/collectd
+  log "Install Apache Kafka C/C++ client library"
+  # TODO: asap, replace the build process below with package install
+  # sudo apt-get install -y librdkafka1 librdkafka-dev
+  sudo apt-get install -y build-essential
+  git clone https://github.com/edenhill/librdkafka.git ~/librdkafka
+  cd ~/librdkafka
+  git checkout -b v0.9.5 v0.9.5
   # TODO: Barometer VES guide to clarify specific prerequisites for Ubuntu
-  sudo apt-get install -y flex bison
-  sudo apt-get install -y autoconf
-  sudo apt-get install -y libtool
-  ./build.sh
-  ./configure --with-librdkafka=/usr --without-perl-bindings --enable-perl=no
+  sudo apt-get install -y libpthread-stubs0-dev
+  sudo apt-get install -y libssl-dev
+  sudo apt-get install -y libsasl2-dev
+  sudo apt-get install -y liblz4-dev
+  ./configure --prefix=/usr
   make
   sudo make install
 
-  # TODO: Barometer VES guide to clarify collectd.service is correct
-  log "install collectd as a service"
-  sed -i -- 's~ExecStart=/usr/sbin/collectd~ExecStart=/opt/collectd/sbin/collectd~'\
-    contrib/systemd.collectd.service
-  sed -i -- 's~EnvironmentFile=-/etc/sysconfig/collectd~EnvironmentFile=-/opt/collectd/etc/~'\
-    contrib/systemd.collectd.service
-  sed -i -- 's~EnvironmentFile=-/etc/default/collectd~EnvironmentFile=-/opt/collectd/etc/~'\
-    contrib/systemd.collectd.service
-  sed -i -- 's~CapabilityBoundingSet=~CapabilityBoundingSet=CAP_SETUID CAP_SETGID~'\
-    contrib/systemd.collectd.service
-
-  sudo cp contrib/systemd.collectd.service /etc/systemd/system/
-  cd /etc/systemd/system/
-  sudo mv systemd.collectd.service collectd.service
-  sudo chmod +x collectd.service
+  log "Install collectd"
+  if [[ "$ves_collectd" != "build" ]]; then
+    sudo apt-get install -y collectd
+  else
+    log "Build collectd with Kafka support"
+    git clone https://github.com/collectd/collectd.git ~/collectd
+    cd ~/collectd
+    # TODO: Barometer VES guide to clarify specific prerequisites for Ubuntu
+    sudo apt-get install -y flex bison
+    sudo apt-get install -y autoconf
+    sudo apt-get install -y libtool
+    ./build.sh
+    ./configure --with-librdkafka=/usr --without-perl-bindings --enable-perl=no
+    make
+    sudo make install
+
+    # TODO: Barometer VES guide to clarify collectd.service is correct
+    log "install collectd as a service"
+    sed -i -- 's~ExecStart=/usr/sbin/collectd~ExecStart=/opt/collectd/sbin/collectd~'\
+      contrib/systemd.collectd.service
+    sed -i -- 's~EnvironmentFile=-/etc/sysconfig/collectd~EnvironmentFile=-/opt/collectd/etc/~'\
+      contrib/systemd.collectd.service
+    sed -i -- 's~EnvironmentFile=-/etc/default/collectd~EnvironmentFile=-/opt/collectd/etc/~'\
+      contrib/systemd.collectd.service
+    sed -i -- 's~CapabilityBoundingSet=~CapabilityBoundingSet=CAP_SETUID CAP_SETGID~'\
+      contrib/systemd.collectd.service
+
+    sudo cp contrib/systemd.collectd.service /etc/systemd/system/
+    cd /etc/systemd/system/
+    sudo mv systemd.collectd.service collectd.service
+    sudo chmod +x collectd.service
+  fi
   sudo systemctl daemon-reload
   sudo systemctl start collectd.service
 
   log "setup VES collectd config for VES $ves_mode mode"
   if [[ "$ves_mode" == "node" ]]; then
     # TODO: Barometer VES guide to clarify prerequisites install for Ubuntu
-    log "setup additional prerequisites for VES host mode"
+    log "setup additional prerequisites for VES node mode"
     sudo apt-get install -y libxml2-dev libpciaccess-dev libyajl-dev \
       libdevmapper-dev
 
+    log "start libvirtd"
    # TODO: install libvirt from source to enable all features per
    # http://docs.opnfv.org/en/latest/submodules/barometer/docs/release/userguide/feature.userguide.html#virt-plugin
     sudo systemctl start libvirtd
 
-    # TODO: supposed to be merged with main collectd repo, but without this
-    # collectd still fails "plugin_load: Could not find plugin "virt" in /opt/collectd/lib/collectd"
-    rm -rf /tmp/ves/collectd-virt
-    git clone https://github.com/maryamtahhan/collectd /tmp/ves/collectd-virt
-    cd /tmp/ves/collectd-virt
-    ./build.sh
-    ./configure --enable-syslog --enable-logfile --enable-debug
-    make
-    sudo make install
-
-    # TODO: fix for journalctl -xe report "... is marked executable"
-    sudo chmod 744 /etc/systemd/system/collectd.service
+#    # TODO: fix for journalctl -xe report "... is marked executable"
+#    sudo chmod 744 /etc/systemd/system/collectd.service
 
-    cat <
@@ -304,7 +299,7 @@ LoadPlugin write_kafka
 EOF
   else
-    cat <
@@ -354,12 +349,13 @@ LoadPlugin match_regex
 EOF
   fi
+  log "collectd config updated"
 
   # sudo sed -i -- "s/#Hostname \"localhost\"/Hostname \"$HOSTNAME\"/" /opt/collectd/etc/collectd.conf
-  if [[ $(grep -c $ves_hostname /etc/hosts) -eq 0 ]]; then
-    log "add to /etc/hosts: $ves_kafka_host $ves_hostname"
-    echo "$ves_kafka_host $ves_hostname" | sudo tee -a /etc/hosts
+  if [[ $(grep -c $ves_kafka_hostname /etc/hosts) -eq 0 ]]; then
+    log "add to /etc/hosts: $ves_kafka_host $ves_kafka_hostname"
+    echo "$ves_kafka_host $ves_kafka_hostname" | sudo tee -a /etc/hosts
  fi
  log "restart collectd to apply updated config"
  sudo systemctl restart collectd
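Whichever install path setup_collectd() takes, a quick way to confirm that collectd is actually publishing to Kafka is to watch the collectd topic with the console consumer bundled in the kafka server that "ves-setup.sh kafka" installs. A sketch, assuming the kafka_2.11-0.11.0.2 layout used elsewhere in this repo and the sourced ves_env.sh variables:

  sudo systemctl status collectd.service --no-pager
  # a few events should appear within one $ves_interval period
  /tmp/ves/kafka_2.11-0.11.0.2/bin/kafka-console-consumer.sh \
    --zookeeper $ves_kafka_host:2181 --topic collectd --max-messages 5
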
@@ -368,23 +364,83 @@ function setup_agent() {
   log "setup VES agent"
   source /tmp/ves/ves_env.sh
-  cd /tmp/ves/tools
-  sudo docker build -t ves-agent ves-agent
-  sudo docker run -it -d \
-    -e ves_mode=$ves_mode \
-    -e ves_host=$ves_host \
-    -e ves_port=$ves_port \
-    -e ves_path=$ves_path \
-    -e ves_topic=$ves_topic \
-    -e ves_https=$ves_https \
-    -e ves_user=$ves_user \
-    -e ves_pass=$ves_pass \
-    -e ves_interval=$ves_interval \
-    -e ves_version=$ves_version \
-    -e ves_kafka_port=$ves_kafka_port \
-    -e ves_kafka_host=$ves_kafka_host \
-    -e ves_hostname=$ves_hostname \
-    --name ves-agent ves-agent
+
+  log "deploy the VES agent container"
+  if [[ "$1" == "cloudify" ]]; then
+    cd /tmp/ves/tools/cloudify
+    # Cloudify is deployed on the k8s master node
+    source ~/k8s_env.sh
+    manager_ip=$k8s_master
+    log "copy kube config from k8s master for insertion into blueprint"
+    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+      ubuntu@$manager_ip:/home/ubuntu/.kube/config ves-agent/kube.config
+
+    log "package the blueprint"
+    # CLI: cfy blueprints package -o /tmp/$bp $bp
+    tar ckf /tmp/blueprint.tar ves-agent
+
+    log "upload the blueprint"
+    # CLI: cfy blueprints upload -t default_tenant -b $bp /tmp/$bp.tar.gz
+    curl -s -X PUT -u admin:admin --header 'Tenant: default_tenant' \
+      --header "Content-Type: application/octet-stream" -o /tmp/json \
+      http://$manager_ip/api/v3.1/blueprints/ves-agent?application_file_name=blueprint.yaml \
+      -T /tmp/blueprint.tar
+
+    inputs="{ \
+      \"ves_mode\": \"$ves_mode\",
+      \"ves_host\": \"$ves_host\",
+      \"ves_port\": \"$ves_port\",
+      \"ves_path\": \"$ves_path\",
+      \"ves_topic\": \"$ves_topic\",
+      \"ves_https\": \"$ves_https\",
+      \"ves_user\": \"$ves_user\",
+      \"ves_pass\": \"$ves_pass\",
+      \"ves_interval\": \"$ves_interval\",
+      \"ves_version\": \"$ves_version\",
+      \"ves_kafka_port\": \"$ves_kafka_port\",
+      \"ves_kafka_host\": \"$ves_kafka_host\",
+      \"ves_kafka_hostname\": \"$ves_kafka_hostname\"}"
+
+    log "create a deployment for the blueprint"
+    # CLI: cfy deployments create -t default_tenant -b $bp $bp
+    curl -s -X PUT -u admin:admin --header 'Tenant: default_tenant' \
+      --header "Content-Type: application/json" -o /tmp/json \
+      -d "{\"blueprint_id\": \"ves-agent\", \"inputs\": $inputs}" \
+      http://$manager_ip/api/v3.1/deployments/ves-agent
+    sleep 10
+
+    # CLI: cfy workflows list -d $bp
+
+    log "install the deployment pod and service"
+    # CLI: cfy executions start install -d $bp
+    curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
+      --header "Content-Type: application/json" -o /tmp/json \
+      -d "{\"deployment_id\":\"ves-agent\", \"workflow_id\":\"install\"}" \
+      http://$manager_ip/api/v3.1/executions
+  else
+    sudo docker run -it -d \
+      -e ves_mode=$ves_mode \
+      -e ves_host=$ves_host \
+      -e ves_port=$ves_port \
+      -e ves_path=$ves_path \
+      -e ves_topic=$ves_topic \
+      -e ves_https=$ves_https \
+      -e ves_user=$ves_user \
+      -e ves_pass=$ves_pass \
+      -e ves_interval=$ves_interval \
+      -e ves_version=$ves_version \
+      -e ves_kafka_port=$ves_kafka_port \
+      -e ves_kafka_host=$ves_kafka_host \
+      -e ves_kafka_hostname=$ves_kafka_hostname \
+      --name ves-agent blsaws/ves-agent:latest
+  fi
+
+  # debug hints
+  # sudo docker exec -it ves-agent apt-get install -y wget
+  # sudo docker exec -it ves-agent wget http://www-eu.apache.org/dist/kafka/0.11.0.2/kafka_2.11-0.11.0.2.tgz -O /opt/ves/kafka_2.11-0.11.0.2.tgz
+  # sudo docker exec -it ves-agent tar -xvzf /opt/ves/kafka_2.11-0.11.0.2.tgz
+  # sudo docker exec -it ves-agent kafka_2.11-0.11.0.2/bin/kafka-console-consumer.sh --zookeeper :2181 --topic collectd
+  # /tmp/ves/kafka_2.11-0.11.0.2/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic collectd
 }
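The REST calls in setup_agent() return as soon as the Cloudify Manager accepts the request; the install workflow itself runs asynchronously. Where a caller needs to block on completion, one option is to poll the executions API; a sketch against the same v3.1 endpoints, admin credentials, and jq dependency assumed above:

  status=none
  while [[ "$status" != "terminated" ]]; do
    sleep 10
    # list executions for the ves-agent deployment and pick the install workflow
    status=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
      "http://$manager_ip/api/v3.1/executions?deployment_id=ves-agent" \
      | jq -r '.items[] | select(.workflow_id=="install") | .status')
    echo "install execution status: $status"
  done
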
 
 function setup_collector() {
@@ -392,24 +448,19 @@
   $2 $3 $4
   log "install prerequistes"
-  sudo apt-get install -y jq
+  sudo apt-get install -y jq
 
-  ves_hostname=$HOSTNAME
-  export ves_hostname
   ves_host=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
   export ves_host
   setup_env
-  echo "ves_influxdb_host=$ves_influxdb_host"
-  echo "ves_grafana_host=$ves_grafana_host"
-
-  if [[ "$ves_influxdb_host" == "" ]]; then
+  if ! curl http://$ves_influxdb_host/ping ; then
     # TODO: migrate to deployment via Helm
     log "setup influxdb container"
     ves_influxdb_host="$ves_host:8086"
     export ves_influxdb_host
-    sed -i -- "s/ves_influxdb_host=/ves_influxdb_host=$ves_influxdb_host/" \
-      /tmp/ves/ves_env.sh
+    rm /tmp/ves/ves_env.sh
+    setup_env
     sudo docker run -d --name=ves-influxdb -p 8086:8086 influxdb
     status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
     while [[ "x$status" != "xrunning" ]]; do
@@ -425,13 +476,14 @@
       sleep 10
     done
   fi
+  echo "ves_influxdb_host=$ves_influxdb_host"
 
   log "setup InfluxDB database"
   # TODO: check if pre-existing and skip
   curl -X POST http://$ves_influxdb_host/query \
     --data-urlencode "q=CREATE DATABASE veseventsdb"
 
-  if [[ "$ves_grafana_host" == "" ]]; then
+  if ! curl http://$ves_grafana_host ; then
     # TODO: migrate to deployment via Helm
     log "install Grafana container"
     ves_grafana_host="$ves_host:3000"
@@ -448,6 +500,7 @@
     status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
   done
   log "Grafana container state is $status"
+  echo "ves_grafana_host=$ves_grafana_host"
 
   log "wait for Grafana API to be active"
   while ! curl http://$ves_grafana_host ; do
@@ -487,66 +540,57 @@ EOF
 
   log "setup collector container"
   # TODO: migrate to deployment via Helm
-  cd /tmp/ves
-  touch monitor.log
-  rm -rf /tmp/ves/evel-test-collector
-  git clone https://github.com/att/evel-test-collector.git
-  sed -i -- \
-    "s~log_file = /var/log/att/collector.log~log_file = /opt/ves/collector.log~" \
-    evel-test-collector/config/collector.conf
-  sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $ves_host/g" \
-    evel-test-collector/config/collector.conf
-  sed -i -- "s/vel_username =/vel_username = $ves_user/g" \
-    evel-test-collector/config/collector.conf
-  sed -i -- "s/vel_password =/vel_password = $ves_pass/g" \
-    evel-test-collector/config/collector.conf
-  sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $ves_path~g" \
-    evel-test-collector/config/collector.conf
-  sed -i -- "s~vel_topic_name = example_vnf~vel_topic_name = $ves_topic~g" \
-    evel-test-collector/config/collector.conf
-  sed -i -- "/vel_topic_name = /a influxdb = $ves_host" \
-    evel-test-collector/config/collector.conf
-
-  cp /tmp/ves/tools/monitor.py \
-    evel-test-collector/code/collector/monitor.py
-
-  # Note below: python (2.7) is required due to dependency on module 'ConfigParser'
-  cat <<EOF >/tmp/ves/setup-collector.sh
-apt-get update
-apt-get upgrade -y
-apt-get install -y python python-jsonschema python-pip
-pip install requests
-python /opt/ves/evel-test-collector/code/collector/monitor.py \
---config /opt/ves/evel-test-collector/config/collector.conf \
---influxdb $ves_host \
---section default > /opt/ves/monitor.log 2>&1 &
-EOF
+  sudo docker run -it -d -p 30000:30000 \
+    -e ves_host=$ves_host \
+    -e ves_port=$ves_port \
+    -e ves_path=$ves_path \
+    -e ves_topic=$ves_topic \
+    -e ves_https=$ves_https \
+    -e ves_user=$ves_user \
+    -e ves_pass=$ves_pass \
+    -e ves_interval=$ves_interval \
+    -e ves_version=$ves_version \
+    -e ves_influxdb_host=$ves_influxdb_host \
+    --name ves-collector blsaws/ves-collector:latest
 
-  sudo docker run -it -d -v /tmp/ves:/opt/ves --name=ves-collector \
-    -p 30000:30000 ubuntu:xenial /bin/bash
-  sudo docker exec ves-collector /bin/bash /opt/ves/setup-collector.sh
   # debug hints
   # sudo docker exec -it ves-collector apt-get install -y tcpdump
   # sudo docker exec -it ves-collector tcpdump -A -v -s 0 -i any port 30000
   # curl http://$ves_host:30000
   # sudo docker exec -it ves-collector /bin/bash
-  # ~/kafka_2.11-0.11.0.2/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic collectd
 }
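Once the collector container is up it should begin writing events into the veseventsdb database created above. A hedged verification sketch, using the InfluxDB HTTP query API and the jq prerequisite installed earlier:

  # confirm the collector answers on its listener port
  curl -s http://$ves_host:30000
  # list the measurements that collectd events have produced so far
  curl -sG "http://$ves_influxdb_host/query" \
    --data-urlencode "db=veseventsdb" \
    --data-urlencode "q=SHOW MEASUREMENTS" | jq .
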
 
 function clean() {
-  log "clean installation for $1 at $2"
-  if [[ "$1" == "master" ]]; then
-    cs="ves-agent ves-collector ves-grafana ves-influxdb"
-    for c in $cs; do
-      log "stop and remove container $c"
-      sudo docker stop $c
-      sudo docker rm -v $c
-    done
-  fi
-  log "remove collectd config for VES"
-  sudo sed -i -- '/VES plugin/,$d' /opt/collectd/etc/collectd.conf
-  sudo systemctl restart collectd
-  sudo rm -rf /tmp/ves
+  log "clean installation"
+  master=$1
+  workers="$2"
+
+  all_nodes="$master $workers"
+  for node in $all_nodes; do
+    log "remove collectd config for VES at node $node"
+    ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+      ubuntu@$node <