summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--build/ves-agent.sh5
-rw-r--r--build/ves-agent/Dockerfile29
-rw-r--r--build/ves-barometer.sh51
-rw-r--r--build/ves-barometer/Dockerfile38
-rw-r--r--build/ves-barometer/start.sh151
-rw-r--r--build/ves-collector.sh8
-rw-r--r--build/ves-collector/Dashboard.json (renamed from tools/grafana/Dashboard.json)0
-rw-r--r--build/ves-collector/Dockerfile14
-rw-r--r--build/ves-collector/start.sh59
-rw-r--r--build/ves-kafka.sh2
-rw-r--r--build/ves-kafka/start.sh6
-rw-r--r--tools/cloudify/ves-agent/blueprint.yaml178
-rw-r--r--tools/cloudify/ves-collector/blueprint.yaml183
-rw-r--r--tools/cloudify/ves-grafana/blueprint.yaml123
-rw-r--r--tools/cloudify/ves-influxdb/blueprint.yaml122
-rw-r--r--tools/cloudify/ves-kafka/blueprint.yaml142
-rw-r--r--tools/cloudify/ves-zookeeper/blueprint.yaml122
-rw-r--r--tools/demo_deploy.sh157
-rw-r--r--tools/kubernetes/ves-barometer/daemonset.yaml66
-rw-r--r--tools/ves-clean.sh231
-rw-r--r--tools/ves-setup.sh611
21 files changed, 1829 insertions, 469 deletions
diff --git a/build/ves-agent.sh b/build/ves-agent.sh
index 87d4b07..396d7ea 100644
--- a/build/ves-agent.sh
+++ b/build/ves-agent.sh
@@ -19,7 +19,7 @@
#. Docker hub user logged in e.g. via "sudo docker login"
#.
#. Usage:
-#. bash ves-agent.sh <hub-user> <hub-pass>
+#. bash ves-agent.sh <hub-user>
#. hub-user: username for dockerhub
#.
#. NOTE: To allow patch testing, this script will not reclone the VES repo
@@ -31,9 +31,6 @@ echo; echo "$0 $(date): Update package repos"
sudo apt-get update
echo; echo "$0 $(date): Starting VES agent build process"
-if [[ -d /tmp/ves ]]; then rm -rf /tmp/ves; fi
-
-echo; echo "$0 $(date): Starting VES kafka build process"
if [[ ! -d /tmp/ves ]]; then
echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
diff --git a/build/ves-agent/Dockerfile b/build/ves-agent/Dockerfile
index 4c37197..293fcd5 100644
--- a/build/ves-agent/Dockerfile
+++ b/build/ves-agent/Dockerfile
@@ -21,26 +21,19 @@ FROM ubuntu:xenial
MAINTAINER Bryan Sullivan
-RUN apt-get update
-RUN apt-get install -y apt-utils
+RUN mkdir /opt/ves
+
+RUN apt-get update && apt-get install -y apt-utils
RUN apt-get -y upgrade
-RUN apt-get install -y git
-# Required for kafka
-RUN apt-get install -y default-jre
-RUN apt-get install -y zookeeperd
-RUN apt-get install -y python-pip
+# Required for kafka: default-jre zookeeperd python-pip kafka-python
+# Required for building librdkafka: git build-essential libpthread-stubs0-dev libssl-dev libsasl2-dev liblz4-dev
+# Required for building collectd: pkg-config
+RUN apt-get update && apt-get install -y default-jre \
+zookeeperd python-pip pkg-config \
+git build-essential libpthread-stubs0-dev libssl-dev libsasl2-dev liblz4-dev
RUN pip install kafka-python
-# Required for building collectd
-RUN apt-get install -y pkg-config
-
-RUN mkdir /opt/ves
# Build Kafka client
-RUN apt-get install -y build-essential
-RUN apt-get install -y libpthread-stubs0-dev
-RUN apt-get install -y libssl-dev
-RUN apt-get install -y libsasl2-dev
-RUN apt-get install -y liblz4-dev
RUN /bin/bash -c 'git clone --branch v0.9.5 \
https://github.com/edenhill/librdkafka.git /opt/ves/librdkafka; \
cd /opt/ves/librdkafka; ./configure --prefix=/usr; \
@@ -50,10 +43,6 @@ make; make install'
RUN pip install pyaml
RUN git clone https://gerrit.opnfv.org/gerrit/barometer /opt/ves/barometer
-# Test patch
-RUN /bin/bash -c 'cd /opt/ves/barometer; \
-git fetch https://gerrit.opnfv.org/gerrit/barometer \
-refs/changes/27/47427/1 && git checkout FETCH_HEAD'
COPY start.sh /opt/ves/start.sh
ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/build/ves-barometer.sh b/build/ves-barometer.sh
new file mode 100644
index 0000000..86e81f4
--- /dev/null
+++ b/build/ves-barometer.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Build script for the OPNFV Barometer collectd agent docker image.
+#.
+#. Prerequisites:
+#. Docker hub user logged in e.g. via "sudo docker login"
+#.
+#. Usage:
+#. bash ves-barometer.sh <hub-user> [--no-cache]
+#. hub-user: username for dockerhub
+#. --no-cache
+#.
+#. NOTE: To allow patch testing, this script will not reclone the VES repo
+#. if it exists under /tmp
+#.
+#. Status: this is a work in progress, under test.
+
+cache="$2"
+echo; echo "$0 $(date): Update package repos"
+sudo apt-get update
+
+echo; echo "$0 $(date): Starting VES agent build process"
+if [[ ! -d /tmp/ves ]]; then
+ echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
+ git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+fi
+
+echo; echo "$0 $(date): Building the image"
+cd /tmp/ves/build/ves-barometer
+sudo docker build $cache -t ves-barometer .
+
+echo; echo "$0 $(date): Tagging the image"
+id=$(sudo docker images | grep ves-barometer | awk '{print $3}')
+id=$(echo $id | cut -d ' ' -f 1)
+sudo docker tag $id $1/ves-barometer:latest
+
+echo; echo "$0 $(date): Pushing the image to dockerhub as $1/ves-barometer"
+sudo docker push $1/ves-barometer
diff --git a/build/ves-barometer/Dockerfile b/build/ves-barometer/Dockerfile
new file mode 100644
index 0000000..4bd4c51
--- /dev/null
+++ b/build/ves-barometer/Dockerfile
@@ -0,0 +1,38 @@
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building an OPNFV Barometer collectd agent container image.
+#
+# Status: this is a work in progress, under test.
+#
+FROM centos:7
+RUN yum update -y && yum install -y which sudo git
+ENV DOCKER y
+ENV repos_dir /src
+ENV openstack_plugins /src/barometer/src/collectd-openstack-plugins
+RUN git config --global http.sslVerify false
+
+WORKDIR ${repos_dir}
+RUN git clone https://gerrit.opnfv.org/gerrit/barometer
+WORKDIR ${repos_dir}/barometer/systems
+RUN sh ./build_base_machine.sh
+
+RUN useradd -ms /bin/bash collectd_exec
+RUN echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+WORKDIR ${openstack_plugins}
+RUN make
+
+COPY start.sh /opt/ves/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/build/ves-barometer/start.sh b/build/ves-barometer/start.sh
new file mode 100644
index 0000000..da452bf
--- /dev/null
+++ b/build/ves-barometer/start.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Startup script for the OPNFV Barometer collectd agent running
+# under docker.
+
+rm -f /opt/collectd/etc/collectd.conf.d/*
+
+if [[ "$ves_mode" == "node" ]]; then
+ cat <<EOF >/opt/collectd/etc/collectd.conf.d/collectd.conf
+# for VES plugin
+LoadPlugin logfile
+<Plugin logfile>
+ LogLevel debug
+ File STDOUT
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+
+LoadPlugin csv
+<Plugin csv>
+ DataDir "/work-dir/collectd/install/var/lib/csv"
+ StoreRates false
+</Plugin>
+
+LoadPlugin target_set
+LoadPlugin match_regex
+<Chain "PreCache">
+ <Rule "mark_memory_as_host">
+ <Match "regex">
+ Plugin "^memory$"
+ </Match>
+ <Target "set">
+ PluginInstance "host"
+ </Target>
+ </Rule>
+</Chain>
+
+LoadPlugin cpu
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+</Plugin>
+
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin load
+LoadPlugin disk
+# TODO: how to set this option only to apply to VMs (not nodes)
+LoadPlugin uuid
+
+LoadPlugin write_kafka
+<Plugin write_kafka>
+ Property "metadata.broker.list" "$ves_kafka_hostname:$ves_kafka_port"
+ <Topic "collectd">
+ Format JSON
+ </Topic>
+</Plugin>
+EOF
+
+ if [[ -d /etc/nova ]]; then
+ cat <<EOF >>~/collectd/collectd.conf
+LoadPlugin virt
+<Plugin virt>
+ Connection "qemu:///system"
+ RefreshInterval 60
+ HostnameFormat uuid
+ PluginInstanceFormat name
+ ExtraStats "cpu_util"
+</Plugin>
+EOF
+ fi
+else
+ cat <<EOF >/opt/collectd/etc/collectd.conf.d/collectd.conf
+# for VES plugin
+LoadPlugin logfile
+<Plugin logfile>
+ LogLevel debug
+ File STDOUT
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+
+LoadPlugin cpu
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+</Plugin>
+
+LoadPlugin csv
+<Plugin csv>
+ DataDir "/tmp"
+</Plugin>
+
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin load
+LoadPlugin disk
+LoadPlugin uuid
+
+LoadPlugin write_kafka
+<Plugin write_kafka>
+ Property "metadata.broker.list" "$ves_kafka_hostname:$ves_kafka_port"
+ <Topic "collectd">
+ Format JSON
+ </Topic>
+</Plugin>
+
+LoadPlugin target_set
+LoadPlugin match_regex
+<Chain "PreCache">
+ <Rule "mark_memory_as_guest">
+ <Match "regex">
+ Plugin "^memory$"
+ </Match>
+ <Target "set">
+ PluginInstance "guest"
+ </Target>
+ </Rule>
+</Chain>
+EOF
+fi
+
+echo; echo "cat /opt/collectd/etc/collectd.conf.d/collectd.conf"
+cat /opt/collectd/etc/collectd.conf.d/collectd.conf
+
+#echo "Delete conf files causing collectd to fail"
+#rm -f /opt/collectd/etc/collectd.conf.d/dpdk*.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/snmp*.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/virt.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/mcelog.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/rdt.conf
+#sed -i -- 's/LoadPlugin cpufreq/#LoadPlugin cpufreq/' /opt/collectd/etc/collectd.conf.d/default_plugins.conf
+
+/opt/collectd/sbin/collectd -f
+echo "collectd has exited. sleeping for an hour to enable debugging"
+sleep 3600
diff --git a/build/ves-collector.sh b/build/ves-collector.sh
index 58aa354..a09eeec 100644
--- a/build/ves-collector.sh
+++ b/build/ves-collector.sh
@@ -31,10 +31,10 @@ echo; echo "$0 $(date): Update package repos"
sudo apt-get update
echo; echo "$0 $(date): Starting VES collector build process"
-if [[ -d /tmp/ves ]]; then rm -rf /tmp/ves; fi
-
-echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
-git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+if [[ ! -d /tmp/ves ]]; then
+ echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
+ git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+fi
echo; echo "$0 $(date): Building the image"
cd /tmp/ves/build/ves-collector
diff --git a/tools/grafana/Dashboard.json b/build/ves-collector/Dashboard.json
index b88646c..b88646c 100644
--- a/tools/grafana/Dashboard.json
+++ b/build/ves-collector/Dashboard.json
diff --git a/build/ves-collector/Dockerfile b/build/ves-collector/Dockerfile
index 9161871..4cd135f 100644
--- a/build/ves-collector/Dockerfile
+++ b/build/ves-collector/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# What this is: A Dockerfile for building an OPFNV VES Agent container image.
+# What this is: A Dockerfile for building an OPNFV VES Collector container image.
#
# Status: this is a work in progress, under test.
#
@@ -21,14 +21,9 @@ FROM ubuntu:xenial
MAINTAINER Bryan Sullivan
-RUN apt-get update
-RUN apt-get install -y apt-utils
+RUN apt-get update && apt-get install -y apt-utils
RUN apt-get -y upgrade
-RUN apt-get update
-RUN apt-get install -y git
-
-# Required for VES collector
-RUN apt-get install -y python-pip python-jsonschema
+RUN apt-get update && apt-get install -y git python-pip python-jsonschema curl
RUN pip install requests
RUN mkdir /opt/ves
@@ -37,5 +32,6 @@ RUN mkdir /opt/ves
RUN git clone https://github.com/att/evel-test-collector.git /opt/ves/evel-test-collector
COPY monitor.py /opt/ves/evel-test-collector/code/collector/monitor.py
+COPY Dashboard.json /opt/ves/Dashboard.json
COPY start.sh /opt/ves/start.sh
ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/build/ves-collector/start.sh b/build/ves-collector/start.sh
index be30c9a..250af34 100644
--- a/build/ves-collector/start.sh
+++ b/build/ves-collector/start.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,6 +23,8 @@ sed -i -- \
evel-test-collector/config/collector.conf
sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $ves_host/g" \
evel-test-collector/config/collector.conf
+sed -i -- "s/vel_port = 30000/vel_port = $ves_port/g" \
+ evel-test-collector/config/collector.conf
sed -i -- "s/vel_username =/vel_username = $ves_user/g" \
evel-test-collector/config/collector.conf
sed -i -- "s/vel_password =/vel_password = $ves_pass/g" \
@@ -31,18 +33,65 @@ sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $ves_path~g" \
evel-test-collector/config/collector.conf
sed -i -- "s~vel_topic_name = example_vnf~vel_topic_name = $ves_topic~g" \
evel-test-collector/config/collector.conf
-sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host" \
+sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host:$ves_influxdb_port" \
evel-test-collector/config/collector.conf
+echo; echo "evel-test-collector/config/collector.conf"
+cat evel-test-collector/config/collector.conf
+
+echo; echo "wait for InfluxDB API at $ves_influxdb_host:$ves_influxdb_port"
+while ! curl http://$ves_influxdb_host:$ves_influxdb_port/ping ; do
+ echo "InfluxDB API is not yet responding... waiting 10 seconds"
+ sleep 10
+done
+
+echo; echo "setup veseventsdb in InfluxDB"
+# TODO: check if pre-existing and skip
+curl -X POST http://$ves_influxdb_host:$ves_influxdb_port/query \
+ --data-urlencode "q=CREATE DATABASE veseventsdb"
+
+echo; echo "wait for Grafana API to be active"
+while ! curl http://$ves_grafana_host:$ves_grafana_port ; do
+ echo "Grafana API is not yet responding... waiting 10 seconds"
+ sleep 10
+done
+
+echo; echo "add VESEvents datasource to Grafana"
+# TODO: check if pre-existing and skip
+cat <<EOF >/opt/ves/datasource.json
+{ "name":"VESEvents",
+ "type":"influxdb",
+ "access":"direct",
+ "url":"http://$ves_influxdb_host:$ves_influxdb_port",
+ "password":"root",
+ "user":"root",
+ "database":"veseventsdb",
+ "basicAuth":false,
+ "basicAuthUser":"",
+ "basicAuthPassword":"",
+ "withCredentials":false,
+ "isDefault":false,
+ "jsonData":null
+}
+EOF
+
+curl -H "Accept: application/json" -H "Content-type: application/json" \
+ -X POST -d @/opt/ves/datasource.json \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/datasources
+
+echo; echo "add VES dashboard to Grafana"
+curl -H "Accept: application/json" -H "Content-type: application/json" \
+ -X POST -d @/opt/ves/Dashboard.json \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db
+
if [[ "$ves_loglevel" != "" ]]; then
python /opt/ves/evel-test-collector/code/collector/monitor.py \
--config /opt/ves/evel-test-collector/config/collector.conf \
- --influxdb $ves_influxdb_host \
+ --influxdb $ves_influxdb_host:$ves_influxdb_port \
--section default > /opt/ves/monitor.log 2>&1
else
python /opt/ves/evel-test-collector/code/collector/monitor.py \
--config /opt/ves/evel-test-collector/config/collector.conf \
- --influxdb $ves_influxdb_host \
+ --influxdb $ves_influxdb_host:$ves_influxdb_port \
--section default
fi
-
diff --git a/build/ves-kafka.sh b/build/ves-kafka.sh
index 19a632b..c489535 100644
--- a/build/ves-kafka.sh
+++ b/build/ves-kafka.sh
@@ -19,7 +19,7 @@
#. Docker hub user logged in e.g. via "sudo docker login"
#.
#. Usage:
-#. bash ves-kafka.sh <hub-user> <hub-pass>
+#. bash ves-kafka.sh <hub-user>
#. hub-user: username for dockerhub
#.
#. NOTE: To allow patch testing, this script will not reclone the VES repo
diff --git a/build/ves-kafka/start.sh b/build/ves-kafka/start.sh
index ab4169b..37c36c2 100644
--- a/build/ves-kafka/start.sh
+++ b/build/ves-kafka/start.sh
@@ -16,14 +16,14 @@
#. What this is: Startup script for a kafka server as used by the OPNFV VES
#. framework.
-echo "$zookeeper $zookeeper_host" >>/etc/hosts
+echo "$zookeeper_host $zookeeper_hostname" >>/etc/hosts
cat /etc/hosts
cd /opt/ves
-sed -i "s/localhost:2181/$zookeeper_host:2181/" \
+sed -i "s/localhost:2181/$zookeeper_hostname:$zookeeper_port/" \
kafka_2.11-0.11.0.2/config/server.properties
grep 2181 kafka_2.11-0.11.0.2/config/server.properties
-sed -i "s~#advertised.listeners=PLAINTEXT://your.host.name:9092~advertised.listeners=PLAINTEXT://$kafka_hostname:9092~" \
+sed -i "s~#advertised.listeners=PLAINTEXT://your.host.name:9092~advertised.listeners=PLAINTEXT://$kafka_hostname:$kafka_port~" \
kafka_2.11-0.11.0.2/config/server.properties
grep advertised.listeners kafka_2.11-0.11.0.2/config/server.properties
diff --git a/tools/cloudify/ves-agent/blueprint.yaml b/tools/cloudify/ves-agent/blueprint.yaml
new file mode 100644
index 0000000..dc2f977
--- /dev/null
+++ b/tools/cloudify/ves-agent/blueprint.yaml
@@ -0,0 +1,178 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running the OPNFV VES Agent under
+# kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ ves_host:
+ default: 127.0.0.1
+ ves_port:
+ default: 30000
+ ves_path:
+ default:
+ ves_topic:
+ default:
+ ves_https:
+ default: false
+ ves_user:
+ default:
+ ves_pass:
+ default:
+ ves_interval:
+ default: 20
+ ves_version:
+ default: 5.1
+ ves_kafka_host:
+ default: 127.0.0.1
+ ves_kafka_hostname:
+ default: localhost
+ ves_kafka_port:
+ default: 9092
+ ves_mode:
+ default: node
+ ves_loglevel:
+ default: info
+
+ spec_port:
+ default: 80
+ container_port:
+ default: 80
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_agent_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-agent-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-agent
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_agent_pod
+
+ ves_agent_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-agent-pod
+ labels:
+ app: ves-agent
+ spec:
+ containers:
+ - name: ves-agent
+ image: blsaws/ves-agent:latest
+ env:
+ - name: ves_host
+ value: { get_input: ves_host }
+ - name: ves_port
+ value: { get_input: ves_port }
+ - name: ves_path
+ value: { get_input: ves_path }
+ - name: ves_topic
+ value: { get_input: ves_topic }
+ - name: ves_https
+ value: { get_input: ves_https }
+ - name: ves_user
+ value: { get_input: ves_user }
+ - name: ves_pass
+ value: { get_input: ves_pass }
+ - name: ves_interval
+ value: { get_input: ves_interval }
+ - name: ves_version
+ value: { get_input: ves_version }
+ - name: ves_kafka_host
+ value: { get_input: ves_kafka_host }
+ - name: ves_kafka_hostname
+ value: { get_input: ves_kafka_hostname }
+ - name: ves_kafka_port
+ value: { get_input: ves_kafka_port }
+ - name: ves_mode
+ value: { get_input: ves_mode }
+ - name: ves_loglevel
+ value: { get_input: ves_loglevel }
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
diff --git a/tools/cloudify/ves-collector/blueprint.yaml b/tools/cloudify/ves-collector/blueprint.yaml
new file mode 100644
index 0000000..7c4a051
--- /dev/null
+++ b/tools/cloudify/ves-collector/blueprint.yaml
@@ -0,0 +1,183 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running the OPNFV VES Collector under
+# kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ ves_host:
+ default: 127.0.0.1
+ ves_port:
+ default: 3001
+ ves_path:
+ default:
+ ves_topic:
+ default:
+ ves_https:
+ default: false
+ ves_user:
+ default:
+ ves_pass:
+ default:
+ ves_interval:
+ default: 20
+ ves_version:
+ default: 5.1
+ ves_loglevel:
+ default: info
+ ves_influxdb_host:
+ default: 127.0.0.1
+ ves_influxdb_port:
+ default: 8086
+ ves_grafana_host:
+ default: 127.0.0.1
+ ves_grafana_port:
+ default: 3000
+ ves_grafana_auth:
+ default: admin:admin
+ spec_port:
+ default: 3001
+ container_port:
+ default: 3001
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_collector_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-collector-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-collector
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_collector_pod
+
+ ves_collector_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-collector-pod
+ labels:
+ app: ves-collector
+ spec:
+ nodeSelector:
+ role: worker
+ containers:
+ - name: ves-collector
+ image: blsaws/ves-collector:latest
+ env:
+ - name: ves_host
+ value: { get_input: ves_host }
+ - name: ves_port
+ value: { get_input: ves_port }
+ - name: ves_path
+ value: { get_input: ves_path }
+ - name: ves_topic
+ value: { get_input: ves_topic }
+ - name: ves_https
+ value: { get_input: ves_https }
+ - name: ves_user
+ value: { get_input: ves_user }
+ - name: ves_pass
+ value: { get_input: ves_pass }
+ - name: ves_interval
+ value: { get_input: ves_interval }
+ - name: ves_version
+ value: { get_input: ves_version }
+ - name: ves_loglevel
+ value: { get_input: ves_loglevel }
+ - name: ves_influxdb_host
+ value: { get_input: ves_influxdb_host }
+ - name: ves_influxdb_port
+ value: { get_input: ves_influxdb_port }
+ - name: ves_grafana_host
+ value: { get_input: ves_grafana_host }
+ - name: ves_grafana_port
+ value: { get_input: ves_grafana_port }
+ - name: ves_grafana_auth
+ value: { get_input: ves_grafana_auth }
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
diff --git a/tools/cloudify/ves-grafana/blueprint.yaml b/tools/cloudify/ves-grafana/blueprint.yaml
new file mode 100644
index 0000000..00963b3
--- /dev/null
+++ b/tools/cloudify/ves-grafana/blueprint.yaml
@@ -0,0 +1,123 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running a grafana service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ spec_port:
+ default: 3000
+
+ container_port:
+ default: 3000
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_grafana_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-grafana-service
+ spec:
+ type: NodePort
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-grafana
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_grafana_pod
+
+ ves_grafana_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-grafana-pod
+ labels:
+ app: ves-grafana
+ spec:
+ containers:
+ - name: ves-grafana
+ image: grafana/grafana
+ env:
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
diff --git a/tools/cloudify/ves-influxdb/blueprint.yaml b/tools/cloudify/ves-influxdb/blueprint.yaml
new file mode 100644
index 0000000..d0a400b
--- /dev/null
+++ b/tools/cloudify/ves-influxdb/blueprint.yaml
@@ -0,0 +1,122 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running an influxdb service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ spec_port:
+ default: 8086
+
+ container_port:
+ default: 8086
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_influxdb_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-influxdb-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-influxdb
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_influxdb_pod
+
+ ves_influxdb_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-influxdb-pod
+ labels:
+ app: ves-influxdb
+ spec:
+ containers:
+ - name: ves-influxdb
+ image: influxdb
+ env:
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
diff --git a/tools/cloudify/ves-kafka/blueprint.yaml b/tools/cloudify/ves-kafka/blueprint.yaml
new file mode 100644
index 0000000..45f4378
--- /dev/null
+++ b/tools/cloudify/ves-kafka/blueprint.yaml
@@ -0,0 +1,142 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running a Kafka service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ zookeeper_hostname:
+ default: localhost
+ zookeeper_host:
+ default: 127.0.0.1
+ zookeeper_port:
+ default: 2181
+ kafka_hostname:
+ default: localhost
+ kafka_port:
+ default: 9092
+ spec_port:
+ default: 9092
+ container_port:
+ default: 9092
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_kafka_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-kafka-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ targetPort: { get_input: spec_port }
+ selector:
+ app: ves-kafka
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_kafka_pod
+
+ ves_kafka_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-kafka-pod
+ labels:
+ app: ves-kafka
+ spec:
+ containers:
+ - name: ves-kafka
+ image: blsaws/ves-kafka:latest
+ env:
+ - name: zookeeper_hostname
+ value: { get_input: zookeeper_hostname }
+ - name: zookeeper_host
+ value: { get_input: zookeeper_host }
+ - name: zookeeper_port
+ value: { get_input: zookeeper_port }
+ - name: kafka_hostname
+ value: { get_input: kafka_hostname }
+ - name: kafka_port
+ value: { get_input: kafka_port }
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
diff --git a/tools/cloudify/ves-zookeeper/blueprint.yaml b/tools/cloudify/ves-zookeeper/blueprint.yaml
new file mode 100644
index 0000000..8e22ed3
--- /dev/null
+++ b/tools/cloudify/ves-zookeeper/blueprint.yaml
@@ -0,0 +1,122 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running a zookeeper service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ spec_port:
+ default: 2181
+
+ container_port:
+ default: 2181
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_zookeeper_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-zookeeper-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-zookeeper
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_zookeeper_pod
+
+ ves_zookeeper_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-zookeeper-pod
+ labels:
+ app: ves-zookeeper
+ spec:
+ containers:
+ - name: ves-zookeeper
+ image: zookeeper
+ env:
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
diff --git a/tools/demo_deploy.sh b/tools/demo_deploy.sh
index f939a83..9a4bd15 100644
--- a/tools/demo_deploy.sh
+++ b/tools/demo_deploy.sh
@@ -35,9 +35,17 @@
#. - env variables set prior to running this script, as per ves-setup.sh
#. ves_kafka_hostname: hostname of the node where the kafka server runs
#. - optional env varibles set prior to running this script, as per ves-setup.sh
-#. ves_influxdb_host: ip:port of the influxdb service
+#. ves_host: ip of the VES collector service
+#. ves_zookeeper_host: ip of the zookeeper service
+#. ves_zookeeper_port: port of the zookeeper service
+#. ves_kafka_host: ip of the kafka service
+#. ves_kafka_port: port of the kafka service
+#. ves_port: port of the VES collector service
+#. ves_influxdb_host: ip of the influxdb service
+#. ves_influxdb_port: port of the influxdb service
#. ves_influxdb_auth: authentication for the influxdb service
-#. ves_grafana_host: ip:port of the grafana service
+#. ves_grafana_host: ip of the grafana service
+#. ves_grafana_port: port of the grafana service
#. ves_grafana_auth: authentication for the grafana service
#. ves_loglevel: loglevel for VES Agent and Collector (ERROR|DEBUG)
#.
@@ -49,87 +57,104 @@
#. Usage: on the admin server
#. $ git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
#. $ cd ~/ves/tools
-#. $ bash demo_deploy.sh <key> <user> <master> <workers> [cloudify]
-#. <key>: SSH key enabling password-less SSH to nodes
+#. $ bash demo_deploy.sh <user> <master> [cloudify]
#. <user>: username on node with password-less SSH authorized
-#. <master>: master node where core components will be installed
-#. <workers>: list of worker nodes where collectd will be installed
+#. <master>: hostname of k8s master node
#. cloudify: flag indicating to deploy VES core services via Cloudify
-key=$1
-user=$2
-master=$3
-workers="$4"
-cloudify=$5
+trap 'fail' ERR
-eval `ssh-agent`
-ssh-add $key
+function fail() {
+ log $1
+ exit 1
+}
+
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo; echo "$f:$l ($(date)) $1"
+}
+
+function run() {
+ log "$1"
+ start=$((`date +%s`/60))
+ $1
+ step_end "$1"
+}
+
+function step_end() {
+ end=$((`date +%s`/60))
+ runtime=$((end-start))
+ log "step \"$1\" duration = $runtime minutes"
+}
-echo; echo "$0 $(date): Setting up master node"
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$master mkdir /home/$user/ves
-scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/ves/tools $user@$master:/home/$user/ves
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$master <<EOF
- ves_host=$master
- export ves_host
+function run_master() {
+ log "$1"
+ start=$((`date +%s`/60))
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master "$1"
+ step_end "$1"
+}
+
+function deploy() {
+ if [[ -f ~/ves/tools/ves_env.sh ]]; then rm ~/ves/tools/ves_env.sh; fi
+ ves_host=$ves_host
+ ves_port=$ves_port
ves_mode=node
- export ves_mode
ves_user=hello
- export ves_user
ves_pass=world
- export ves_pass
- ves_kafka_host=$master
- export ves_kafka_host
+ ves_kafka_host=$ves_kafka_host
ves_kafka_hostname=$ves_kafka_hostname
- export ves_kafka_hostname
+ ves_zookeeper_host=$ves_zookeeper_host
+ ves_zookeeper_port=$ves_zookeeper_port
ves_influxdb_host=$ves_influxdb_host
- export ves_influxdb_host
+ ves_influxdb_port=$ves_influxdb_port
ves_influxdb_auth=$ves_influxdb_auth
- export ves_influxdb_auth
ves_grafana_host=$ves_grafana_host
- export ves_grafana_host
+ ves_grafana_port=$ves_grafana_port
ves_grafana_auth=$ves_grafana_auth
- export ves_grafana_auth
ves_loglevel=$ves_loglevel
- export ves_loglevel
- env | grep ves
- bash /home/$user/ves/tools/ves-setup.sh collector
- bash /home/$user/ves/tools/ves-setup.sh kafka
- bash /home/$user/ves/tools/ves-setup.sh agent $cloudify
-EOF
+ source ~/ves/tools/ves-setup.sh env
-scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$master:/home/$user/ves/tools/ves_env.sh ~/ves/tools/.
+ log "Setting up master node"
+ run_master "mkdir /home/$user/ves"
+ scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/ves/tools $user@$master:/home/$user/ves
+ run "bash ves/tools/ves-setup.sh influxdb"
+ run "bash ves/tools/ves-setup.sh grafana"
+ run "bash ves/tools/ves-setup.sh collector $cloudify"
+ run "bash ves/tools/ves-setup.sh kafka $cloudify"
+ run "bash ves/tools/ves-setup.sh agent $cloudify"
-if [[ "$master" == "$workers" ]]; then
- nodes=$master
-else
- nodes="$master $workers"
-fi
-
-for node in $nodes; do
- echo; echo "$0 $(date): Setting up collectd at $node"
- if [[ "$node" != "$master" ]]; then
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$node mkdir /home/$user/ves
- scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/ves/tools $user@$node:/home/$user/ves
+ if [[ "$k8s_master" == "$k8s_workers" ]]; then
+ nodes=$k8s_master
+ else
+ nodes="$k8s_master $k8s_workers"
fi
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$node <<EOF > ves-collectd-$node.log 2>&1 &
- ves_kafka_host=$master
- export ves_kafka_host
- ves_kafka_port=$ves_kafka_port
- export ves_kafka_port
- ves_kafka_hostname=$ves_kafka_hostname
- export ves_kafka_hostname
- ves_mode=node
- export ves_mode
- bash /home/$user/ves/tools/ves-setup.sh collectd
+
+ for node in $nodes; do
+ log "Setting up barometer at $node"
+ if [[ "$node" != "$k8s_master" ]]; then
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $user@$node mkdir /home/$user/ves
+ scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/ves/tools $user@$node:/home/$user/ves
+ fi
+ run "bash ves/tools/ves-setup.sh barometer"
EOF
-done
+ done
-echo; echo "$0 $(date): VES Grafana dashboards are available at http://$ves_grafana_host:3000 (login as admin/admin)"
+ source ~/ves/tools/ves_env.sh
+ log "VES Grafana dashboards are available at http://$ves_grafana_host:$ves_grafana_port (login as admin/admin)"
+}
+deploy_start=$((`date +%s`/60))
+user=$1
+master=$2
+cloudify=$3
+source ~/k8s_env.sh
+log "k8s environment as input"
+env | grep k8s
+eval `ssh-agent`
+ssh-add $k8s_key
+deploy
diff --git a/tools/kubernetes/ves-barometer/daemonset.yaml b/tools/kubernetes/ves-barometer/daemonset.yaml
new file mode 100644
index 0000000..8f4c1d9
--- /dev/null
+++ b/tools/kubernetes/ves-barometer/daemonset.yaml
@@ -0,0 +1,66 @@
+#
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Kubernetes chart for the OPNFV Barometer collectd agent
+# running as a daemonset (one on each cluster node) under kubernetes.
+#
+# Notes:
+# - apiVersion: extensions/v1beta1 is required for Ubuntu (apps/v1 failed with
+# "error: unable to recognize... no matches for apps/, Kind=DaemonSet"
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: ves-barometer
+ namespace: default
+ labels:
+ k8s-app: ves-barometer
+spec:
+ selector:
+ matchLabels:
+ name: ves-barometer
+ template:
+ metadata:
+ labels:
+ name: ves-barometer
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: ves-barometer
+ image: blsaws/ves-barometer:latest
+ env:
+ - name: ves_mode
+ value: <ves_mode>
+ - name: ves_kafka_hostname
+ value: <ves_kafka_hostname>
+ - name: ves_kafka_port
+ value: "<ves_kafka_port>"
+ volumeMounts:
+ - name: varrun
+ mountPath: /var/run
+ - name: tmp
+ mountPath: /tmp
+ readOnly: true
+ securityContext:
+ privileged: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varrun
+ hostPath:
+ path: /var/run
+ - name: tmp
+ hostPath:
+ path: /tmp
diff --git a/tools/ves-clean.sh b/tools/ves-clean.sh
new file mode 100644
index 0000000..0115322
--- /dev/null
+++ b/tools/ves-clean.sh
@@ -0,0 +1,231 @@
+#!/bin/bash
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Cleanup script for the VES monitoring framework.
+#. With this script a VES deployment can be cleaned from one or more hosts.
+#.
+#. Prerequisites:
+#. - VES framework deployed as in ves-setup.sh in this repo
+#.
+#. Usage:
+#. bash ~/ves/tools/ves-clean.sh <what> [cloudify]
+#. what: one of all|barometer|agent|kafka|zookeeper|grafana|influxdb|collector|nodes
+#. barometer: clean barometer
+#. agent: clean VES agent
+#. kafka: clean kafka
+#. zookeeper: clean zookeeper
+#. grafana: clean grafana
+#. influxdb: clean influxdb
+#. collector: clean VES collector
+#. nodes: clean VES code etc at nodes
+#. cloudify: (optional) clean up cloudify-based deployments
+#.
+#. See demo_deploy.sh in this repo for a recommended sequence of the above.
+#.
+#. Status: this is a work in progress, under test.
+
+# http://docs.opnfv.org/en/latest/submodules/barometer/docs/release/userguide/collectd.ves.userguide.html
+
+function fail() {
+ log "$1"
+ exit 1
+}
+
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo ""
+ echo "$f:$l ($(date)) $1"
+}
+
+function clean_all() {
+ log "clean installation"
+ clean_barometer
+ clean_agent
+ clean_kafka
+ clean_zookeeper
+ clean_grafana
+ clean_influxdb
+ clean_collector
+ clean_nodes
+}
+
+function clean_via_docker() {
+ log "clean docker container $1 at k8s master $k8s_master"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+sudo docker stop $1
+sudo docker rm -v $1
+EOF
+}
+
+function clean_via_cloudify() {
+ log "clean $1 via cloudify"
+ bash ~/models/tools/cloudify/k8s-cloudify.sh stop $1 $1
+}
+
+
+function clean_grafana() {
+ log "clean grafana"
+
+ log "VES datasources and dashboards at grafana server, if needed"
+ curl -X DELETE \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/datasources/name/VESEvents
+ curl -X DELETE \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db/ves-demo
+
+ clean_via_docker ves-grafana
+}
+
+function clean_influxdb() {
+ log "clean influxdb"
+ clean_via_docker ves-influxdb
+}
+
+function clean_agent() {
+ log "clean ves-agent"
+ if [[ "$cloudify" == "cloudify" ]]; then
+ clean_via_cloudify ves-agent
+ force_k8s_clean ves-agent
+ else
+ clean_via_docker ves-agent
+ fi
+}
+
+function clean_kafka() {
+ log "clean ves-kafka"
+ if [[ "$cloudify" == "cloudify" ]]; then
+ clean_via_cloudify ves-kafka
+ force_k8s_clean ves-kafka
+ else
+ clean_via_docker ves-kafka
+ fi
+}
+
+function clean_zookeeper() {
+ log "clean ves-zookeeper"
+ if [[ "$cloudify" == "cloudify" ]]; then
+ clean_via_cloudify ves-zookeeper
+ force_k8s_clean ves-zookeeper
+ else
+ clean_via_docker ves-zookeeper
+ fi
+}
+
+function clean_collector() {
+ log "clean ves-collector"
+ if [[ "$cloudify" == "cloudify" ]]; then
+ clean_via_cloudify ves-collector
+ force_k8s_clean ves-collector
+ else
+ clean_via_docker ves-collector
+ fi
+}
+
+function clean_barometer() {
+ log "clean ves-barometer"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$node <<EOF
+kubectl delete daemonset --namespace default ves-barometer
+EOF
+ force_k8s_clean ves-barometer
+}
+
+function clean_nodes() {
+ log "clean ves code etc from nodes"
+ if [[ "$k8s_master" == "$k8s_workers" ]]; then
+ nodes=$k8s_master
+ else
+ nodes="$k8s_master $k8s_workers"
+ fi
+ for node in $nodes; do
+ log "remove ves-barometer container and config for VES at node $node"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$node <<EOF
+sudo rm -rf /home/$k8s_user/ves
+sudo rm -rf /home/$k8s_user/collectd
+EOF
+ done
+}
+
+function force_k8s_clean() {
+ log "force cleanup of k8s pod for $1 if still present"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master "kubectl delete pods --namespace default $1-pod"
+ pods=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get pods --namespace default | grep -c $1)
+ echo "wait for all kubectl pods to be terminated"
+ tries=10
+ while [[ $pods -gt 0 && $tries -gt 0 ]]; do
+ echo "$pods VES pods remaining in kubectl"
+ sleep 30
+ ((tries--))
+ pods=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get pods --namespace default | grep -c $1)
+ done
+ if [[ $pods -gt 0 ]]; then
+ log "manually terminate $1 pods via docker"
+ cs=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master sudo docker ps -a | awk "/$1/ {print $1}")
+ for c in $cs ; do
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master "sudo docker stop $c"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master "sudo docker rm -v $c"
+ done
+ fi
+}
+
+dist=$(grep -m 1 ID /etc/os-release | awk -F '=' '{print $2}' | sed 's/"//g')
+if [[ $(grep -c $HOSTNAME /etc/hosts) -eq 0 ]]; then
+ echo "$(ip route get 8.8.8.8 | awk '{print $NF; exit}') $HOSTNAME" |\
+ sudo tee -a /etc/hosts
+fi
+
+source ~/k8s_env.sh
+if [[ -f ~/ves/tools/ves_env.sh ]]; then
+ source ~/ves/tools/ves_env.sh
+fi
+log "VES environment as input"
+env | grep ves_
+
+trap 'fail' ERR
+
+cloudify=$2
+
+case "$1" in
+ "all")
+ clean_all
+ ;;
+ "nodes")
+ clean_nodes
+ ;;
+ "barometer")
+ clean_barometer
+ ;;
+ "agent")
+ clean_agent
+ ;;
+ "influxdb")
+ clean_influxdb
+ ;;
+ "grafana")
+ clean_grafana
+ ;;
+ "collector")
+ clean_collector
+ ;;
+ "kafka")
+ clean_kafka
+ ;;
+ *)
+ grep '#. ' $0
+esac
+trap '' ERR
diff --git a/tools/ves-setup.sh b/tools/ves-setup.sh
index 00d7db8..8d5c902 100644
--- a/tools/ves-setup.sh
+++ b/tools/ves-setup.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
#
#. What this is: Setup script for the VES monitoring framework.
#. With this script VES support can be installed in one or more hosts, with:
-#. - a dedicated or shared Kafka server for collection of events from collectd
-#. - VES collectd agents running in host or guest mode
+#. - a dedicated or shared Kafka server for collection of events from barometer
+#. - VES barometer agents running in host or guest mode
#. - VES monitor (test collector)
#. - Influxdb service (if an existing service is not passed as an option)
#. - Grafana service (if an existing service is not passed as an option)
@@ -26,22 +26,22 @@
#. pre-installed VES collector e.g. from the ONAP project.
#. - Install Kafka server on one of the hosts, or use a pre-installed server
#. accessible from the agent hosts.
-#. - Install collectd on each host.
+#. - Install barometer on each host.
#. - Install the VES agent on one of the hosts.
#.
#. Prerequisites:
#. - Ubuntu Xenial (Centos support to be provided)
#. - passwordless sudo setup for user running this script
#. - shell environment variables setup as below (for non-default setting)
-#. ves_mode: install mode (node|guest) for VES collectd plugin (default: node)
+#. ves_mode: install mode (node|guest) for VES barometer plugin (default: node)
#. ves_host: VES collector IP or hostname (default: 127.0.0.1)
-#. ves_port: VES collector port (default: 30000)
+#. ves_port: VES collector port (default: 3001)
#. ves_path: REST path optionalRoutingPath element (default: empty)
#. ves_topic: REST path topicName element (default: empty)
#. ves_https: use HTTPS instead of HTTP (default: false)
#. ves_user: username for basic auth with collector (default: empty)
#. ves_pass: password for basic auth with collector (default: empty)
-#. ves_interval: frequency in sec for collectd data reports (default: 20)
+#. ves_interval: frequency in sec for barometer data reports (default: 20)
#. ves_version: VES API version (default: 5.1)
#. ves_kafka_host: kafka host IP (default: 127.0.0.1)
#. ves_kafka_hostname: kafka host hostname (default: localhost)
@@ -54,18 +54,28 @@
#.
#. Usage:
#. git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
-#. bash ~/ves/tools/ves-setup.sh <collector|kafka|collectd|agent> [cloudify]
+#. bash ~/ves/tools/ves-setup.sh <what> [cloudify]
+#. what: one of env|influxdb|grafana|collector|zookeeper|kafka|agent|barometer
+#. env: setup VES environment script ~/ves/tools/ves_env.sh
+#. influxdb: setup influxdb as a docker container on k8s_master node
+#. grafana: setup grafana as a docker container on k8s_master node
#. collector: setup VES collector (test collector)
+#. zookeeper: setup zookeeper server for kafka configuration
#. kafka: setup kafka server for VES events from collect agent(s)
-#. collectd: setup collectd with libvirt plugin, as a kafka publisher
#. agent: setup VES agent in host or guest mode, as a kafka consumer
+#. barometer: setup barometer with libvirt plugin, as a kafka publisher
#. cloudify: (optional) use cloudify to deploy the component, as setup by
#. tools/cloudify/k8s-cloudify.sh in the OPNFV Models repo.
-#. bash ~/ves/ves-setup.sh <master> <workers>
-#. master: VES master node IP
-#. workers: quoted, space-separated list of worker node IPs
#.
-#. See demo_deploy.sh in this repo for a recommended sequence of the above.
+#. The recommended sequence for setting up the components is:
+#. influxdb: prerequisite for grafana datasource setup
+#. grafana: prerequisite for setup of datasource and dashboards
+#. collector: creates veseventsdb in influxdb, and grafana
+#. datasource/dashboards, then starts listening for VES event reports
+#. zookeeper: prerequisite for kafka
+#. kafka: prerequisite for agent and barometer
+#. agent: listens for collectd topic events over kafka, for reporting to collector
+#. barometer: monitors resources and reports via collectd topic in kafka
#.
#. Status: this is a work in progress, under test.
@@ -95,11 +105,13 @@ function common_prereqs() {
}
function setup_env() {
- cat <<'EOF' >~/ves/tools/ves_env.sh
+ log "updating VES environment variables"
+ cat <<EOF >~/ves/tools/ves_env.sh
#!/bin/bash
ves_mode="${ves_mode:=node}"
-ves_host="${ves_host:=127.0.0.1}"
-ves_port="${ves_port:=30000}"
+ves_host="${ves_host:=ves-collector-service.default.svc.cluster.local}"
+ves_hostname="${ves_hostname:=ves-collector-service.default.svc.cluster.local}"
+ves_port="${ves_port:=3001}"
ves_path="${ves_path:=}"
ves_topic="${ves_topic:=}"
ves_https="${ves_https:=false}"
@@ -107,17 +119,26 @@ ves_user="${ves_user:=}"
ves_pass="${ves_pass:=}"
ves_interval="${ves_interval:=20}"
ves_version="${ves_version:=5.1}"
-ves_kafka_host="${ves_kafka_host:=127.0.0.1}"
-ves_kafka_hostname="${ves_kafka_hostname:=localhost}"
+ves_zookeeper_host="${ves_zookeeper_host:=ves-zookeeper-service.default.svc.cluster.local}"
+ves_zookeeper_hostname="${ves_zookeeper_hostname:=ves-zookeeper-service.default.svc.cluster.local}"
+ves_zookeeper_host="${ves_zookeeper_host:=ves-zookeeper-service.default.svc.cluster.local}"
+ves_zookeeper_port="${ves_zookeeper_port:=2181}"
+ves_kafka_host="${ves_kafka_host:=ves-kafka-service.default.svc.cluster.local}"
+ves_kafka_hostname="${ves_kafka_hostname:=ves-kafka-service.default.svc.cluster.local}"
ves_kafka_port="${ves_kafka_port:=9092}"
-ves_influxdb_host="${ves_influxdb_host:=localhost:8086}"
+ves_influxdb_host="${ves_influxdb_host:=ves-influxdb-service.default.svc.cluster.local}"
+ves_influxdb_hostname="${ves_influxdb_hostname:=ves-influxdb-service.default.svc.cluster.local}"
+ves_influxdb_port="${ves_influxdb_port:=8086}"
ves_influxdb_auth="${ves_influxdb_auth:=}"
-ves_grafana_host="${ves_grafana_host:=localhost:3000}"
+ves_grafana_host="${ves_grafana_host:=ves-grafana-service.default.svc.cluster.local}"
+ves_grafana_hostname="${ves_grafana_hostname:=ves-grafana-service.default.svc.cluster.local}"
+ves_grafana_port="${ves_grafana_port:=3000}"
ves_grafana_auth="${ves_grafana_auth:=admin:admin}"
-ves_loglevel="${ves_loglevel:=}"
+ves_loglevel="${ves_loglevel:=DEBUG}"
ves_cloudtype="${ves_cloudtype:=kubernetes}"
export ves_mode
export ves_host
+export ves_hostname
export ves_port
export ves_path
export ves_topic
@@ -125,187 +146,105 @@ export ves_https
export ves_user
export ves_pass
export ves_interval
+export ves_version
+export ves_zookeeper_host
+export ves_zookeeper_hostname
+export ves_zookeeper_port
export ves_kafka_host
export ves_kafka_hostname
export ves_kafka_port
export ves_influxdb_host
+export ves_influxdb_hostname
+export ves_influxdb_port
export ves_influxdb_auth
export ves_grafana_host
+export ves_grafana_hostname
+export ves_grafana_port
export ves_grafana_auth
export ves_loglevel
export ves_cloudtype
EOF
source ~/ves/tools/ves_env.sh
- echo ~/ves/tools/ves_env.sh
+ env | grep ves_
}
-function setup_collectd() {
- log "setup collectd"
-
- common_prereqs
- source ~/ves/tools/ves_env.sh
-
- log "setup VES collectd config for VES $ves_mode mode"
- mkdir ~/collectd
- if [[ "$ves_mode" == "node" ]]; then
-# # TODO: fix for journalctl -xe report "... is marked executable"
-# sudo chmod 744 /etc/systemd/system/collectd.service
-
- cat <<EOF >~/collectd/collectd.conf
-# for VES plugin
-LoadPlugin logfile
-<Plugin logfile>
- LogLevel debug
- File STDOUT
- Timestamp true
- PrintSeverity false
-</Plugin>
-
-LoadPlugin csv
-<Plugin csv>
- DataDir "/work-dir/collectd/install/var/lib/csv"
- StoreRates false
-</Plugin>
-
-LoadPlugin target_set
-LoadPlugin match_regex
-<Chain "PreCache">
- <Rule "mark_memory_as_host">
- <Match "regex">
- Plugin "^memory$"
- </Match>
- <Target "set">
- PluginInstance "host"
- </Target>
- </Rule>
-</Chain>
-
-LoadPlugin cpu
-<Plugin cpu>
- ReportByCpu true
- ReportByState true
- ValuesPercentage true
-</Plugin>
-
-LoadPlugin interface
-LoadPlugin memory
-LoadPlugin load
-LoadPlugin disk
-# TODO: how to set this option only to apply to VMs (not nodes)
-LoadPlugin uuid
+function update_env() {
+ log "update VES environment with $1=$2"
+ eval ${1}=${2}
+ export $1
+ sed -i -- "s/.*$1=.*/$1=$2/" ~/ves/tools/ves_env.sh
+ env | grep ves_
+}
-LoadPlugin write_kafka
-<Plugin write_kafka>
- Property "metadata.broker.list" "$ves_kafka_host:$ves_kafka_port"
- <Topic "collectd">
- Format JSON
- </Topic>
-</Plugin>
-EOF
+function setup_kafka() {
+ log "setup kafka server"
+ log "deploy zookeeper and kafka"
+ if [[ "$1" == "cloudify" ]]; then
+ cp -r ~/ves/tools/cloudify/ves-zookeeper ~/models/tools/cloudify/blueprints/.
+ source ~/models/tools/cloudify/k8s-cloudify.sh start ves-zookeeper ves-zookeeper
+ source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-zookeeper
+ update_env ves_zookeeper_host $clusterIp
- if [[ -d /etc/nova ]]; then
- cat <<EOF >>~/collectd/collectd.conf
-LoadPlugin virt
-<Plugin virt>
- Connection "qemu:///system"
- RefreshInterval 60
- HostnameFormat uuid
- PluginInstanceFormat name
- ExtraStats "cpu_util"
-</Plugin>
-EOF
- fi
+ cp -r ~/ves/tools/cloudify/ves-kafka ~/models/tools/cloudify/blueprints/.
+ inputs="{ \
+ \"zookeeper_hostname\": \"$ves_zookeeper_hostname\",
+ \"zookeeper_host\": \"$ves_zookeeper_host\",
+ \"zookeeper_port\": \"$ves_zookeeper_port\",
+ \"kafka_port\": \"$ves_kafka_port\",
+ \"kafka_hostname\": \"$ves_kafka_hostname\"}"
+
+ source ~/models/tools/cloudify/k8s-cloudify.sh start ves-kafka ves-kafka "$inputs"
+ source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-kafka
+ update_env ves_kafka_host $clusterIp
else
- cat <<EOF >~/collectd/collectd.conf
-# for VES plugin
-LoadPlugin logfile
-<Plugin logfile>
- LogLevel debug
- File STDOUT
- Timestamp true
- PrintSeverity false
-</Plugin>
-
-LoadPlugin cpu
-<Plugin cpu>
- ReportByCpu true
- ReportByState true
- ValuesPercentage true
-</Plugin>
-
-LoadPlugin csv
-<Plugin csv>
- DataDir "/tmp"
-</Plugin>
-
-LoadPlugin interface
-LoadPlugin memory
-LoadPlugin load
-LoadPlugin disk
-LoadPlugin uuid
-
-LoadPlugin write_kafka
-<Plugin write_kafka>
- Property "metadata.broker.list" "$ves_kafka_host:$ves_kafka_port"
- <Topic "collectd">
- Format JSON
- </Topic>
-</Plugin>
-
-LoadPlugin target_set
-LoadPlugin match_regex
-<Chain "PreCache">
- <Rule "mark_memory_as_guest">
- <Match "regex">
- Plugin "^memory$"
- </Match>
- <Target "set">
- PluginInstance "guest"
- </Target>
- </Rule>
-</Chain>
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+sudo docker run -it -d -p $ves_zookeeper_port:2181 --name ves-zookeeper zookeeper
+sudo docker run -it -d -p $ves_kafka_port:9092 --name ves-kafka \
+ -e zookeeper_hostname=$ves_zookeeper_hostname \
+ -e zookeeper_host=$ves_zookeeper_host \
+ -e zookeeper_port=$ves_zookeeper_port \
+ -e kafka_port=$ves_kafka_port \
+ -e kafka_hostname=$ves_kafka_hostname \
+ blsaws/ves-kafka:latest
EOF
fi
- log "collectd config updated"
+}
- if [[ $(grep -c $ves_kafka_hostname /etc/hosts) -eq 0 ]]; then
- log "add to /etc/hosts: $ves_kafka_host $ves_kafka_hostname"
- echo "$ves_kafka_host $ves_kafka_hostname" | sudo tee -a /etc/hosts
- fi
+function setup_barometer() {
+ log "setup barometer"
+# if [[ $(grep -c $ves_kafka_hostname /etc/hosts) -eq 0 ]]; then
+# log "add to /etc/hosts: $ves_kafka_host $ves_kafka_hostname"
+# echo "$ves_kafka_host $ves_kafka_hostname" | sudo tee -a /etc/hosts
+# fi
- log "start Barometer container"
- sudo docker run -tid --net=host --name ves-barometer \
- -v ~/collectd:/opt/collectd/etc/collectd.conf.d \
- -v /var/run:/var/run -v /tmp:/tmp --privileged \
- opnfv/barometer:latest /run_collectd.sh
+ log "start Barometer container as daemonset under kubernetes"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+sed -i -- "s/<ves_mode>/$ves_mode/" \
+ /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+sed -i -- "s/<ves_kafka_hostname>/$ves_kafka_hostname/" \
+ /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+sed -i -- "s/<ves_kafka_port>/$ves_kafka_port/" \
+ /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+kubectl create \
+ -f /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+EOF
+
+# sudo docker run -tid --net=host --name ves-barometer \
+# -v ~/collectd:/opt/collectd/etc/collectd.conf.d \
+# -v /var/run:/var/run -v /tmp:/tmp --privileged \
+# opnfv/barometer:latest /run_collectd.sh
}
function setup_agent() {
log "setup VES agent"
- source ~/k8s_env.sh
- source ~/ves/tools/ves_env.sh
log "deploy the VES agent container"
if [[ "$1" == "cloudify" ]]; then
- cd ~/ves/tools/cloudify
- # Cloudify is deployed on the k8s master node
- manager_ip=$k8s_master
- log "copy kube config from k8s master for insertion into blueprint"
- scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$manager_ip:/home/$k8s_user/.kube/config ves-agent/kube.config
-
- log "package the blueprint"
- # CLI: cfy blueprints package -o /tmp/$bp $bp
- tar ckf /tmp/blueprint.tar ves-agent
-
- log "upload the blueprint"
- # CLI: cfy blueprints upload -t default_tenant -b $bp /tmp/$bp.tar.gz
- curl -s -X PUT -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/octet-stream" -o /tmp/json \
- http://$manager_ip/api/v3.1/blueprints/ves-agent?application_file_name=blueprint.yaml \
- -T /tmp/blueprint.tar
-
+ cp -r ~/ves/tools/cloudify/ves-agent ~/models/tools/cloudify/blueprints/.
inputs="{ \
\"ves_mode\": \"$ves_mode\",
\"ves_host\": \"$ves_host\",
@@ -317,44 +256,32 @@ function setup_agent() {
\"ves_pass\": \"$ves_pass\",
\"ves_interval\": \"$ves_interval\",
\"ves_version\": \"$ves_version\",
- \"ves_kafka_port\": \"$ves_kafka_port\",
- \"ves_kafka_host\": \"$ves_kafka_host\",
\"ves_kafka_hostname\": \"$ves_kafka_hostname\",
+ \"ves_kafka_host\": \"$ves_kafka_host\",
+ \"ves_kafka_port\": \"$ves_kafka_port\",
\"ves_loglevel\": \"$ves_loglevel\"}"
- log "create a deployment for the blueprint"
- # CLI: cfy deployments create -t default_tenant -b $bp $bp
- curl -s -X PUT -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/json" -o /tmp/json \
- -d "{\"blueprint_id\": \"ves-agent\", \"inputs\": $inputs}" \
- http://$manager_ip/api/v3.1/deployments/ves-agent
- sleep 10
-
- # CLI: cfy workflows list -d $bp
-
- log "install the deployment pod and service"
- # CLI: cfy executions start install -d $bp
- curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/json" -o /tmp/json \
- -d "{\"deployment_id\":\"ves-agent\", \"workflow_id\":\"install\"}" \
- http://$manager_ip/api/v3.1/executions
+ bash ~/models/tools/cloudify/k8s-cloudify.sh start ves-agent ves-agent "$inputs"
else
- sudo docker run -it -d \
- -e ves_mode=$ves_mode \
- -e ves_host=$ves_host \
- -e ves_port=$ves_port \
- -e ves_path=$ves_path \
- -e ves_topic=$ves_topic \
- -e ves_https=$ves_https \
- -e ves_user=$ves_user \
- -e ves_pass=$ves_pass \
- -e ves_interval=$ves_interval \
- -e ves_version=$ves_version \
- -e ves_kafka_port=$ves_kafka_port \
- -e ves_kafka_host=$ves_kafka_host \
- -e ves_kafka_hostname=$ves_kafka_hostname \
- -e ves_loglevel=$ves_loglevel \
- --name ves-agent blsaws/ves-agent:latest
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+sudo docker run -it -d \
+ -e ves_mode=$ves_mode \
+ -e ves_host=$ves_host \
+ -e ves_port=$ves_port \
+ -e ves_path=$ves_path \
+ -e ves_topic=$ves_topic \
+ -e ves_https=$ves_https \
+ -e ves_user=$ves_user \
+ -e ves_pass=$ves_pass \
+ -e ves_interval=$ves_interval \
+ -e ves_version=$ves_version \
+ -e ves_kafka_port=$ves_kafka_port \
+ -e ves_kafka_host=$ves_kafka_host \
+ -e ves_kafka_hostname=$ves_kafka_hostname \
+ -e ves_loglevel=$ves_loglevel \
+ --name ves-agent blsaws/ves-agent:latest
+EOF
fi
# debug hints
@@ -366,10 +293,8 @@ function setup_agent() {
# ~/kafka_2.11-0.11.0.2/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic collectd
}
-function setup_collector() {
- log "setup collector"
- $2 $3 $4
-
+function setup_influxdb() {
+ log "setup influxdb"
log "install prerequistes"
if [[ "$dist" == "ubuntu" ]]; then
sudo apt-get install -y jq
@@ -377,152 +302,115 @@ function setup_collector() {
sudo yum install -y jq
fi
- setup_env
-
- if ! curl http://$ves_influxdb_host/ping ; then
- # TODO: migrate to deployment via Helm
- log "setup influxdb container"
- sudo docker run -d --name=ves-influxdb -p 8086:8086 influxdb
- status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
- while [[ "x$status" != "xrunning" ]]; do
- log "InfluxDB container state is ($status)"
- sleep 10
- status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
- done
- log "InfluxDB container state is $status"
-
- log "wait for InfluxDB API to be active"
- while ! curl http://$ves_influxdb_host/ping ; do
- log "InfluxDB API is not yet responding... waiting 10 seconds"
- sleep 10
- done
- fi
- echo "ves_influxdb_host=$ves_influxdb_host"
-
- log "setup InfluxDB database"
- # TODO: check if pre-existing and skip
- curl -X POST http://$ves_influxdb_host/query \
- --data-urlencode "q=CREATE DATABASE veseventsdb"
-
- if ! curl http://$ves_grafana_host ; then
- # TODO: migrate to deployment via Helm
- log "install Grafana container"
- sudo docker run -d --name ves-grafana -p 3000:3000 grafana/grafana
- status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
- while [[ "x$status" != "xrunning" ]]; do
- log "Grafana container state is ($status)"
- sleep 10
- status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
- done
- log "Grafana container state is $status"
- echo "ves_grafana_host=$ves_grafana_host"
-
- log "wait for Grafana API to be active"
- while ! curl http://$ves_grafana_host ; do
- log "Grafana API is not yet responding... waiting 10 seconds"
- sleep 10
- done
+ log "checking for influxdb at http://$ves_influxdb_host:$ves_influxdb_port/ping"
+ if ! curl http://$ves_influxdb_host:$ves_influxdb_port/ping ; then
+ log "install influxdb container on k8s master"
+ update_env ves_influxdb_host $k8s_master
+ update_env ves_influxdb_hostname $k8s_master_hostname
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master \
+ sudo docker run -d --name=ves-influxdb -p 8086:8086 influxdb
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
+while [[ "x$status" != "xrunning" ]]; do
+ echo; echo "InfluxDB container state is ($status)"
+ sleep 10
+ status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
+done
+echo; echo "InfluxDB container state is $status"
+EOF
fi
+}
- log "add VESEvents datasource to Grafana at http://$ves_grafana_auth@$ves_grafana_host"
- # TODO: check if pre-existing and skip
- cat <<EOF >~/ves/tools/grafana/datasource.json
-{ "name":"VESEvents",
- "type":"influxdb",
- "access":"direct",
- "url":"http://$ves_host:8086",
- "password":"root",
- "user":"root",
- "database":"veseventsdb",
- "basicAuth":false,
- "basicAuthUser":"",
- "basicAuthPassword":"",
- "withCredentials":false,
- "isDefault":false,
- "jsonData":null
-}
+function setup_grafana() {
+ log "setup grafana"
+ log "checking for grafana at http://$ves_grafana_host:$ves_grafana_port"
+ if ! curl http://$ves_grafana_host:$ves_grafana_port ; then
+ log "install Grafana container on k8s master"
+ update_env ves_grafana_host $k8s_master
+ update_env ves_grafana_hostname $k8s_master_hostname
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master \
+ sudo docker run -d --name ves-grafana -p 3000:3000 grafana/grafana
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
+while [[ "x$status" != "xrunning" ]]; do
+ echo; echo "Grafana container state is ($status)"
+ sleep 10
+ status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
+done
+echo; echo "Grafana container state is $status"
EOF
-
- # Use /home/$USER/ instead of ~ with @
- curl -H "Accept: application/json" -H "Content-type: application/json" \
- -X POST -d @/home/$USER/ves/tools/grafana/datasource.json \
- http://$ves_grafana_auth@$ves_grafana_host/api/datasources
-
- log "add VES dashboard to Grafana at http://$ves_grafana_auth@$ves_grafana_host"
- curl -H "Accept: application/json" -H "Content-type: application/json" \
- -X POST \
- -d @/home/$USER/ves/tools/grafana/Dashboard.json\
- http://$ves_grafana_auth@$ves_grafana_host/api/dashboards/db
-
- log "setup collector container"
- # TODO: migrate to deployment via Helm
- sudo docker run -it -d -p 30000:30000 \
- -e ves_host=$ves_host \
- -e ves_port=$ves_port \
- -e ves_path=$ves_path \
- -e ves_topic=$ves_topic \
- -e ves_https=$ves_https \
- -e ves_user=$ves_user \
- -e ves_pass=$ves_pass \
- -e ves_interval=$ves_interval \
- -e ves_version=$ves_version \
- -e ves_influxdb_host=$ves_influxdb_host \
- -e ves_loglevel=$ves_loglevel \
- --name ves-collector blsaws/ves-collector:latest
-
- # debug hints
- # curl 'http://172.16.0.5:8086/query?pretty=true&db=veseventsdb&q=SELECT%20moving_average%28%22load-shortterm%22%2C%205%29%20FROM%20%22load%22%20WHERE%20time%20%3E%3D%20now%28%29%20-%205m%20GROUP%20BY%20%22system%22'
- # sudo docker logs ves-collector
- # sudo docker exec -it ves-collector apt-get install -y tcpdump
- # sudo docker exec -it ves-collector tcpdump -A -v -s 0 -i any port 30000
- # curl http://$ves_host:30000
- # sudo docker exec -it ves-collector /bin/bash
+ fi
}
-function clean() {
- log "clean installation"
- master=$1
- workers="$2"
- source ~/k8s_env.sh
+function setup_collector() {
+ log "setup collector"
+ if [[ "$1" == "cloudify" ]]; then
+ cp -r ~/ves/tools/cloudify/ves-collector ~/models/tools/cloudify/blueprints/.
+ inputs="{ \
+ \"ves_host\": \"$ves_host\",
+ \"ves_port\": \"$ves_port\",
+ \"ves_path\": \"$ves_path\",
+ \"ves_topic\": \"$ves_topic\",
+ \"ves_https\": \"$ves_https\",
+ \"ves_user\": \"$ves_user\",
+ \"ves_pass\": \"$ves_pass\",
+ \"ves_interval\": \"$ves_interval\",
+ \"ves_version\": \"$ves_version\",
+ \"ves_influxdb_host\": \"$ves_influxdb_host\",
+ \"ves_influxdb_port\": \"$ves_influxdb_port\",
+ \"ves_grafana_host\": \"$ves_grafana_host\",
+ \"ves_grafana_port\": \"$ves_grafana_port\",
+ \"ves_grafana_auth\": \"$ves_grafana_auth\",
+ \"ves_loglevel\": \"$ves_loglevel\"}"
- if [[ "$master" == "$workers" ]]; then
- nodes=$master
+ source ~/models/tools/cloudify/k8s-cloudify.sh start \
+ ves-collector ves-collector "$inputs"
+ source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-collector
+ update_env ves_host $clusterIp
+ log "updated VES env"; env | grep ves
else
- nodes="$master $workers"
- fi
-
- for node in $nodes; do
- log "remove config for VES at node $node"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$node <<EOF
-sudo rm -rf /home/$k8s_user/ves
-sudo rm -rf /home/$k8s_user/collectd
+ $k8s_user@$k8s_master <<EOF
+sudo docker run -it -d -p 3001:3001 \
+ -e ves_host=$ves_host \
+ -e ves_port=$ves_port \
+ -e ves_path=$ves_path \
+ -e ves_topic=$ves_topic \
+ -e ves_https=$ves_https \
+ -e ves_user=$ves_user \
+ -e ves_pass=$ves_pass \
+ -e ves_interval=$ves_interval \
+ -e ves_version=$ves_version \
+ -e ves_influxdb_host=$ves_influxdb_host \
+ -e ves_grafana_port=$ves_grafana_port \
+ -e ves_grafana_host=$ves_grafana_host \
+ -e ves_grafana_auth=$ves_grafana_auth \
+ -e ves_loglevel=$ves_loglevel \
+ --name ves-collector blsaws/ves-collector:latest
EOF
- done
-
- log "VES datasources and dashboards at grafana server, if needed"
- curl -X DELETE \
- http://$ves_grafana_auth@$ves_grafana_host/api/datasources/name/VESEvents
- curl -X DELETE \
- http://$ves_grafana_auth@$ves_grafana_host/api/dashboards/db/ves-demo
+ fi
- log "Remove VES containers and collectd config at master node"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$master <<'EOF'
-cs="ves-agent ves-collector ves-grafana ves-influxdb ves-barometer"
-for c in $cs; do
- sudo docker stop $c
- sudo docker rm -v $c
-done
-EOF
+ # debug hints
+ # curl 'http://172.16.0.5:30886/query?pretty=true&db=veseventsdb&q=SELECT%20moving_average%28%22load-shortterm%22%2C%205%29%20FROM%20%22load%22%20WHERE%20time%20%3E%3D%20now%28%29%20-%205m%20GROUP%20BY%20%22system%22'
+ # sudo docker logs ves-collector
+ # sudo docker exec -it ves-collector apt-get install -y tcpdump
+ # sudo docker exec -it ves-collector tcpdump -A -v -s 0 -i any port 3001
+ # curl http://$ves_host:3001
+ # sudo docker exec -it ves-collector /bin/bash
}
function verify_veseventsdb() {
- source ~/k8s_env.sh
+ log "VES environment as set by ves_env.sh"
+ env | grep ves
+
for host in $1; do
uuid=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$host sudo cat /sys/class/dmi/id/product_uuid)
echo "$host=$uuid"
- result=$(curl -G "http://$ves_influxdb_host/query?pretty=true" --data-urlencode "db=veseventsdb" --data-urlencode "q=SELECT moving_average(\"$3\", 5) FROM \"$2\" WHERE (\"system\" =~ /^($uuid)$/) AND time >= now() - 5m" | jq -r '.results[0].series')
+ result=$(curl -G "http://$ves_influxdb_host:$ves_influxdb_port/query?pretty=true" --data-urlencode "db=veseventsdb" --data-urlencode "q=SELECT moving_average(\"$3\", 5) FROM \"$2\" WHERE (\"system\" =~ /^($uuid)$/) AND time >= now() - 5m" | jq -r '.results[0].series')
if [[ "$result" != "null" ]]; then
echo "$host load data found in influxdb"
else
@@ -537,32 +425,41 @@ if [[ $(grep -c $HOSTNAME /etc/hosts) -eq 0 ]]; then
sudo tee -a /etc/hosts
fi
+source ~/k8s_env.sh
+if [[ -f ~/ves/tools/ves_env.sh ]]; then
+ source ~/ves/tools/ves_env.sh
+fi
+log "VES environment as input"
+env | grep ves_
+
+trap 'fail' ERR
+
case "$1" in
- "collectd")
- setup_collectd
+ "env")
+ setup_env
+ ;;
+ "barometer")
+ setup_barometer
;;
"agent")
setup_agent $2
;;
+ "influxdb")
+ setup_influxdb
+ ;;
+ "grafana")
+ setup_grafana
+ ;;
"collector")
- setup_collector
+ setup_collector $2
;;
"kafka")
- log "setup kafka server"
- source ~/k8s_env.sh
- sudo docker run -it -d -p 2181:2181 --name zookeeper zookeeper
- sudo docker run -it -d -p 9092:9092 --name ves-kafka \
- -e zookeeper_host=$k8s_master_host \
- -e zookeeper=$k8s_master \
- -e kafka_hostname=$ves_kafka_hostname \
- blsaws/ves-kafka:latest
+ setup_kafka $2
;;
"verify")
- verify_veseventsdb "$1" "load" "load-shortterm"
- ;;
- "clean")
- clean $2 "$3"
+ verify_veseventsdb "$1" load load-shortterm
;;
*)
grep '#. ' $0
esac
+trap '' ERR