author    Bryan Sullivan <bryan.sullivan@att.com>  2018-01-22 15:25:53 -0800
committer Bryan Sullivan <bryan.sullivan@att.com>  2018-01-22 15:25:53 -0800
commit    da9564a9b0b78bbe341de9b039aab3c378eb027f (patch)
tree      74186eb0c6d429e84b3bb164bfbebfa50e90441e
parent    1d82514c0cf25ed599d27ecfd0c4c9502478ef0c (diff)
Implement component deployment via cloudify
JIRA: VES-2
Change-Id: Ic696f13d2a32e10663f50cd4e26b9a060525ff92
Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
-rw-r--r--  build/ves-agent.sh                               5
-rw-r--r--  build/ves-agent/Dockerfile                      29
-rw-r--r--  build/ves-barometer.sh                          51
-rw-r--r--  build/ves-barometer/Dockerfile                  38
-rw-r--r--  build/ves-barometer/start.sh                   151
-rw-r--r--  build/ves-collector.sh                           8
-rw-r--r--  build/ves-collector/Dashboard.json (renamed from tools/grafana/Dashboard.json)   0
-rw-r--r--  build/ves-collector/Dockerfile                  14
-rw-r--r--  build/ves-collector/start.sh                    59
-rw-r--r--  build/ves-kafka.sh                               2
-rw-r--r--  build/ves-kafka/start.sh                         6
-rw-r--r--  tools/cloudify/ves-agent/blueprint.yaml          9
-rw-r--r--  tools/cloudify/ves-collector/blueprint.yaml    183
-rw-r--r--  tools/cloudify/ves-grafana/blueprint.yaml      123
-rw-r--r--  tools/cloudify/ves-influxdb/blueprint.yaml     122
-rw-r--r--  tools/cloudify/ves-kafka/blueprint.yaml        142
-rw-r--r--  tools/cloudify/ves-zookeeper/blueprint.yaml    122
-rw-r--r--  tools/demo_deploy.sh                           163
-rw-r--r--  tools/kubernetes/ves-barometer/daemonset.yaml   62
-rw-r--r--  tools/ves-setup.sh                             546
20 files changed, 1423 insertions, 412 deletions
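With this change the whole VES stack can be deployed end to end from the admin server, per the revised usage notes in tools/demo_deploy.sh below. A minimal sketch (the username and master hostname are placeholders):

  git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
  cd ~/ves/tools
  # usage: bash demo_deploy.sh <user> <master> [cloudify]
  bash demo_deploy.sh ubuntu k8s-master cloudify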
diff --git a/build/ves-agent.sh b/build/ves-agent.sh
index 87d4b07..396d7ea 100644
--- a/build/ves-agent.sh
+++ b/build/ves-agent.sh
@@ -19,7 +19,7 @@
#. Docker hub user logged in e.g. via "sudo docker login"
#.
#. Usage:
-#. bash ves-agent.sh <hub-user> <hub-pass>
+#. bash ves-agent.sh <hub-user>
#. hub-user: username for dockerhub
#.
#. NOTE: To allow patch testing, this script will not reclone the VES repo
@@ -31,9 +31,6 @@ echo; echo "$0 $(date): Update package repos"
sudo apt-get update
echo; echo "$0 $(date): Starting VES agent build process"
-if [[ -d /tmp/ves ]]; then rm -rf /tmp/ves; fi
-
-echo; echo "$0 $(date): Starting VES kafka build process"
if [[ ! -d /tmp/ves ]]; then
echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
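With the <hub-pass> argument dropped, the push now relies on a prior "sudo docker login"; a usage sketch ("myhubuser" is a placeholder):

  sudo docker login   # prerequisite, as noted in the script header
  bash /tmp/ves/build/ves-agent.sh myhubuser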
diff --git a/build/ves-agent/Dockerfile b/build/ves-agent/Dockerfile
index 4c37197..293fcd5 100644
--- a/build/ves-agent/Dockerfile
+++ b/build/ves-agent/Dockerfile
@@ -21,26 +21,19 @@ FROM ubuntu:xenial
MAINTAINER Bryan Sullivan
-RUN apt-get update
-RUN apt-get install -y apt-utils
+RUN mkdir /opt/ves
+
+RUN apt-get update && apt-get install -y apt-utils
RUN apt-get -y upgrade
-RUN apt-get install -y git
-# Required for kafka
-RUN apt-get install -y default-jre
-RUN apt-get install -y zookeeperd
-RUN apt-get install -y python-pip
+# Required for kafka: default-jre zookeeperd python-pip kafka-python
+# Required for building librdkafka: git build-essential libpthread-stubs0-dev libssl-dev libsasl2-dev liblz4-dev
+# Required for building collectd: pkg-config
+RUN apt-get update && apt-get install -y default-jre \
+zookeeperd python-pip pkg-config \
+git build-essential libpthread-stubs0-dev libssl-dev libsasl2-dev liblz4-dev
RUN pip install kafka-python
-# Required for building collectd
-RUN apt-get install -y pkg-config
-
-RUN mkdir /opt/ves
# Build Kafka client
-RUN apt-get install -y build-essential
-RUN apt-get install -y libpthread-stubs0-dev
-RUN apt-get install -y libssl-dev
-RUN apt-get install -y libsasl2-dev
-RUN apt-get install -y liblz4-dev
RUN /bin/bash -c 'git clone --branch v0.9.5 \
https://github.com/edenhill/librdkafka.git /opt/ves/librdkafka; \
cd /opt/ves/librdkafka; ./configure --prefix=/usr; \
@@ -50,10 +43,6 @@ make; make install'
RUN pip install pyaml
RUN git clone https://gerrit.opnfv.org/gerrit/barometer /opt/ves/barometer
-# Test patch
-RUN /bin/bash -c 'cd /opt/ves/barometer; \
-git fetch https://gerrit.opnfv.org/gerrit/barometer \
-refs/changes/27/47427/1 && git checkout FETCH_HEAD'
COPY start.sh /opt/ves/start.sh
ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
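Folding the per-package RUN lines into combined "apt-get update && apt-get install" steps keeps the index refresh and the install in one image layer, which avoids stale-package-cache failures on rebuild. A quick smoke test of the result (a sketch assuming a local build; not part of the commit):

  sudo docker build -t ves-agent /tmp/ves/build/ves-agent
  # confirm the kafka-python client installed by the consolidated RUN step
  sudo docker run --rm --entrypoint python ves-agent \
    -c 'import kafka; print(kafka.__version__)'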
diff --git a/build/ves-barometer.sh b/build/ves-barometer.sh
new file mode 100644
index 0000000..86e81f4
--- /dev/null
+++ b/build/ves-barometer.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Build script for the OPNFV Barometer collectd agent docker image.
+#.
+#. Prerequisites:
+#. Docker hub user logged in e.g. via "sudo docker login"
+#.
+#. Usage:
+#. bash ves-barometer.sh <hub-user> [--no-cache]
+#. hub-user: username for dockerhub
+#. --no-cache: (optional) passed through to docker build to force a clean rebuild
+#.
+#. NOTE: To allow patch testing, this script will not reclone the VES repo
+#. if it exists under /tmp
+#.
+#. Status: this is a work in progress, under test.
+
+cache="$2"
+echo; echo "$0 $(date): Update package repos"
+sudo apt-get update
+
+echo; echo "$0 $(date): Starting VES agent build process"
+if [[ ! -d /tmp/ves ]]; then
+ echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
+ git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+fi
+
+echo; echo "$0 $(date): Building the image"
+cd /tmp/ves/build/ves-barometer
+sudo docker build $cache -t ves-barometer .
+
+echo; echo "$0 $(date): Tagging the image"
+id=$(sudo docker images | grep ves-barometer | awk '{print $3}')
+id=$(echo $id | cut -d ' ' -f 1)
+sudo docker tag $id $1/ves-barometer:latest
+
+echo; echo "$0 $(date): Pushing the image to dockerhub as $1/ves-barometer"
+sudo docker push $1/ves-barometer
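The optional second argument is passed verbatim to "docker build", so "--no-cache" forces a clean rebuild; an example invocation (placeholder username):

  bash /tmp/ves/build/ves-barometer.sh myhubuser --no-cache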
diff --git a/build/ves-barometer/Dockerfile b/build/ves-barometer/Dockerfile
new file mode 100644
index 0000000..4bd4c51
--- /dev/null
+++ b/build/ves-barometer/Dockerfile
@@ -0,0 +1,38 @@
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building an OPNFV Barometer collectd agent container image.
+#
+# Status: this is a work in progress, under test.
+#
+FROM centos:7
+RUN yum update -y && yum install -y which sudo git
+ENV DOCKER y
+ENV repos_dir /src
+ENV openstack_plugins /src/barometer/src/collectd-openstack-plugins
+RUN git config --global http.sslVerify false
+
+WORKDIR ${repos_dir}
+RUN git clone https://gerrit.opnfv.org/gerrit/barometer
+WORKDIR ${repos_dir}/barometer/systems
+RUN sh ./build_base_machine.sh
+
+RUN useradd -ms /bin/bash collectd_exec
+RUN echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+WORKDIR ${openstack_plugins}
+RUN make
+
+COPY start.sh /opt/ves/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
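For a quick test outside kubernetes, the environment variables read by start.sh can be supplied directly to docker; a sketch using the defaults from ves-setup.sh and the image name from the daemonset below:

  sudo docker run -tid --privileged --net=host --name ves-barometer \
    -e ves_mode=node \
    -e ves_kafka_hostname=ves-kafka-service.default.svc.cluster.local \
    -e ves_kafka_port=9092 \
    blsaws/ves-barometer:latest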
diff --git a/build/ves-barometer/start.sh b/build/ves-barometer/start.sh
new file mode 100644
index 0000000..da452bf
--- /dev/null
+++ b/build/ves-barometer/start.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Startup script for the OPNFV Barometer collectd agent running
+# under docker.
+
+rm -f /opt/collectd/etc/collectd.conf.d/*
+
+if [[ "$ves_mode" == "node" ]]; then
+ cat <<EOF >/opt/collectd/etc/collectd.conf.d/collectd.conf
+# for VES plugin
+LoadPlugin logfile
+<Plugin logfile>
+ LogLevel debug
+ File STDOUT
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+
+LoadPlugin csv
+<Plugin csv>
+ DataDir "/work-dir/collectd/install/var/lib/csv"
+ StoreRates false
+</Plugin>
+
+LoadPlugin target_set
+LoadPlugin match_regex
+<Chain "PreCache">
+ <Rule "mark_memory_as_host">
+ <Match "regex">
+ Plugin "^memory$"
+ </Match>
+ <Target "set">
+ PluginInstance "host"
+ </Target>
+ </Rule>
+</Chain>
+
+LoadPlugin cpu
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+</Plugin>
+
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin load
+LoadPlugin disk
+# TODO: how to set this option only to apply to VMs (not nodes)
+LoadPlugin uuid
+
+LoadPlugin write_kafka
+<Plugin write_kafka>
+ Property "metadata.broker.list" "$ves_kafka_hostname:$ves_kafka_port"
+ <Topic "collectd">
+ Format JSON
+ </Topic>
+</Plugin>
+EOF
+
+ if [[ -d /etc/nova ]]; then
+ cat <<EOF >>~/collectd/collectd.conf
+LoadPlugin virt
+<Plugin virt>
+ Connection "qemu:///system"
+ RefreshInterval 60
+ HostnameFormat uuid
+ PluginInstanceFormat name
+ ExtraStats "cpu_util"
+</Plugin>
+EOF
+ fi
+else
+ cat <<EOF >/opt/collectd/etc/collectd.conf.d/collectd.conf
+# for VES plugin
+LoadPlugin logfile
+<Plugin logfile>
+ LogLevel debug
+ File STDOUT
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+
+LoadPlugin cpu
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+</Plugin>
+
+LoadPlugin csv
+<Plugin csv>
+ DataDir "/tmp"
+</Plugin>
+
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin load
+LoadPlugin disk
+LoadPlugin uuid
+
+LoadPlugin write_kafka
+<Plugin write_kafka>
+ Property "metadata.broker.list" "$ves_kafka_hostname:$ves_kafka_port"
+ <Topic "collectd">
+ Format JSON
+ </Topic>
+</Plugin>
+
+LoadPlugin target_set
+LoadPlugin match_regex
+<Chain "PreCache">
+ <Rule "mark_memory_as_guest">
+ <Match "regex">
+ Plugin "^memory$"
+ </Match>
+ <Target "set">
+ PluginInstance "guest"
+ </Target>
+ </Rule>
+</Chain>
+EOF
+fi
+
+echo; echo "cat /opt/collectd/etc/collectd.conf.d/collectd.conf"
+cat /opt/collectd/etc/collectd.conf.d/collectd.conf
+
+#echo "Delete conf files causing collectd to fail"
+#rm -f /opt/collectd/etc/collectd.conf.d/dpdk*.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/snmp*.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/virt.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/mcelog.conf
+#rm -f /opt/collectd/etc/collectd.conf.d/rdt.conf
+#sed -i -- 's/LoadPlugin cpufreq/#LoadPlugin cpufreq/' /opt/collectd/etc/collectd.conf.d/default_plugins.conf
+
+/opt/collectd/sbin/collectd -f
+echo "collectd has exited. sleeping for an hour to enable debugging"
+sleep 3600
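Since write_kafka publishes JSON to the "collectd" topic, delivery can be spot-checked with the stock console consumer; a sketch assuming the kafka_2.11-0.11.0.2 layout used in build/ves-kafka/start.sh:

  cd /opt/ves/kafka_2.11-0.11.0.2
  bin/kafka-console-consumer.sh \
    --bootstrap-server $ves_kafka_hostname:$ves_kafka_port \
    --topic collectd --from-beginning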
diff --git a/build/ves-collector.sh b/build/ves-collector.sh
index 58aa354..a09eeec 100644
--- a/build/ves-collector.sh
+++ b/build/ves-collector.sh
@@ -31,10 +31,10 @@ echo; echo "$0 $(date): Update package repos"
sudo apt-get update
echo; echo "$0 $(date): Starting VES collector build process"
-if [[ -d /tmp/ves ]]; then rm -rf /tmp/ves; fi
-
-echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
-git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+if [[ ! -d /tmp/ves ]]; then
+ echo; echo "$0 $(date): Cloning VES repo to /tmp/ves"
+ git clone https://gerrit.opnfv.org/gerrit/ves /tmp/ves
+fi
echo; echo "$0 $(date): Building the image"
cd /tmp/ves/build/ves-collector
diff --git a/tools/grafana/Dashboard.json b/build/ves-collector/Dashboard.json
index b88646c..b88646c 100644
--- a/tools/grafana/Dashboard.json
+++ b/build/ves-collector/Dashboard.json
diff --git a/build/ves-collector/Dockerfile b/build/ves-collector/Dockerfile
index 9161871..4cd135f 100644
--- a/build/ves-collector/Dockerfile
+++ b/build/ves-collector/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# What this is: A Dockerfile for building an OPFNV VES Agent container image.
+# What this is: A Dockerfile for building an OPNFV VES Collector container image.
#
# Status: this is a work in progress, under test.
#
@@ -21,14 +21,9 @@ FROM ubuntu:xenial
MAINTAINER Bryan Sullivan
-RUN apt-get update
-RUN apt-get install -y apt-utils
+RUN apt-get update && apt-get install -y apt-utils
RUN apt-get -y upgrade
-RUN apt-get update
-RUN apt-get install -y git
-
-# Required for VES collector
-RUN apt-get install -y python-pip python-jsonschema
+RUN apt-get update && apt-get install -y git python-pip python-jsonschema curl
RUN pip install requests
RUN mkdir /opt/ves
@@ -37,5 +32,6 @@ RUN mkdir /opt/ves
RUN git clone https://github.com/att/evel-test-collector.git /opt/ves/evel-test-collector
COPY monitor.py /opt/ves/evel-test-collector/code/collector/monitor.py
+COPY Dashboard.json /opt/ves/Dashboard.json
COPY start.sh /opt/ves/start.sh
ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/build/ves-collector/start.sh b/build/ves-collector/start.sh
index be30c9a..250af34 100644
--- a/build/ves-collector/start.sh
+++ b/build/ves-collector/start.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,6 +23,8 @@ sed -i -- \
evel-test-collector/config/collector.conf
sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $ves_host/g" \
evel-test-collector/config/collector.conf
+sed -i -- "s/vel_port = 30000/vel_port = $ves_port/g" \
+ evel-test-collector/config/collector.conf
sed -i -- "s/vel_username =/vel_username = $ves_user/g" \
evel-test-collector/config/collector.conf
sed -i -- "s/vel_password =/vel_password = $ves_pass/g" \
@@ -31,18 +33,65 @@ sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $ves_path~g" \
evel-test-collector/config/collector.conf
sed -i -- "s~vel_topic_name = example_vnf~vel_topic_name = $ves_topic~g" \
evel-test-collector/config/collector.conf
-sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host" \
+sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host:$ves_influxdb_port" \
evel-test-collector/config/collector.conf
+echo; echo "evel-test-collector/config/collector.conf"
+cat evel-test-collector/config/collector.conf
+
+echo; echo "wait for InfluxDB API at $ves_influxdb_host:$ves_influxdb_port"
+while ! curl http://$ves_influxdb_host:$ves_influxdb_port/ping ; do
+ echo "InfluxDB API is not yet responding... waiting 10 seconds"
+ sleep 10
+done
+
+echo; echo "setup veseventsdb in InfluxDB"
+# TODO: check if pre-existing and skip
+curl -X POST http://$ves_influxdb_host:$ves_influxdb_port/query \
+ --data-urlencode "q=CREATE DATABASE veseventsdb"
+
+echo; echo "wait for Grafana API to be active"
+while ! curl http://$ves_grafana_host:$ves_grafana_port ; do
+ echo "Grafana API is not yet responding... waiting 10 seconds"
+ sleep 10
+done
+
+echo; echo "add VESEvents datasource to Grafana"
+# TODO: check if pre-existing and skip
+cat <<EOF >/opt/ves/datasource.json
+{ "name":"VESEvents",
+ "type":"influxdb",
+ "access":"direct",
+ "url":"http://$ves_influxdb_host:$ves_influxdb_port",
+ "password":"root",
+ "user":"root",
+ "database":"veseventsdb",
+ "basicAuth":false,
+ "basicAuthUser":"",
+ "basicAuthPassword":"",
+ "withCredentials":false,
+ "isDefault":false,
+ "jsonData":null
+}
+EOF
+
+curl -H "Accept: application/json" -H "Content-type: application/json" \
+ -X POST -d @/opt/ves/datasource.json \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/datasources
+
+echo; echo "add VES dashboard to Grafana"
+curl -H "Accept: application/json" -H "Content-type: application/json" \
+ -X POST -d @/opt/ves/Dashboard.json \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db
+
if [[ "$ves_loglevel" != "" ]]; then
python /opt/ves/evel-test-collector/code/collector/monitor.py \
--config /opt/ves/evel-test-collector/config/collector.conf \
- --influxdb $ves_influxdb_host \
+ --influxdb $ves_influxdb_host:$ves_influxdb_port \
--section default > /opt/ves/monitor.log 2>&1
else
python /opt/ves/evel-test-collector/code/collector/monitor.py \
--config /opt/ves/evel-test-collector/config/collector.conf \
- --influxdb $ves_influxdb_host \
+ --influxdb $ves_influxdb_host:$ves_influxdb_port \
--section default
fi
-
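The datasource and dashboard POSTs above are fire-and-forget; whether they landed can be checked afterward against the Grafana HTTP API (a sketch, credentials per the ves_grafana_auth default):

  curl "http://admin:admin@$ves_grafana_host:$ves_grafana_port/api/datasources"
  curl "http://admin:admin@$ves_grafana_host:$ves_grafana_port/api/search?query=VES"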
diff --git a/build/ves-kafka.sh b/build/ves-kafka.sh
index 19a632b..c489535 100644
--- a/build/ves-kafka.sh
+++ b/build/ves-kafka.sh
@@ -19,7 +19,7 @@
#. Docker hub user logged in e.g. via "sudo docker login"
#.
#. Usage:
-#. bash ves-kafka.sh <hub-user> <hub-pass>
+#. bash ves-kafka.sh <hub-user>
#. hub-user: username for dockerhub
#.
#. NOTE: To allow patch testing, this script will not reclone the VES repo
diff --git a/build/ves-kafka/start.sh b/build/ves-kafka/start.sh
index ab4169b..37c36c2 100644
--- a/build/ves-kafka/start.sh
+++ b/build/ves-kafka/start.sh
@@ -16,14 +16,14 @@
#. What this is: Startup script for a kafka server as used by the OPNFV VES
#. framework.
-echo "$zookeeper $zookeeper_host" >>/etc/hosts
+echo "$zookeeper_host $zookeeper_hostname" >>/etc/hosts
cat /etc/hosts
cd /opt/ves
-sed -i "s/localhost:2181/$zookeeper_host:2181/" \
+sed -i "s/localhost:2181/$zookeeper_hostname:$zookeeper_port/" \
kafka_2.11-0.11.0.2/config/server.properties
grep 2181 kafka_2.11-0.11.0.2/config/server.properties
-sed -i "s~#advertised.listeners=PLAINTEXT://your.host.name:9092~advertised.listeners=PLAINTEXT://$kafka_hostname:9092~" \
+sed -i "s~#advertised.listeners=PLAINTEXT://your.host.name:9092~advertised.listeners=PLAINTEXT://$kafka_hostname:$kafka_port~" \
kafka_2.11-0.11.0.2/config/server.properties
grep advertised.listeners kafka_2.11-0.11.0.2/config/server.properties
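After the substitutions, both properties can be verified in place; expected output is sketched here with the default service hostnames:

  grep -E 'zookeeper.connect=|advertised.listeners=' \
    kafka_2.11-0.11.0.2/config/server.properties
  # e.g.
  # zookeeper.connect=ves-zookeeper-service.default.svc.cluster.local:2181
  # advertised.listeners=PLAINTEXT://ves-kafka-service.default.svc.cluster.local:9092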
diff --git a/tools/cloudify/ves-agent/blueprint.yaml b/tools/cloudify/ves-agent/blueprint.yaml
index f7ef0a9..dc2f977 100644
--- a/tools/cloudify/ves-agent/blueprint.yaml
+++ b/tools/cloudify/ves-agent/blueprint.yaml
@@ -52,6 +52,11 @@ inputs:
ves_loglevel:
default: info
+ spec_port:
+ default: 80
+ container_port:
+ default: 80
+
kubernetes_configuration_file_content:
default: kube.config
@@ -114,6 +119,8 @@ node_templates:
metadata:
name: ves-agent-service
spec:
+ ports:
+ - port: { get_input: spec_port }
selector:
app: ves-agent
relationships:
@@ -164,6 +171,8 @@ node_templates:
value: { get_input: ves_mode }
- name: ves_loglevel
value: { get_input: ves_loglevel }
+ ports:
+ - containerPort: { get_input: container_port }
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
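Once the updated blueprint is deployed, the newly exposed service port can be confirmed from the k8s master (a sketch):

  kubectl get svc ves-agent-service -o wide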
diff --git a/tools/cloudify/ves-collector/blueprint.yaml b/tools/cloudify/ves-collector/blueprint.yaml
new file mode 100644
index 0000000..7c4a051
--- /dev/null
+++ b/tools/cloudify/ves-collector/blueprint.yaml
@@ -0,0 +1,183 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running the OPNFV VES Collector under
+# kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ ves_host:
+ default: 127.0.0.1
+ ves_port:
+ default: 3001
+ ves_path:
+ default:
+ ves_topic:
+ default:
+ ves_https:
+ default: false
+ ves_user:
+ default:
+ ves_pass:
+ default:
+ ves_interval:
+ default: 20
+ ves_version:
+ default: 5.1
+ ves_loglevel:
+ default: info
+ ves_influxdb_host:
+ default: 127.0.0.1
+ ves_influxdb_port:
+ default: 8086
+ ves_grafana_host:
+ default: 127.0.0.1
+ ves_grafana_port:
+ default: 3000
+ ves_grafana_auth:
+ default: admin:admin
+ spec_port:
+ default: 3001
+ container_port:
+ default: 3001
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_collector_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-collector-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-collector
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_collector_pod
+
+ ves_collector_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-collector-pod
+ labels:
+ app: ves-collector
+ spec:
+ nodeSelector:
+ role: worker
+ containers:
+ - name: ves-collector
+ image: blsaws/ves-collector:latest
+ env:
+ - name: ves_host
+ value: { get_input: ves_host }
+ - name: ves_port
+ value: { get_input: ves_port }
+ - name: ves_path
+ value: { get_input: ves_path }
+ - name: ves_topic
+ value: { get_input: ves_topic }
+ - name: ves_https
+ value: { get_input: ves_https }
+ - name: ves_user
+ value: { get_input: ves_user }
+ - name: ves_pass
+ value: { get_input: ves_pass }
+ - name: ves_interval
+ value: { get_input: ves_interval }
+ - name: ves_version
+ value: { get_input: ves_version }
+ - name: ves_loglevel
+ value: { get_input: ves_loglevel }
+ - name: ves_influxdb_host
+ value: { get_input: ves_influxdb_host }
+ - name: ves_influxdb_port
+ value: { get_input: ves_influxdb_port }
+ - name: ves_grafana_host
+ value: { get_input: ves_grafana_host }
+ - name: ves_grafana_port
+ value: { get_input: ves_grafana_port }
+ - name: ves_grafana_auth
+ value: { get_input: ves_grafana_auth }
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
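As with the other blueprints, ves-setup.sh drives this one through the k8s-cloudify.sh helper from the OPNFV Models repo; a sketch with a minimal inputs override:

  inputs="{ \"ves_port\": \"3001\", \"ves_grafana_auth\": \"admin:admin\"}"
  bash ~/models/tools/cloudify/k8s-cloudify.sh start ves-collector ves-collector "$inputs"
  source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-collector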
diff --git a/tools/cloudify/ves-grafana/blueprint.yaml b/tools/cloudify/ves-grafana/blueprint.yaml
new file mode 100644
index 0000000..00963b3
--- /dev/null
+++ b/tools/cloudify/ves-grafana/blueprint.yaml
@@ -0,0 +1,123 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running a grafana service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ spec_port:
+ default: 3000
+
+ container_port:
+ default: 3000
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_grafana_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-grafana-service
+ spec:
+ type: NodePort
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-grafana
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_grafana_pod
+
+ ves_grafana_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-grafana-pod
+ labels:
+ app: ves-grafana
+ spec:
+ containers:
+ - name: ves-grafana
+ image: grafana/grafana
+ env:
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
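Because the grafana service is type NodePort, the externally mapped port can be read back after deployment (a sketch):

  kubectl get svc ves-grafana-service -o jsonpath='{.spec.ports[0].nodePort}'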
diff --git a/tools/cloudify/ves-influxdb/blueprint.yaml b/tools/cloudify/ves-influxdb/blueprint.yaml
new file mode 100644
index 0000000..d0a400b
--- /dev/null
+++ b/tools/cloudify/ves-influxdb/blueprint.yaml
@@ -0,0 +1,122 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running an InfluxDB service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ spec_port:
+ default: 8086
+
+ container_port:
+ default: 8086
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_influxdb_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-influxdb-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-influxdb
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_influxdb_pod
+
+ ves_influxdb_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-influxdb-pod
+ labels:
+ app: ves-influxdb
+ spec:
+ containers:
+ - name: ves-influxdb
+ image: influxdb
+ env:
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
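InfluxDB answers on /ping (HTTP 204 when healthy), which is the same probe the collector start.sh polls; the check works against the service's cluster IP (a sketch):

  clusterIp=$(kubectl get svc ves-influxdb-service -o jsonpath='{.spec.clusterIP}')
  curl -i http://$clusterIp:8086/ping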
diff --git a/tools/cloudify/ves-kafka/blueprint.yaml b/tools/cloudify/ves-kafka/blueprint.yaml
new file mode 100644
index 0000000..45f4378
--- /dev/null
+++ b/tools/cloudify/ves-kafka/blueprint.yaml
@@ -0,0 +1,142 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running a Kafka service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ zookeeper_hostname:
+ default: localhost
+ zookeeper_host:
+ default: 127.0.0.1
+ zookeeper_port:
+ default: 2181
+ kafka_hostname:
+ default: localhost
+ kafka_port:
+ default: 9092
+ spec_port:
+ default: 9092
+ container_port:
+ default: 9092
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_kafka_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-kafka-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ targetPort: { get_input: spec_port }
+ selector:
+ app: ves-kafka
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_kafka_pod
+
+ ves_kafka_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-kafka-pod
+ labels:
+ app: ves-kafka
+ spec:
+ containers:
+ - name: ves-kafka
+ image: blsaws/ves-kafka:latest
+ env:
+ - name: zookeeper_hostname
+ value: { get_input: zookeeper_hostname }
+ - name: zookeeper_host
+ value: { get_input: zookeeper_host }
+ - name: zookeeper_port
+ value: { get_input: zookeeper_port }
+ - name: kafka_hostname
+ value: { get_input: kafka_hostname }
+ - name: kafka_port
+ value: { get_input: kafka_port }
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
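The inputs mirror the environment variables consumed by build/ves-kafka/start.sh; ves-setup.sh assembles them as a JSON string before invoking the helper, e.g.:

  inputs="{ \
    \"zookeeper_hostname\": \"$ves_zookeeper_hostname\",
    \"zookeeper_host\": \"$ves_zookeeper_host\",
    \"zookeeper_port\": \"$ves_zookeeper_port\",
    \"kafka_hostname\": \"$ves_kafka_hostname\",
    \"kafka_port\": \"$ves_kafka_port\"}"
  source ~/models/tools/cloudify/k8s-cloudify.sh start ves-kafka ves-kafka "$inputs"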
diff --git a/tools/cloudify/ves-zookeeper/blueprint.yaml b/tools/cloudify/ves-zookeeper/blueprint.yaml
new file mode 100644
index 0000000..8e22ed3
--- /dev/null
+++ b/tools/cloudify/ves-zookeeper/blueprint.yaml
@@ -0,0 +1,122 @@
+tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify blueprint for running a zookeeper service for OPNFV VES
+# under kubernetes.
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+
+inputs:
+
+ spec_port:
+ default: 2181
+
+ container_port:
+ default: 2181
+
+ kubernetes_configuration_file_content:
+ default: kube.config
+
+ kubernetes_master_ip:
+ type: string
+ default: { get_secret: kubernetes_master_ip }
+
+ kubernetes_master_port:
+ type: string
+ default: { get_secret: kubernetes_master_port }
+
+ kubernetes_certificate_authority_data:
+ default: { get_secret: kubernetes_certificate_authority_data }
+
+ kubernetes-admin_client_certificate_data:
+ default: { get_secret: kubernetes-admin_client_certificate_data }
+
+ kubernetes-admin_client_key_data:
+ default: { get_secret: kubernetes-admin_client_key_data }
+
+ kubernetes_master_configuration:
+ default:
+ apiVersion: v1
+ kind: Config
+ preferences: {}
+ current-context: kubernetes-admin@kubernetes
+ clusters:
+ - name: kubernetes
+ cluster:
+ certificate-authority-data: { get_input: kubernetes_certificate_authority_data }
+ server: { concat: [ 'https://', { get_input: kubernetes_master_ip}, ':', { get_input: kubernetes_master_port } ] }
+ contexts:
+ - name: kubernetes-admin@kubernetes
+ context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: { get_input: kubernetes-admin_client_certificate_data }
+ client-key-data: { get_input: kubernetes-admin_client_key_data }
+
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default: { get_input: kubernetes_master_configuration }
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ ves_zookeeper_service:
+ type: cloudify.kubernetes.resources.Service
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-zookeeper-service
+ spec:
+ ports:
+ - port: { get_input: spec_port }
+ selector:
+ app: ves-zookeeper
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: ves_zookeeper_pod
+
+ ves_zookeeper_pod:
+ type: cloudify.kubernetes.resources.Pod
+ properties:
+ definition:
+ apiVersion: v1
+ metadata:
+ name: ves-zookeeper-pod
+ labels:
+ app: ves-zookeeper
+ spec:
+ containers:
+ - name: ves-zookeeper
+ image: zookeeper
+ env:
+ ports:
+ - containerPort: { get_input: container_port }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
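A quick liveness probe for the deployed zookeeper is the four-letter "ruok" command, which a healthy server answers with "imok" (a sketch):

  source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-zookeeper
  echo ruok | nc $clusterIp 2181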
diff --git a/tools/demo_deploy.sh b/tools/demo_deploy.sh
index 305de9c..1105c2a 100644
--- a/tools/demo_deploy.sh
+++ b/tools/demo_deploy.sh
@@ -35,9 +35,17 @@
#. - env variables set prior to running this script, as per ves-setup.sh
#. ves_kafka_hostname: hostname of the node where the kafka server runs
#. - optional env variables set prior to running this script, as per ves-setup.sh
-#. ves_influxdb_host: ip:port of the influxdb service
+#. ves_host: ip of the VES collector service
+#. ves_zookeeper_host: ip of the zookeeper service
+#. ves_zookeeper_port: port of the zookeeper service
+#. ves_kafka_host: ip of the kafka service
+#. ves_kafka_port: port of the kafka service
+#. ves_port: port of the VES collector service
+#. ves_influxdb_host: ip of the influxdb service
+#. ves_influxdb_port: port of the influxdb service
#. ves_influxdb_auth: authentication for the influxdb service
-#. ves_grafana_host: ip:port of the grafana service
+#. ves_grafana_host: ip of the grafana service
+#. ves_grafana_port: port of the grafana service
#. ves_grafana_auth: authentication for the grafana service
#. ves_loglevel: loglevel for VES Agent and Collector (ERROR|DEBUG)
#.
@@ -49,81 +57,104 @@
#. Usage: on the admin server
#. $ git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
#. $ cd ~/ves/tools
-#. $ bash demo_deploy.sh <key> <user> <master> <workers> [cloudify]
-#. <key>: SSH key enabling password-less SSH to nodes
+#. $ bash demo_deploy.sh <user> <master> [cloudify]
#. <user>: username on node with password-less SSH authorized
-#. <master>: master node where core components will be installed
-#. <workers>: list of worker nodes where collectd will be installed
+#. <master>: hostname of k8s master node
#. cloudify: flag indicating to deploy VES core services via Cloudify
-key=$1
-user=$2
-master=$3
-workers="$4"
-cloudify=$5
+trap 'fail' ERR
-eval `ssh-agent`
-ssh-add $key
+function fail() {
+ log $1
+ exit 1
+}
-ves_host=$master
-ves_mode=node
-ves_user=hello
-ves_pass=world
-ves_kafka_host=$master
-ves_kafka_hostname=$ves_kafka_hostname
-ves_influxdb_host=$ves_influxdb_host
-ves_influxdb_auth=$ves_influxdb_auth
-ves_grafana_host=$ves_grafana_host
-ves_grafana_auth=$ves_grafana_auth
-ves_loglevel=$ves_loglevel
-env | grep ves
-source ~/ves/tools/ves-setup.sh env
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo; echo "$f:$l ($(date)) $1"
+}
-echo; echo "$0 $(date): Setting up master node"
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$master mkdir /home/$user/ves
-scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/ves/tools $user@$master:/home/$user/ves
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$master <<EOF
-bash ves/tools/ves-setup.sh collector
-bash ves/tools/ves-setup.sh kafka
-EOF
+function run() {
+ log "$1"
+ start=$((`date +%s`/60))
+ $1
+ step_end "$1"
+}
-if [[ "$cloudify" == "cloudify" ]]; then
- bash ves/tools/ves-setup.sh agent $cloudify
-else
+function step_end() {
+ end=$((`date +%s`/60))
+ runtime=$((end-start))
+ log "step \"$1\" duration = $runtime minutes"
+}
+
+function run_master() {
+ log "$1"
+ start=$((`date +%s`/60))
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$master bash ves/tools/ves-setup.sh agent
-fi
+ $k8s_user@$k8s_master "$1"
+ step_end "$1"
+}
+
+function deploy() {
+ if [[ -f ~/ves/tools/ves_env.sh ]]; then rm ~/ves/tools/ves_env.sh; fi
+ ves_host=$ves_host
+ ves_port=$ves_port
+ ves_mode=node
+ ves_user=hello
+ ves_pass=world
+ ves_kafka_host=$ves_kafka_host
+ ves_kafka_hostname=$ves_kafka_hostname
+ ves_zookeeper_host=$ves_zookeeper_host
+ ves_zookeeper_port=$ves_zookeeper_port
+ ves_influxdb_host=$ves_influxdb_host
+ ves_influxdb_port=$ves_influxdb_port
+ ves_influxdb_auth=$ves_influxdb_auth
+ ves_grafana_host=$ves_grafana_host
+ ves_grafana_port=$ves_grafana_port
+ ves_grafana_auth=$ves_grafana_auth
+ ves_loglevel=$ves_loglevel
+ source ~/ves/tools/ves-setup.sh env
+ env | grep ves_ >~/ves/tools/ves_env.sh
+ for var in $vars; do echo "export $var" | tee -a ~/ves/tools/ves_env.sh; done
-if [[ "$master" == "$workers" ]]; then
- nodes=$master
-else
- nodes="$master $workers"
-fi
+ log "Setting up master node"
+ run_master "mkdir /home/$user/ves"
+ scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/ves/tools $user@$master:/home/$user/ves
+ run "bash ves/tools/ves-setup.sh collector $cloudify"
+ run "bash ves/tools/ves-setup.sh kafka $cloudify"
+ run "bash ves/tools/ves-setup.sh agent $cloudify"
-for node in $nodes; do
- echo; echo "$0 $(date): Setting up collectd at $node"
- if [[ "$node" != "$master" ]]; then
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$node mkdir /home/$user/ves
- scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/ves/tools $user@$node:/home/$user/ves
+ if [[ "$k8s_master" == "$k8s_workers" ]]; then
+ nodes=$k8s_master
+ else
+ nodes="$k8s_master $k8s_workers"
fi
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $user@$node <<EOF > ves-collectd-$node.log 2>&1 &
-ves_kafka_host=$master
-export ves_kafka_host
-ves_kafka_port=$ves_kafka_port
-export ves_kafka_port
-ves_kafka_hostname=$ves_kafka_hostname
-export ves_kafka_hostname
-ves_mode=node
-export ves_mode
-bash /home/$user/ves/tools/ves-setup.sh collectd
+
+ for node in $nodes; do
+ log "Setting up collectd at $node"
+ if [[ "$node" != "$k8s_master" ]]; then
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $user@$node mkdir /home/$user/ves
+ scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/ves/tools $user@$node:/home/$user/ves
+ fi
+ run "bash ves/tools/ves-setup.sh collectd"
EOF
-done
+ done
-echo; echo "$0 $(date): VES Grafana dashboards are available at http://$ves_grafana_host (login as admin/admin)"
+ source ~/ves/tools/ves_env.sh
+ log "VES Grafana dashboards are available at http://$ves_grafana_host:$ves_grafana_port (login as admin/admin)"
+}
+deploy_start=$((`date +%s`/60))
+user=$1
+master=$2
+cloudify=$3
+source ~/k8s_env_$master.sh
+log "k8s environment as input"
+env | grep k8s
+eval `ssh-agent`
+ssh-add $k8s_key
+deploy
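The script now takes its node inventory from the k8s environment file written at cluster setup rather than from CLI arguments; a hypothetical ~/k8s_env_<master>.sh (all values are placeholders):

  export k8s_user=ubuntu
  export k8s_master=10.0.0.10
  export k8s_workers="10.0.0.11 10.0.0.12"
  export k8s_key=~/.ssh/id_rsa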
diff --git a/tools/kubernetes/ves-barometer/daemonset.yaml b/tools/kubernetes/ves-barometer/daemonset.yaml
new file mode 100644
index 0000000..d0d6d19
--- /dev/null
+++ b/tools/kubernetes/ves-barometer/daemonset.yaml
@@ -0,0 +1,62 @@
+#
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Kubernetes manifest for the OPNFV Barometer collectd agent
+# running as a daemonset (one pod per cluster node) under kubernetes.
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: ves-barometer
+ namespace: default
+ labels:
+ k8s-app: ves-barometer
+spec:
+ selector:
+ matchLabels:
+ name: ves-barometer
+ template:
+ metadata:
+ labels:
+ name: ves-barometer
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: ves-barometer
+ image: blsaws/ves-barometer:latest
+ env:
+ - name: ves_mode
+ value: <ves_mode>
+ - name: ves_kafka_hostname
+ value: <ves_kafka_hostname>
+ - name: ves_kafka_port
+ value: "<ves_kafka_port>"
+ volumeMounts:
+ - name: varrun
+ mountPath: /var/run
+ - name: tmp
+ mountPath: /tmp
+ readOnly: true
+ securityContext:
+ privileged: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varrun
+ hostPath:
+ path: /var/run
+ - name: tmp
+ hostPath:
+ path: /tmp
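After ves-setup.sh substitutes the <ves_*> placeholders and creates the daemonset, rollout can be confirmed via the name label it sets (a sketch):

  kubectl get daemonset ves-barometer
  kubectl get pods -l name=ves-barometer -o wide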
diff --git a/tools/ves-setup.sh b/tools/ves-setup.sh
index 0a279f0..1a3dbbe 100644
--- a/tools/ves-setup.sh
+++ b/tools/ves-setup.sh
@@ -35,7 +35,7 @@
#. - shell environment variables setup as below (for non-default setting)
#. ves_mode: install mode (node|guest) for VES collectd plugin (default: node)
#. ves_host: VES collector IP or hostname (default: 127.0.0.1)
-#. ves_port: VES collector port (default: 30000)
+#. ves_port: VES collector port (default: 3001)
#. ves_path: REST path optionalRoutingPath element (default: empty)
#. ves_topic: REST path topicName element (default: empty)
#. ves_https: use HTTPS instead of HTTP (default: false)
@@ -97,11 +97,13 @@ function common_prereqs() {
}
function setup_env() {
+ log "updating VES environment variables"
cat <<EOF >~/ves/tools/ves_env.sh
#!/bin/bash
ves_mode="${ves_mode:=node}"
ves_host="${ves_host:=127.0.0.1}"
-ves_port="${ves_port:=30000}"
+ves_hostname="${ves_hostname:=ves-collector-service.default.svc.cluster.local}"
+ves_port="${ves_port:=3001}"
ves_path="${ves_path:=}"
ves_topic="${ves_topic:=}"
ves_https="${ves_https:=false}"
@@ -109,17 +111,25 @@ ves_user="${ves_user:=}"
ves_pass="${ves_pass:=}"
ves_interval="${ves_interval:=20}"
ves_version="${ves_version:=5.1}"
+ves_zookeeper_hostname="${ves_zookeeper_hostname:=ves-zookeeper-service.default.svc.cluster.local}"
+ves_zookeeper_host="${ves_zookeeper_host:=127.0.0.1}"
+ves_zookeeper_port="${ves_zookeeper_port:=2181}"
ves_kafka_host="${ves_kafka_host:=127.0.0.1}"
-ves_kafka_hostname="${ves_kafka_hostname:=localhost}"
+ves_kafka_hostname="${ves_kafka_hostname:=ves-kafka-service.default.svc.cluster.local}"
ves_kafka_port="${ves_kafka_port:=9092}"
-ves_influxdb_host="${ves_influxdb_host:=localhost:8086}"
+ves_influxdb_host="${ves_influxdb_host:=127.0.0.1}"
+ves_influxdb_hostname="${ves_influxdb_hostname:=ves-influxdb-service.default.svc.cluster.local}"
+ves_influxdb_port="${ves_influxdb_port:=8086}"
ves_influxdb_auth="${ves_influxdb_auth:=}"
-ves_grafana_host="${ves_grafana_host:=localhost:3000}"
+ves_grafana_host="${ves_grafana_host:=127.0.0.1}"
+ves_grafana_hostname="${ves_grafana_hostname:=ves-grafana-service.default.svc.cluster.local}"
+ves_grafana_port="${ves_grafana_port:=3000}"
ves_grafana_auth="${ves_grafana_auth:=admin:admin}"
-ves_loglevel="${ves_loglevel:=}"
+ves_loglevel="${ves_loglevel:=DEBUG}"
ves_cloudtype="${ves_cloudtype:=kubernetes}"
export ves_mode
export ves_host
+export ves_hostname
export ves_port
export ves_path
export ves_topic
@@ -127,177 +137,105 @@ export ves_https
export ves_user
export ves_pass
export ves_interval
+export ves_version
+export ves_zookeeper_host
+export ves_zookeeper_hostname
+export ves_zookeeper_port
export ves_kafka_host
export ves_kafka_hostname
export ves_kafka_port
export ves_influxdb_host
+export ves_influxdb_hostname
+export ves_influxdb_port
export ves_influxdb_auth
export ves_grafana_host
+export ves_grafana_hostname
+export ves_grafana_port
export ves_grafana_auth
export ves_loglevel
export ves_cloudtype
EOF
source ~/ves/tools/ves_env.sh
- echo ~/ves/tools/ves_env.sh
+ env | grep ves_
}
-function setup_collectd() {
- log "setup collectd"
-
- common_prereqs
- source ~/ves/tools/ves_env.sh
- log "VES environment as set by ves_env.sh"
- env | grep ves
+function update_env() {
+ log "update VES environment with $1=$2"
+ eval ${1}=${2}
+ export $1
+ sed -i -- "s/.*$1=.*/$1=$2/" ~/ves/tools/ves_env.sh
+ env | grep ves_
+}
- log "setup VES collectd config for VES $ves_mode mode"
- mkdir ~/collectd
- if [[ "$ves_mode" == "node" ]]; then
-# # TODO: fix for journalctl -xe report "... is marked executable"
-# sudo chmod 744 /etc/systemd/system/collectd.service
-
- cat <<EOF >~/collectd/collectd.conf
-# for VES plugin
-LoadPlugin logfile
-<Plugin logfile>
- LogLevel debug
- File STDOUT
- Timestamp true
- PrintSeverity false
-</Plugin>
-
-LoadPlugin csv
-<Plugin csv>
- DataDir "/work-dir/collectd/install/var/lib/csv"
- StoreRates false
-</Plugin>
-
-LoadPlugin target_set
-LoadPlugin match_regex
-<Chain "PreCache">
- <Rule "mark_memory_as_host">
- <Match "regex">
- Plugin "^memory$"
- </Match>
- <Target "set">
- PluginInstance "host"
- </Target>
- </Rule>
-</Chain>
-
-LoadPlugin cpu
-<Plugin cpu>
- ReportByCpu true
- ReportByState true
- ValuesPercentage true
-</Plugin>
-
-LoadPlugin interface
-LoadPlugin memory
-LoadPlugin load
-LoadPlugin disk
-# TODO: how to set this option only to apply to VMs (not nodes)
-LoadPlugin uuid
-
-LoadPlugin write_kafka
-<Plugin write_kafka>
- Property "metadata.broker.list" "$ves_kafka_host:$ves_kafka_port"
- <Topic "collectd">
- Format JSON
- </Topic>
-</Plugin>
-EOF
+function setup_kafka() {
+ log "setup kafka server"
+ log "deploy zookeeper and kafka"
+ if [[ "$1" == "cloudify" ]]; then
+ cp -r ~/ves/tools/cloudify/ves-zookeeper ~/models/tools/cloudify/blueprints/.
+ source ~/models/tools/cloudify/k8s-cloudify.sh start ves-zookeeper ves-zookeeper
+ source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-zookeeper
+ update_env ves_zookeeper_host $clusterIp
- if [[ -d /etc/nova ]]; then
- cat <<EOF >>~/collectd/collectd.conf
-LoadPlugin virt
-<Plugin virt>
- Connection "qemu:///system"
- RefreshInterval 60
- HostnameFormat uuid
- PluginInstanceFormat name
- ExtraStats "cpu_util"
-</Plugin>
-EOF
- fi
+ cp -r ~/ves/tools/cloudify/ves-kafka ~/models/tools/cloudify/blueprints/.
+ inputs="{ \
+ \"zookeeper_hostname\": \"$ves_zookeeper_hostname\",
+ \"zookeeper_host\": \"$ves_zookeeper_host\",
+ \"zookeeper_port\": \"$ves_zookeeper_port\",
+ \"kafka_port\": \"$ves_kafka_port\",
+ \"kafka_hostname\": \"$ves_kafka_hostname\"}"
+
+ source ~/models/tools/cloudify/k8s-cloudify.sh start ves-kafka ves-kafka "$inputs"
+ source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-kafka
+ update_env ves_kafka_host $clusterIp
else
- cat <<EOF >~/collectd/collectd.conf
-# for VES plugin
-LoadPlugin logfile
-<Plugin logfile>
- LogLevel debug
- File STDOUT
- Timestamp true
- PrintSeverity false
-</Plugin>
-
-LoadPlugin cpu
-<Plugin cpu>
- ReportByCpu true
- ReportByState true
- ValuesPercentage true
-</Plugin>
-
-LoadPlugin csv
-<Plugin csv>
- DataDir "/tmp"
-</Plugin>
-
-LoadPlugin interface
-LoadPlugin memory
-LoadPlugin load
-LoadPlugin disk
-LoadPlugin uuid
-
-LoadPlugin write_kafka
-<Plugin write_kafka>
- Property "metadata.broker.list" "$ves_kafka_host:$ves_kafka_port"
- <Topic "collectd">
- Format JSON
- </Topic>
-</Plugin>
-
-LoadPlugin target_set
-LoadPlugin match_regex
-<Chain "PreCache">
- <Rule "mark_memory_as_guest">
- <Match "regex">
- Plugin "^memory$"
- </Match>
- <Target "set">
- PluginInstance "guest"
- </Target>
- </Rule>
-</Chain>
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+sudo docker run -it -d -p $ves_zookeeper_port:2181 --name ves-zookeeper zookeeper
+sudo docker run -it -d -p $ves_kafka_port:9092 --name ves-kafka \
+ -e zookeeper_hostname=$ves_zookeeper_hostname \
+ -e kafka_hostname=$ves_kafka_hostname \
+ -e zookeeper_host=$ves_zookeeper_host \
+ -e zookeeper_port=$ves_zookeeper_port \
+ -e kafka_port=$ves_kafka_port \
+ -e kafka_hostname=$ves_kafka_hostname \
+ blsaws/ves-kafka:latest
EOF
fi
- log "collectd config updated"
+}
- if [[ $(grep -c $ves_kafka_hostname /etc/hosts) -eq 0 ]]; then
- log "add to /etc/hosts: $ves_kafka_host $ves_kafka_hostname"
- echo "$ves_kafka_host $ves_kafka_hostname" | sudo tee -a /etc/hosts
- fi
+function setup_collectd() {
+ log "setup collectd"
+# if [[ $(grep -c $ves_kafka_hostname /etc/hosts) -eq 0 ]]; then
+# log "add to /etc/hosts: $ves_kafka_host $ves_kafka_hostname"
+# echo "$ves_kafka_host $ves_kafka_hostname" | sudo tee -a /etc/hosts
+# fi
- log "start Barometer container"
- sudo docker run -tid --net=host --name ves-barometer \
- -v ~/collectd:/opt/collectd/etc/collectd.conf.d \
- -v /var/run:/var/run -v /tmp:/tmp --privileged \
- opnfv/barometer:latest /run_collectd.sh
+ log "start Barometer container as daemonset under kubernetes"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+sed -i -- "s/<ves_mode>/$ves_mode/" \
+ /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+sed -i -- "s/<ves_kafka_hostname>/$ves_kafka_hostname/" \
+ /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+sed -i -- "s/<ves_kafka_port>/$ves_kafka_port/" \
+ /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+kubectl create \
+ -f /home/$k8s_user/ves/tools/kubernetes/ves-barometer/daemonset.yaml
+EOF
+
+# sudo docker run -tid --net=host --name ves-barometer \
+# -v ~/collectd:/opt/collectd/etc/collectd.conf.d \
+# -v /var/run:/var/run -v /tmp:/tmp --privileged \
+# opnfv/barometer:latest /run_collectd.sh
}
function setup_agent() {
log "setup VES agent"
- source ~/k8s_env.sh
- source ~/ves/tools/ves_env.sh
- log "VES environment as set by ves_env.sh"
- env | grep ves
log "deploy the VES agent container"
if [[ "$1" == "cloudify" ]]; then
cp -r ~/ves/tools/cloudify/ves-agent ~/models/tools/cloudify/blueprints/.
- # Cloudify is deployed on the k8s master node
- manager_ip=$k8s_master
-
inputs="{ \
\"ves_mode\": \"$ves_mode\",
\"ves_host\": \"$ves_host\",
@@ -309,29 +247,32 @@ function setup_agent() {
\"ves_pass\": \"$ves_pass\",
\"ves_interval\": \"$ves_interval\",
\"ves_version\": \"$ves_version\",
- \"ves_kafka_port\": \"$ves_kafka_port\",
- \"ves_kafka_host\": \"$ves_kafka_host\",
\"ves_kafka_hostname\": \"$ves_kafka_hostname\",
+ \"ves_kafka_host\": \"$ves_kafka_host\",
+ \"ves_kafka_port\": \"$ves_kafka_port\",
\"ves_loglevel\": \"$ves_loglevel\"}"
bash ~/models/tools/cloudify/k8s-cloudify.sh start ves-agent ves-agent "$inputs"
else
- sudo docker run -it -d \
- -e ves_mode=$ves_mode \
- -e ves_host=$ves_host \
- -e ves_port=$ves_port \
- -e ves_path=$ves_path \
- -e ves_topic=$ves_topic \
- -e ves_https=$ves_https \
- -e ves_user=$ves_user \
- -e ves_pass=$ves_pass \
- -e ves_interval=$ves_interval \
- -e ves_version=$ves_version \
- -e ves_kafka_port=$ves_kafka_port \
- -e ves_kafka_host=$ves_kafka_host \
- -e ves_kafka_hostname=$ves_kafka_hostname \
- -e ves_loglevel=$ves_loglevel \
- --name ves-agent blsaws/ves-agent:latest
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
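+# Run the agent directly on the master; env vars point it at kafka and the collector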
+sudo docker run -it -d \
+ -e ves_mode=$ves_mode \
+ -e ves_host=$ves_host \
+ -e ves_port=$ves_port \
+ -e ves_path=$ves_path \
+ -e ves_topic=$ves_topic \
+ -e ves_https=$ves_https \
+ -e ves_user=$ves_user \
+ -e ves_pass=$ves_pass \
+ -e ves_interval=$ves_interval \
+ -e ves_version=$ves_version \
+ -e ves_kafka_port=$ves_kafka_port \
+ -e ves_kafka_host=$ves_kafka_host \
+ -e ves_kafka_hostname=$ves_kafka_hostname \
+ -e ves_loglevel=$ves_loglevel \
+ --name ves-agent blsaws/ves-agent:latest
+EOF
fi
# debug hints
@@ -345,9 +286,6 @@ function setup_agent() {
function setup_collector() {
log "setup collector"
- source ~/ves/tools/ves_env.sh
- log "VES environment as set by ves_env.sh"
- env | grep ves
log "install prerequistes"
if [[ "$dist" == "ubuntu" ]]; then
@@ -356,103 +294,99 @@ function setup_collector() {
sudo yum install -y jq
fi
- if ! curl http://$ves_influxdb_host/ping ; then
- # TODO: migrate to deployment via Helm
- log "setup influxdb container"
- sudo docker run -d --name=ves-influxdb -p 8086:8086 influxdb
- status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
- while [[ "x$status" != "xrunning" ]]; do
- log "InfluxDB container state is ($status)"
- sleep 10
- status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
- done
- log "InfluxDB container state is $status"
-
- log "wait for InfluxDB API to be active"
- while ! curl http://$ves_influxdb_host/ping ; do
- log "InfluxDB API is not yet responding... waiting 10 seconds"
- sleep 10
- done
- fi
- echo "ves_influxdb_host=$ves_influxdb_host"
-
- log "setup InfluxDB database"
- # TODO: check if pre-existing and skip
- curl -X POST http://$ves_influxdb_host/query \
- --data-urlencode "q=CREATE DATABASE veseventsdb"
-
- if ! curl http://$ves_grafana_host ; then
- # TODO: migrate to deployment via Helm
- log "install Grafana container"
- sudo docker run -d --name ves-grafana -p 3000:3000 grafana/grafana
- status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
- while [[ "x$status" != "xrunning" ]]; do
- log "Grafana container state is ($status)"
- sleep 10
- status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
- done
- log "Grafana container state is $status"
- echo "ves_grafana_host=$ves_grafana_host"
-
- log "wait for Grafana API to be active"
- while ! curl http://$ves_grafana_host ; do
- log "Grafana API is not yet responding... waiting 10 seconds"
- sleep 10
- done
+ log "checking for influxdb at http://$ves_influxdb_host:$ves_influxdb_port/ping"
+ if ! curl http://$ves_influxdb_host:$ves_influxdb_port/ping ; then
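+    # No response: install InfluxDB on the k8s master and update the env to match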
+ log "install influxdb container on k8s master"
+ update_env ves_influxdb_host $k8s_master
+ update_env ves_influxdb_hostname $k8s_master_hostname
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master \
+ sudo docker run -d --name=ves-influxdb -p 8086:8086 influxdb
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
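+# Quoted 'EOF': $status is expanded on the remote host, not locally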
+status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
+while [[ "x$status" != "xrunning" ]]; do
+ echo; echo "InfluxDB container state is ($status)"
+ sleep 10
+ status=$(sudo docker inspect ves-influxdb | jq -r '.[0].State.Status')
+done
+echo; echo "InfluxDB container state is $status"
+EOF
fi
- log "add VESEvents datasource to Grafana at http://$ves_grafana_auth@$ves_grafana_host"
- # TODO: check if pre-existing and skip
- cat <<EOF >~/ves/tools/grafana/datasource.json
-{ "name":"VESEvents",
- "type":"influxdb",
- "access":"direct",
- "url":"http://$ves_host:8086",
- "password":"root",
- "user":"root",
- "database":"veseventsdb",
- "basicAuth":false,
- "basicAuthUser":"",
- "basicAuthPassword":"",
- "withCredentials":false,
- "isDefault":false,
- "jsonData":null
-}
+ log "checking for grafana at http://$ves_grafana_host:$ves_grafana_port"
+ if ! curl http://$ves_grafana_host:$ves_grafana_port ; then
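+    # Same pattern as InfluxDB: install Grafana on the master if it's not already up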
+ log "install Grafana container on k8s master"
+ update_env ves_grafana_host $k8s_master
+ update_env ves_grafana_hostname $k8s_master_hostname
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master \
+ sudo docker run -d --name ves-grafana -p 3000:3000 grafana/grafana
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
+while [[ "x$status" != "xrunning" ]]; do
+ echo; echo "Grafana container state is ($status)"
+ sleep 10
+  status=$(sudo docker inspect ves-grafana | jq -r '.[0].State.Status')
+done
+echo; echo "Grafana container state is $status"
EOF
-
- # Use /home/$USER/ instead of ~ with @
- curl -H "Accept: application/json" -H "Content-type: application/json" \
- -X POST -d @/home/$USER/ves/tools/grafana/datasource.json \
- http://$ves_grafana_auth@$ves_grafana_host/api/datasources
-
- log "add VES dashboard to Grafana at http://$ves_grafana_auth@$ves_grafana_host"
- curl -H "Accept: application/json" -H "Content-type: application/json" \
- -X POST \
- -d @/home/$USER/ves/tools/grafana/Dashboard.json\
- http://$ves_grafana_auth@$ves_grafana_host/api/dashboards/db
+ fi
log "setup collector container"
- # TODO: migrate to deployment via Helm
- sudo docker run -it -d -p 30000:30000 \
- -e ves_host=$ves_host \
- -e ves_port=$ves_port \
- -e ves_path=$ves_path \
- -e ves_topic=$ves_topic \
- -e ves_https=$ves_https \
- -e ves_user=$ves_user \
- -e ves_pass=$ves_pass \
- -e ves_interval=$ves_interval \
- -e ves_version=$ves_version \
- -e ves_influxdb_host=$ves_influxdb_host \
- -e ves_loglevel=$ves_loglevel \
- --name ves-collector blsaws/ves-collector:latest
+ if [[ "$1" == "cloudify" ]]; then
+ cp -r ~/ves/tools/cloudify/ves-collector ~/models/tools/cloudify/blueprints/.
+ inputs="{ \
+ \"ves_host\": \"$ves_host\",
+ \"ves_port\": \"$ves_port\",
+ \"ves_path\": \"$ves_path\",
+ \"ves_topic\": \"$ves_topic\",
+ \"ves_https\": \"$ves_https\",
+ \"ves_user\": \"$ves_user\",
+ \"ves_pass\": \"$ves_pass\",
+ \"ves_interval\": \"$ves_interval\",
+ \"ves_version\": \"$ves_version\",
+ \"ves_influxdb_host\": \"$ves_influxdb_host\",
+ \"ves_influxdb_port\": \"$ves_influxdb_port\",
+ \"ves_grafana_host\": \"$ves_grafana_host\",
+ \"ves_grafana_port\": \"$ves_grafana_port\",
+ \"ves_grafana_auth\": \"$ves_grafana_auth\",
+ \"ves_loglevel\": \"$ves_loglevel\"}"
+
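+    # Deploy the collector blueprint, then record its cluster IP as ves_host
+    # so the agent can reach it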
+ source ~/models/tools/cloudify/k8s-cloudify.sh start \
+ ves-collector ves-collector "$inputs"
+ source ~/models/tools/cloudify/k8s-cloudify.sh clusterIp ves-collector
+ update_env ves_host $clusterIp
+ log "updated VES env"; env | grep ves
+ else
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
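+# Fallback: run the collector directly on the master (3001 is its listen port)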
+sudo docker run -it -d -p 3001:3001 \
+ -e ves_host=$ves_host \
+ -e ves_port=$ves_port \
+ -e ves_path=$ves_path \
+ -e ves_topic=$ves_topic \
+ -e ves_https=$ves_https \
+ -e ves_user=$ves_user \
+ -e ves_pass=$ves_pass \
+ -e ves_interval=$ves_interval \
+ -e ves_version=$ves_version \
+  -e ves_influxdb_host=$ves_influxdb_host \
+  -e ves_influxdb_port=$ves_influxdb_port \
+ -e ves_grafana_port=$ves_grafana_port \
+ -e ves_grafana_host=$ves_grafana_host \
+ -e ves_grafana_auth=$ves_grafana_auth \
+ -e ves_loglevel=$ves_loglevel \
+ --name ves-collector blsaws/ves-collector:latest
+EOF
+ fi
# debug hints
- # curl 'http://172.16.0.5:8086/query?pretty=true&db=veseventsdb&q=SELECT%20moving_average%28%22load-shortterm%22%2C%205%29%20FROM%20%22load%22%20WHERE%20time%20%3E%3D%20now%28%29%20-%205m%20GROUP%20BY%20%22system%22'
+ # curl 'http://172.16.0.5:30886/query?pretty=true&db=veseventsdb&q=SELECT%20moving_average%28%22load-shortterm%22%2C%205%29%20FROM%20%22load%22%20WHERE%20time%20%3E%3D%20now%28%29%20-%205m%20GROUP%20BY%20%22system%22'
# sudo docker logs ves-collector
# sudo docker exec -it ves-collector apt-get install -y tcpdump
- # sudo docker exec -it ves-collector tcpdump -A -v -s 0 -i any port 30000
- # curl http://$ves_host:30000
+ # sudo docker exec -it ves-collector tcpdump -A -v -s 0 -i any port 3001
+ # curl http://$ves_host:3001
# sudo docker exec -it ves-collector /bin/bash
}
@@ -462,57 +396,72 @@ function clean() {
workers="$2"
cloudify=$3
- source ~/k8s_env.sh
- source ~/ves/tools/ves_env.sh
- log "VES environment as set by ves_env.sh"
- env | grep ves
+ log "VES datasources and dashboards at grafana server, if needed"
+ curl -X DELETE \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/datasources/name/VESEvents
+ curl -X DELETE \
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db/ves-demo
+
+ if [[ "$cloudify" == "cloudify" ]]; then
+ log "stop cloudify-managed services"
+ bash ~/models/tools/cloudify/k8s-cloudify.sh stop ves-agent ves-agent
+ bash ~/models/tools/cloudify/k8s-cloudify.sh stop ves-kafka ves-kafka
+ bash ~/models/tools/cloudify/k8s-cloudify.sh stop ves-zookeeper ves-zookeeper
+ bash ~/models/tools/cloudify/k8s-cloudify.sh stop ves-collector ves-collector
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+cs="ves-grafana ves-influxdb"
+for c in $cs; do
+ sudo docker stop $c
+ sudo docker rm -v $c
+done
+kubectl delete daemonset --namespace default ves-barometer
+EOF
+ log "workaround: force cleanup of k8s pods if still present"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+cs="ves-agent-pod ves-collector-pod ves-grafana-pod ves-influxdb-pod ves-kafka-pod ves-zookeeper-pod"
+for c in $cs; do
+ kubectl delete pods --namespace default $c
+done
+EOF
+ else
+ log "Remove VES containers and collectd config at master node"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+cs="ves-agent ves-collector ves-grafana ves-influxdb ves-kafka ves-zookeeper"
+for c in $cs; do
+ sudo docker stop $c
+ sudo docker rm -v $c
+done
+EOF
+ fi
if [[ "$master" == "$workers" ]]; then
nodes=$master
else
nodes="$master $workers"
fi
-
- if [[ "$cloudify" == "cloudify" ]]; then
- bash ~/models/tools/cloudify/k8s-cloudify.sh stop ves-agent ves-agent
- fi
-
for node in $nodes; do
- log "remove config for VES at node $node"
+ log "remove ves-barometer container and config for VES at node $node"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
$k8s_user@$node <<EOF
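+# Remove any directly-run barometer container and the VES working directories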
+sudo docker stop ves-barometer
+sudo docker rm -v ves-barometer
sudo rm -rf /home/$k8s_user/ves
sudo rm -rf /home/$k8s_user/collectd
EOF
done
-
- log "VES datasources and dashboards at grafana server, if needed"
- curl -X DELETE \
- http://$ves_grafana_auth@$ves_grafana_host/api/datasources/name/VESEvents
- curl -X DELETE \
- http://$ves_grafana_auth@$ves_grafana_host/api/dashboards/db/ves-demo
-
- log "Remove VES containers and collectd config at master node"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$master <<'EOF'
-cs="ves-agent ves-collector ves-grafana ves-influxdb ves-barometer ves-kafka ves-zookeeper"
-for c in $cs; do
- sudo docker stop $c
- sudo docker rm -v $c
-done
-EOF
}
function verify_veseventsdb() {
- source ~/k8s_env.sh
- source ~/ves/tools/ves_env.sh
log "VES environment as set by ves_env.sh"
env | grep ves
for host in $1; do
uuid=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$host sudo cat /sys/class/dmi/id/product_uuid)
echo "$host=$uuid"
- result=$(curl -G "http://$ves_influxdb_host/query?pretty=true" --data-urlencode "db=veseventsdb" --data-urlencode "q=SELECT moving_average(\"$3\", 5) FROM \"$2\" WHERE (\"system\" =~ /^($uuid)$/) AND time >= now() - 5m" | jq -r '.results[0].series')
+ result=$(curl -G "http://$ves_influxdb_host:$ves_influxdb_port/query?pretty=true" --data-urlencode "db=veseventsdb" --data-urlencode "q=SELECT moving_average(\"$3\", 5) FROM \"$2\" WHERE (\"system\" =~ /^($uuid)$/) AND time >= now() - 5m" | jq -r '.results[0].series')
if [[ "$result" != "null" ]]; then
echo "$host load data found in influxdb"
else
@@ -527,8 +476,14 @@ if [[ $(grep -c $HOSTNAME /etc/hosts) -eq 0 ]]; then
sudo tee -a /etc/hosts
fi
+source ~/k8s_env.sh
+if [[ -f ~/ves/tools/ves_env.sh ]]; then
+ source ~/ves/tools/ves_env.sh
+fi
log "VES environment as input"
-env | grep ves
+env | grep ves_
+
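+# Abort on any command failure; assumes a fail() handler is defined earlier in this script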
+trap 'fail' ERR
case "$1" in
"env")
@@ -541,23 +496,13 @@ case "$1" in
setup_agent $2
;;
"collector")
- setup_collector
+ setup_collector $2
;;
"kafka")
- log "setup kafka server"
- source ~/k8s_env.sh
- source ~/ves/tools/ves_env.sh
- log "VES environment as set by ves_env.sh"
- env | grep ves
- sudo docker run -it -d -p 2181:2181 --name ves-zookeeper zookeeper
- sudo docker run -it -d -p 9092:9092 --name ves-kafka \
- -e zookeeper_host=$k8s_master_host \
- -e zookeeper=$k8s_master \
- -e kafka_hostname=$ves_kafka_hostname \
- blsaws/ves-kafka:latest
+ setup_kafka $2
;;
"verify")
- verify_veseventsdb "$1" "load" "load-shortterm"
+ verify_veseventsdb "$1" load load-shortterm
;;
"clean")
clean $2 "$3" $4
@@ -565,3 +510,4 @@ case "$1" in
*)
grep '#. ' $0
esac
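+# Clear the ERR trap now that command dispatch is complete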
+trap '' ERR