summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--3rd_party/collectd-ves-app/ves_app/native.yaml287
-rw-r--r--docker/barometer-grafana/Dockerfile (renamed from docker/grafana/Dockerfile)0
-rwxr-xr-xdocker/barometer-grafana/configure_grafana.sh (renamed from docker/grafana/configure_grafana.sh)0
-rw-r--r--docker/barometer-grafana/dashboards/average_load_dashboard.json (renamed from docker/grafana/dashboards/average_load_dashboard.json)0
-rwxr-xr-xdocker/barometer-grafana/dashboards/configure_grafana.sh (renamed from docker/grafana/dashboards/configure_grafana.sh)0
-rw-r--r--docker/barometer-grafana/dashboards/cpu_usage_dashboard.json (renamed from docker/grafana/dashboards/cpu_usage_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/host_overview_dashboard.json (renamed from docker/grafana/dashboards/host_overview_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/hugepages_dashboard.json (renamed from docker/grafana/dashboards/hugepages_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/intel_pmu_dashboard.json (renamed from docker/grafana/dashboards/intel_pmu_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/intel_rdt_dashboard.json (renamed from docker/grafana/dashboards/intel_rdt_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/ipmi_dashboard.json (renamed from docker/grafana/dashboards/ipmi_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/mcelog_dashboard.json (renamed from docker/grafana/dashboards/mcelog_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/ovs_stats_dashboard.json (renamed from docker/grafana/dashboards/ovs_stats_dashboard.json)0
-rw-r--r--docker/barometer-grafana/dashboards/virt_dashboard.json (renamed from docker/grafana/dashboards/virt_dashboard.json)0
-rw-r--r--docker/barometer-grafana/run.sh (renamed from docker/grafana/run.sh)0
-rw-r--r--docker/ves/Dockerfile20
-rw-r--r--docker/ves/start_ves_app.sh22
-rw-r--r--docs/release/userguide/collectd-ves-guest.conf15
-rw-r--r--docs/release/userguide/collectd-ves-host.conf22
-rw-r--r--docs/release/userguide/collectd-ves-hypervisor.conf26
-rw-r--r--docs/release/userguide/collectd.ves.userguide.rst216
-rw-r--r--docs/release/userguide/docker.userguide.rst96
-rw-r--r--docs/release/userguide/ves-app-host-mode.pngbin8057 -> 21203 bytes
-rw-r--r--docs/release/userguide/ves-app-hypervisor-mode.pngbin0 -> 8057 bytes
24 files changed, 544 insertions, 160 deletions
diff --git a/3rd_party/collectd-ves-app/ves_app/native.yaml b/3rd_party/collectd-ves-app/ves_app/native.yaml
new file mode 100644
index 00000000..8a351547
--- /dev/null
+++ b/3rd_party/collectd-ves-app/ves_app/native.yaml
@@ -0,0 +1,287 @@
+---
+# Common event header definition (required fields and defaults)
+commonEventHeader: &commonEventHeader
+ domain: N/A
+ eventId: "{system.id}"
+ eventName: ""
+ eventType: Info
+ lastEpochMicrosec: 0
+ priority: Normal
+ reportingEntityId: &reportingEntityId "{system.hostname}"
+ reportingEntityName: *reportingEntityId
+ sequence: 0
+ sourceName: N/A
+ startEpochMicrosec: 0
+ version: 2.0
+
+# Host measurements definition
+Host Measurements: !Measurements
+ - ITEM-DESC:
+ event:
+ commonEventHeader: &hostCommonEventHeader
+ <<: *commonEventHeader
+ eventType: platform
+ domain: measurementsForVfScaling
+ sourceId: &sourceId "{vl.host}"
+ sourceName: *sourceId
+ startEpochMicrosec: !Number "{vl.time}"
+ measurementsForVfScalingFields: &hostMeasurementsForVfScalingFields
+ measurementsForVfScalingVersion: 2.0
+ additionalMeasurements: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: "/^(?!memory|cpu|interface|disk).*$/"
+ - INDEX-KEY:
+ - host
+ - plugin
+ - plugin_instance
+ - ITEM-DESC:
+ name: !StripExtraDash "{vl.plugin}-{vl.plugin_instance}"
+ arrayOfFields: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: "{vl.plugin}"
+ plugin_instance: "{vl.plugin_instance}"
+ - ITEM-DESC:
+ name: !StripExtraDash "{vl.type}-{vl.type_instance}-{vl.ds_name}"
+ value: "{vl.value}"
+ measurementInterval: !Number "{vl.interval}"
+ memoryUsageArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: free
+ - ITEM-DESC:
+ vmIdentifier: "{vl.host}"
+ memoryFree: !Number "{vl.value}"
+ memoryUsed: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: used
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ memoryBuffered: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: buffered
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ memoryCached: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: cached
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ memorySlabRecl: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: slab_recl
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ - DEFAULT: 0
+ memorySlabUnrecl: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: slab_unrecl
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ - DEFAULT: 0
+ cpuUsageArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ type: percent
+ type_instance: idle
+ - ITEM-DESC:
+ cpuIdentifier: "{vl.plugin_instance}"
+ cpuIdle: !Number "{vl.value}"
+ percentUsage: 0.0
+ cpuUsageUser: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: user
+ cpuWait: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: wait
+ cpuUsageInterrupt: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: interrupt
+ cpuUsageNice: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: nice
+ cpuUsageSoftIrq: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: softirq
+ cpuUsageSteal: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: steal
+ cpuUsageSystem: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: system
+ vNicPerformanceArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ type: if_packets
+ ds_name: rx
+ - ITEM-DESC:
+ valuesAreSuspect: "true"
+ vNicIdentifier: "{vl.plugin_instance}"
+ receivedTotalPacketsAccumulated: !Number "{vl.value}"
+ transmittedTotalPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_packets
+ ds_name: tx
+ receivedOctetsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_octets
+ ds_name: rx
+ transmittedOctetsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_octets
+ ds_name: tx
+ receivedErrorPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_errors
+ ds_name: rx
+ transmittedErrorPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_errors
+ ds_name: tx
+ receivedDiscardedPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_dropped
+ ds_name: rx
+ transmittedDiscardedPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_dropped
+ ds_name: tx
+ diskUsageArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ type: disk_octets
+ ds_name: read
+ - ITEM-DESC:
+ diskIdentifier: "{vl.plugin_instance}"
+ diskOctetsReadLast: !Number "{vl.value}"
+ diskOctetsWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_octets
+ ds_name: write
+ diskOpsReadLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_ops
+ ds_name: read
+ diskOpsWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_ops
+ ds_name: write
+ diskIoTimeLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_io_time
+ ds_name: io_time
+ - DEFAULT: 0
+ diskMergedReadLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_merged
+ ds_name: read
+ - DEFAULT: 0
+ diskMergedWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_merged
+ ds_name: write
+ - DEFAULT: 0
+ diskTimeReadLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_time
+ ds_name: read
+ - DEFAULT: 0
+ diskTimeWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_time
+ ds_name: write
+ - DEFAULT: 0
+ - SELECT:
+ plugin: memory
+ type_instance: free
+
+
diff --git a/docker/grafana/Dockerfile b/docker/barometer-grafana/Dockerfile
index 32a78862..32a78862 100644
--- a/docker/grafana/Dockerfile
+++ b/docker/barometer-grafana/Dockerfile
diff --git a/docker/grafana/configure_grafana.sh b/docker/barometer-grafana/configure_grafana.sh
index 0b185297..0b185297 100755
--- a/docker/grafana/configure_grafana.sh
+++ b/docker/barometer-grafana/configure_grafana.sh
diff --git a/docker/grafana/dashboards/average_load_dashboard.json b/docker/barometer-grafana/dashboards/average_load_dashboard.json
index ae797f75..ae797f75 100644
--- a/docker/grafana/dashboards/average_load_dashboard.json
+++ b/docker/barometer-grafana/dashboards/average_load_dashboard.json
diff --git a/docker/grafana/dashboards/configure_grafana.sh b/docker/barometer-grafana/dashboards/configure_grafana.sh
index 8ce9689b..8ce9689b 100755
--- a/docker/grafana/dashboards/configure_grafana.sh
+++ b/docker/barometer-grafana/dashboards/configure_grafana.sh
diff --git a/docker/grafana/dashboards/cpu_usage_dashboard.json b/docker/barometer-grafana/dashboards/cpu_usage_dashboard.json
index c9509fa0..c9509fa0 100644
--- a/docker/grafana/dashboards/cpu_usage_dashboard.json
+++ b/docker/barometer-grafana/dashboards/cpu_usage_dashboard.json
diff --git a/docker/grafana/dashboards/host_overview_dashboard.json b/docker/barometer-grafana/dashboards/host_overview_dashboard.json
index e6d3ad95..e6d3ad95 100644
--- a/docker/grafana/dashboards/host_overview_dashboard.json
+++ b/docker/barometer-grafana/dashboards/host_overview_dashboard.json
diff --git a/docker/grafana/dashboards/hugepages_dashboard.json b/docker/barometer-grafana/dashboards/hugepages_dashboard.json
index 146fccf4..146fccf4 100644
--- a/docker/grafana/dashboards/hugepages_dashboard.json
+++ b/docker/barometer-grafana/dashboards/hugepages_dashboard.json
diff --git a/docker/grafana/dashboards/intel_pmu_dashboard.json b/docker/barometer-grafana/dashboards/intel_pmu_dashboard.json
index e1fad5fa..e1fad5fa 100644
--- a/docker/grafana/dashboards/intel_pmu_dashboard.json
+++ b/docker/barometer-grafana/dashboards/intel_pmu_dashboard.json
diff --git a/docker/grafana/dashboards/intel_rdt_dashboard.json b/docker/barometer-grafana/dashboards/intel_rdt_dashboard.json
index 49efec7a..49efec7a 100644
--- a/docker/grafana/dashboards/intel_rdt_dashboard.json
+++ b/docker/barometer-grafana/dashboards/intel_rdt_dashboard.json
diff --git a/docker/grafana/dashboards/ipmi_dashboard.json b/docker/barometer-grafana/dashboards/ipmi_dashboard.json
index 04179ef6..04179ef6 100644
--- a/docker/grafana/dashboards/ipmi_dashboard.json
+++ b/docker/barometer-grafana/dashboards/ipmi_dashboard.json
diff --git a/docker/grafana/dashboards/mcelog_dashboard.json b/docker/barometer-grafana/dashboards/mcelog_dashboard.json
index 9618bacc..9618bacc 100644
--- a/docker/grafana/dashboards/mcelog_dashboard.json
+++ b/docker/barometer-grafana/dashboards/mcelog_dashboard.json
diff --git a/docker/grafana/dashboards/ovs_stats_dashboard.json b/docker/barometer-grafana/dashboards/ovs_stats_dashboard.json
index 34c8b7bf..34c8b7bf 100644
--- a/docker/grafana/dashboards/ovs_stats_dashboard.json
+++ b/docker/barometer-grafana/dashboards/ovs_stats_dashboard.json
diff --git a/docker/grafana/dashboards/virt_dashboard.json b/docker/barometer-grafana/dashboards/virt_dashboard.json
index 5317b701..5317b701 100644
--- a/docker/grafana/dashboards/virt_dashboard.json
+++ b/docker/barometer-grafana/dashboards/virt_dashboard.json
diff --git a/docker/grafana/run.sh b/docker/barometer-grafana/run.sh
index 5b9a8191..5b9a8191 100644
--- a/docker/grafana/run.sh
+++ b/docker/barometer-grafana/run.sh
diff --git a/docker/ves/Dockerfile b/docker/ves/Dockerfile
new file mode 100644
index 00000000..f77f852a
--- /dev/null
+++ b/docker/ves/Dockerfile
@@ -0,0 +1,20 @@
+FROM centos:7
+RUN yum update -y && \
+ yum install -y epel-release \
+ git \
+ nc
+
+RUN yum install -y python-pip
+RUN pip install pyyaml \
+ kafka-python
+
+ENV repos_dir /src
+
+WORKDIR ${repos_dir}
+RUN git clone https://gerrit.opnfv.org/gerrit/barometer
+WORKDIR barometer/3rd_party/collectd-ves-app/ves_app
+COPY start_ves_app.sh ${repos_dir}
+RUN chmod 755 start_ves_app.sh
+
+ENTRYPOINT ["./start_ves_app.sh"]
+CMD ["host.yaml"]
diff --git a/docker/ves/start_ves_app.sh b/docker/ves/start_ves_app.sh
new file mode 100644
index 00000000..ad276353
--- /dev/null
+++ b/docker/ves/start_ves_app.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2017 OPNFV
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+YAML_FILE="$1"
+
+#wait for kafka service to be available
+while ! nc localhost 9092 < /dev/null; do sleep 1; done
+
+python ves_app.py --events-schema="$YAML_FILE" --config=ves_app_config.conf
diff --git a/docs/release/userguide/collectd-ves-guest.conf b/docs/release/userguide/collectd-ves-guest.conf
index 2ed814d5..80d2546f 100644
--- a/docs/release/userguide/collectd-ves-guest.conf
+++ b/docs/release/userguide/collectd-ves-guest.conf
@@ -1,4 +1,11 @@
LoadPlugin logfile
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin load
+LoadPlugin disk
+LoadPlugin uuid
+LoadPlugin write_kafka
+
<Plugin logfile>
LogLevel info
File "/opt/collectd/var/log/collectd.log"
@@ -6,20 +13,12 @@ LoadPlugin logfile
PrintSeverity false
</Plugin>
-LoadPlugin cpu
<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>
-LoadPlugin interface
-LoadPlugin memory
-LoadPlugin load
-LoadPlugin disk
-LoadPlugin uuid
-
-LoadPlugin write_kafka
<Plugin write_kafka>
Property "metadata.broker.list" "localhost:9092"
<Topic "collectd">
diff --git a/docs/release/userguide/collectd-ves-host.conf b/docs/release/userguide/collectd-ves-host.conf
index 76706b5c..1a03e4e0 100644
--- a/docs/release/userguide/collectd-ves-host.conf
+++ b/docs/release/userguide/collectd-ves-host.conf
@@ -1,20 +1,12 @@
-LoadPlugin logfile
-<Plugin logfile>
- LogLevel info
- File "/opt/collectd/var/log/collectd.log"
- Timestamp true
- PrintSeverity false
-</Plugin>
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin disk
LoadPlugin cpu
-
-LoadPlugin virt
-<Plugin virt>
- Connection "qemu:///system"
- RefreshInterval 60
- HostnameFormat uuid
- PluginInstanceFormat name
- ExtraStats "cpu_util"
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
</Plugin>
LoadPlugin write_kafka
diff --git a/docs/release/userguide/collectd-ves-hypervisor.conf b/docs/release/userguide/collectd-ves-hypervisor.conf
new file mode 100644
index 00000000..301b0a4d
--- /dev/null
+++ b/docs/release/userguide/collectd-ves-hypervisor.conf
@@ -0,0 +1,26 @@
+LoadPlugin logfile
+LoadPlugin cpu
+LoadPlugin virt
+LoadPlugin write_kafka
+
+<Plugin logfile>
+ LogLevel info
+ File "/opt/collectd/var/log/collectd.log"
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+
+<Plugin virt>
+ Connection "qemu:///system"
+ RefreshInterval 60
+ HostnameFormat uuid
+ PluginInstanceFormat name
+ ExtraStats "cpu_util"
+</Plugin>
+
+<Plugin write_kafka>
+ Property "metadata.broker.list" "localhost:9092"
+ <Topic "collectd">
+ Format JSON
+ </Topic>
+</Plugin>
diff --git a/docs/release/userguide/collectd.ves.userguide.rst b/docs/release/userguide/collectd.ves.userguide.rst
index ea32ceb1..8b666114 100644
--- a/docs/release/userguide/collectd.ves.userguide.rst
+++ b/docs/release/userguide/collectd.ves.userguide.rst
@@ -8,16 +8,26 @@ VES Application User Guide
The Barometer repository contains a python based application for VES (VNF Event
Stream) which receives the `collectd`_ specific metrics via `Kafka`_ bus,
-normalizes the metric data into the VES message and sends it into the VES
+normalizes the metric data into the VES message format and sends it into the VES
collector.
The application currently supports pushing platform relevant metrics through the
additional measurements field for VES.
Collectd has a ``write_kafka`` plugin that sends collectd metrics and values to
-a Kafka Broker. The VES application uses Kafka Consumer to receive metrics
-from the Kafka Broker.
+a Kafka Broker. The VES message formatting application, ves_app.py, receives metrics from
+the Kafka broker, normalizes the data to VES message format for forwarding to the VES collector.
+The VES message formatting application will be simply referred to as the "VES application"
+within this user guide.
+The VES application can be run in host mode (baremetal), hypervisor mode (on a host with a hypervisor
+and VMs running) or guest mode (within a VM). The main software blocks that are required to run the
+VES application demo are:
+
+ 1. Kafka
+ 2. Collectd
+ 3. VES Application
+ 4. VES Collector
Install Kafka Broker
--------------------
@@ -85,13 +95,13 @@ Install Kafka Broker
.. code:: bash
$ sudo pip install kafka-python
- $ wget "https://archive.apache.org/dist/kafka/0.11.0.0/kafka_2.11-0.11.0.0.tgz"
- $ tar -xvzf kafka_2.11-0.11.0.0.tgz
- $ sed -i -- 's/#delete.topic.enable=true/delete.topic.enable=true/' kafka_2.11-0.11.0.0/config/server.properties
- $ sudo nohup kafka_2.11-0.11.0.0/bin/kafka-server-start.sh \
- kafka_2.11-0.11.0.0/config/server.properties > kafka_2.11-0.11.0.0/kafka.log 2>&1 &
+ $ wget "https://archive.apache.org/dist/kafka/1.0.0/kafka_2.11-1.0.0.tgz"
+ $ tar -xvzf kafka_2.11-1.0.0.tgz
+ $ sed -i -- 's/#delete.topic.enable=true/delete.topic.enable=true/' kafka_2.11-1.0.0/config/server.properties
+ $ sudo nohup kafka_2.11-1.0.0/bin/kafka-server-start.sh \
+ kafka_2.11-1.0.0/config/server.properties > kafka_2.11-1.0.0/kafka.log 2>&1 &
- .. note:: If Kafka server fails to start, please check if the system IP
+ .. note:: If Kafka server fails to start, please check if the platform IP
address is associated with the hostname in the static host lookup
table. If it doesn't exist, use the following command to add it.
@@ -108,14 +118,14 @@ Install Kafka Broker
.. code:: bash
- $ echo "Hello, World" | kafka_2.11-0.11.0.0/bin/kafka-console-producer.sh \
+ $ echo "Hello, World" | kafka_2.11-1.0.0/bin/kafka-console-producer.sh \
--broker-list localhost:9092 --topic TopicTest > /dev/null
Consumer (Receive "Hello World"):
.. code:: bash
- $ kafka_2.11-0.11.0.0/bin/kafka-console-consumer.sh --zookeeper \
+ $ kafka_2.11-1.0.0/bin/kafka-console-consumer.sh --zookeeper \
localhost:2181 --topic TopicTest --from-beginning --max-messages 1 --timeout-ms 3000
@@ -162,62 +172,112 @@ Build collectd with Kafka support:
$ ./configure --with-librdkafka=/usr --without-perl-bindings --enable-perl=no
$ make && sudo make install
-Configure and start collectd. Create ``/opt/collectd/etc/collectd.conf``
-collectd configuration file as following:
+.. note::
+
+ If installing from git repository ``collectd.conf`` configuration file will be located in
+ directory ``/opt/collectd/etc/``. If installing via a package manager ``collectd.conf``
+ configuration file will be located in directory ``/etc/collectd/``
-.. note:: The following collectd configuration file allows user to run VES
- application in the guest mode. To run the VES in host mode, please follow
- the `Configure VES in host mode`_ steps.
+Configure and start collectd. Modify Collectd configuration file ``collectd.conf``
+as following:
-.. include:: collectd-ves-guest.conf
- :code: bash
+- Within a VM: `Setup VES application (guest mode)`_
+- On Host with VMs: `Setup VES application (hypervisor mode)`_
+- No Virtualization: `Setup VES application (host mode)`_
Start collectd process as a service as described in :ref:`install-collectd-as-a-service`.
+Setup VES application (guest mode)
+----------------------------------
-Setup VES Test Collector
-------------------------
+In this mode Collectd runs from within a VM and sends metrics to the VES collector.
-.. note:: Test Collector setup is required only for VES application testing
- purposes.
+.. figure:: ves-app-guest-mode.png
+
+ VES guest mode setup
Install dependencies:
.. code:: bash
- $ sudo pip install jsonschema
+ $ sudo pip install pyyaml kafka-python
-Clone VES Test Collector:
+Clone Barometer repo and start the VES application:
.. code:: bash
- $ git clone https://github.com/att/evel-test-collector.git ~/evel-test-collector
+ $ git clone https://gerrit.opnfv.org/gerrit/barometer
+ $ cd barometer/3rd_party/collectd-ves-app/ves_app
+ $ nohup python ves_app.py --events-schema=guest.yaml --config=ves_app_config.conf > ves_app.stdout.log &
-Modify VES Test Collector config file to point to existing log directory and
-schema file:
-.. code:: bash
+Modify Collectd configuration file ``collectd.conf`` as following:
- $ sed -i.back 's/^\(log_file[ ]*=[ ]*\).*/\1collector.log/' ~/evel-test-collector/config/collector.conf
- $ sed -i.back 's/^\(schema_file[ ]*=.*\)event_format_updated.json$/\1CommonEventFormat.json/' ~/evel-test-collector/config/collector.conf
+.. include:: collectd-ves-guest.conf
+ :code: bash
-Start VES Test Collector:
-.. code:: bash
+Start collectd process as a service as described in :ref:`install-collectd-as-a-service`.
- $ cd ~/evel-test-collector/code/collector
- $ nohup python ./collector.py --config ../../config/collector.conf > collector.stdout.log &
+.. note::
+ The above configuration is used for a localhost. The VES application can be
+ configured to use remote VES collector and remote Kafka server. To do
+ so, the IP addresses/host names needs to be changed in ``collector.conf``
+ and ``ves_app_config.conf`` files accordingly.
-Setup VES application (guest mode)
-----------------------------------
-This mode is used to collect guest VM statistics provided by collectd
-and send those metrics into the VES collector.
+Setup VES application (hypervisor mode)
+----------------------------------------
-.. figure:: ves-app-guest-mode.png
+This mode is used to collect hypervisor statistics about guest VMs and to send
+those metrics into the VES collector. Also, this mode collects host statistics
+and sends them as part of the guest VES message.
- VES guest mode setup
+.. figure:: ves-app-hypervisor-mode.png
+
+ VES hypervisor mode setup
+
+Running the VES in hypervisor mode looks like steps described in
+`Setup VES application (guest mode)`_ but with the following exceptions:
+
+- The ``hypervisor.yaml`` configuration file should be used instead of ``guest.yaml``
+ file when VES application is running.
+
+- Collectd should be running on hypervisor machine only.
+
+- Additional ``libvirtd`` dependencies need to be installed on the host where
+ collectd daemon is running. To install those dependencies, see :ref:`virt-plugin`
+ section of Barometer user guide.
+
+- The next (minimum) configuration needs to be provided to collectd to be able
+ to generate the VES message to VES collector.
+
+.. note::
+ At least one VM instance should be up and running by hypervisor on the host.
+
+.. include:: collectd-ves-hypervisor.conf
+ :code: bash
+
+
+Start collectd process as a service as described in :ref:`install-collectd-as-a-service`.
+
+.. note::
+
+ The above configuration is used for a localhost. The VES application can be
+ configured to use remote VES collector and remote Kafka server. To do
+ so, the IP addresses/host names needs to be changed in ``collector.conf``
+ and ``ves_app_config.conf`` files accordingly.
+
+
+.. note:: The list of plugins can be extended depending on your needs.
+
+
+Setup VES application (host mode)
+----------------------------------
+
+This mode is used to collect platform wide metrics and to send those metrics into the VES collector.
+It is most suitable for running within a baremetal platform.
Install dependencies:
@@ -229,58 +289,66 @@ Clone Barometer repo and start the VES application:
.. code:: bash
- $ git clone https://gerrit.opnfv.org/gerrit/barometer ~/barometer
- $ cd ~/barometer/3rd_party/collectd-ves-app/ves_app
- $ nohup python ves_app.py --events-schema=guest.yaml --config=ves_app_config.conf > ves_app.stdout.log &
+ $ git clone https://gerrit.opnfv.org/gerrit/barometer
+ $ cd barometer/3rd_party/collectd-ves-app/ves_app
+ $ nohup python ves_app.py --events-schema=host.yaml --config=ves_app_config.conf > ves_app.stdout.log &
+
+
+.. figure:: ves-app-host-mode.png
+
+ VES Native mode setup
+
+Modify collectd configuration file ``collectd.conf`` as following:
+
+.. include:: collectd-ves-host.conf
+ :code: bash
+
+Start collectd process as a service as described in :ref:`install-collectd-as-a-service`.
.. note::
The above configuration is used for a localhost. The VES application can be
- configured to use remote real VES collector and remote Kafka server. To do
+ configured to use remote VES collector and remote Kafka server. To do
so, the IP addresses/host names needs to be changed in ``collector.conf``
and ``ves_app_config.conf`` files accordingly.
-Configure VES in host mode
---------------------------
+.. note:: The list of plugins can be extended depending on your needs.
-This mode is used to collect hypervisor statistics about guest VMs and to send
-those metrics into the VES collector. Also, this mode collects host statistics
-and send them as part of the guest VES message.
-.. figure:: ves-app-host-mode.png
- VES host mode setup
+Setup VES Test Collector
+------------------------
-Running the VES in host mode looks like steps described in
-`Setup VES application (guest mode)`_ but with the following exceptions:
+.. note:: Test Collector setup is required only for VES application testing
+ purposes.
-- The ``host.yaml`` configuration file should be used instead of ``guest.yaml``
- file when VES application is running.
+Install dependencies:
-- Collectd should be running on host machine only.
+.. code:: bash
-- Addition ``libvirtd`` dependencies needs to be installed on a host where
- collectd daemon is running. To install those dependencies, see :ref:`virt-plugin`
- section of Barometer user guide.
+ $ sudo pip install jsonschema
-- At least one VM instance should be up and running by hypervisor on the host.
+Clone VES Test Collector:
-- The next (minimum) configuration needs to be provided to collectd to be able
- to generate the VES message to VES collector.
+.. code:: bash
+
+ $ git clone https://github.com/att/evel-test-collector.git ~/evel-test-collector
- .. include:: collectd-ves-host.conf
- :code: bash
+Modify VES Test Collector config file to point to existing log directory and
+schema file:
+
+.. code:: bash
- to apply this configuration, the ``/opt/collectd/etc/collectd.conf`` file
- needs to be modified based on example above and collectd daemon needs to
- be restarted using the command below:
+ $ sed -i.back 's/^\(log_file[ ]*=[ ]*\).*/\1collector.log/' ~/evel-test-collector/config/collector.conf
+ $ sed -i.back 's/^\(schema_file[ ]*=.*\)event_format_updated.json$/\1CommonEventFormat.json/' ~/evel-test-collector/config/collector.conf
- .. code:: bash
+Start VES Test Collector:
- $ sudo systemctl restart collectd
+.. code:: bash
-.. note:: The list of the plugins can be extented depends on your needs.
+ $ cd ~/evel-test-collector/code/collector
+ $ nohup python ./collector.py --config ../../config/collector.conf > collector.stdout.log &
VES application configuration description
@@ -299,7 +367,7 @@ REST resources are of the form::
{ServerRoot}/eventListener/v{apiVersion}/eventBatch`
Within the VES directory (``3rd_party/collectd-ves-app/ves_app``) there is a
-configuration file called ``ves_app.conf``. The description of the
+configuration file called ``ves_app_config.conf``. The description of the
configuration options are described below:
**Domain** *"host"*
@@ -310,10 +378,10 @@ configuration options are described below:
VES port (default: ``30000``)
**Path** *"path"*
- Used as the "optionalRoutingPath" element in the REST path (default: empty)
+ Used as the "optionalRoutingPath" element in the REST path (default: ``vendor_event_listener``)
**Topic** *"path"*
- Used as the "topicName" element in the REST path (default: empty)
+ Used as the "topicName" element in the REST path (default: ``example_vnf``)
**UseHttps** *true|false*
Allow application to use HTTPS instead of HTTP (default: ``false``)
@@ -329,7 +397,7 @@ configuration options are described below:
Vendor Event Listener (default: ``20``)
**ApiVersion** *version*
- Used as the "apiVersion" element in the REST path (default: ``5.1``)
+ Used as the "apiVersion" element in the REST path (default: ``3``)
**KafkaPort** *port*
Kafka Port (Default ``9092``)
diff --git a/docs/release/userguide/docker.userguide.rst b/docs/release/userguide/docker.userguide.rst
index be9f6cae..fe0ea5ac 100644
--- a/docs/release/userguide/docker.userguide.rst
+++ b/docs/release/userguide/docker.userguide.rst
@@ -291,69 +291,6 @@ Check your docker image is running
Build the influxdb + Grafana docker images
------------------------------------------
-
-
-Install docker-compose
-^^^^^^^^^^^^^^^^^^^^^^
-
-On the node where you want to run influxdb + grafana:
-1. Start by installing docker compose
-
-.. code:: bash
-
- $ sudo curl -L https://github.com/docker/compose/releases/download/1.17.0/docker-compose-`uname -s`-`uname -m` -o /usr/bin/docker-compose
-
-.. note::
- Use the latest Compose release number in the download command. The above command is an example,
- and it may become out-of-date. To ensure you have the latest version, check the Compose repository
- release page on GitHub.
-
-2. Apply executable permissions to the binary:
-
-.. code:: bash
-
- $ sudo chmod +x /usr/bin/docker-compose
-
-3. Test the installation.
-
-.. code:: bash
-
- $ sudo docker-compose --version
-
-
-Download the InfluxDB and Grafana docker image
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If you wish to use pre-built barometer project's influxdb and grafana images, you can pull the
-images from https://hub.docker.com/r/opnfv/barometer-influxdb/ and https://hub.docker.com/r/opnfv/barometer-grafana/
-
-.. note::
- If your preference is to build images locally please see sections `Build the InfluxDB Image`_ and
- `Build the Grafana Image`_
-
-.. code:: bash
-
- $ docker pull opnfv/barometer-influxdb
- $ docker pull opnfv/barometer-grafana
-
-Run the Influxdb and Grafana Images
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Launch containers:
-
-.. code:: bash
-
- $ cd barometer/docker/
- $ sudo docker-compose up -d
-
-Check your docker images are running
-
-.. code:: bash
-
- $ sudo docker ps
-
-Connect to <host_ip>:3000 with a browser and log into grafana: admin/admin
-
-
Build the InfluxDB Image
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -410,6 +347,39 @@ Output should contain an influxdb image:
REPOSITORY TAG IMAGE ID CREATED SIZE
barometer-grafana latest 05f2a3edd96b 3 hours ago 1.2GB
+
+Download the InfluxDB and Grafana docker image
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+If you wish to use pre-built barometer project's influxdb and grafana images, you can pull the
+images from https://hub.docker.com/r/opnfv/barometer-influxdb/ and https://hub.docker.com/r/opnfv/barometer-grafana/
+
+.. note::
+ If your preference is to build images locally please see sections `Build the InfluxDB Image`_ and
+ `Build the Grafana Image`_
+
+.. code:: bash
+
+ $ docker pull opnfv/barometer-influxdb
+ $ docker pull opnfv/barometer-grafana
+
+Run the Influxdb and Grafana Images
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Launch containers:
+
+.. code:: bash
+
+ $ cd barometer/docker/
+ $ sudo docker-compose up -d
+
+Check your docker images are running
+
+.. code:: bash
+
+ $ sudo docker ps
+
+Connect to <host_ip>:3000 with a browser and log into grafana: admin/admin
+
Testing the docker image
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/release/userguide/ves-app-host-mode.png b/docs/release/userguide/ves-app-host-mode.png
index 5a58787c..5a21d3a8 100644
--- a/docs/release/userguide/ves-app-host-mode.png
+++ b/docs/release/userguide/ves-app-host-mode.png
Binary files differ
diff --git a/docs/release/userguide/ves-app-hypervisor-mode.png b/docs/release/userguide/ves-app-hypervisor-mode.png
new file mode 100644
index 00000000..5a58787c
--- /dev/null
+++ b/docs/release/userguide/ves-app-hypervisor-mode.png
Binary files differ