-rw-r--r--  .gitlab-ci.yml | 165
-rw-r--r--  3rd_party/collectd-ves-app/ves_app/normalizer.py | 38
-rw-r--r--  3rd_party/collectd-ves-app/ves_app/ves_app.py | 14
-rwxr-xr-x  3rd_party/ovs_pmd_stats/ovs_pmd_stats.py | 3
-rw-r--r--  INFO | 4
-rw-r--r--  INFO.yaml | 56
-rw-r--r--  README.rst | 38
-rw-r--r--  baro_tests/config_server.py | 5
-rw-r--r--  baro_tests/tests.py | 5
-rw-r--r--  ci/barometer-build.sh | 24
-rw-r--r--  ci/barometer-upload-artifact.sh | 74
-rwxr-xr-x  ci/install_dependencies.sh | 32
-rw-r--r--  ci/utility/collectd.spec.patch | 95
-rwxr-xr-x  ci/utility/collectd_build_rpm.sh | 15
-rwxr-xr-x  ci/utility/package-list.sh | 7
-rwxr-xr-x  ci/utility/rpms_check.sh | 14
-rw-r--r--  ci/utility/rpms_list | 9
-rw-r--r--  docker/README_collectd | 22
-rw-r--r--  docker/ansible/collectd6_test.yml | 143
-rw-r--r--  docker/ansible/collectd_build.yml (renamed from docker/ansible/roles/config_files/templates/logfile.conf.j2) | 20
-rw-r--r--  docker/ansible/collectd_ves.yml | 5
-rw-r--r--  docker/ansible/default.inv | 5
-rw-r--r--  docker/ansible/roles/build_collectd/tasks/main.yml | 71
-rw-r--r--  docker/ansible/roles/config_files/defaults/main.yml | 60
-rw-r--r--  docker/ansible/roles/config_files/tasks/capabilities.yml (renamed from docker/ansible/roles/config_files/templates/csv.conf.j2) | 14
-rw-r--r--  docker/ansible/roles/config_files/tasks/csv.yml | 7
-rw-r--r--  docker/ansible/roles/config_files/tasks/default_read_import.yml | 108
-rw-r--r--  docker/ansible/roles/config_files/tasks/default_read_plugins.yml | 34
-rw-r--r--  docker/ansible/roles/config_files/tasks/dpdk.yml | 19
-rw-r--r--  docker/ansible/roles/config_files/tasks/exec.yml | 8
-rw-r--r--  docker/ansible/roles/config_files/tasks/hugepages.yml | 9
-rw-r--r--  docker/ansible/roles/config_files/tasks/ipmi.yml | 13
-rw-r--r--  docker/ansible/roles/config_files/tasks/kafka.yml | 9
-rw-r--r--  docker/ansible/roles/config_files/tasks/logfile.yml | 10
-rw-r--r--  docker/ansible/roles/config_files/tasks/logparser.yml | 5
-rw-r--r--  docker/ansible/roles/config_files/tasks/main.yml | 56
-rw-r--r--  docker/ansible/roles/config_files/tasks/make_mcelog.yml | 5
-rw-r--r--  docker/ansible/roles/config_files/tasks/mcelog.yml | 11
-rw-r--r--  docker/ansible/roles/config_files/tasks/network.yml | 22
-rw-r--r--  docker/ansible/roles/config_files/tasks/ovs.yml | 53
-rw-r--r--  docker/ansible/roles/config_files/tasks/pmu.yml | 7
-rw-r--r--  docker/ansible/roles/config_files/tasks/prometheus.yml | 9
-rw-r--r--  docker/ansible/roles/config_files/tasks/rdt.yml | 9
-rw-r--r--  docker/ansible/roles/config_files/tasks/snmp_agent.yml | 4
-rw-r--r--  docker/ansible/roles/config_files/tasks/syslog.yml | 7
-rw-r--r--  docker/ansible/roles/config_files/tasks/uuid.yml | 7
-rw-r--r--  docker/ansible/roles/config_files/tasks/virt.yml | 16
-rw-r--r--  docker/ansible/roles/config_files/tasks/vswitch.yml | 49
-rw-r--r--  docker/ansible/roles/config_files/templates/default_read_plugins.conf.j2 | 34
-rw-r--r--  docker/ansible/roles/config_files/templates/dpdkevents.conf.j2 | 36
-rw-r--r--  docker/ansible/roles/config_files/templates/dpdkstat.conf.j2 | 25
-rw-r--r--  docker/ansible/roles/config_files/templates/exec.conf.j2 | 22
-rw-r--r--  docker/ansible/roles/config_files/templates/hugepages.conf.j2 | 24
-rw-r--r--  docker/ansible/roles/config_files/templates/intel_pmu.conf.j2 | 25
-rw-r--r--  docker/ansible/roles/config_files/templates/ipmi.conf.j2 | 45
-rw-r--r--  docker/ansible/roles/config_files/templates/kafka.conf.j2 | 22
-rw-r--r--  docker/ansible/roles/config_files/templates/latest/snmp_agent.conf.j2 (renamed from docker/ansible/roles/config_files/templates/master/snmp_agent.conf.j2) | 5
-rw-r--r--  docker/ansible/roles/config_files/templates/logparser.conf.j2 (renamed from docker/ansible/roles/config_files/templates/experimental/logparser.conf.j2) | 5
-rw-r--r--  docker/ansible/roles/config_files/templates/ovs_events.conf.j2 | 26
-rw-r--r--  docker/ansible/roles/config_files/templates/ovs_stats.conf.j2 | 25
-rw-r--r--  docker/ansible/roles/config_files/templates/prometheus.conf.j2 | 19
-rw-r--r--  docker/ansible/roles/config_files/templates/snmp_agent.conf.j2 | 17
-rw-r--r--  docker/ansible/roles/config_files/templates/syslog.conf.j2 | 21
-rw-r--r--  docker/ansible/roles/config_files/templates/virt.conf.j2 | 32
-rw-r--r--  docker/ansible/roles/config_files/vars/main.yml | 79
-rw-r--r--  docker/ansible/roles/install_docker/tasks/fedora.yml | 47
-rw-r--r--  docker/ansible/roles/install_docker/tasks/main.yml | 6
-rw-r--r--  docker/ansible/roles/install_docker/tasks/ubuntu.yml | 11
-rw-r--r--  docker/ansible/roles/run_collectd/tasks/main.yml | 33
-rw-r--r--  docker/ansible/roles/run_collectd/vars/main.yml | 10
-rw-r--r--  docker/ansible/roles/run_grafana/tasks/main.yml | 6
-rw-r--r--  docker/ansible/roles/run_influxdb/tasks/main.yml | 8
-rw-r--r--  docker/ansible/roles/run_kafka/tasks/main.yml | 10
-rw-r--r--  docker/ansible/roles/run_kafka/vars/main.yml | 7
-rw-r--r--  docker/ansible/roles/run_prometheus/templates/prometheus.yml | 15
-rw-r--r--  docker/ansible/roles/run_ves/tasks/main.yml | 10
-rw-r--r--  docker/barometer-collectd-experimental/Dockerfile | 63
-rwxr-xr-x  docker/barometer-collectd-experimental/collectd_apply_pull_request.sh | 39
-rw-r--r--  docker/barometer-collectd-experimental/run_collectd.sh | 5
-rw-r--r--  docker/barometer-collectd-latest/Dockerfile | 36
-rw-r--r--  docker/barometer-collectd-latest/run_collectd.sh (renamed from docker/barometer-collectd-master/run_collectd.sh) | 5
-rw-r--r--  docker/barometer-collectd-master/Dockerfile | 30
-rw-r--r--  docker/barometer-collectd/Dockerfile | 33
-rw-r--r--  docker/barometer-collectd/run_collectd.sh | 5
-rw-r--r--  docker/barometer-grafana/Dockerfile | 15
-rwxr-xr-x  docker/barometer-grafana/dashboards/configure_grafana.sh | 6
-rw-r--r--  docker/barometer-grafana/dashboards/cpu_usage_dashboard.json | 4
-rw-r--r--  docker/barometer-influxdb/Dockerfile | 17
-rw-r--r--  docker/barometer-kafka/Dockerfile | 15
-rwxr-xr-x  docker/barometer-kafka/start_kafka.sh | 5
-rw-r--r--  docker/barometer-snmp/Dockerfile | 15
-rw-r--r--  docker/barometer-snmp/snmpd.conf | 6
-rw-r--r--  docker/barometer-tests/Dockerfile | 18
-rw-r--r--  docker/barometer-tests/testcases.yaml | 21
-rw-r--r--  docker/barometer-ves/Dockerfile | 6
-rw-r--r--  docker/barometer-ves/start_ves_app.sh | 2
-rw-r--r--  docker/flask_app/Dockerfile | 25
-rw-r--r--  docker/flask_app/README | 45
-rw-r--r--  docker/flask_app/flask_app.py | 16
-rw-r--r--  docker/flask_app/requirements.txt | 1
-rw-r--r--  docker/ves/Dockerfile | 15
-rw-r--r--  docs/conf.py | 2
-rw-r--r--  docs/development/requirements/01-intro.rst | 4
-rw-r--r--  docs/development/requirements/03-dpdk.rst | 4
-rw-r--r--  docs/index.rst | 9
-rw-r--r--  docs/release/configguide/featureconfig.rst | 48
-rw-r--r--  docs/release/configguide/index.rst | 8
-rw-r--r--  docs/release/configguide/postinstall.rst | 172
-rw-r--r--  docs/release/release-notes/config.yaml | 52
-rw-r--r--  docs/release/release-notes/index.rst | 13
-rw-r--r--  docs/release/release-notes/kali-release-notes.rst | 40
-rw-r--r--  docs/release/release-notes/lakelse-release-notes.rst | 164
-rw-r--r--  docs/release/release-notes/notes/.placeholder | 0
-rw-r--r--  docs/release/release-notes/notes/add-reno-12eb20e3448b663b.yaml | 7
-rw-r--r--  docs/release/release-notes/notes/add_unix_sock-e29efe16156c5c8e.yaml | 11
-rw-r--r--  docs/release/release-notes/notes/ansible-build-containers-b4a4cc9cb70f83b3.yaml | 11
-rw-r--r--  docs/release/release-notes/notes/anuket_containers-21b4206cb26c9975.yaml | 12
-rw-r--r--  docs/release/release-notes/notes/collectd-5-v-6-testing-cc821b32bad2794c.yaml | 10
-rw-r--r--  docs/release/release-notes/notes/collectd-6-testing-flask-app-2bb0ca1326775dd8.yaml | 3
-rw-r--r--  docs/release/release-notes/notes/remove_dpdk_stats_events_plugins-59f366855f6e4261.yaml | 8
-rw-r--r--  docs/release/release-notes/notes/update-apply-pr-script-46e6d547d331c5f2.yaml | 3
-rw-r--r--  docs/release/release-notes/notes/update-grafana-9bee82ecfa11f54a.yaml | 6
-rw-r--r--  docs/release/release-notes/notes/update_logparser_config-0db3d2746e6ad582.yaml | 6
-rw-r--r--  docs/release/release-notes/old-release-notes.rst (renamed from docs/release/release-notes/release-notes.rst) | 6
-rw-r--r--  docs/release/release-notes/unreleased.rst | 10
-rw-r--r--  docs/release/scenarios/index.rst | 16
-rw-r--r--  docs/release/scenarios/os-nosdn-bar-ha/index.rst | 15
-rw-r--r--  docs/release/scenarios/os-nosdn-bar-ha/scenario.description.rst | 61
-rw-r--r--  docs/release/scenarios/os-nosdn-bar-noha/index.rst | 15
-rw-r--r--  docs/release/scenarios/os-nosdn-bar-noha/scenario.description.rst | 61
-rw-r--r--  docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/scenario.description.rst | 118
-rw-r--r--  docs/release/userguide/collectd.ves.userguide.rst | 18
-rw-r--r--  docs/release/userguide/feature.userguide.rst | 398
-rw-r--r--  docs/release/userguide/index.rst | 15
-rw-r--r--  docs/release/userguide/installguide.docker.rst (renamed from docs/release/userguide/docker.userguide.rst) | 594
-rw-r--r--  docs/release/userguide/installguide.oneclick.rst | 410
-rw-r--r--  docs/requirements.txt | 1
-rw-r--r--  docs/testing/index.rst | 80
-rw-r--r--  requirements.txt | 1
-rw-r--r--  requirements.yml | 4
-rw-r--r--  src/Makefile | 19
-rw-r--r--  src/collectd-openstack-plugins/Makefile | 5
-rw-r--r--  src/collectd/Makefile | 195
-rwxr-xr-x  src/collectd/collectd_apply_pull_request.sh | 50
-rw-r--r--  src/collectd/collectd_sample_configs-experimental/README (renamed from docker/barometer-collectd-experimental/experimental-configs/README) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/capabilities.conf (renamed from docker/ansible/roles/config_files/templates/uuid.conf.j2) | 10
-rw-r--r--  src/collectd/collectd_sample_configs-latest/csv.conf (renamed from src/collectd/collectd_sample_configs-master/csv.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/default_plugins.conf (renamed from src/collectd/collectd_sample_configs-master/default_plugins.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/exec.conf (renamed from src/collectd/collectd_sample_configs-master/exec.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/hugepages.conf (renamed from src/collectd/collectd_sample_configs-master/hugepages.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/intel_pmu.conf (renamed from src/collectd/collectd_sample_configs-master/intel_pmu.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/kafka.conf (renamed from src/collectd/collectd_sample_configs-master/kafka.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/logparser.conf (renamed from docker/barometer-collectd-experimental/experimental-configs/logparser.conf) | 5
-rw-r--r--  src/collectd/collectd_sample_configs-latest/mcelog.conf (renamed from src/collectd/collectd_sample_configs-master/mcelog.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/network.conf (renamed from src/collectd/collectd_sample_configs-master/network.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/ovs_events.conf (renamed from src/collectd/collectd_sample_configs-master/ovs_events.conf) | 0
-rwxr-xr-x  src/collectd/collectd_sample_configs-latest/ovs_pmd_stats.sh (renamed from src/collectd/collectd_sample_configs-master/ovs_pmd_stats.sh) | 3
-rw-r--r--  src/collectd/collectd_sample_configs-latest/ovs_stats.conf (renamed from src/collectd/collectd_sample_configs-master/ovs_stats.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/pcie_errors.conf (renamed from src/collectd/collectd_sample_configs-master/pcie_errors.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/prometheus.conf (renamed from src/collectd/collectd_sample_configs-master/prometheus.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/rdt.conf (renamed from src/collectd/collectd_sample_configs-master/rdt.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/snmp_agent.conf (renamed from src/collectd/collectd_sample_configs-master/snmp_agent.conf) | 0
-rw-r--r--  src/collectd/collectd_sample_configs-latest/virt.conf (renamed from src/collectd/collectd_sample_configs-master/virt.conf) | 0
-rwxr-xr-x  src/collectd/collectd_sample_configs-latest/write_notification.sh (renamed from src/collectd/collectd_sample_configs-master/write_notification.sh) | 5
-rw-r--r--  src/collectd/collectd_sample_configs-master/dpdkevents.conf | 35
-rw-r--r--  src/collectd/collectd_sample_configs-master/dpdkstat.conf | 23
-rw-r--r--  src/collectd/collectd_sample_configs/capabilities.conf (renamed from docker/ansible/roles/config_files/templates/network.conf.j2) | 8
-rw-r--r--  src/collectd/collectd_sample_configs/dpdk_telemetry.conf (renamed from docker/ansible/roles/config_files/templates/rdt.conf.j2) | 12
-rw-r--r--  src/collectd/collectd_sample_configs/dpdkevents.conf | 35
-rw-r--r--  src/collectd/collectd_sample_configs/dpdkstat.conf | 24
-rw-r--r--  src/collectd/collectd_sample_configs/intel_pmu.conf | 11
-rw-r--r--  src/collectd/collectd_sample_configs/logparser.conf | 75
-rwxr-xr-x  src/collectd/collectd_sample_configs/ovs_pmd_stats.sh | 5
-rw-r--r--  src/collectd/collectd_sample_configs/snmp_agent.conf | 4
-rwxr-xr-x  src/collectd/collectd_sample_configs/write_notification.sh | 5
-rwxr-xr-x  src/collectd/include_config.sh | 5
-rwxr-xr-x  src/collectd/ovs_pmd_stats_config.sh | 5
-rwxr-xr-x  src/collectd/snmp_mib_config.sh | 5
-rw-r--r--  src/dpdk/Makefile | 20
-rw-r--r--  src/libpqos/Makefile | 9
-rw-r--r--  src/librdkafka/Makefile | 5
-rw-r--r--  src/package-list.mk | 41
-rw-r--r--  src/pmu-tools/Makefile | 5
-rwxr-xr-x  systems/centos/7/build_base_machine.sh | 27
-rwxr-xr-x  systems/centos/8/build_base_machine.sh | 91
-rwxr-xr-x  systems/fedora/22/build_base_machine.sh | 5
-rwxr-xr-x  systems/rhel/7/build_base_machine.sh | 10
-rwxr-xr-x  systems/ubuntu/16.04/build_base_machine.sh | 14
-rw-r--r--  tox.ini | 12
189 files changed, 3350 insertions(+), 2337 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000..e07d405d
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,165 @@
+---
+include:
+ - project: anuket/releng
+ file: '/gitlab-templates/RTD.gitlab-ci.yml'
+ - project: anuket/releng
+ file: '/gitlab-templates/Docker.gitlab-ci.yml'
+ - project: anuket/releng
+ file: '/gitlab-templates/GoogleStorage.gitlab-ci.yml'
+
+variables:
+ DOCKER_REGISTRY: docker.io
+
+.barometer-build-script: &barometer-build-script
+ before_script:
+ - ./systems/centos/8/build_base_machine.sh
+ script:
+ - pwd
+ - cd src
+ - |
+ echo -e "\e[0Ksection_start:`date +%s`:make_clobber\r\e[0KMake Clobber"
+ make clobber
+ echo -e "\e[0Ksection_end:`date +%s`:make_clobber\r\e[0K"
+ - |
+ echo -e "\e[0Ksection_start:`date +%s`:make\r\e[0KMake"
+ make
+ echo -e "\e[0Ksection_end:`date +%s`:make\r\e[0K"
+
+# Docker Builds
+docker-build-collectd:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-collectd"
+ DOCKER_BUILDCONTEXT: "docker/barometer-collectd"
+ DOCKER_FILEPATH: "docker/barometer-collectd/Dockerfile"
+
+docker-build-collectd-latest:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-collectd-latest"
+ DOCKER_FILEPATH: "docker/barometer-collectd-latest/Dockerfile"
+
+docker-build-dma:
+ extends: .docker-build-and-push
+ # Failing since 2019
+ allow_failure: true
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-dma"
+ DOCKER_BUILDCONTEXT: "docker/barometer-dma"
+ DOCKER_FILEPATH: "docker/barometer-dma/Dockerfile"
+
+docker-build-collectd-experimental:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-collectd-latest"
+ DOCKER_FILEPATH: "docker/barometer-collectd-experimental/Dockerfile"
+
+docker-build-grafana:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-grafana"
+ DOCKER_BUILDCONTEXT: "docker/barometer-grafana"
+ DOCKER_FILEPATH: "docker/barometer-grafana/Dockerfile"
+
+docker-build-influxdb:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-influxdb"
+ DOCKER_BUILDCONTEXT: "docker/barometer-influxdb"
+ DOCKER_FILEPATH: "docker/barometer-influxdb/Dockerfile"
+
+docker-build-kafka:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-kafka"
+ DOCKER_BUILDCONTEXT: "docker/barometer-kafka"
+ DOCKER_FILEPATH: "docker/barometer-kafka/Dockerfile"
+
+docker-build-ves:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-ves"
+ DOCKER_BUILDCONTEXT: "docker/barometer-ves"
+ DOCKER_FILEPATH: "docker/barometer-ves/Dockerfile"
+
+docker-build-snmp:
+ extends: .docker-build-and-push
+ variables:
+ DOCKER_IMAGE: "$DOCKER_ORGANIZATION/barometer-snmp"
+ DOCKER_BUILDCONTEXT: "docker/barometer-snmp"
+ DOCKER_FILEPATH: "docker/barometer-snmp/Dockerfile"
+
+# Build jobs
+barometer-build:
+ image: centos:8
+ stage: deploy
+ <<: *barometer-build-script
+ variables:
+ DOCKER: y
+ rules:
+ - changes:
+ - "docs/**"
+ - .gitignore
+ when: never
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ - if: '$CI_PIPELINE_SOURCE == "schedule"'
+ when: never
+ - if: $CI_COMMIT_TAG
+ - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+
+# Scheduled Jobs
+barometer-daily:
+ image: centos:8
+ stage: build
+ <<: *barometer-build-script
+ variables:
+ DOCKER: y
+ script:
+ - !reference [.gsutil-install, script]
+ - cd ci/
+ - . barometer-build.sh
+ - . barometer-upload-artifact.sh
+ rules:
+ - if: $CI_PIPELINE_SOURCE == "schedule" && $BAROMETER_SCHEDULE == "true"
+
+barometer-plugins-test:
+ image: docker:latest
+ stage: test
+ interruptible: true
+ # Build has been failing for awhile
+ allow_failure: true
+ needs: []
+ services:
+ - docker:dind
+ before_script:
+ - docker login -u "$DOCKER_USERNAME" -p "$DOCKER_TOKEN" $DOCKER_REGISTRY
+ script:
+ - docker pull $DOCKER_ORGANIZATION/barometer-collectd-tests:latest || true
+ - docker pull $DOCKER_ORGANIZATION/barometer-collectd-tests-base:latest || true
+ - >
+ docker build
+ -t $DOCKER_ORGANIZATION/barometer-collectd-tests-base
+ -f docker/barometer-collectd-plugin-tests/Dockerfile.base
+ --network=host
+ .
+ - >
+ docker build
+ -t $DOCKER_ORGANIZATION/barometer-collectd-tests
+ -f docker/barometer-collectd-plugin-tests/Dockerfile
+ --network=host
+ .
+ - >
+ docker run
+ -t
+ -v `pwd`/src/collectd/collectd_sample_configs-master:/opt/collectd/etc/collectd.conf.d
+ -v /var/run:/var/run
+ -v /tmp:/tmp
+ -v `pwd`/plugin_test:/tests
+ --net=host
+ --privileged
+ $DOCKER_ORGANIZATION/barometer-collectd-tests:latest
+ - docker container rm $(sudo docker container ls -aq)
+ - docker rmi $DOCKER_ORGANIZATION/barometer-collectd-tests:latest
+ - docker rmi $DOCKER_ORGANIZATION/barometer-collectd-tests-base:latest
+ rules:
+ - if: $CI_PIPELINE_SOURCE == "schedule" && $BAROMETER_SCHEDULE == "true"
diff --git a/3rd_party/collectd-ves-app/ves_app/normalizer.py b/3rd_party/collectd-ves-app/ves_app/normalizer.py
index dcb02f27..95feead1 100644
--- a/3rd_party/collectd-ves-app/ves_app/normalizer.py
+++ b/3rd_party/collectd-ves-app/ves_app/normalizer.py
@@ -34,11 +34,9 @@ except ImportError:
# import synchronized queue
try:
- # python 2.x
- import Queue as queue
-except ImportError:
- # python 3.x
import queue
+except ImportError:
+ import Queue as queue
class Config(object):
@@ -80,7 +78,7 @@ class ItemIterator(object):
self._collector = collector
self._index = 0
- def next(self):
+ def __next__(self):
"""Returns next item from the list"""
if self._index == len(self._items):
raise StopIteration
@@ -151,7 +149,7 @@ class Collector(object):
def _check_aging(self):
"""Check aging time for all items"""
self.lock()
- for data_hash, data in self._metrics.items():
+ for data_hash, data in list(self._metrics.items()):
age, item = data
if ((time.time() - age) >= self._age_timeout):
# aging time has expired, remove the item from the collector
@@ -189,7 +187,7 @@ class Collector(object):
"""Returns locked (safe) item iterator"""
metrics = []
self.lock()
- for k, item in self._metrics.items():
+ for k, item in list(self._metrics.items()):
_, value = item
for select in select_list:
if value.match(**select):
@@ -220,7 +218,7 @@ class CollectdData(object):
def match(self, **kargs):
# compare the metric
- for key, value in kargs.items():
+ for key, value in list(kargs.items()):
if self.is_regular_expression(value):
if re.match(value[1:-1], getattr(self, key)) is None:
return False
@@ -323,7 +321,7 @@ class Item(yaml.YAMLObject):
class ValueItem(Item):
"""Class to process VlaueItem tag"""
- yaml_tag = u'!ValueItem'
+ yaml_tag = '!ValueItem'
@classmethod
def from_yaml(cls, loader, node):
@@ -343,7 +341,7 @@ class ValueItem(Item):
# if VALUE key isn't given, use default VALUE key
# format: `VALUE: !Number '{vl.value}'`
if value_desc is None:
- value_desc = yaml.ScalarNode(tag=u'!Number', value=u'{vl.value}')
+ value_desc = yaml.ScalarNode(tag='!Number', value='{vl.value}')
# select collectd metric based on SELECT condition
metrics = loader.collector.items(select)
assert len(metrics) < 2, \
@@ -361,7 +359,7 @@ class ValueItem(Item):
class ArrayItem(Item):
"""Class to process ArrayItem tag"""
- yaml_tag = u'!ArrayItem'
+ yaml_tag = '!ArrayItem'
@classmethod
def from_yaml(cls, loader, node):
@@ -415,12 +413,12 @@ class ArrayItem(Item):
class Measurements(ArrayItem):
"""Class to process Measurements tag"""
- yaml_tag = u'!Measurements'
+ yaml_tag = '!Measurements'
class Events(Item):
"""Class to process Events tag"""
- yaml_tag = u'!Events'
+ yaml_tag = '!Events'
@classmethod
def from_yaml(cls, loader, node):
@@ -441,7 +439,7 @@ class Events(Item):
class Bytes2Kibibytes(yaml.YAMLObject):
"""Class to process Bytes2Kibibytes tag"""
- yaml_tag = u'!Bytes2Kibibytes'
+ yaml_tag = '!Bytes2Kibibytes'
@classmethod
def from_yaml(cls, loader, node):
@@ -450,7 +448,7 @@ class Bytes2Kibibytes(yaml.YAMLObject):
class Number(yaml.YAMLObject):
"""Class to process Number tag"""
- yaml_tag = u'!Number'
+ yaml_tag = '!Number'
@classmethod
def from_yaml(cls, loader, node):
@@ -462,7 +460,7 @@ class Number(yaml.YAMLObject):
class StripExtraDash(yaml.YAMLObject):
"""Class to process StripExtraDash tag"""
- yaml_tag = u'!StripExtraDash'
+ yaml_tag = '!StripExtraDash'
@classmethod
def from_yaml(cls, loader, node):
@@ -471,7 +469,7 @@ class StripExtraDash(yaml.YAMLObject):
class MapValue(yaml.YAMLObject):
"""Class to process MapValue tag"""
- yaml_tag = u'!MapValue'
+ yaml_tag = '!MapValue'
@classmethod
def from_yaml(cls, loader, node):
@@ -485,7 +483,7 @@ class MapValue(yaml.YAMLObject):
assert val is not None, "Mandatory VALUE key isn't set"
assert val in mapping, \
'Value "{}" cannot be mapped to any of {} values'.format(
- val, mapping.keys())
+ val, list(mapping.keys()))
return mapping[val]
@@ -514,10 +512,10 @@ class Normalizer(object):
measurements.append((key, value))
if value.tag == Events.yaml_tag:
events.append((key, value))
- measurements_yaml = yaml.MappingNode(u'tag:yaml.org,2002:map',
+ measurements_yaml = yaml.MappingNode('tag:yaml.org,2002:map',
measurements)
measurements_stream = yaml.serialize(measurements_yaml)
- events_yaml = yaml.MappingNode(u'tag:yaml.org,2002:map', events)
+ events_yaml = yaml.MappingNode('tag:yaml.org,2002:map', events)
events_stream = yaml.serialize(events_yaml)
# return event & measurements definition
return events_stream, measurements_stream
diff --git a/3rd_party/collectd-ves-app/ves_app/ves_app.py b/3rd_party/collectd-ves-app/ves_app/ves_app.py
index 105c66e2..66dc8f2d 100644
--- a/3rd_party/collectd-ves-app/ves_app/ves_app.py
+++ b/3rd_party/collectd-ves-app/ves_app/ves_app.py
@@ -1,24 +1,31 @@
#!/usr/bin/env python
#
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import json
import sys
import base64
-import ConfigParser
import logging
import argparse
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
from distutils.util import strtobool
from kafka import KafkaConsumer
@@ -32,7 +39,6 @@ except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as url
-
class VESApp(Normalizer):
"""VES Application"""
@@ -110,7 +116,7 @@ class VESApp(Normalizer):
def init(self, configfile, schema_file):
if configfile is not None:
# read VES configuration file if provided
- config = ConfigParser.ConfigParser()
+ config = configparser.ConfigParser()
config.optionxform = lambda option: option
config.read(configfile)
self.config(config)
diff --git a/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py b/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py
index fc6045b9..b706bcd5 100755
--- a/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py
+++ b/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py
@@ -101,9 +101,8 @@ for el in array:
plugin_instance = el[:-1].replace(' ', '_')
else:
type_instance = el.split(':')[0].replace(' ', "_")
- value = el.split(':')[1].split(' ')[0]
+ value = el.split(':')[1].strip().split(' ')[0]
print('PUTVAL %s/%s-%s/%s-%s N:%s' % (HOSTNAME, PROG_NAME, plugin_instance, TYPE, type_instance, value))
# close socket
sock.close()
-
diff --git a/INFO b/INFO
index cb0a34d1..f3e2edfe 100644
--- a/INFO
+++ b/INFO
@@ -3,7 +3,7 @@ Project Creation Date:
Project Category:
Lifecycle State:
Primary Contact:
-Project Lead: mrunge@redhat.com
+Project Lead: efoley@redhat.com
Jira Project Name: barometer
Jira Project Prefix: barometer
Mailing list tag: []
@@ -16,7 +16,7 @@ bruce.richardson@intel.com
thomas.monjalon@6wind.com
acmorton@att.com
aasmith@redhat.com
-emma.l.foley@intel.com
+efoley@redhat.com
calin.gherghe@intel.com
mrunge@redhat.com
diff --git a/INFO.yaml b/INFO.yaml
index 04008044..4b0582f1 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -3,58 +3,48 @@ project: 'Barometer'
project_creation_date: ''
project_category: ''
lifecycle_state: ''
-project_lead: &opnfv_barometer_ptl
- name: 'Matthias Runge'
- email: 'mrunge@redhat.com'
- id: 'mrunge'
+project_lead: &anuket_barometer_ptl
+ name: 'Emma Foley'
+ email: 'efoley@redhat.com'
+ id: 'efoley'
company: 'redhat.com'
- timezone: 'Europe/Berlin'
-primary_contact: *opnfv_barometer_ptl
+ timezone: 'Europe/Dublin'
+primary_contact: *anuket_barometer_ptl
issue_tracking:
type: 'jira'
- url: 'https://jira.opnfv.org/projects/barometer'
+ url: 'https://jira.anuket.io/projects/barometer'
key: 'barometer'
mailing_list:
- type: 'mailman2'
- url: 'opnfv-tech-discuss@lists.opnfv.org'
- tag: '[barometer]'
+ type: 'groups.io'
+ url: 'anuket-tech-discuss@lists.anuket.io'
+ tag: '#barometer'
realtime_discussion:
- type: irc
- server: 'freenode.net'
- channel: '#opnfv-barometer'
+ type: slack
+ server: 'anuketworkspace.slack.com'
+ channel: '#barometer'
meetings:
- - type: 'gotomeeting+irc'
- agenda: # eg: 'https://wiki.opnfv.org/display/'
- url: # eg: 'https://global.gotomeeting.com/join/819733085'
- server: 'freenode.net'
- channel: '#opnfv-meeting'
+ - type: 'zoom'
+ agenda: 'https://wiki.anuket.io/display/HOME/Meetings'
+ url: 'https://zoom.us/j/92950950555?pwd=K2tWZWovWXJWTmNuZHIxNGlZWkpHQT0'
repeats: 'weekly'
- time: # eg: '16:00 UTC'
+ time: '17:00 Europe/Dublin'
repositories:
- 'barometer'
- 'puppet-barometer'
committers:
- - <<: *opnfv_barometer_ptl
- - name: 'Maryam Tahhan'
- email: 'maryam.tahhan@intel.com'
- company: 'intel.com'
- id: 'maryamtahhan'
- - name: 'Calin Gherghe'
- email: 'calin.gherghe@intel.com'
- company: 'intel.com'
- id: 'cgherghe'
+ - <<: *anuket_barometer_ptl
- name: 'Al Morton'
email: 'acmorton@att.com'
company: 'att.com'
id: 'acm'
- name: 'Emma Foley'
- email: 'emma.l.foley@intel.com'
- company: 'intel.com'
+ email: 'efoley@redhat.com'
+ company: 'redhat.com'
id: 'elfoley'
- - name: 'Aaron Smith'
- email: 'aasmith@redhat.com'
+ - name: 'Matthias Runge'
+ email: 'mrunge@redhat.com'
company: 'redhat.com'
- id: 'TsaLaGi'
+ id: 'mrunge'
tsc:
# yamllint disable rule:line-length
approval: ''
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..1b96a90d
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,38 @@
+Barometer
+---------
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+::
+
+ Note: this repository provides a demo implementation. It is not intended
+ for unmodified use in production. It has not been tested for production.
+
+
+
+The ability to monitor the Network Function Virtualization Infrastructure
+(NFVI) where VNFs are in operation will be a key part of Service Assurance
+within an NFV environment, in order to enforce SLAs or to detect violations,
+faults or degradation in the performance of NFVI resources so that events
+and relevant metrics are reported to higher level fault management systems.
+If fixed function appliances are going to be replaced by virtualized
+appliances the service levels, manageability and service assurance needs
+to remain consistent or improve on what is available today.
+
+As such, the NFVI needs to support the ability to monitor:
+
+#. Traffic monitoring and performance monitoring of the components that
+ provide networking functionality to the VNF, including: physical
+ interfaces, virtual switch interfaces and flows, as well as the
+ virtual interfaces themselves and their status, etc.
+#. Platform monitoring including: CPU, memory, load, cache, thermals, fan
+ speeds, voltages and machine check exceptions, etc.
+
+
+All of the statistics and events gathered must be collected in-service and
+must be capable of being reported by standard Telco mechanisms (e.g. SNMP,
+REST), for potential enforcement or correction actions. In addition, this
+information could be fed to analytics systems to enable failure prediction,
+and can also be used for intelligent workload placement.
+
+
diff --git a/baro_tests/config_server.py b/baro_tests/config_server.py
index e6d72335..a6849f05 100644
--- a/baro_tests/config_server.py
+++ b/baro_tests/config_server.py
@@ -1,18 +1,19 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2017 OPNFV
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
"""Classes used by collectd.py"""
diff --git a/baro_tests/tests.py b/baro_tests/tests.py
index 47edd778..5400510f 100644
--- a/baro_tests/tests.py
+++ b/baro_tests/tests.py
@@ -1,18 +1,19 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2017 OPNFV
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
"""Function for testing collectd plug-ins with different oup plug-ins"""
diff --git a/ci/barometer-build.sh b/ci/barometer-build.sh
new file mode 100644
index 00000000..ded45187
--- /dev/null
+++ b/ci/barometer-build.sh
@@ -0,0 +1,24 @@
+# This script is used by the barometer-daily CI job in gitlab.
+# It builds and packages collectd as an RPM
+# After this script is run, the barometer-daily job runs the
+# barometer-upload-artifact.sh script.
+set -x
+
+OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
+OPNFV_ARTIFACT_URL="$GS_URL/$OPNFV_ARTIFACT_VERSION/"
+
+# log info to console
+echo "Starting the build of Barometer RPMs"
+echo "------------------------------------"
+echo
+
+./install_dependencies.sh
+./build_rpm.sh
+cp utility/rpms_list $WORKSPACE
+
+# save information regarding artifact into file
+(
+ echo "OPNFV_ARTIFACT_VERSION=$OPNFV_ARTIFACT_VERSION"
+ echo "OPNFV_ARTIFACT_URL=$OPNFV_ARTIFACT_URL"
+) > $WORKSPACE/opnfv.properties
+
diff --git a/ci/barometer-upload-artifact.sh b/ci/barometer-upload-artifact.sh
new file mode 100644
index 00000000..f05dc2af
--- /dev/null
+++ b/ci/barometer-upload-artifact.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+RPM_LIST=$WORKSPACE/rpms_list
+RPM_WORKDIR=$WORKSPACE/rpmbuild
+RPM_DIR=$RPM_WORKDIR/RPMS/x86_64/
+cd $WORKSPACE/
+
+# source the opnfv.properties to get ARTIFACT_VERSION
+source $WORKSPACE/opnfv.properties
+
+# Check if all the appropriate RPMs were generated
+echo "Checking if all the Barometer RPMs were created"
+echo "-----------------------------------------------"
+echo
+
+if [ -d $RPM_DIR ]
+then
+ ls $RPM_DIR > list_of_gen_pack
+else
+ echo "Can't access folder $RPM_DIR with rpm packages"
+ echo "Barometer nightly build FAILED"
+ exit 1
+fi
+
+for PACKAGENAME in `cat $RPM_LIST`
+do
+ if ! grep -q $PACKAGENAME list_of_gen_pack
+ then
+ echo "$PACKAGENAME is missing"
+ echo "Barometer nightly build FAILED"
+ exit 2
+ fi
+done
+
+#remove the file you no longer need.
+rm list_of_gen_pack
+
+echo "Uploading the barometer RPMs to artifacts.opnfv.org"
+echo "---------------------------------------------------"
+echo
+
+gsutil -m cp -r $RPM_DIR/* gs://$OPNFV_ARTIFACT_URL > $WORKSPACE/gsutil.log 2>&1
+
+# Check if the RPMs were pushed
+gsutil ls gs://$OPNFV_ARTIFACT_URL > /dev/null 2>&1
+if [[ $? -ne 0 ]]; then
+ echo "Problem while uploading barometer RPMs to gs://$OPNFV_ARTIFACT_URL!"
+ echo "Check log $WORKSPACE/gsutil.log on the appropriate build server"
+ exit 1
+else
+ # upload property files only if build is successful
+ gsutil cp $WORKSPACE/opnfv.properties gs://$OPNFV_ARTIFACT_URL/opnfv.properties > gsutil.properties.log 2>&1
+ gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log 2>&1
+fi
+
+gsutil -m setmeta \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ gs://$OPNFV_ARTIFACT_URL/*.rpm > /dev/null 2>&1
+
+gsutil -m setmeta \
+ -h "Content-Type:text/html" \
+ -h "Cache-Control:private, max-age=0, no-transform" \
+ gs://$GS_URL/latest.properties \
+ gs://$OPNFV_ARTIFACT_URL/opnfv.properties > /dev/null 2>&1
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+echo "Artifact is available at $OPNFV_ARTIFACT_URL"
+
+#cleanup the RPM repo from the build machine.
+rm -rf $RPM_WORKDIR
diff --git a/ci/install_dependencies.sh b/ci/install_dependencies.sh
index 87f8163f..40437804 100755
--- a/ci/install_dependencies.sh
+++ b/ci/install_dependencies.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 Intel Corporation
+# Copyright 2017-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,24 +16,18 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/utility/package-list.sh
-# For collectd
-sudo yum install -y yum-utils
-sudo yum install -y epel-release
-sudo yum-builddep -y collectd
-
-sudo yum -y install autoconf automake flex bison libtool pkg-config
-
-sudo yum -y install git
-
-sudo yum -y install rpm-build \
- libcap-devel xfsprogs-devel iptables-devel \
- libmemcached-devel gtk2-devel libvirt-devel
-
-# For DMA component
-sudo yum -y install hiredis-devel
-
-# For intel-cmt-cat
-sudo yum -y install wget
+# Reuse build_base_machine.sh for this distro, to install the required packages
+# Detect OS name and version from systemd based os-release file
+. /etc/os-release
+distro_dir="$DIR/../systems/$ID/$VERSION_ID"
+
+# build base system using OS specific scripts
+if [ -d "$distro_dir" ] && [ -e "$distro_dir/build_base_machine.sh" ]; then
+ sudo $distro_dir/build_base_machine.sh || ( echo "$distro_dir/build_base_machine.sh failed" && exit 1 )
+else
+ "$distro_dir is not supported"
+ exit 1
+fi
# For RPM build
mkdir -p $RPM_WORKDIR/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
diff --git a/ci/utility/collectd.spec.patch b/ci/utility/collectd.spec.patch
new file mode 100644
index 00000000..026e96ff
--- /dev/null
+++ b/ci/utility/collectd.spec.patch
@@ -0,0 +1,95 @@
+diff --git a/contrib/redhat/collectd.spec b/contrib/redhat/collectd.spec
+index 4721d47f..ca9929db 100644
+--- a/contrib/redhat/collectd.spec
++++ b/contrib/redhat/collectd.spec
+@@ -96,6 +96,7 @@
+ %define with_mbmon 0%{!?_without_mbmon:1}
+ %define with_mcelog 0%{!?_without_mcelog:1}
+ %define with_md 0%{!?_without_md:1}
++%define with_mdevents 0%{!?_without_mdevents:1}
+ %define with_memcachec 0%{!?_without_memcachec:1}
+ %define with_memcached 0%{!?_without_memcached:1}
+ %define with_memory 0%{!?_without_memory:1}
+@@ -112,7 +113,7 @@
+ %define with_notify_nagios 0%{!?_without_notify_nagios:1}
+ %define with_ntpd 0%{!?_without_ntpd:1}
+ %define with_numa 0%{!?_without_numa:1}
+-%define with_nut 0%{!?_without_nut:1}
++%define with_nut 0%{!?_without_nut:0}
+ %define with_olsrd 0%{!?_without_olsrd:1}
+ %define with_openldap 0%{!?_without_openldap:1}
+ %define with_openvpn 0%{!?_without_openvpn:1}
+@@ -154,7 +155,7 @@
+ %define with_uptime 0%{!?_without_uptime:1}
+ %define with_users 0%{!?_without_users:1}
+ %define with_uuid 0%{!?_without_uuid:1}
+-%define with_varnish 0%{!?_without_varnish:1}
++%define with_varnish 0%{!?_without_varnish:0}
+ %define with_virt 0%{!?_without_virt:1}
+ %define with_vmem 0%{!?_without_vmem:1}
+ %define with_vserver 0%{!?_without_vserver:1}
+@@ -195,7 +196,7 @@
+ # plugin intel_pmu disabled, requires libjevents
+ %define with_intel_pmu 0%{!?_without_intel_pmu:0}
+ # plugin intel_rdt disabled, requires intel-cmt-cat
+-%define with_intel_rdt 0%{!?_without_intel_rdt:0}
++%define with_intel_rdt 0%{!?_without_intel_rdt:1}
+ # plugin mic disabled, requires Mic
+ %define with_mic 0%{!?_without_mic:0}
+ # plugin netapp disabled, requires libnetapp
+@@ -643,7 +644,7 @@ The modbus plugin collects values from Modbus/TCP enabled devices
+ Summary: MySQL plugin for collectd
+ Group: System Environment/Daemons
+ Requires: %{name}%{?_isa} = %{version}-%{release}
+-BuildRequires: mysql-devel
++BuildRequires: mariadb-connector-c-devel
+ %description mysql
+ MySQL querying plugin. This plugin provides data of issued commands, called
+ handlers and database traffic.
+@@ -1503,6 +1504,12 @@ Collectd utilities
+ %define _with_md --disable-md
+ %endif
+
++%if %{with_mdevents}
++%define _with_mdevents --enable-mdevents
++%else
++%define _with_mdevents --disable-mdevents
++%endif
++
+ %if %{with_memcachec}
+ %define _with_memcachec --enable-memcachec
+ %else
+@@ -2117,6 +2124,7 @@ Collectd utilities
+ %{?_with_grpc} \
+ %{?_with_hddtemp} \
+ %{?_with_hugepages} \
++ --disable-infiniband \
+ %{?_with_intel_pmu} \
+ %{?_with_intel_rdt} \
+ %{?_with_interface} \
+@@ -2137,6 +2145,7 @@ Collectd utilities
+ %{?_with_mbmon} \
+ %{?_with_mcelog} \
+ %{?_with_md} \
++ %{?_with_mdevents} \
+ %{?_with_memcachec} \
+ %{?_with_memcached} \
+ %{?_with_memory} \
+@@ -2146,6 +2155,7 @@ Collectd utilities
+ %{?_with_multimeter} \
+ %{?_with_mysql} \
+ %{?_with_netapp} \
++ --disable-netstat_udp \
+ %{?_with_netlink} \
+ %{?_with_network} \
+ %{?_with_nfs} \
+@@ -2453,6 +2463,9 @@ fi
+ %if %{with_md}
+ %{_libdir}/%{name}/md.so
+ %endif
++%if %{with_mdevents}
++%{_libdir}/%{name}/mdevents.so
++%endif
+ %if %{with_memcached}
+ %{_libdir}/%{name}/memcached.so
+ %endif
diff --git a/ci/utility/collectd_build_rpm.sh b/ci/utility/collectd_build_rpm.sh
index a0e9cc23..a261c292 100755
--- a/ci/utility/collectd_build_rpm.sh
+++ b/ci/utility/collectd_build_rpm.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 Intel Corporation and OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/package-list.sh
@@ -31,14 +32,14 @@ make dist
cp $COLLECTD_DIR/collectd-$VERSION.tar.bz2 $RPM_WORKDIR/SOURCES/
+git apply $DIR/collectd.spec.patch
+
sed --regexp-extended \
--in-place=".bak" \
- --expression="s/Version:\s+\S+$/Version: $VERSION/g" \
+ --expression="s/Version:\s+\S+$/Version: $VERSION/g" \
$COLLECTD_DIR/contrib/redhat/collectd.spec
-sed --regexp-extended \
- --in-place \
- --expression="s/without_intel_rdt:[0-9]/without_intel_rdt:1/g" \
- $COLLECTD_DIR/contrib/redhat/collectd.spec
+dnf builddep -y $COLLECTD_DIR/contrib/redhat/collectd.spec
rpmbuild --define "_topdir $RPM_WORKDIR" -bb $COLLECTD_DIR/contrib/redhat/collectd.spec
+
diff --git a/ci/utility/package-list.sh b/ci/utility/package-list.sh
index 0ca4aea6..019893d3 100755
--- a/ci/utility/package-list.sh
+++ b/ci/utility/package-list.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 Intel Corporation
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
RPM_WORKDIR=$WORKSPACE/rpmbuild
RPM_DIR=$RPM_WORKDIR/RPMS/x86_64
@@ -24,4 +25,4 @@ CMTCAT_BRANCH=8b27ad757e86a01bc51eafcb9e600605ff1beca4
CMTCAT_VERSION="v1.1.0"
COLLECTD_REPO=https://github.com/collectd/collectd.git
-COLLECTD_BRANCH=collectd-5.8
+COLLECTD_BRANCH=collectd-5.12
diff --git a/ci/utility/rpms_check.sh b/ci/utility/rpms_check.sh
index a08b3ef9..af7fc34a 100755
--- a/ci/utility/rpms_check.sh
+++ b/ci/utility/rpms_check.sh
@@ -1,4 +1,18 @@
#!/bin/sh
+# Copyright (C) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/package-list.sh
diff --git a/ci/utility/rpms_list b/ci/utility/rpms_list
index 9607c5ab..eda3f435 100644
--- a/ci/utility/rpms_list
+++ b/ci/utility/rpms_list
@@ -15,8 +15,6 @@ collectd-debuginfo
collectd-disk
collectd-dns
collectd-email
-collectd-gmond
-collectd-gps
collectd-hddtemp
collectd-intel_rdt
collectd-ipmi
@@ -24,34 +22,27 @@ collectd-iptables
collectd-java
collectd-log_logstash
collectd-lua
-collectd-lvm
collectd-memcachec
-collectd-modbus
-collectd-mqtt
collectd-mysql
collectd-netlink
collectd-nginx
collectd-notify_desktop
collectd-notify_email
-collectd-nut
collectd-openldap
collectd-ovs_events
collectd-ovs_stats
collectd-perl
collectd-php-collection
collectd-pinba
-collectd-ping
collectd-postgresql
collectd-python
collectd-redis
collectd-rrdcached
collectd-rrdtool
collectd-sensors
-collectd-smart
collectd-snmp
collectd-snmp_agent
collectd-utils
-collectd-varnish
collectd-virt
collectd-write_http
collectd-write_prometheus
diff --git a/docker/README_collectd b/docker/README_collectd
index c922ab64..cf497105 100644
--- a/docker/README_collectd
+++ b/docker/README_collectd
@@ -9,7 +9,7 @@ Table of content:
1. DESCRIPTION
2. SYSTEM REQUIREMENTS
3. INSTALLATION NOTES - barometer-collectd
-4. INSTALLATION NOTES - barometer-collectd-master
+4. INSTALLATION NOTES - barometer-collectd-latest
5. ADDITIONAL STEPS
------------------------------------------------------------------------------
@@ -18,9 +18,9 @@ Table of content:
This Dockerfile provides instruction for building collect in isolated container.
There are currently two variants of collectd container:
- barometer-collectd - it is based on stable collect release
- - barometer-collectd-master - development container that is based on
- latest 'master' branch for collectd project. It contains all available
- collectd plugins and features that are available on 'master' branch but
+ - barometer-collectd-latest - development container that is based on
+ latest 'main' branch for collectd project. It contains all available
+ collectd plugins and features that are available on 'main' branch but
some issues with configuration or stability may occur
------------------------------------------------------------------------------
@@ -46,22 +46,22 @@ sudo docker run -ti --net=host -v `pwd`/src/collectd/collectd_sample_configs:/op
/opt/collectd/sbin/collectd -f
------------------------------------------------------------------------------
-4. INSTALLATION NOTES: barometer-collectd-master (development container)
+4. INSTALLATION NOTES: barometer-collectd-latest (development container)
-To build docker barometer-collectd-master container run (it is based on master branch from collectd):
-sudo docker build -f ./docker/barometer-collectd-master/Dockerfile .
+To build docker barometer-collectd-latest container run (it is based on main branch from collectd):
+sudo docker build -f ./docker/barometer-collectd-latest/Dockerfile .
from root barometer folder.
To run builded image run
sudo docker images # get docker image id
-sudo docker run -ti --net=host -v `pwd`/src/collectd/collectd_sample_configs-master:/opt/collectd/etc/collectd.conf.d \
+sudo docker run -ti --net=host -v `pwd`/src/collectd/collectd_sample_configs-latest:/opt/collectd/etc/collectd.conf.d \
-v /var/run:/var/run -v /tmp:/tmp --privileged <image id>
-NOTE: barometer-collectd-master container uses a different sample configurations files
-compared to regular barometer-collectd container (src/collectd/collectd_sample_configs-master)
+NOTE: barometer-collectd-latest container uses a different sample configurations files
+compared to regular barometer-collectd container (src/collectd/collectd_sample_configs-latest)
To make some changes run
-sudo docker run -ti --net=host -v `pwd`/src/collectd/collectd_sample_configs-master:/opt/collectd/etc/collectd.conf.d \
+sudo docker run -ti --net=host -v `pwd`/src/collectd/collectd_sample_configs-latest:/opt/collectd/etc/collectd.conf.d \
-v /var/run:/var/run -v /tmp:/tmp --privileged --entrypoint=/bin/bash <image id>
/opt/collectd/sbin/collectd -f
diff --git a/docker/ansible/collectd6_test.yml b/docker/ansible/collectd6_test.yml
new file mode 100644
index 00000000..c1a3a8b4
--- /dev/null
+++ b/docker/ansible/collectd6_test.yml
@@ -0,0 +1,143 @@
+---
+# ansible-playbook -e PR=<PRID> -e new_plugin=<plugin> collectd6_test.yml
+
+# As well as passing a PRID, a config command should be passable too since
+# a lot of the plugins have been explicitly disabled in the build.
+- hosts: localhost
+ become: true
+ tasks:
+ - name: Set names for containers to be used for testing
+ set_fact:
+ collectd5_container_name: "bar-collectd-latest"
+ collectd6_container_name: "bar-collectd-6{{ '-' + PR if PR is defined }}"
+ flask_container_name: "test-collectd-5-v-6"
+
+ - name: Remove existing containers
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ with_items:
+ - "{{ collectd5_container_name }}"
+ - "{{ collectd6_container_name }}"
+ - "{{ flask_container_name }}"
+
+ - name: Get a list of containers
+ command:
+ docker ps -a
+ register: output
+
+ - name: Confirm that existing test containers were removed
+ assert:
+ that:
+ - 'not "{{ collectd5_container_name }}" in output.stdout'
+ - 'not "{{ collectd6_container_name }}" in output.stdout'
+ - 'not "{{ flask_container_name }}" in output.stdout'
+
+ - name: Build collectd containers
+ include_role:
+ name: build_collectd
+ args:
+ apply:
+ tags:
+ - latest
+ - collectd-6
+ - flask_test
+ vars:
+ COLLECTD_PULL_REQUESTS: "{{ PR | default() }}"
+ COLLECTD_CONFIG_CMD_ARGS: "{{ '--enable-' + new_plugin if new_plugin is defined }}"
+
+ - name: "Set up config for write_http plugin"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | default([]) | union(['write_http']) }}"
+ collectd_plugin_write_http_nodes:
+ flask:
+ url: http://localhost:5000
+ format: "Command"
+
+ - name: Generate collectd configs
+ include_role:
+ name: config_files
+
+ # Since I can't skip-tags here, I have to remove the plugins later
+ # TODO(efoley) Add a disable_plugins and enable_plugins list to
+ # roles/config_files, as an alternative to tags.
+ # This alternative is kinda needed anyway, so that it's easier to add extra
+ # plugins instead of using.
+ # ``{{ collectd_plugins | default([]) | union(['the_plugin_i_want_to_enable'])}}``
+ # Tags can stay, since they are convenient, and easier to pass to the
+ # command line than a list of plugins
+ - name: "Remove plugin configs"
+ file:
+ path: "/opt/collectd/etc/collectd.conf.d/{{ item }}.conf"
+ state: absent
+ with_items:
+ - snmp_agent
+ - intel_pmu
+
+ # TODO(efoley): The path here should be parameterised, to a degree, I don't
+ # want it to be repeated. And I shouldn't assume that this is always going
+ # to be the value (unless it is in vars/main instead of defaults/main)
+ - name: "Remove plugin configs (collectd 6)"
+ file:
+ path: "/opt/collectd/etc/collectd.conf.d/{{ item }}.conf"
+ state: absent
+ with_items:
+ - csv
+ - network
+ - rrdtool
+ - write_kafka
+ - write_prometheus
+ - logfile
+
+ - debug:
+ var: PR
+
+ - name: Run the collectd-6 container
+ include_role:
+ name: run_collectd
+ vars:
+ collectd_image_name: "opnfv/barometer-collectd-6{{ '-' + PR if PR is defined }}"
+ collectd_container_name: "{{ collectd6_container_name }}"
+
+ - name: Run the collectd-latest container
+ include_role:
+ name: run_collectd
+ vars:
+ collectd_image_name: opnfv/barometer-collectd-latest
+ collectd_container_name: "{{ collectd5_container_name }}"
+
+ - name: Run the flask test container
+ docker_container:
+ name: "{{ flask_container_name }}"
+ image: test-collectd-write_http
+ detach: yes
+ state: started
+ #network_mode: host
+ published_ports: 5000:5000
+
+ - name: Check output for flask app
+ become: true
+ shell: |
+ docker logs {{ flask_container_name }} {{ '| grep "' + new_plugin + '"' if new_plugin is defined }} | tail -200
+ register: output
+
+ - debug:
+ var: output.stdout_lines
+
+ - name: Get a list of running containers
+ become: true
+ command:
+ docker ps
+ register: output
+
+ - name: Make sure that the expected containers are running
+ assert:
+ that:
+ - '"{{ collectd6_container_name }}" in output.stdout'
+ - '"{{ collectd5_container_name }}" in output.stdout'
+ - '"{{ flask_container_name }}" in output.stdout'
+
+# Create a small report at the end for collectd versions...
+# Update Apply PRs to check out a branch when it is a single PR
+# OR update these playbooks to use the tag way of checking out a PR
diff --git a/docker/ansible/roles/config_files/templates/logfile.conf.j2 b/docker/ansible/collectd_build.yml
index 8bc96ffe..d5cad076 100644
--- a/docker/ansible/roles/config_files/templates/logfile.conf.j2
+++ b/docker/ansible/collectd_build.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2021 Anuket and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,12 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-LoadPlugin logfile
-
-<Plugin logfile>
- LogLevel "{{ logfile_log_level }}"
- File "{{ logfile_dir }}/collectd.log"
- Timestamp true
-</Plugin>
-
+---
+# ansible-playbook collectd_build.yml
+#
+- hosts: localhost
+ become: true
+ become_user: root
+ gather_facts: true
+ roles:
+ - name: build_collectd
diff --git a/docker/ansible/collectd_ves.yml b/docker/ansible/collectd_ves.yml
index 9e3bf463..002744ce 100644
--- a/docker/ansible/collectd_ves.yml
+++ b/docker/ansible/collectd_ves.yml
@@ -1,16 +1,17 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
---
- hosts: collectd_hosts kafka_hosts ves_hosts zookeeper_hosts
diff --git a/docker/ansible/default.inv b/docker/ansible/default.inv
index fb366577..d65e1c0e 100644
--- a/docker/ansible/default.inv
+++ b/docker/ansible/default.inv
@@ -13,8 +13,8 @@ localhost
[collectd_hosts:vars]
install_mcelog=true
insert_ipmi_modules=true
-#to use master or experimental container set the collectd flavor below
-#possible values: stable|master|experimental
+#to use latest (collectd-main branch) or experimental (main + PRs) container
+#set the collectd flavor below. Possible values: stable|latest|experimental
flavor=stable
[influxdb_hosts]
@@ -23,6 +23,7 @@ flavor=stable
#hostname
[grafana_hosts]
+#NOTE: As per current support, Grafana and Influxdb should be same host.
#hostname
[prometheus_hosts]
diff --git a/docker/ansible/roles/build_collectd/tasks/main.yml b/docker/ansible/roles/build_collectd/tasks/main.yml
new file mode 100644
index 00000000..6faddde5
--- /dev/null
+++ b/docker/ansible/roles/build_collectd/tasks/main.yml
@@ -0,0 +1,71 @@
+# Copyright 2021 Anuket and others
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Build stable container
+ docker_image:
+ name: anuket/barometer-collectd
+ build:
+ path: "{{ playbook_dir }}/../barometer-collectd/"
+ source: build
+ tags:
+ - stable
+
+- name: Build the latest container
+ docker_image:
+ name: anuket/barometer-collectd-latest
+ build:
+ path: "{{ playbook_dir }}/../../"
+ dockerfile: "docker/barometer-collectd-latest/Dockerfile"
+ source: build
+ tags:
+ - latest
+
+- name: Build collectd-experimental
+ docker_image:
+ name: anuket/barometer-collectd-experimental
+ build:
+ path: "{{ playbook_dir }}/../../"
+ dockerfile: "docker/barometer-collectd-experimental/Dockerfile"
+ args:
+ COLLECTD_FLAVOR: experimental
+ COLLECTD_TAG: "{{ COLLECTD_TAG | default('main') }}"
+ COLLECTD_PULL_REQUESTS: "{{ COLLECTD_PULL_REQUESTS | default() }}"
+ source: build
+ tags:
+ - experimental
+
+- name: Build collectd-6
+ docker_image:
+ name: "anuket/barometer-collectd-6{{ ( '-' + COLLECTD_PULL_REQUESTS ) if COLLECTD_PULL_REQUESTS is defined else '' }}"
+ build:
+ path: "{{ playbook_dir }}/../../"
+ dockerfile: "docker/barometer-collectd-experimental/Dockerfile"
+ args:
+ COLLECTD_FLAVOR: collectd-6
+ COLLECTD_TAG: "{{ COLLECTD_TAG | default('collectd-6.0') }}"
+ COLLECTD_CONFIG_CMD_ARGS: "{{ COLLECTD_CONFIG_CMD_ARGS if COLLECTD_CONFIG_CMD_ARGS is defined }}"
+ source: build
+ tags:
+ - collectd-6
+
+- name: Build test_app for write_http
+ docker_image:
+ name: test-collectd-write_http
+ build:
+ path: "{{ playbook_dir }}/../flask_app/"
+ source: build
+ tags:
+ - flask_test
+ - never
+
diff --git a/docker/ansible/roles/config_files/defaults/main.yml b/docker/ansible/roles/config_files/defaults/main.yml
new file mode 100644
index 00000000..c2004ae1
--- /dev/null
+++ b/docker/ansible/roles/config_files/defaults/main.yml
@@ -0,0 +1,60 @@
+# Copyright 2018-21 Anuket, Intel Corporation and others
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+config_file_dir: "/opt/collectd/etc/collectd.conf.d/"
+csv_log_dir: "/var/lib/collectd/csv"
+logfile_dir: "/var/log/"
+
+#global default interval
+interval_value: 10
+
+#influx network port
+influx_network_port: 25826
+
+#network plugin vars
+network_port: 25826
+network_ip_addr: localhost
+
+#prometheus plugin vars
+prometheus_port: 9103
+
+#ovs event and stats plugin vars
+ovs_events_interval: 1
+ovs_events_ip_addr: localhost
+ovs_events_port: 6640
+ovs_stats_interval: 1
+ovs_stats_ip_addr: localhost
+ovs_stats_port: 6640
+
+#rdt plugin vars
+rdt_interval: 1
+
+#kafka plugin vars
+kafka_ip_addr: localhost
+kafka_port: 9092
+kafka_topic: collectd
+
+#logfile plugin vars
+logfile_log_level: info
+
+#syslog plugin vars
+syslog_log_level: info
+
+#rrd plugin vars
+rrdtool_db_dir: "/var/lib/collectd/rrd"
+cache_flush: 120
+write_per_sec: 50
+
+#additional configuration files path
+additional_configs_path: ""
diff --git a/docker/ansible/roles/config_files/templates/csv.conf.j2 b/docker/ansible/roles/config_files/tasks/capabilities.yml
index 3e351932..cfca03a2 100644
--- a/docker/ansible/roles/config_files/templates/csv.conf.j2
+++ b/docker/ansible/roles/config_files/tasks/capabilities.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2019-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,11 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+---
-LoadPlugin csv
-
-<Plugin csv>
- DataDir "{{ csv_log_dir }}"
- StoreRates false
-</Plugin>
+- name: enable capabilities plugin
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['capabilities']) | unique }}"
+ tags:
+ - capabilities
diff --git a/docker/ansible/roles/config_files/tasks/csv.yml b/docker/ansible/roles/config_files/tasks/csv.yml
index 0be4ec8f..bfd88c0e 100644
--- a/docker/ansible/roles/config_files/tasks/csv.yml
+++ b/docker/ansible/roles/config_files/tasks/csv.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,8 @@
---
- name: enable csv plugin
- template:
- src: csv.conf.j2
- dest: "{{ config_file_dir }}/csv.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['csv']) | unique }}"
tags:
- csv
diff --git a/docker/ansible/roles/config_files/tasks/default_read_import.yml b/docker/ansible/roles/config_files/tasks/default_read_import.yml
index d70919b3..46b4d516 100644
--- a/docker/ansible/roles/config_files/tasks/default_read_import.yml
+++ b/docker/ansible/roles/config_files/tasks/default_read_import.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 OPNFV, Intel Corporation and Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,123 +12,107 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-
- name: enable contextswitch plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin contextswitch(\s+.*)?$'
- replace: '\1LoadPlugin contextswitch\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['contextswitch']) | unique }}"
tags:
+ - en_default_all
- contextswitch
- name: enable cpu plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin cpu(\s+.*)?$'
- replace: '\1LoadPlugin cpu\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['cpu']) | unique }}"
tags:
+ - en_default_all
- cpu
- name: enable cpufreq plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin cpufreq(\s+.*)?$'
- replace: '\1LoadPlugin cpufreq\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['cpufreq']) | unique }}"
tags:
+ - en_default_all
- cpufreq
- name: enable df plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin df(\s+.*)?$'
- replace: '\1LoadPlugin df\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['df']) | unique }}"
tags:
+ - en_default_all
- df
- name: enable disk plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin disk(\s+.*)?$'
- replace: '\1LoadPlugin disk\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['disk']) | unique }}"
tags:
+ - en_default_all
- disk
- name: enable ethstat plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin ethstat(\s+.*)?$'
- replace: '\1LoadPlugin ethstat\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['ethstat']) | unique }}"
tags:
+ - en_default_all
- ethstat
- name: enable ipc plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin ipc(\s+.*)?$'
- replace: '\1LoadPlugin ipc\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['ipc']) | unique }}"
tags:
+ - en_default_all
- ipc
- name: enable irq plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin irq(\s+.*)?$'
- replace: '\1LoadPlugin irq\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['irq']) | unique }}"
tags:
+ - en_default_all
- irq
- name: enable load plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin load(\s+.*)?$'
- replace: '\1LoadPlugin load\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['load']) | unique }}"
tags:
+ - en_default_all
- load
- name: enable memory plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin memory(\s+.*)?$'
- replace: '\1LoadPlugin memory\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['memory']) | unique }}"
tags:
+ - en_default_all
- memory
- name: enable numa plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin numa(\s+.*)?$'
- replace: '\1LoadPlugin numa\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['numa']) | unique }}"
tags:
+ - en_default_all
- numa
- name: enable processes plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin processes(\s+.*)?$'
- replace: '\1LoadPlugin processes\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['processes']) | unique }}"
tags:
+ - en_default_all
- processes
- name: enable swap plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin swap(\s+.*)?$'
- replace: '\1LoadPlugin swap\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['swap']) | unique }}"
tags:
+ - en_default_all
- swap
- name: enable turbostat plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin turbostat(\s+.*)?$'
- replace: '\1LoadPlugin turbostat\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['turbostat']) | unique }}"
tags:
+ - en_default_all
- turbostat
- name: enable uptime plugin
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin uptime(\s+.*)?$'
- replace: '\1LoadPlugin uptime\2'
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['uptime']) | unique }}"
tags:
+ - en_default_all
- uptime
diff --git a/docker/ansible/roles/config_files/tasks/default_read_plugins.yml b/docker/ansible/roles/config_files/tasks/default_read_plugins.yml
deleted file mode 100644
index 95b942c2..00000000
--- a/docker/ansible/roles/config_files/tasks/default_read_plugins.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-
-- name: copy default template to system
- template:
- src: default_read_plugins.conf.j2
- dest: "{{ config_file_dir }}/default_read_plugins.conf"
- tags:
- - always
-
-- name: enable all default plugins
- replace:
- path: "{{ config_file_dir }}/default_read_plugins.conf"
- regexp: '(\s+)#LoadPlugin(\s+.*)?$'
- replace: '\1LoadPlugin\2'
- register: en_default_all
- tags:
- - en_default_all
-
-- name: include per default plugin tasks
- include: default_read_import.yml
- when: en_default_all is not defined \ No newline at end of file
diff --git a/docker/ansible/roles/config_files/tasks/dpdk.yml b/docker/ansible/roles/config_files/tasks/dpdk.yml
index 7809c56a..2be146b0 100644
--- a/docker/ansible/roles/config_files/tasks/dpdk.yml
+++ b/docker/ansible/roles/config_files/tasks/dpdk.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,17 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-
-- name: enable dpdkstat plugin
- template:
- src: dpdkstat.conf.j2
- dest: "{{ config_file_dir }}/dpdkstat.conf"
+- name: enable dpdk_telemetry plugin
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['dpdk_telemetry']) | unique }}"
tags:
- - dpdkstat
-
-- name: enable dpdkevents plugin
- template:
- src: dpdkevents.conf.j2
- dest: "{{ config_file_dir }}/dpdkevents.conf"
- tags:
- - dpdkevents
+ - dpdk_telemetry
diff --git a/docker/ansible/roles/config_files/tasks/exec.yml b/docker/ansible/roles/config_files/tasks/exec.yml
index b7389703..956e3a8f 100644
--- a/docker/ansible/roles/config_files/tasks/exec.yml
+++ b/docker/ansible/roles/config_files/tasks/exec.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,10 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-
- name: enable exec plugin
- template:
- src: exec.conf.j2
- dest: "{{ config_file_dir }}/exec.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['exec']) | unique }}"
tags:
- exec
diff --git a/docker/ansible/roles/config_files/tasks/hugepages.yml b/docker/ansible/roles/config_files/tasks/hugepages.yml
index bf49035c..5b4ec0ba 100644
--- a/docker/ansible/roles/config_files/tasks/hugepages.yml
+++ b/docker/ansible/roles/config_files/tasks/hugepages.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,9 +25,8 @@
- hugepages
- name: enable hugepages plugin
- template:
- src: hugepages.conf.j2
- dest: "{{ config_file_dir }}//hugepages.conf"
- when: hugepages_result|succeeded
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['hugepages']) | unique }}"
+ when: hugepages_result is succeeded
tags:
- hugepages
diff --git a/docker/ansible/roles/config_files/tasks/ipmi.yml b/docker/ansible/roles/config_files/tasks/ipmi.yml
index c7359fbf..12913a85 100644
--- a/docker/ansible/roles/config_files/tasks/ipmi.yml
+++ b/docker/ansible/roles/config_files/tasks/ipmi.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@
command: "modprobe ipmi_devintf"
register: ipmi_devintf
ignore_errors: true
- when: ipmi_devintf|failed and insert_ipmi_modules|default(false)|bool
+ when: (ipmi_devintf is failed) and (insert_ipmi_modules|default(false)|bool)
tags:
- ipmi
@@ -39,7 +39,7 @@
command: "modprobe ipmi_si"
register: ipmi_devsi
ignore_errors: true
- when: ipmi_devsi|failed and insert_ipmi_modules|default(false)|bool
+ when: (ipmi_devsi is failed) and (insert_ipmi_modules|default(false)|bool)
tags:
- ipmi
@@ -52,10 +52,9 @@
- ipmi
- name: enable ipmi plugin
- template:
- src: ipmi.conf.j2
- dest: "{{ config_file_dir }}/0_ipmi.conf"
- when: ipmi0_exists|succeeded and ipmi_devintf|succeeded and ipmi_devsi|succeeded
+ set_fact:
+ collectd_plugins: '{{ collectd_plugins | union(["ipmi"]) | unique }}'
+ when: (ipmi0_exists is succeeded) and (ipmi_devintf is succeeded) and (ipmi_devsi is succeeded)
tags:
- ipmi
diff --git a/docker/ansible/roles/config_files/tasks/kafka.yml b/docker/ansible/roles/config_files/tasks/kafka.yml
index d7639cbf..6fb3f050 100644
--- a/docker/ansible/roles/config_files/tasks/kafka.yml
+++ b/docker/ansible/roles/config_files/tasks/kafka.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,9 +13,8 @@
# limitations under the License.
---
-- name: enable kafka plugin
- template:
- src: kafka.conf.j2
- dest: "{{ config_file_dir }}/kafka.conf"
+- name: enable kafka plugin with collectd_config
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['write_kafka']) | unique }}"
tags:
- kafka
diff --git a/docker/ansible/roles/config_files/tasks/logfile.yml b/docker/ansible/roles/config_files/tasks/logfile.yml
index b2700594..cd1c1049 100644
--- a/docker/ansible/roles/config_files/tasks/logfile.yml
+++ b/docker/ansible/roles/config_files/tasks/logfile.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,12 +14,8 @@
---
- name: enable logfile plugin
- template:
- src: logfile.conf.j2
- dest: "{{ config_file_dir }}/logfile.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['logfile']) | unique }}"
tags:
- logfile
-
-
-
diff --git a/docker/ansible/roles/config_files/tasks/logparser.yml b/docker/ansible/roles/config_files/tasks/logparser.yml
index 615d2e2b..72adcea2 100644
--- a/docker/ansible/roles/config_files/tasks/logparser.yml
+++ b/docker/ansible/roles/config_files/tasks/logparser.yml
@@ -1,4 +1,4 @@
-#Copyright 2019 OPNFV and Intel Corporation
+# Copyright 2019-21 Anuket, Intel Corporation, and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,9 +15,8 @@
- name: enable logparser plugin
template:
- src: experimental/logparser.conf.j2
+ src: logparser.conf.j2
dest: "{{ config_file_dir }}/logparser.conf"
- when: flavor|default('stable')|string == 'experimental'
tags:
- logparser
diff --git a/docker/ansible/roles/config_files/tasks/main.yml b/docker/ansible/roles/config_files/tasks/main.yml
index 58cfec63..9b241890 100644
--- a/docker/ansible/roles/config_files/tasks/main.yml
+++ b/docker/ansible/roles/config_files/tasks/main.yml
@@ -1,16 +1,18 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
---
- name: Clean collectd config dir path
@@ -29,11 +31,14 @@
tags:
- always
+- name: enable capabilities plugin
+ import_tasks: capabilities.yml
+
- name: enable csv plugin
import_tasks: csv.yml
- name: enable default plugins
- import_tasks: default_read_plugins.yml
+ import_tasks: default_read_import.yml
- name: enable dpdk plugins
import_tasks: dpdk.yml
@@ -78,14 +83,59 @@
- name: enable syslog plugin
import_tasks: syslog.yml
+- name: enable ovs plugins
+ import_tasks: ovs.yml
+
- name: enable virt plugin
import_tasks: virt.yml
- name: enable ipmi plugin
include: ipmi.yml
+- name: Enable unixsock plugin
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['unixsock']) | unique }}"
+ tags:
+ - unixsock
+
- name: enable uuid plugin
include: uuid.yml
+- name: configure plugins
+ include_role:
+ name: collectd_config
+ vars:
+ collectd_conf_output_dir: /tmp/collectd.conf.d
+ tags:
+ - always
+ - en_default_all
+
+- name: Copy the generated plugin configs
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - { src: "/tmp/collectd.conf.d/", dest: "{{ config_file_dir }}" }
+ - { src: "/tmp/collectd.conf", dest: "{{ config_file_dir }}/../collectd.conf" }
+ tags:
+ - always
+ - en_default_all
+
+- name: Update TypesDB location
+ lineinfile:
+ path: "{{ config_file_dir }}../collectd.conf"
+ regexp: '^TypesDB "/usr/share/collectd/types.db"$'
+ line: 'TypesDB "/opt/collectd/share/collectd/types.db"'
+
+- name: Update config file location
+ lineinfile:
+ path: "{{ config_file_dir }}../collectd.conf"
+ regexp: "^(.*)/tmp/collectd.conf.d(.*)$"
+ line: '\1{{ config_file_dir }}\2'
+ backrefs: yes
+ tags:
+ - always
+ - en_default_all
+
- name: copy additional config files
include: additional_configs.yml
diff --git a/docker/ansible/roles/config_files/tasks/make_mcelog.yml b/docker/ansible/roles/config_files/tasks/make_mcelog.yml
index d05b9433..938396c8 100644
--- a/docker/ansible/roles/config_files/tasks/make_mcelog.yml
+++ b/docker/ansible/roles/config_files/tasks/make_mcelog.yml
@@ -1,16 +1,17 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
---
- name: extract mcelog from online archive
diff --git a/docker/ansible/roles/config_files/tasks/mcelog.yml b/docker/ansible/roles/config_files/tasks/mcelog.yml
index be185092..ac14f80c 100644
--- a/docker/ansible/roles/config_files/tasks/mcelog.yml
+++ b/docker/ansible/roles/config_files/tasks/mcelog.yml
@@ -1,16 +1,17 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
---
- name: install mcelog package
@@ -32,7 +33,7 @@
- name: build mcelog from sources if not in package
import_tasks: make_mcelog.yml
- when: mcelog_present|failed and install_mcelog|default(false)|bool
+ when: (mcelog_present is failed) and (install_mcelog|default(false)|bool)
tags:
- mcelogs
@@ -72,6 +73,6 @@
template:
src: mcelog.conf.j2
dest: "{{ config_file_dir }}/mcelog.conf"
- when: mcelog_running|succeeded and mcelog_exists|succeeded and mcelog_client_exists|succeeded
+ when: (mcelog_running is succeeded) and (mcelog_exists is succeeded) and (mcelog_client_exists is succeeded)
tags:
- - mcelogs
+ - mcelogs
diff --git a/docker/ansible/roles/config_files/tasks/network.yml b/docker/ansible/roles/config_files/tasks/network.yml
index 52902d4c..0af6a713 100644
--- a/docker/ansible/roles/config_files/tasks/network.yml
+++ b/docker/ansible/roles/config_files/tasks/network.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,26 +14,24 @@
---
- name: Enable network plugin
- template:
- src: network.conf.j2
- dest: "{{ config_file_dir }}/network.conf"
+ set_fact:
+ collectd_plugins: '{{ collectd_plugins | union (["network"]) | unique }}'
tags:
- network
- name: Add network port if defined
- lineinfile:
- dest: "{{ config_file_dir }}/network.conf"
- insertafter: Plugin network
- line: " Server \"{{ network_ip_addr }}\" \"{{ network_port }}\""
+ set_fact:
+ collectd_plugin_network_server: "{{ collectd_plugin_network_server | default([]) | union([{
+ 'name': network_ip_addr,
+ 'port': network_port,
+ }]) }}"
when: network_ip_addr is defined
tags:
- network
- name: Modify network.conf to add influxdb_hosts
- lineinfile:
- dest: "{{ config_file_dir }}/network.conf"
- insertafter: Plugin network
- line: " Server \"{{ item }}\" \"{{ influx_network_port }}\""
+ set_fact:
+ collectd_plugin_network_server: "{{ collectd_plugin_network_server | default([]) | union([{ 'name': item, 'port': influx_network_port }]) }}"
with_items: "{{ groups['influxdb_hosts'] }}"
tags:
- network
diff --git a/docker/ansible/roles/config_files/tasks/ovs.yml b/docker/ansible/roles/config_files/tasks/ovs.yml
new file mode 100644
index 00000000..fe3f0a13
--- /dev/null
+++ b/docker/ansible/roles/config_files/tasks/ovs.yml
@@ -0,0 +1,53 @@
+# Copyright 2018-21 Anuket, Intel Corporation and others
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Check if vswitchd is running
+ shell: ps -ef | grep vswitchd | grep -v grep > /dev/null
+ register: vswitchd_running
+ ignore_errors: True
+ tags:
+ - ovs_stats
+ - ovs_events
+
+- name: Check if db.sock exists
+ stat:
+ path: /var/run/openvswitch/db.sock
+ register: dbsock_exists
+ ignore_errors: True
+ tags:
+ - ovs_stats
+ - ovs_events
+
+- name: enable ovs_stats plugin
+ set_fact:
+ collectd_plugins: '{{ collectd_plugins | union(["ovs_stats"]) | unique }}'
+ collectd_plugin_ovs_stats_port: "{{ ovs_stats_port if ovs_stats_port is defined else omit }}"
+ collectd_plugin_ovs_stats_address: "{{ ovs_stats_ip_addr if ovs_stats_ip_addr is defined else omit }}"
+ collectd_plugin_ovs_stats_interval: "{{ ovs_stats_interval if ovs_stats_interval is defined else omit }}"
+ when: (vswitchd_running is succeeded) and (dbsock_exists is succeeded)
+ tags:
+ - ovs_stats
+
+- name: enable ovs_events plugin
+ set_fact:
+ collectd_plugins: '{{ collectd_plugins | union(["ovs_events"]) | unique }}'
+ collectd_plugin_ovs_events_interval: "{{ ovs_events_interval if ovs_events_interval is defined else omit }}"
+ collectd_plugin_ovs_events_address: "{{ ovs_events_ip_addr if ovs_events_ip_addr is defined else omit }}"
+ collectd_plugin_ovs_events_port: "{{ ovs_events_port if ovs_events_port is defined else omit }}"
+ when: (vswitchd_running is succeeded) and (dbsock_exists is succeeded)
+ tags:
+ - ovs_events
+
+
diff --git a/docker/ansible/roles/config_files/tasks/pmu.yml b/docker/ansible/roles/config_files/tasks/pmu.yml
index 20e3034c..c5820f98 100644
--- a/docker/ansible/roles/config_files/tasks/pmu.yml
+++ b/docker/ansible/roles/config_files/tasks/pmu.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,8 +14,7 @@
---
- name: enable intel_pmu plugin
- template:
- src: intel_pmu.conf.j2
- dest: "{{ config_file_dir }}/intel_pmu.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['intel_pmu']) | unique }}"
tags:
- pmu
diff --git a/docker/ansible/roles/config_files/tasks/prometheus.yml b/docker/ansible/roles/config_files/tasks/prometheus.yml
index 51f2b1f6..c0e7e79d 100644
--- a/docker/ansible/roles/config_files/tasks/prometheus.yml
+++ b/docker/ansible/roles/config_files/tasks/prometheus.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,8 +14,7 @@
---
- name: enable prometheus plugin
- template:
- src: prometheus.conf.j2
- dest: "{{ config_file_dir }}/prometheus.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['write_prometheus']) | unique }}"
tags:
- - prometheus \ No newline at end of file
+ - prometheus
diff --git a/docker/ansible/roles/config_files/tasks/rdt.yml b/docker/ansible/roles/config_files/tasks/rdt.yml
index cbfa7de5..f0a46261 100644
--- a/docker/ansible/roles/config_files/tasks/rdt.yml
+++ b/docker/ansible/roles/config_files/tasks/rdt.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,10 +29,9 @@
- rdt
- name: enable rdt plugin
- template:
- src: rdt.conf.j2
- dest: "{{ config_file_dir }}/rdt.conf"
- when: rdt_result|succeeded and virt_file.stat.exists == False
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['intel_rdt']) | unique }}"
+ when: (rdt_result is succeeded) and (virt_file.stat.exists == False)
tags:
- rdt
diff --git a/docker/ansible/roles/config_files/tasks/snmp_agent.yml b/docker/ansible/roles/config_files/tasks/snmp_agent.yml
index c72aee48..5e1ba975 100644
--- a/docker/ansible/roles/config_files/tasks/snmp_agent.yml
+++ b/docker/ansible/roles/config_files/tasks/snmp_agent.yml
@@ -21,9 +21,9 @@
tags:
- snmp
-- name: enable snmp agent for master container
+- name: enable snmp agent for non-stable container
template:
- src: master/snmp_agent.conf.j2
+ src: latest/snmp_agent.conf.j2
dest: "{{ config_file_dir }}/snmp_agent.conf"
when: flavor|default('stable')|string != 'stable'
tags:
diff --git a/docker/ansible/roles/config_files/tasks/syslog.yml b/docker/ansible/roles/config_files/tasks/syslog.yml
index 97864a5d..e9ab06f3 100644
--- a/docker/ansible/roles/config_files/tasks/syslog.yml
+++ b/docker/ansible/roles/config_files/tasks/syslog.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,8 @@
---
- name: enable syslog plugin
- template:
- src: syslog.conf.j2
- dest: "{{ config_file_dir }}/syslog.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['syslog']) | unique }}"
tags:
- syslog
diff --git a/docker/ansible/roles/config_files/tasks/uuid.yml b/docker/ansible/roles/config_files/tasks/uuid.yml
index 25cfa128..df669e1a 100644
--- a/docker/ansible/roles/config_files/tasks/uuid.yml
+++ b/docker/ansible/roles/config_files/tasks/uuid.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,8 +14,7 @@
---
- name: enable uuid plugin
- template:
- src: uuid.conf.j2
- dest: "{{ config_file_dir }}/uuid.conf"
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['uuid']) | unique }}"
tags:
- uuid
diff --git a/docker/ansible/roles/config_files/tasks/virt.yml b/docker/ansible/roles/config_files/tasks/virt.yml
index 9db10e8c..75a17a18 100644
--- a/docker/ansible/roles/config_files/tasks/virt.yml
+++ b/docker/ansible/roles/config_files/tasks/virt.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,16 +21,14 @@
- virt
- name: (virt) check if rdt is enabled
- stat:
- path: "{{ config_file_dir }}/rdt.conf"
- register: rdt_file
+ set_fact:
+ rdt_enabled: true
tags:
- - virt
+ - rdt
- name: enable virt plugin
- template:
- src: virt.conf.j2
- dest: "{{ config_file_dir }}/virt.conf"
- when: libvirt_result|succeeded and rdt_file.stat.exists == False
+ set_fact:
+ collectd_plugins: "{{ collectd_plugins | union(['virt']) | unique }}"
+ when: libvirt_result is succeeded and not (rdt_enabled | default(false))
tags:
- virt
diff --git a/docker/ansible/roles/config_files/tasks/vswitch.yml b/docker/ansible/roles/config_files/tasks/vswitch.yml
deleted file mode 100644
index baffc4bd..00000000
--- a/docker/ansible/roles/config_files/tasks/vswitch.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-
-- name: Check if vswitchd is running
- shell: ps -ef | grep vswitchd | grep -v grep > /dev/null
- register: vswitchd_running
- ignore_errors: True
- tags:
- - ovs_stats
- - ovs_events
-
-- name: Check if db.sock exists
- stat:
- path: /var/run/openvswitch/db.sock
- register: dbsock_exists
- ignore_errors: True
- tags:
- - ovs_stats
- - ovs_events
-
-- name: enable ovs_stats plugin
- template:
- src: "ovs_stats.conf.j2"
- dest: "{{ config_file_dir }}/ovs_stats.conf"
- when: vswitchd_running|succeeded and dbsock_exists|succeeded
- tags:
- - ovs_stats
-
-- name: enable ovs_events plugin
- template:
- src: "ovs_events.conf.j2"
- dest: "{{ config_file_dir }}/ovs_events.conf"
- when: vswitchd_running|succeeded and dbsock_exists|succeeded
- tags:
- - ovs_events
-
-
diff --git a/docker/ansible/roles/config_files/templates/default_read_plugins.conf.j2 b/docker/ansible/roles/config_files/templates/default_read_plugins.conf.j2
deleted file mode 100644
index 48beb0be..00000000
--- a/docker/ansible/roles/config_files/templates/default_read_plugins.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Hostname "{{ inventory_hostname }}"
-Interval "{{ interval_value }}"
-
-
-#LoadPlugin contextswitch
-#LoadPlugin cpu
-#LoadPlugin cpufreq
-#LoadPlugin df
-#LoadPlugin disk
-#LoadPlugin ethstat
-#LoadPlugin ipc
-#LoadPlugin irq
-#LoadPlugin load
-#LoadPlugin memory
-#LoadPlugin numa
-#LoadPlugin processes
-#LoadPlugin swap
-#LoadPlugin turbostat
-#LoadPlugin uptime
-
diff --git a/docker/ansible/roles/config_files/templates/dpdkevents.conf.j2 b/docker/ansible/roles/config_files/templates/dpdkevents.conf.j2
deleted file mode 100644
index 60bdb3e2..00000000
--- a/docker/ansible/roles/config_files/templates/dpdkevents.conf.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<LoadPlugin dpdkevents>
- Interval 1
-</LoadPlugin>
-
-<Plugin "dpdkevents">
-# <EAL>
-# Coremask "0x1"
-# MemoryChannels "4"
-# FilePrefix "rte"
-# </EAL>
- <Event "link_status">
- SendEventsOnUpdate false
- EnabledPortMask 0xffff
- SendNotification true
- </Event>
-# <Event "keep_alive">
-# SendEventsOnUpdate false
-# LCoreMask "0xf"
-# KeepAliveShmName "/dpdk_keepalive_shm_name"
-# SendNotification true
-# </Event>
-</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/dpdkstat.conf.j2 b/docker/ansible/roles/config_files/templates/dpdkstat.conf.j2
deleted file mode 100644
index 738fb4d0..00000000
--- a/docker/ansible/roles/config_files/templates/dpdkstat.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin dpdkstat
-
-<Plugin dpdkstat>
-# Coremask "0xf"
-# ProcessType "secondary"
-# FilePrefix "rte"
- EnabledPortMask 0xffff
-# PortName "interface1"
-# PortName "interface2"
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/templates/exec.conf.j2 b/docker/ansible/roles/config_files/templates/exec.conf.j2
deleted file mode 100644
index 2467606b..00000000
--- a/docker/ansible/roles/config_files/templates/exec.conf.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin exec
-
-<Plugin exec>
- # For OVS PMD stats plugin
- Exec "collectd_exec" "/src/barometer/src/collectd/collectd_sample_configs/ovs_pmd_stats.sh"
- NotificationExec "collectd_exec" "/src/barometer/src/collectd/collectd_sample_configs/write_notification.sh"
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/templates/hugepages.conf.j2 b/docker/ansible/roles/config_files/templates/hugepages.conf.j2
deleted file mode 100644
index b391a398..00000000
--- a/docker/ansible/roles/config_files/templates/hugepages.conf.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin hugepages
-
-<Plugin hugepages>
- ReportPerNodeHP true
- ReportRootHP true
- ValuesPages true
- ValuesBytes false
- ValuesPercentage false
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/templates/intel_pmu.conf.j2 b/docker/ansible/roles/config_files/templates/intel_pmu.conf.j2
deleted file mode 100644
index 564898da..00000000
--- a/docker/ansible/roles/config_files/templates/intel_pmu.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#Copyright 2018-2019 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin intel_pmu
-
-<Plugin intel_pmu>
- ReportHardwareCacheEvents true
- ReportKernelPMUEvents true
- ReportSoftwareEvents true
-# EventList "/var/cache/pmu/GenuineIntel-6-2D-core.json"
-# HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD"
- Cores "[0-4]"
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/templates/ipmi.conf.j2 b/docker/ansible/roles/config_files/templates/ipmi.conf.j2
deleted file mode 100644
index 54554d23..00000000
--- a/docker/ansible/roles/config_files/templates/ipmi.conf.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin ipmi
-
-#<Plugin ipmi>
-# <Instance "local">
-# Sensor "some_sensor"
-# Sensor "another_one"
-# IgnoreSelected false
-# NotifySensorAdd false
-# NotifySensorRemove true
-# NotifySensorNotPresent false
-# NotifyIPMIConnectionState false
-# SELEnabled false
-# SELClearEvent false
-# </Instance>
-# <Instance "remote">
-# Host "server.example.com"
-# Address "1.2.3.4"
-# Username "user"
-# Password "secret"
-# #AuthType "md5"
-# Sensor "some_sensor"
-# Sensor "another_one"
-# IgnoreSelected false
-# NotifySensorAdd false
-# NotifySensorRemove true
-# NotifySensorNotPresent false
-# NotifyIPMIConnectionState false
-# SELEnabled false
-# SELClearEvent false
-# </Instance>
-#</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/kafka.conf.j2 b/docker/ansible/roles/config_files/templates/kafka.conf.j2
deleted file mode 100644
index 3e4bc242..00000000
--- a/docker/ansible/roles/config_files/templates/kafka.conf.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin write_kafka
-
-<Plugin write_kafka>
- Property "metadata.broker.list" "{{ kafka_ip_addr }}:{{ kafka_port }}"
- <Topic "{{ kafka_topic }}">
- Format JSON
- </Topic>
-</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/master/snmp_agent.conf.j2 b/docker/ansible/roles/config_files/templates/latest/snmp_agent.conf.j2
index 97a4ce40..1ff8b228 100644
--- a/docker/ansible/roles/config_files/templates/master/snmp_agent.conf.j2
+++ b/docker/ansible/roles/config_files/templates/latest/snmp_agent.conf.j2
@@ -1,16 +1,17 @@
-# Copyright 2019 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
LoadPlugin snmp_agent
diff --git a/docker/ansible/roles/config_files/templates/experimental/logparser.conf.j2 b/docker/ansible/roles/config_files/templates/logparser.conf.j2
index 3802768b..1f1a725b 100644
--- a/docker/ansible/roles/config_files/templates/experimental/logparser.conf.j2
+++ b/docker/ansible/roles/config_files/templates/logparser.conf.j2
@@ -1,16 +1,17 @@
-# Copyright 2019 OPNFV
+# Copyright 2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
LoadPlugin logparser
diff --git a/docker/ansible/roles/config_files/templates/ovs_events.conf.j2 b/docker/ansible/roles/config_files/templates/ovs_events.conf.j2
deleted file mode 100644
index 4fbea5ad..00000000
--- a/docker/ansible/roles/config_files/templates/ovs_events.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<LoadPlugin ovs_events>
- Interval "{{ ovs_events_interval }}"
-</LoadPlugin>
-
-<Plugin ovs_events>
-# Port "{{ ovs_event_port }}"
-# Address "{{ ovs_event_ip_addr }}"
-# Socket "/var/run/openvswitch/db.sock"
-# Interfaces "br0" "veth0"
- SendNotification true
- DispatchValues true
-</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/ovs_stats.conf.j2 b/docker/ansible/roles/config_files/templates/ovs_stats.conf.j2
deleted file mode 100644
index fc26e4b8..00000000
--- a/docker/ansible/roles/config_files/templates/ovs_stats.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<LoadPlugin ovs_stats>
- Interval "{{ ovs_stats_interval }}"
-</LoadPlugin>
-
-<Plugin ovs_stats>
-# Port "{{ ovs_stats_port }}"
-# Address "{{ ovs_stats_ip_addr }}"
-# Socket "/var/run/openvswitch/db.sock"
-# Bridges "br0" "br_ext"
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/templates/prometheus.conf.j2 b/docker/ansible/roles/config_files/templates/prometheus.conf.j2
deleted file mode 100644
index bb947867..00000000
--- a/docker/ansible/roles/config_files/templates/prometheus.conf.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin write_prometheus
-
-<Plugin "write_prometheus">
- Port "{{ prometheus_port }}"
-</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/snmp_agent.conf.j2 b/docker/ansible/roles/config_files/templates/snmp_agent.conf.j2
index 406033ed..48bb709f 100644
--- a/docker/ansible/roles/config_files/templates/snmp_agent.conf.j2
+++ b/docker/ansible/roles/config_files/templates/snmp_agent.conf.j2
@@ -13,12 +13,15 @@
# limitations under the License.
LoadPlugin snmp_agent
+
<Plugin snmp_agent>
# Intel PMU MIB
<Table "pmuTable">
IndexOID "INTEL-PMU-MIB::pmuGroupIndex"
<Data "pmuGroupDescr">
- Instance true
+ <IndexKey>
+ Source "PluginInstance"
+ </IndexKey>
Plugin "intel_pmu"
OIDs "INTEL-PMU-MIB::pmuGroupDescr"
</Data>
@@ -280,7 +283,9 @@ LoadPlugin snmp_agent
IndexOID "INTEL-RDT-MIB::rdtGroupIndex"
SizeOID "INTEL-RDT-MIB::rdtGroupNumber"
<Data "rdtGroupDescr">
- Instance true
+ <IndexKey>
+ Source "PluginInstance"
+ </IndexKey>
Plugin "intel_rdt"
OIDs "INTEL-RDT-MIB::rdtGroupDescr"
</Data>
@@ -312,7 +317,9 @@ LoadPlugin snmp_agent
<Table "mcelogTable">
IndexOID "INTEL-MCELOG-MIB::memoryGroupIndex"
<Data "memoryGroupDescr">
- Instance true
+ <IndexKey>
+ Source "PluginInstance"
+ </IndexKey>
Plugin "mcelog"
OIDs "INTEL-MCELOG-MIB::memoryGroupDescr"
</Data>
@@ -344,7 +351,9 @@ LoadPlugin snmp_agent
# Hugepages
<Table "hugepagesTable">
<Data "hugepagesNode">
- Instance true
+ <IndexKey>
+ Source "PluginInstance"
+ </IndexKey>
Plugin "hugepages"
OIDs "INTEL-HUGEPAGES-MIB::hugepagesNode"
</Data>
diff --git a/docker/ansible/roles/config_files/templates/syslog.conf.j2 b/docker/ansible/roles/config_files/templates/syslog.conf.j2
deleted file mode 100644
index f9d77814..00000000
--- a/docker/ansible/roles/config_files/templates/syslog.conf.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin syslog
-
-# Decrease syslog verbosity, to avoid duplicate logging
-<Plugin syslog>
- LogLevel "{{ syslog_log_level }}"
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/templates/virt.conf.j2 b/docker/ansible/roles/config_files/templates/virt.conf.j2
deleted file mode 100644
index f1ba324d..00000000
--- a/docker/ansible/roles/config_files/templates/virt.conf.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-#Copyright 2018 OPNFV and Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LoadPlugin virt
-
-<Plugin virt>
-# Connection "xen:///"
- RefreshInterval 60
-# Domain "name"
-# BlockDevice "name:device"
-# BlockDeviceFormat target
-# BlockDeviceFormatBasename false
-# InterfaceDevice "name:device"
-# IgnoreSelected false
-# HostnameFormat name
-# InterfaceFormat name
-# PluginInstanceFormat name
-# Instances 1
- ExtraStats "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin"
-</Plugin>
-
diff --git a/docker/ansible/roles/config_files/vars/main.yml b/docker/ansible/roles/config_files/vars/main.yml
index f12492a3..02fd7fb9 100644
--- a/docker/ansible/roles/config_files/vars/main.yml
+++ b/docker/ansible/roles/config_files/vars/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,51 +12,56 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
+collectd_plugins: []
+collectd_interval: "{{ interval_value }}"
+collectd_hostname: "{{ inventory_hostname }}"
-#directory paths
-config_file_dir: "/opt/collectd/etc/collectd.conf.d/"
-csv_log_dir: "/var/lib/collectd/csv"
-logfile_dir: "/var/log/"
+collectd_plugin_capabilities_port: "9564"
-#global default interval
-interval_value: 10
+collectd_plugin_cpu_valuespercentage: False
+collectd_plugin_cpu_reportbystate: True
-#influx network port
-influx_network_port: 25826
+collectd_plugin_csv_datadir: "{{ csv_log_dir }}"
+collectd_plugin_csv_storerates: False
-#network plugin vars
-#network_port: 25826
-#network_ip_addr: localhost
+collectd_plugin_dpdk_telemetry_client_socket_path: "/var/run/.client"
+collectd_plugin_dpdk_telemetry_dpdk_socket_path: "/var/run/dpdk/rte/telemetry"
-#prometheus plugin vars
-prometheus_port: 9103
+collectd_plugin_exec_exec:
+ - user: "collectd_exec"
+ exec: "/src/barometer/src/collectd/collectd_sample_configs/ovs_pmd_stats.sh"
+collectd_plugin_exec_notification:
+ - user: "collectd_exec"
+ notification_exec: "/src/barometer/src/collectd/collectd_sample_configs/write_notification.sh"
-#ovs event and stats plugin vars
-ovs_event_interval: 1
-ovs_event_ip_addr: localhost
-ovs_event_port: 6640
-ovs_stats_interval: 1
-ovs_stats_ip_addr: localhost
-ovs_stats_port: 6640
+collectd_plugin_hugepages_report_per_node_hp: True
+collectd_plugin_hugepages_report_root_hp: True
+collectd_plugin_hugepages_values_pages: True
+collectd_plugin_hugepages_values_bytes: False
+collectd_plugin_hugepages_values_percentage: False
-#rdt plugin vars
-rdt_interval: 1
+collectd_plugin_intel_pmu_report_hardware_cache_events: True
+collectd_plugin_intel_pmu_report_kernel_pmu_events: True
+collectd_plugin_intel_pmu_report_software_events: True
+collectd_plugin_intel_pmu_cores: ["0-4"]
-#kafka plugin vars
-kafka_ip_addr: localhost
-kafka_port: 9092
-kafka_topic: collectd
+collectd_plugin_intel_rdt_cores: [""]
+collectd_plugin_intel_rdt_interval: "{{ rdt_interval }}"
-#logfile plugin vars
-logfile_log_level: info
+collectd_plugin_logfile_loglevel: "{{ logfile_log_level }}"
+collectd_plugin_logfile_file: "{{ logfile_dir }}/collectd.log"
+collectd_plugin_logfile_timestamp: True
-#syslog plugin vars
-syslog_log_level: info
+collectd_plugin_ovs_events_dispatch_values: True
+collectd_plugin_ovs_events_send_notification: True
-#rrd plugin vars
-rrdtool_db_dir: "/var/lib/collectd/rrd"
-cache_flush: 120
-write_per_sec: 50
+collectd_plugin_virt_extra_stats: "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin"
+collectd_plugin_virt_refresh_interval: 60
-#additional configuration files path
-additional_configs_path: ""
+collectd_plugin_write_kafka_hosts:
+ - "{{ kafka_ip_addr }}:{{ kafka_port }}"
+collectd_plugin_write_kafka_topics: '{{ { kafka_topic: { "format": "JSON" }} }}'
+
+collectd_plugin_write_prometheus_port: "{{ prometheus_port }}"
+
+collectd_plugin_syslog_loglevel: "{{ syslog_log_level }}"
diff --git a/docker/ansible/roles/install_docker/tasks/fedora.yml b/docker/ansible/roles/install_docker/tasks/fedora.yml
new file mode 100644
index 00000000..f2a4f403
--- /dev/null
+++ b/docker/ansible/roles/install_docker/tasks/fedora.yml
@@ -0,0 +1,47 @@
+#Copyright 2019 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Set Python interpreter
+ set_fact: ansible_python_interpreter=/usr/bin/python3
+
+- name: install dependencies for docker
+ package:
+ name: "{{ packages }}"
+ state: present
+ vars:
+ packages:
+ - python3-dnf
+ - python3
+ - python3-libselinux
+
+- name: set up docker repository
+ command: "dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo && dnf makecache"
+
+- name: install docker-py
+ package:
+ name: python3-docker
+ state: present
+
+- name: install docker
+ package:
+ name: docker-ce
+ state: present
+
+- name: start docker daemon
+ systemd:
+ name: docker
+ daemon_reload: true
+ state: started
+ enabled: true
diff --git a/docker/ansible/roles/install_docker/tasks/main.yml b/docker/ansible/roles/install_docker/tasks/main.yml
index 7e015bbc..ba356563 100644
--- a/docker/ansible/roles/install_docker/tasks/main.yml
+++ b/docker/ansible/roles/install_docker/tasks/main.yml
@@ -24,6 +24,12 @@
import_tasks: ubuntu.yml
when: ansible_distribution == "Ubuntu"
+- name: install docker for fedora
+ tags:
+ - install_docker
+ import_tasks: fedora.yml
+ when: ansible_distribution == "Fedora"
+
- name: install docker for centos
tags:
- install_docker
diff --git a/docker/ansible/roles/install_docker/tasks/ubuntu.yml b/docker/ansible/roles/install_docker/tasks/ubuntu.yml
index 44156873..f3aa81b1 100644
--- a/docker/ansible/roles/install_docker/tasks/ubuntu.yml
+++ b/docker/ansible/roles/install_docker/tasks/ubuntu.yml
@@ -13,20 +13,23 @@
# limitations under the License.
---
+- name: set Python interpreter
+ set_fact: ansible_python_interpreter=/usr/bin/python3
+
- name: update package manager cache
tags:
- cache_update
package:
update_cache: yes
-- name: install python
+- name: install python3
package:
- name: python
+ name: python3
state: present
-- name: install python-pip
+- name: install python3-pip
package:
- name: python-pip
+ name: python3-pip
state: present
- name: install docker-py
diff --git a/docker/ansible/roles/run_collectd/tasks/main.yml b/docker/ansible/roles/run_collectd/tasks/main.yml
index 744c7a77..bf5aabf5 100644
--- a/docker/ansible/roles/run_collectd/tasks/main.yml
+++ b/docker/ansible/roles/run_collectd/tasks/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2018-2019 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
- name: remove bar-collectd container
docker_container:
- name: bar-collectd
+ name: "{{ collectd_container_name }}"
state: absent
tags:
- rm_containers
@@ -31,16 +31,31 @@
- rm_collectd_image
when: rm_images|default(false)|bool == true
-- name: launch collectd container
- docker_container:
- name: bar-collectd
- image: "{{ collectd_image_name }}"
- volumes:
- - /opt/collectd/etc/collectd.conf.d/:/opt/collectd/etc/collectd.conf.d
+- name: check if /sys/fs/resctrl exist
+ stat:
+ path: /sys/fs/resctrl
+ register: resctrl_dir
+
+- name: set list of volumes to bind
+ set_fact:
+ volumes_list:
+
+ - /opt/collectd/etc/:/opt/collectd/etc/
- /var/run:/var/run
- /tmp:/tmp
- /var/lib/collectd:/var/lib/collectd
- command: "/run_collectd.sh"
+
+- name: add resctrl to container volumes
+ set_fact:
+ volumes_list: "{{ volumes_list + [ '/sys/fs/resctrl:/sys/fs/resctrl' ] }}"
+ when: resctrl_dir.stat.exists
+
+- name: launch collectd container
+ docker_container:
+ name: "{{ collectd_container_name }}"
+ image: "{{ collectd_image_name }}"
+ volumes: "{{ volumes_list }}"
+ entrypoint: "/run_collectd.sh"
detach: yes
state: started
restart: yes
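For illustration only, the container launch above is roughly equivalent to the manual command below; the image and container names are the role defaults, and the resctrl volume is added only when /sys/fs/resctrl exists on the host:

    docker run -d --name bar-collectd \
        -v /opt/collectd/etc/:/opt/collectd/etc/ \
        -v /var/run:/var/run \
        -v /tmp:/tmp \
        -v /var/lib/collectd:/var/lib/collectd \
        -v /sys/fs/resctrl:/sys/fs/resctrl \
        --entrypoint /run_collectd.sh \
        anuket/barometer-collectd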
diff --git a/docker/ansible/roles/run_collectd/vars/main.yml b/docker/ansible/roles/run_collectd/vars/main.yml
index cebd0f47..26007ecf 100644
--- a/docker/ansible/roles/run_collectd/vars/main.yml
+++ b/docker/ansible/roles/run_collectd/vars/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2019 OPNFV and Intel Corporation
+# Copyright 2019-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,9 +13,11 @@
# limitations under the License.
---
+collectd_container_name: "bar-collectd"
default_flavor: "{{ flavor|default('stable')|string }}"
flavor_image_name: "{{
- 'barometer-collectd-master' if (default_flavor == 'master') else
- 'barometer-collectd-experimental' if (default_flavor == 'experimental')
+ 'barometer-collectd-latest' if (default_flavor == 'master' or default_flavor == 'latest') else
+ 'barometer-collectd-experimental' if (default_flavor == 'experimental') else
+ 'barometer-collectd-6' if (default_flavor == 'collectd-6')
else 'barometer-collectd' }}"
-collectd_image_name: "{{ 'opnfv/' + flavor_image_name }}"
+collectd_image_name: "{{ 'anuket/' + flavor_image_name }}"
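As a usage sketch (playbook and inventory names are assumptions), the image variant now follows the flavor variable:

    # default: anuket/barometer-collectd (stable)
    ansible-playbook -i default.inv <playbook>.yml
    # collectd 6 reference build: anuket/barometer-collectd-6
    ansible-playbook -i default.inv <playbook>.yml -e flavor=collectd-6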
diff --git a/docker/ansible/roles/run_grafana/tasks/main.yml b/docker/ansible/roles/run_grafana/tasks/main.yml
index 0ec307fa..d4a1638c 100644
--- a/docker/ansible/roles/run_grafana/tasks/main.yml
+++ b/docker/ansible/roles/run_grafana/tasks/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@
- name: Remove barometer-grafana image
docker_image:
state: absent
- name: opnfv/barometer-grafana
+ name: anuket/barometer-grafana
tags:
- rm_images
- rm_grafana_image
@@ -40,7 +40,7 @@
- name: launch barometer-grafana container
docker_container:
name: bar-grafana
- image: opnfv/barometer-grafana
+ image: anuket/barometer-grafana
volumes:
- /var/lib/grafana:/var/lib/grafana
ports:
diff --git a/docker/ansible/roles/run_influxdb/tasks/main.yml b/docker/ansible/roles/run_influxdb/tasks/main.yml
index 53187fae..02eeb788 100644
--- a/docker/ansible/roles/run_influxdb/tasks/main.yml
+++ b/docker/ansible/roles/run_influxdb/tasks/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,9 +22,9 @@
- rm_influxdb_cont
when: rm_containers|default(false)|bool == true
-- name: Remove opnfv/barometer-influxdb image
+- name: Remove barometer-influxdb image
docker_image:
- name: opnfv/barometer-influxdb
+ name: anuket/barometer-influxdb
state: absent
tags:
- rm_images
@@ -34,7 +34,7 @@
- name: launch barometer-influxdb container
docker_container:
name: bar-influxdb
- image: opnfv/barometer-influxdb
+ image: anuket/barometer-influxdb
volumes:
- /var/lib/influxdb:/var/lib/influxdb
exposed:
diff --git a/docker/ansible/roles/run_kafka/tasks/main.yml b/docker/ansible/roles/run_kafka/tasks/main.yml
index 34d1f910..f30acd89 100644
--- a/docker/ansible/roles/run_kafka/tasks/main.yml
+++ b/docker/ansible/roles/run_kafka/tasks/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,19 +22,19 @@
- remove_bar-kafka
when: rm_containers|default(false)|bool == true
-- name: Remove opnfv/barometer-kafka image
+- name: Remove anuket/barometer-kafka image
docker_image:
- name: opnfv/barometer-kafka
+ name: anuket/barometer-kafka
state: absent
tags:
- remove_images
- remove_kafka_image
when: rm_images|default(false)|bool == true
-- name: launch opnfv/barometer-kafka container
+- name: launch anuket/barometer-kafka container
docker_container:
name: bar-kafka
- image: opnfv/barometer-kafka
+ image: anuket/barometer-kafka
env:
zookeeper_node: "{{ zookeeper_hostname }}"
broker_id: "{{ broker_id }}"
diff --git a/docker/ansible/roles/run_kafka/vars/main.yml b/docker/ansible/roles/run_kafka/vars/main.yml
index 1fc5b528..b3091d40 100644
--- a/docker/ansible/roles/run_kafka/vars/main.yml
+++ b/docker/ansible/roles/run_kafka/vars/main.yml
@@ -1,16 +1,19 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
---
zookeeper_hostname: "{{ (groups['zookeeper_hosts']|default({}))[0] | default('localhost') }}"
diff --git a/docker/ansible/roles/run_prometheus/templates/prometheus.yml b/docker/ansible/roles/run_prometheus/templates/prometheus.yml
index 7c6afa8a..86ee6e0a 100644
--- a/docker/ansible/roles/run_prometheus/templates/prometheus.yml
+++ b/docker/ansible/roles/run_prometheus/templates/prometheus.yml
@@ -1,3 +1,18 @@
+# Copyright 2018-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
global:
scrape_timeout: "{{scrape_timeout}}"
scrape_interval: "{{scrape_interval}}"
diff --git a/docker/ansible/roles/run_ves/tasks/main.yml b/docker/ansible/roles/run_ves/tasks/main.yml
index aa9c29d3..a203fa98 100644
--- a/docker/ansible/roles/run_ves/tasks/main.yml
+++ b/docker/ansible/roles/run_ves/tasks/main.yml
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2018-21 Anuket, Intel Corporation and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,19 +22,19 @@
- remove_bar-ves
when: rm_containers|default(false)|bool == true
-- name: Remove opnfv/barometer-ves image
+- name: Remove anuket/barometer-ves image
docker_image:
state: absent
- name: opnfv/barometer-ves
+ name: anuket/barometer-ves
tags:
- remove_images
- remove_ves_image
when: rm_images|default(false)|bool == true
-- name: launch opnfv/barometer-ves container
+- name: launch anuket/barometer-ves container
docker_container:
name: bar-ves
- image: opnfv/barometer-ves
+ image: anuket/barometer-ves
detach: yes
state: started
restart: yes
diff --git a/docker/barometer-collectd-experimental/Dockerfile b/docker/barometer-collectd-experimental/Dockerfile
index e505d2e2..f051ef35 100644
--- a/docker/barometer-collectd-experimental/Dockerfile
+++ b/docker/barometer-collectd-experimental/Dockerfile
@@ -1,37 +1,54 @@
-FROM centos:7
-RUN yum update -y && \
- yum install -y which sudo git && \
- yum clean all && \
- git config --global http.sslVerify false
+# Copyright 2017-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM quay.io/centos/centos:stream8 as builder
+
+ARG COLLECTD_FLAVOR=experimental
+ARG COLLECTD_PULL_REQUESTS
+ARG COLLECTD_CONFIG_CMD_ARGS
+ARG COLLECTD_TAG
+ARG WITH_DPDK=n
ENV DOCKER y
-ENV COLLECTD_FLAVOR experimental
-ENV WITH_DPDK y
ENV repos_dir /src
-ENV openstack_plugins /src/barometer/src/collectd-openstack-plugins
-WORKDIR ${repos_dir}
RUN mkdir -p ${repos_dir}/barometer
COPY . ${repos_dir}/barometer
-COPY docker/barometer-collectd-experimental/collectd_apply_pull_request.sh \
- ${repos_dir}/barometer/src/collectd/
-
-# copying additional experimental configs on top of configs for 'master'
-# branch release
-COPY docker/barometer-collectd-experimental/experimental-configs/* \
- ${repos_dir}/barometer/src/collectd/collectd_sample_configs-master/
WORKDIR ${repos_dir}/barometer/systems
-RUN sh ./build_base_machine.sh && \
- useradd -ms /bin/bash collectd_exec && \
- echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+RUN ./build_base_machine.sh && \
+ dnf clean all && rm -rf /var/cache/dnf
-WORKDIR ${openstack_plugins}
-RUN make && \
- pip install --upgrade pip && \
- pip install -r ${openstack_plugins}/collectd-openstack-plugins/requirements.txt
+FROM quay.io/centos/centos:stream8
COPY docker/barometer-collectd-experimental/run_collectd.sh /run_collectd.sh
RUN chmod +x /run_collectd.sh
+RUN useradd -ms /bin/bash collectd_exec && \
+ echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+COPY --from=builder /opt/collectd /opt/collectd
+COPY --from=builder /usr/local/src /usr/local/src
+COPY --from=builder /usr/share/snmp/mibs /usr/share/snmp/mibs
+COPY --from=builder /opt/collectd/share/collectd/types.db /usr/share/collectd/types.db
+
+RUN dnf install -y 'dnf-command(builddep)' centos-release-opstools && \
+ dnf config-manager --set-enabled powertools && \
+ dnf builddep -y https://raw.githubusercontent.com/centos-opstools/collectd/master/collectd.spec && \
+ dnf install -y jansson && \
+ dnf clean all && rm -rf /var/cache/dnf
+
ENTRYPOINT ["/run_collectd.sh"]
diff --git a/docker/barometer-collectd-experimental/collectd_apply_pull_request.sh b/docker/barometer-collectd-experimental/collectd_apply_pull_request.sh
deleted file mode 100755
index dcea33a3..00000000
--- a/docker/barometer-collectd-experimental/collectd_apply_pull_request.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#! /bin/bash
-# Copyright 2019 OPNFV
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This files contains list of pull requests to be applied on top
-# of master branch before building collectd included in docker
-# collectd-experimental container
-
-# Space/newline separated list of pull requests IDs
-# for example:
-# PULL_REQUESTS=(3027 #reimplement delay rate
-# 3028 #other PR
-# )
-
-PULL_REQUESTS=(
- 3045 #logparser
- #insert another PR ID here
- )
-
-# during rebasing/merging git requires email & name to be set
-git config user.email "barometer-experimental@container"
-git config user.name "BarometerExperimental"
-
-for PR_ID in "${PULL_REQUESTS[@]}"
-do
- echo "Applying pull request $PR_ID"
- git pull --rebase origin pull/$PR_ID/head
-done
diff --git a/docker/barometer-collectd-experimental/run_collectd.sh b/docker/barometer-collectd-experimental/run_collectd.sh
index b45711e1..5920248a 100644
--- a/docker/barometer-collectd-experimental/run_collectd.sh
+++ b/docker/barometer-collectd-experimental/run_collectd.sh
@@ -1,15 +1,16 @@
#!/bin/bash
-# Copyright 2019 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
/opt/collectd/sbin/collectd -f
diff --git a/docker/barometer-collectd-latest/Dockerfile b/docker/barometer-collectd-latest/Dockerfile
new file mode 100644
index 00000000..4b6dfefb
--- /dev/null
+++ b/docker/barometer-collectd-latest/Dockerfile
@@ -0,0 +1,36 @@
+# Copyright 2017-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM quay.io/centos/centos:stream8
+
+ARG COLLECTD_FLAVOR=latest
+ARG WITH_DPDK=y
+ENV DOCKER y
+ENV repos_dir /src
+
+RUN mkdir -p ${repos_dir}/barometer
+COPY . ${repos_dir}/barometer
+
+WORKDIR ${repos_dir}/barometer/systems
+RUN sh ./build_base_machine.sh && \
+ dnf clean all && rm -rf /var/cache/dnf && \
+ useradd -ms /bin/bash collectd_exec && \
+ echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+COPY docker/barometer-collectd-latest/run_collectd.sh /run_collectd.sh
+RUN chmod +x /run_collectd.sh
+
+ENTRYPOINT ["/run_collectd.sh"]
diff --git a/docker/barometer-collectd-master/run_collectd.sh b/docker/barometer-collectd-latest/run_collectd.sh
index 001cce1a..5920248a 100644
--- a/docker/barometer-collectd-master/run_collectd.sh
+++ b/docker/barometer-collectd-latest/run_collectd.sh
@@ -1,15 +1,16 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
/opt/collectd/sbin/collectd -f
diff --git a/docker/barometer-collectd-master/Dockerfile b/docker/barometer-collectd-master/Dockerfile
deleted file mode 100644
index da94888d..00000000
--- a/docker/barometer-collectd-master/Dockerfile
+++ /dev/null
@@ -1,30 +0,0 @@
-FROM centos:7
-RUN yum update -y && \
- yum install -y which sudo git && \
- yum clean all && \
- git config --global http.sslVerify false
-
-ENV DOCKER y
-ENV COLLECTD_FLAVOR master
-ENV WITH_DPDK y
-ENV repos_dir /src
-ENV openstack_plugins /src/barometer/src/collectd-openstack-plugins
-
-WORKDIR ${repos_dir}
-RUN mkdir -p ${repos_dir}/barometer
-COPY . ${repos_dir}/barometer
-
-WORKDIR ${repos_dir}/barometer/systems
-RUN sh ./build_base_machine.sh && \
- useradd -ms /bin/bash collectd_exec && \
- echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-WORKDIR ${openstack_plugins}
-RUN make && \
- pip install --upgrade pip && \
- pip install -r ${openstack_plugins}/collectd-openstack-plugins/requirements.txt
-
-COPY docker/barometer-collectd-master/run_collectd.sh /run_collectd.sh
-RUN chmod +x /run_collectd.sh
-
-ENTRYPOINT ["/run_collectd.sh"]
diff --git a/docker/barometer-collectd/Dockerfile b/docker/barometer-collectd/Dockerfile
index 2f2f57e5..db155572 100644
--- a/docker/barometer-collectd/Dockerfile
+++ b/docker/barometer-collectd/Dockerfile
@@ -1,27 +1,40 @@
-FROM centos:7
-RUN yum update -y && \
- yum install -y which sudo git && \
- yum clean all && \
+# Copyright 2017-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM quay.io/centos/centos:stream8
+RUN dnf update -y && \
+ dnf install -y which sudo git-core && \
+ dnf clean all && \
git config --global http.sslVerify false
ENV DOCKER y
ENV WITH_DPDK y
ENV COLLECTD_FLAVOR stable
ENV repos_dir /src
-ENV openstack_plugins /src/barometer/src/collectd-openstack-plugins
WORKDIR ${repos_dir}
RUN git clone https://gerrit.opnfv.org/gerrit/barometer
+
WORKDIR ${repos_dir}/barometer/systems
RUN sh ./build_base_machine.sh && \
+ dnf clean all && rm -rf /var/cache/dnf && \
+ cd ${repos_dir}/ && rm -rf barometer && \
useradd -ms /bin/bash collectd_exec && \
echo "collectd_exec ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-WORKDIR ${openstack_plugins}
-RUN make && \
- pip install --upgrade pip && \
- pip install -r ${openstack_plugins}/collectd-openstack-plugins/requirements.txt
-
COPY run_collectd.sh /run_collectd.sh
RUN chmod +x /run_collectd.sh
diff --git a/docker/barometer-collectd/run_collectd.sh b/docker/barometer-collectd/run_collectd.sh
index 001cce1a..5920248a 100644
--- a/docker/barometer-collectd/run_collectd.sh
+++ b/docker/barometer-collectd/run_collectd.sh
@@ -1,15 +1,16 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
/opt/collectd/sbin/collectd -f
diff --git a/docker/barometer-grafana/Dockerfile b/docker/barometer-grafana/Dockerfile
index 610e8086..714f95ae 100644
--- a/docker/barometer-grafana/Dockerfile
+++ b/docker/barometer-grafana/Dockerfile
@@ -1,3 +1,18 @@
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
FROM grafana/grafana:4.6.3
ENV grafana_folder /opt/grafana
diff --git a/docker/barometer-grafana/dashboards/configure_grafana.sh b/docker/barometer-grafana/dashboards/configure_grafana.sh
index 8ce9689b..9089f832 100755
--- a/docker/barometer-grafana/dashboards/configure_grafana.sh
+++ b/docker/barometer-grafana/dashboards/configure_grafana.sh
@@ -1,16 +1,18 @@
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
+
curl -u admin:admin -X POST -H 'content-type: application/json'\
http://127.0.0.1:3000/api/datasources -d \
'{"name":"collectd","type":"influxdb","url":"http://localhost:8086","access":"proxy","isDefault":true,"database":"collectd","user":"admin","password":"admin","basicAuth":false}'
diff --git a/docker/barometer-grafana/dashboards/cpu_usage_dashboard.json b/docker/barometer-grafana/dashboards/cpu_usage_dashboard.json
index 0b8ab73e..bab2742d 100644
--- a/docker/barometer-grafana/dashboards/cpu_usage_dashboard.json
+++ b/docker/barometer-grafana/dashboards/cpu_usage_dashboard.json
@@ -582,7 +582,7 @@
"measurement": "cpu_value",
"orderByTime": "ASC",
"policy": "default",
- "query": "select derivative(mean(value),1s) AS idle from \"cpu_value\" WHERE \"type\" = 'cpu' AND \"host\" =~ /^$host$/ AND $timeFilter GROUP BY time($interval), instance, type_instance fill(null)",
+ "query": "select derivative(mean(value),1s) AS idle from \"cpu_value\" WHERE \"host\" =~ /^$host$/ AND $timeFilter GROUP BY time($interval), instance, type_instance fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -710,7 +710,7 @@
"measurement": "cpu_value",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"value\") FROM \"cpu_value\" WHERE (\"host\" =~ /^$host$/ AND \"type\" = 'cpu' AND \"type_instance\" != 'idle') AND $timeFilter GROUP BY time($__interval), \"instance\", \"type_instance\" fill(none)",
+ "query": "SELECT mean(\"value\") FROM \"cpu_value\" WHERE (\"host\" =~ /^$host$/ AND \"type_instance\" != 'idle') AND $timeFilter GROUP BY time($__interval), \"instance\", \"type_instance\" fill(none)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
diff --git a/docker/barometer-influxdb/Dockerfile b/docker/barometer-influxdb/Dockerfile
index 2989f631..5a640c2b 100644
--- a/docker/barometer-influxdb/Dockerfile
+++ b/docker/barometer-influxdb/Dockerfile
@@ -1,4 +1,19 @@
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
FROM influxdb:1.3.7
-RUN wget https://raw.githubusercontent.com/collectd/collectd/collectd-5.8/src/types.db -O /types.db
+RUN wget https://raw.githubusercontent.com/collectd/collectd/collectd-5.11/src/types.db -O /types.db
COPY ./influxdb.conf /etc/influxdb/influxdb.conf
diff --git a/docker/barometer-kafka/Dockerfile b/docker/barometer-kafka/Dockerfile
index 3e2402e4..828d5ae4 100644
--- a/docker/barometer-kafka/Dockerfile
+++ b/docker/barometer-kafka/Dockerfile
@@ -1,3 +1,18 @@
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
FROM centos:7
RUN yum update -y && yum install -y epel-release \
wget \
diff --git a/docker/barometer-kafka/start_kafka.sh b/docker/barometer-kafka/start_kafka.sh
index abde2d13..1edbccea 100755
--- a/docker/barometer-kafka/start_kafka.sh
+++ b/docker/barometer-kafka/start_kafka.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
if [ -n "$broker_id" ]
then
diff --git a/docker/barometer-snmp/Dockerfile b/docker/barometer-snmp/Dockerfile
index b6cf94bd..dfc02d25 100644
--- a/docker/barometer-snmp/Dockerfile
+++ b/docker/barometer-snmp/Dockerfile
@@ -1,3 +1,18 @@
+# Copyright 2018-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
FROM polinux/snmpd
COPY ./snmpd.conf /etc/snmpd/snmpd.conf
diff --git a/docker/barometer-snmp/snmpd.conf b/docker/barometer-snmp/snmpd.conf
index 61ec1a38..54f65fc5 100644
--- a/docker/barometer-snmp/snmpd.conf
+++ b/docker/barometer-snmp/snmpd.conf
@@ -1,16 +1,18 @@
-# Copyright 2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
+
view systemview included .1
access notConfigGroup "" any noauth exact systemview none none
master agentx
diff --git a/docker/barometer-tests/Dockerfile b/docker/barometer-tests/Dockerfile
new file mode 100644
index 00000000..6b5d3f15
--- /dev/null
+++ b/docker/barometer-tests/Dockerfile
@@ -0,0 +1,18 @@
+FROM opnfv/functest-core:hunter
+
+ARG BRANCH=master
+ARG OPNFV_BRANCH=stable/hunter
+ARG OPENSTACK_TAG=stable/rocky
+
+RUN apk --no-cache add --virtual .build-deps --update \
+ python-dev build-base linux-headers libffi-dev \
+ openssl-dev libjpeg-turbo-dev && \
+ wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
+ wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$OPNFV_BRANCH > upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=baro_tests/d upper-constraints.opnfv.txt && \
+ pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
+ git+https://gerrit.opnfv.org/gerrit/barometer@$BRANCH#egg=baro_tests && \
+ rm -r upper-constraints.txt upper-constraints.opnfv.txt && \
+ apk del .build-deps
+COPY testcases.yaml /usr/lib/python2.7/site-packages/xtesting/ci/testcases.yaml
+CMD ["run_tests", "-t", "all"]
diff --git a/docker/barometer-tests/testcases.yaml b/docker/barometer-tests/testcases.yaml
new file mode 100644
index 00000000..a8c0a4f3
--- /dev/null
+++ b/docker/barometer-tests/testcases.yaml
@@ -0,0 +1,21 @@
+---
+tiers:
+ -
+ name: barometer
+ order: 0
+ description: >-
+ Test suites from the Barometer project
+ testcases:
+ -
+ case_name: barometercollectd
+ project_name: barometer
+ criteria: 100
+ blocking: false
+ description: >-
+ Test suite for the Barometer project. Separate tests verify
+ the proper configuration and basic functionality of all the
+ collectd plugins as described in the Project Release Plan
+ dependencies:
+ - DEPLOY_SCENARIO: 'bar'
+ run:
+ name: barometercollectd
diff --git a/docker/barometer-ves/Dockerfile b/docker/barometer-ves/Dockerfile
index e36ef848..27b3cbd3 100644
--- a/docker/barometer-ves/Dockerfile
+++ b/docker/barometer-ves/Dockerfile
@@ -18,9 +18,9 @@ RUN yum update -y && \
git \
nc
-RUN yum install -y python-pip
-RUN pip install pyyaml \
- kafka-python
+RUN yum install -y python3 python3-pip
+RUN pip3 install pyyaml \
+ kafka-python
ENV VES_DIR /opt/ves
diff --git a/docker/barometer-ves/start_ves_app.sh b/docker/barometer-ves/start_ves_app.sh
index f859bc45..0ac756c8 100644
--- a/docker/barometer-ves/start_ves_app.sh
+++ b/docker/barometer-ves/start_ves_app.sh
@@ -24,4 +24,4 @@ fi
#wait for kafka service to be available
while ! nc $ves_kafka_host 9092 < /dev/null; do sleep 1; done
-python ves_app.py --events-schema="./yaml/$YAML_FILE" --config="./config/ves_app_config.conf"
+python3 ves_app.py --events-schema="./yaml/$YAML_FILE" --config="./config/ves_app_config.conf"
diff --git a/docker/flask_app/Dockerfile b/docker/flask_app/Dockerfile
new file mode 100644
index 00000000..67e6d589
--- /dev/null
+++ b/docker/flask_app/Dockerfile
@@ -0,0 +1,25 @@
+# Copyright 2021 Anuket and others. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM python:3-alpine
+
+EXPOSE 5000
+
+WORKDIR /app
+COPY ./ /app
+
+RUN python -m pip install -r requirements.txt
+
+ENTRYPOINT ["python", "flask_app.py"]
diff --git a/docker/flask_app/README b/docker/flask_app/README
new file mode 100644
index 00000000..03f8f3ff
--- /dev/null
+++ b/docker/flask_app/README
@@ -0,0 +1,45 @@
+To build this run:
+ sudo docker build -t my-flask-app .
+
+To run the app and see collectd metrics:
+
+ sudo docker run -d --net=host my-flask-app
+ OR
+ sudo docker run -d -p 5000:5000 my-flask-app
+
+and configure collectd to use the write_http plugin:
+
+ LoadPlugin write_http
+
+ <Plugin "write_http">
+ <Node "example">
+ URL "http://127.0.0.1:5000"
+ Format Command
+ # Format JSON
+ </Node>
+ </Plugin>
+
+Format Command is used to make the output more readable for humans.
+You can also use JSON.
+
+Later the server will do something more useful.
+To view the metrics that are being sent by collectd, run::
+
+ sudo docker inspect <container_id>
+ #OR
+ sudo docker logs <container_id>
+
+Metrics from collectd-5.x will use PUTVAL
+Metrics from collectd-6.x will use PUTMETRIC
+
+Sample output::
+
+ 127.0.0.1 - - [21/Apr/2021 19:31:49] "POST / HTTP/1.1" 200 -
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-cpu00/gauge-TSC interval=10.000 1619029909.268:2112.02271161789
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-cpu00/frequency-busy interval=10.000 1619029909.268:1613.51555288381
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-cpu00/percent-c1 interval=10.000 1619029909.268:86.2353665532377
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-cpu00/frequency-average interval=10.000 1619029909.268:222.094501460956
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-pkg00/temperature interval=10.000 1619029909.268:53
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-pkg00/temperature-tcc_activation interval=10.000 1619029909.268:100
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/turbostat-cpu04/frequency-average interval=10.000 1619029909.268:206.978572579757
+
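A hedged manual check of the endpoint without collectd (the values are made up): POST a PUTVAL-style line and confirm it is echoed in the container logs.

    curl -X POST --data 'PUTVAL myhost/cpu-0/cpu-idle interval=10.000 1619029909.268:99.5' \
        http://127.0.0.1:5000/
    sudo docker logs <container_id>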
diff --git a/docker/flask_app/flask_app.py b/docker/flask_app/flask_app.py
new file mode 100644
index 00000000..771a91bc
--- /dev/null
+++ b/docker/flask_app/flask_app.py
@@ -0,0 +1,16 @@
+from flask import Flask, request
+import json
+
+app = Flask(__name__)
+
+@app.route('/', methods=['GET', 'POST'])
+def get_data():
+ #print(request.data)
+ #print(type(request.data))
+ print(request.data.decode('utf-8'))
+ #print(json.loads(request.data.decode("utf-8")))
+
+ return 'This is working!'
+
+if __name__=='__main__':
+ app.run(debug=True, host='0.0.0.0')
diff --git a/docker/flask_app/requirements.txt b/docker/flask_app/requirements.txt
new file mode 100644
index 00000000..e3e9a71d
--- /dev/null
+++ b/docker/flask_app/requirements.txt
@@ -0,0 +1 @@
+Flask
diff --git a/docker/ves/Dockerfile b/docker/ves/Dockerfile
index f77f852a..fd04d905 100644
--- a/docker/ves/Dockerfile
+++ b/docker/ves/Dockerfile
@@ -1,3 +1,18 @@
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
FROM centos:7
RUN yum update -y && \
yum install -y epel-release \
diff --git a/docs/conf.py b/docs/conf.py
index 3c4453e7..041afc56 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1 +1,3 @@
from docs_conf.conf import *
+
+extensions = ['reno.sphinxext']
diff --git a/docs/development/requirements/01-intro.rst b/docs/development/requirements/01-intro.rst
index 70abc553..c34961a5 100644
--- a/docs/development/requirements/01-intro.rst
+++ b/docs/development/requirements/01-intro.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
+.. (c) Anuket, Intel Corporation and others.
Problem Statement
------------------
@@ -16,7 +16,7 @@ useful information as possible off the platform so that faults and errors in
the NFVI can be detected promptly and reported to the appropriate fault
management entity.
-The OPNFV platform (NFVI) requires functionality to:
+The Anuket platform (NFVI) requires functionality to:
* Create a low latency, high performance packet processing path (fast path)
through the NFVI that VNFs can take advantage of;
diff --git a/docs/development/requirements/03-dpdk.rst b/docs/development/requirements/03-dpdk.rst
index ad7c8c78..fa960f4e 100644
--- a/docs/development/requirements/03-dpdk.rst
+++ b/docs/development/requirements/03-dpdk.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
+.. (c) Anuket, Intel Corporation and others.
DPDK Enhancements
==================
@@ -167,4 +167,4 @@ Through extending the dpdkstat plugin for collectd with KA functionality, and
integrating the extended plugin with Monasca for high performing, resilient,
and scalable fault detection.
-.. _L2 Forwarding Sample Application (in Real and Virtualized Environments): http://dpdk.org/doc/guides/sample_app_ug/l2_forward_real_virtual.html
+.. _L2 Forwarding Sample Application (in Real and Virtualized Environments): http://doc.dpdk.org/guides/sample_app_ug/l2_forward_real_virtual.html
diff --git a/docs/index.rst b/docs/index.rst
index 8f80b559..0ad7d5d3 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
+.. (c) Anuket, Intel Corporation and others.
=========
Barometer
@@ -35,7 +35,7 @@ support:
* Detecting and reporting violations that can be consumed by VNFs
and higher level management systems (through DPDK Keep Alive).
-With Barometer the scope is extended to monitoring the NFVI. The ability to
+With `Barometer`_ the scope is extended to monitoring the NFVI. The ability to
monitor the Network Function Virtualization Infrastructure (NFVI) where VNFs
are in operation will be a key part of Service Assurance within an NFV
environment, in order to enforce SLAs or to detect violations, faults or
@@ -66,7 +66,6 @@ telemetry such as `collectd`_, and relevant Openstack projects.
:maxdepth: 3
./release/configguide/index.rst
- ./release/scenarios/index.rst
./release/userguide/index.rst
./release/release-notes/index.rst
./development/requirements/index.rst
@@ -77,6 +76,6 @@ Indices
=======
* :ref:`search`
-.. _Barometer: https://wiki.opnfv.org/display/fastpath
-.. _collectd: http://collectd.org/
+.. _Barometer: https://wiki.anuket.io/display/HOME/Barometer
+.. _collectd: https://collectd.org/
diff --git a/docs/release/configguide/featureconfig.rst b/docs/release/configguide/featureconfig.rst
index c264fff4..f9e197b7 100644
--- a/docs/release/configguide/featureconfig.rst
+++ b/docs/release/configguide/featureconfig.rst
@@ -4,51 +4,5 @@
=============================
Barometer Configuration Guide
=============================
-This document provides guidelines on how to install and configure Barometer with Apex and Compass4nfv.
-The deployment script installs and enables a series of collectd plugins on the compute node(s),
-which collect and dispatch specific metrics and events from the platform.
+Information on configuring Barometer is included in the :ref:`feature userguide <feature-userguide>`.
-Pre-configuration activities - Apex
------------------------------------
-Deploying the Barometer components in Apex is done through the deploy-opnfv command by selecting
-a scenario-file which contains the ``barometer: true`` option. These files are located on the
-Jump Host in the ``/etc/opnfv-apex/ folder``. Two scenarios are pre-defined to include Barometer,
-and they are: ``os-nosdn-bar-ha.yaml`` and ``os-nosdn-bar-noha.yaml``.
-
-.. code:: bash
-
- $ cd /etc/opnfv-apex
- $ opnfv-deploy -d os-nosdn-bar-ha.yaml -n network_settings.yaml -i inventory.yaml –- debug
-
-Pre-configuration activities - Compass4nfv
-------------------------------------------
-Deploying the Barometer components in Compass4nfv is done by running the deploy.sh script after
-exporting a scenario-file which contains the ``barometer: true`` option. Two scenarios are pre-defined
-to include Barometer, and they are: ``os-nosdn-bar-ha.yaml`` and ``os-nosdn-bar-noha.yaml``. For more
-information, please refer to these useful links:
-https://github.com/opnfv/compass4nfv
-https://wiki.opnfv.org/display/compass4nfv/Compass+101
-https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass
-
-The quickest way to deploy using Compass4nfv is given below.
-
-.. code:: bash
-
- $ export SCENARIO=os-nosdn-bar-ha.yml
- $ curl https://raw.githubusercontent.com/opnfv/compass4nfv/master/quickstart.sh | bash
-
-Hardware configuration
-----------------------
-There's no specific Hardware configuration required. However, the ``intel_rdt`` plugin works
-only on platforms with Intel CPUs.
-
-Feature configuration
----------------------
-All Barometer plugins are automatically deployed on all compute nodes. There is no option to
-selectively install only a subset of plugins. Any custom disabling or configuration must be done
-directly on the compute node(s) after the deployment is completed.
-
-Upgrading the plugins
----------------------
-The Barometer components are built-in in the ISO image, and respectively the RPM/Debian packages.
-There is no simple way to update only the Barometer plugins in an existing deployment.
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
index 57ff2787..ecea3dbf 100644
--- a/docs/release/configguide/index.rst
+++ b/docs/release/configguide/index.rst
@@ -4,12 +4,12 @@
.. http://creativecommons.org/licenses/by/4.0
.. Copyright (c) 2017 Open Platform for NFV Project, Inc. and its contributors
-======================================
-OPNFV Barometer configuration Guide
-======================================
+====================================
+Anuket Barometer configuration Guide
+====================================
.. toctree::
- :maxdepth: 3
+ :maxdepth: 3
featureconfig
postinstall
diff --git a/docs/release/configguide/postinstall.rst b/docs/release/configguide/postinstall.rst
index 8f23eec3..dad56b99 100644
--- a/docs/release/configguide/postinstall.rst
+++ b/docs/release/configguide/postinstall.rst
@@ -1,3 +1,5 @@
+.. _barometer-postinstall:
+
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
@@ -6,8 +8,18 @@ Barometer post installation procedures
======================================
This document describes briefly the methods of validating the Barometer installation.
+.. TODO: Update this to include reference to containers rather than an Openstack deployment.
+
Automated post installation activities
--------------------------------------
+.. This section will include how to run plugin validation tests, when they are created/merged.
+.. This section will also include some troubleshooting and debugging information.
+
+.. note:: This section is outdated and needs to be updated.
+
+.. TODO: Update this section; post-installation/verification shouldn't be in
+ the config guide. It should be in testing.
+
The Barometer test-suite in Functest is called ``barometercollectd`` and is part of the ``Features``
tier. Running these tests is done automatically by the OPNFV deployment pipeline on the supported
scenarios. The testing consists of basic verifications that each plugin is functional per their
@@ -17,124 +29,48 @@ default configurations. Inside the Functest container, the detailed results can
Barometer post configuration procedures
---------------------------------------
The functionality for each plugin (such as enabling/disabling and configuring its capabilities)
-is controlled as described in the User Guide through their individual ``.conf`` file located in
-the ``/etc/collectd/collectd.conf.d/`` folder on the compute node(s). In order for any changes to
-take effect, the collectd service must be stopped and then started again.
+is controlled as described in the :ref:`User Guide <barometer-userguide>` through their individual
+``.conf`` file located in the ``/etc/collectd/collectd.conf.d/`` on the host(s). In order for any
+changes to take effect, the collectd service must be stopped and then started again.
-Platform components validation - Apex
--------------------------------------
-The following steps describe how to perform a simple "manual" testing of the Barometer components:
+Plugin verification
+~~~~~~~~~~~~~~~~~~~
+Once collectd has been installed and deployed, you will see metrics from most plugins
+immediately. However, in some cases, you may want to verify that the configuration is
+correct and that the plugin is functioning as intended (particularly during development,
+or when testing an experimental version). The following sections provide some
+verification steps to make sure the plugins are working as expected.
-On the controller:
+MCElog
+^^^^^^
+On the collectd host, you can induce an event monitored by the plugins; e.g. a corrected memory error:
-1. Get a list of the available metrics:
+.. code:: bash
- .. code::
+ $ git clone https://git.kernel.org/pub/scm/utils/cpu/mce/mce-inject.git
+ $ cd mce-inject
+ $ make
+ $ modprobe mce-inject
- $ openstack metric list
+Modify the test/corrected script to include the following:
-2. Take note of the ID of the metric of interest, and show the measures of this metric:
+.. code:: bash
- .. code::
+ CPU 0 BANK 0
+ STATUS 0xcc00008000010090
+ ADDR 0x0010FFFFFFF
- $ openstack metric measures show <metric_id>
+Inject the error:
-3. Watch the measure list for updates to verify that metrics are being added:
+.. code:: bash
- .. code:: bash
+ $ ./mce-inject < test/corrected
- $ watch –n2 –d openstack metric measures show <metric_id>
-
-More on testing and displaying metrics is shown below.
-
-On the compute:
-
-1. Connect to any compute node and ensure that the collectd service is running. The log file
- ``collectd.log`` should contain no errors and should indicate that each plugin was successfully
- loaded. For example, from the Jump Host:
-
- .. code:: bash
+.. TODO: How to check that the event was propagated to collectd
- $ opnfv-util overcloud compute0
- $ ls /etc/collectd/collectd.conf.d/
- $ systemctl status collectd
- $ vi /opt/stack/collectd.log
+.. _barometer-docker-verification:
- The following plugings should be found loaded:
- aodh, gnocchi, hugepages, intel_rdt, mcelog, ovs_events, ovs_stats, snmp, virt
-
-2. On the compute node, induce an event monitored by the plugins; e.g. a corrected memory error:
-
- .. code:: bash
-
- $ git clone https://git.kernel.org/pub/scm/utils/cpu/mce/mce-inject.git
- $ cd mce-inject
- $ make
- $ modprobe mce-inject
-
- Modify the test/corrected script to include the following:
-
- .. code:: bash
-
- CPU 0 BANK 0
- STATUS 0xcc00008000010090
- ADDR 0x0010FFFFFFF
-
- Inject the error:
-
- .. code:: bash
-
- $ ./mce-inject < test/corrected
-
-3. Connect to the controller and query the monitoring services. Make sure the overcloudrc.v3
- file has been copied to the controller (from the undercloud VM or from the Jump Host) in order
- to be able to authenticate for OpenStack services.
-
- .. code:: bash
-
- $ opnfv-util overcloud controller0
- $ su
- $ source overcloudrc.v3
- $ gnocchi metric list
- $ aodh alarm list
-
- The output for the gnocchi and aodh queries should be similar to the excerpts below:
-
- .. code:: bash
-
- +--------------------------------------+---------------------+------------------------------------------------------------------------------------------------------------+-----------+-------------+
- | id | archive_policy/name | name | unit | resource_id |
- +--------------------------------------+---------------------+------------------------------------------------------------------------------------------------------------+-----------+-------------+
- [...]
- | 0550d7c1-384f-4129-83bc-03321b6ba157 | high | overcloud-novacompute-0.jf.intel.com-hugepages-mm-2048Kb@vmpage_number.free | Pages | None |
- | 0cf9f871-0473-4059-9497-1fea96e5d83a | high | overcloud-novacompute-0.jf.intel.com-hugepages-node0-2048Kb@vmpage_number.free | Pages | None |
- | 0d56472e-99d2-4a64-8652-81b990cd177a | high | overcloud-novacompute-0.jf.intel.com-hugepages-node1-1048576Kb@vmpage_number.used | Pages | None |
- | 0ed71a49-6913-4e57-a475-d30ca2e8c3d2 | high | overcloud-novacompute-0.jf.intel.com-hugepages-mm-1048576Kb@vmpage_number.used | Pages | None |
- | 11c7be53-b2c1-4c0e-bad7-3152d82c6503 | high | overcloud-novacompute-0.jf.intel.com-mcelog- | None | None |
- | | | SOCKET_0_CHANNEL_any_DIMM_any@errors.uncorrected_memory_errors_in_24h | | |
- | 120752d4-385e-4153-aed8-458598a2a0e0 | high | overcloud-novacompute-0.jf.intel.com-cpu-24@cpu.interrupt | jiffies | None |
- | 1213161e-472e-4e1b-9e56-5c6ad1647c69 | high | overcloud-novacompute-0.jf.intel.com-cpu-6@cpu.softirq | jiffies | None |
- [...]
-
- +--------------------------------------+-------+------------------------------------------------------------------+-------+----------+---------+
- | alarm_id | type | name | state | severity | enabled |
- +--------------------------------------+-------+------------------------------------------------------------------+-------+----------+---------+
- | fbd06539-45dd-42c5-a991-5c5dbf679730 | event | gauge.memory_erros(overcloud-novacompute-0.jf.intel.com-mcelog) | ok | moderate | True |
- | d73251a5-1c4e-4f16-bd3d-377dd1e8cdbe | event | gauge.mcelog_status(overcloud-novacompute-0.jf.intel.com-mcelog) | ok | moderate | True |
- [...]
-
-
-Barometer post installation verification for Compass4nfv
---------------------------------------------------------
-
-For Fraser release, Compass4nfv integrated the ``barometer-collectd`` container of Barometer.
-As a result, on the compute node, collectd runs in a Docker container. On the controller node,
-Grafana and InfluxDB are installed and configured.
+Barometer post installation verification on barometer-collectd container
+------------------------------------------------------------------------
The following steps describe how to perform simple "manual" testing of the Barometer components
-after successfully deploying a Barometer scenario using Compass4nfv:
-
-On the compute:
+after :ref:`successfully deploying the barometer-collectd container<barometer-docker-userguide>`:
1. Connect to any compute node and ensure that the collectd container is running.
@@ -144,28 +80,14 @@ On the compute:
You should see the container ``opnfv/barometer-collectd`` running.
-2. Testing using mce-inject is similar to testing done in Apex.
-
-On the controller:
-
-3. Connect to the controller and query the monitoring services. Make sure to log in to the lxc-utility
-container before using the OpenStack CLI. Please refer to this wiki for details:
-https://wiki.opnfv.org/display/compass4nfv/Containerized+Compass#ContainerizedCompass-HowtouseOpenStackCLI
-
- .. code:: bash
-
- root@host1-utility-container-d15da033:~# source ~/openrc
- root@host1-utility-container-d15da033:~# gnocchi metric list
- root@host1-utility-container-d15da033:~# aodh alarm list
-
- The output for the gnocchi and aodh queries should be similar to the excerpts shown in the section above for Apex.
-
-4. Use a web browser to connect to Grafana at ``http://<serverip>:3000/``, using the hostname or
-IP of your Ubuntu server and port 3000. Log in with admin/admin. You will see ``collectd``
-InfluxDB database in the ``Data Sources``. Also, you will notice metrics coming in the several
-dashboards such as ``CPU Usage`` and ``Host Overview``.
+2. Use a web browser to connect to Grafana at ``http://<serverip>:3000/``, using the hostname or
+ IP of your server and port 3000. Log in with admin/admin. You will see ``collectd``
+ InfluxDB database in the ``Data Sources``. Also, you will notice metrics coming in the several
+ dashboards such as ``CPU Usage`` and ``Host Overview``.
For more details on the Barometer containers, Grafana and InfluxDB, please refer to
the following documentation links:
-https://wiki.opnfv.org/display/fastpath/Barometer+Containers#BarometerContainers-barometer-collectdcontainer
-:ref:`<barometer-docker-userguide>`
+
+`Barometer Containers wiki page <https://wiki.opnfv.org/display/fastpath/Barometer+Containers#BarometerContainers-barometer-collectdcontainer>`_
+
+:ref:`Barometer Docker install guide<barometer-docker-userguide>`
diff --git a/docs/release/release-notes/config.yaml b/docs/release/release-notes/config.yaml
new file mode 100644
index 00000000..ae56f82d
--- /dev/null
+++ b/docs/release/release-notes/config.yaml
@@ -0,0 +1,52 @@
+---
+unreleased_version_title: 'Lakelse'
+sections:
+ - [ features, New Features ]
+ - [ testing, Testing Notes ]
+ - [ docs, Documentation Updates ]
+ - [ containers, Container updates ]
+ - [ ansible, Ansible playbook updates ]
+ - [ build, Build script updates ]
+ - [ fixes, Normal Bug Fixes ]
+ - [ deprecations, Deprecations ]
+ - [ other, Other Notes ]
+
+branch_name_prefix: stable/
+#release_tag_re: opnfv-((?:v?[\d.ab]|rc)+)
+prelude_section_name: release_summary
+stop_at_branch_base: False
+collapse_pre_releases: False
+template: |
+ release_summary: >
+ Add a summary of the change here. All sections (including this one) are
+ optional, and if they are not needed, you can completely remove that section.
+ Each section is rendered separately, this mean you should assume each item
+    Each section is rendered separately; this means you should assume each item
+ This means repetition is sometimes necessary, and this is okay.
+ features:
+ - |
+ List new features here
+ testing:
+ - |
+ List new testing notes here
+ docs:
+ - |
+ List documentation updates here
+ containers:
+ - |
+ List container updates here
+ ansible:
+ - |
+ List ansible playbook updates here
+ build:
+ - |
+ List build script updates here
+ fixes:
+ - |
+ List normal bug fixes here
+ deprecations:
+ - |
+ List deprecations here
+ other:
+ - |
+ List other notes here
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index db8221ab..3f676753 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -2,14 +2,17 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
+.. (c) Anuket, Intel Corporation and others.
-==================================================
-OPNFV Barometer Release Notes
-==================================================
+==============================
+Anuket Barometer Release Notes
+==============================
.. toctree::
:maxdepth: 1
- release-notes
+ unreleased
+ lakelse-release-notes
+ kali-release-notes
+ old-release-notes
diff --git a/docs/release/release-notes/kali-release-notes.rst b/docs/release/release-notes/kali-release-notes.rst
new file mode 100644
index 00000000..cac7325b
--- /dev/null
+++ b/docs/release/release-notes/kali-release-notes.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+==================
+Kali Release Notes
+==================
+
+This document provides the release notes for the Kali release of Barometer.
+
+Summary
+-------
+The Kali release is the first since Barometer became part of Anuket, and focused
+on changes that will make testing and integration easier.
+
+Details
+-------
+Testing and build tools were developed and updated to do the following:
+
+* A new reference container was added for the collectd-6.0 version, which is
+ under development and represents a big API change that is not backwards
+ compatible. This reference build should facilitate porting the plugins that
+ were previously developed by the Barometer project.
+ https://jira.anuket.io/browse/BAROMETER-184
+
+* Updated the stable version of collectd to collectd 5.12.
+
+* Removed duplication in the three existing containers (stable, latest and experimental).
+ https://jira.anuket.io/browse/BAROMETER-179
+
+Some work was started but not completed in the Kali release:
+
+* Updating the ansible playbooks for generating configs, so that they will be
+  easier to maintain and extend in the future.
+
+* Additional testing tools for verifying plugin functionality.
+
+References
+----------
+* `Barometer Kali release plan <https://wiki.anuket.io/display/HOME/Barometer+Kali+Release+Planning>`_
+* `Kali Release on Jira <https://jira.anuket.io/projects/BAROMETER/versions/10224>`_
diff --git a/docs/release/release-notes/lakelse-release-notes.rst b/docs/release/release-notes/lakelse-release-notes.rst
new file mode 100644
index 00000000..bf04342e
--- /dev/null
+++ b/docs/release/release-notes/lakelse-release-notes.rst
@@ -0,0 +1,164 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Anuket and others.
+
+=====================
+Lakelse Release Notes
+=====================
+
+.. _Release Notes_lakelse:
+
+Lakelse
+=======
+
+.. _Release Notes_lakelse_Release Summary:
+
+Release Summary
+---------------
+
+.. docs/release/release-notes/notes/lakelse/add_unix_sock-e29efe16156c5c8e.yaml @ None
+
+Added unixsock plugin to one-click install.
+
+
+.. docs/release/release-notes/notes/lakelse/ansible-build-containers-b4a4cc9cb70f83b3.yaml @ None
+
+Add ansible playbook for building the containers locally.
+
+
+.. docs/release/release-notes/notes/lakelse/anuket_containers-21b4206cb26c9975.yaml @ None
+
+Since the anuket dockerhub repository was created and containers are now being pushed there, instructions and build scripts have been updated to reflect this.
+
+
+.. docs/release/release-notes/notes/lakelse/collectd-5-v-6-testing-cc821b32bad2794c.yaml @ None
+
+Testing playbooks were added to compare collectd5 vs collectd6, for the purpose of helping to review new PRs by comparing the generated metrics between versions.
+
+
+.. docs/release/release-notes/notes/lakelse/remove_dpdk_stats_events_plugins-59f366855f6e4261.yaml @ None
+
+Remove dpdkstats and dpdkevents from Barometer.
+
+
+.. docs/release/release-notes/notes/lakelse/update_logparser_config-0db3d2746e6ad582.yaml @ None
+
+Enable the Logparser plugin by default when using one-click install.
+
+
+.. _Release Notes_lakelse_Testing Notes:
+
+Testing Notes
+-------------
+
+.. docs/release/release-notes/notes/lakelse/collectd-5-v-6-testing-cc821b32bad2794c.yaml @ None
+
+- Added a playbook to compare collectd 5 and collectd 6. The playbook uses
+ existing ansible roles to build both collectd 5 and collectd 6 container
+ images, creates a common configuration, then runs the containers and shows
+ the outputs to let the user inspect the metrics and whether they match.
+
+
+.. _Release Notes_lakelse_Documentation Updates:
+
+Documentation Updates
+---------------------
+
+.. docs/release/release-notes/notes/lakelse/anuket_containers-21b4206cb26c9975.yaml @ None
+
+- Docs have been updated to use anuket/ repository in dockerhub.
+ Container build instructions now use anuket/ prefix to tag images.
+
+
+.. _Release Notes_lakelse_Container updates:
+
+Container updates
+-----------------
+
+.. docs/release/release-notes/notes/lakelse/anuket_containers-21b4206cb26c9975.yaml @ None
+
+- Containers are now pulled from anuket/ repository in dockerhub.
+
+.. docs/release/release-notes/notes/lakelse/collectd-6-testing-flask-app-2bb0ca1326775dd8.yaml @ None
+
+- Add a flask app for testing collectd using metrics sent via write_http plugin.
+
+.. docs/release/release-notes/notes/lakelse/update-grafana-9bee82ecfa11f54a.yaml @ None
+
+- Grafana container was updated to support both jiffies and percent for cpu metrics.
+
+
+.. _Release Notes_lakelse_Ansible playbook updates:
+
+Ansible playbook updates
+------------------------
+
+.. docs/release/release-notes/notes/lakelse/add_unix_sock-e29efe16156c5c8e.yaml @ None
+
+- Added `unixsock <https://collectd.org/documentation/manpages/collectd-unixsock.5.shtml>`_
+ plugin to one-click install, which allows the user to interact with collectd using the
+ ``collectdctl`` command in the bar-collectd-* containers.
+ The unixsock plugin is useful for debugging issues in collectd, and can
+ be used to verify that metrics are being collected without having to
+ create CSV files or log into the container.
+
+.. docs/release/release-notes/notes/lakelse/ansible-build-containers-b4a4cc9cb70f83b3.yaml @ None
+
+- Added a playbook and role for building the collectd containers locally.
+ This automates the actions described in the docker install guide. The
+ ``barometer-collectd``, ``barometer-collectd-latest`` and the
+ ``barometer-collectd-experimental`` containers are now easier to build
+ locally. The ``barometer-collectd-6`` and
+ ``barometer-collectd-experimental`` containers can also be built with
+  arbitrary PRs applied, to aid in testing locally.
+
+.. docs/release/release-notes/notes/lakelse/anuket_containers-21b4206cb26c9975.yaml @ None
+
+- Containers are now pulled from anuket/ repository in dockerhub.
+
+.. docs/release/release-notes/notes/lakelse/update_logparser_config-0db3d2746e6ad582.yaml @ None
+
+- The logparser plugin is now rendered for all flavours.
+  The Logparser plugin has been part of collectd since 5.11; however, the ansible playbooks had it marked as experimental and would not deploy it by default.
+
+
+.. _Release Notes_lakelse_Build script updates:
+
+Build script updates
+--------------------
+
+.. docs/release/release-notes/notes/lakelse/update-apply-pr-script-46e6d547d331c5f2.yaml @ None
+
+- Update collectd_apply_pull_request.sh to rebase only if multiple changes are selected. The script will check out the PR branch if only one PR_ID is passed.
+
+
+.. _Release Notes_lakelse_Normal Bug Fixes:
+
+Normal Bug Fixes
+----------------
+
+.. docs/release/release-notes/notes/lakelse/update-grafana-9bee82ecfa11f54a.yaml @ None
+
+- Update the Grafana dashboard to show metrics in both jiffies and percent, depending on what is configured.
+
+
+.. _Release Notes_lakelse_Deprecations:
+
+Deprecations
+------------
+
+.. docs/release/release-notes/notes/lakelse/remove_dpdk_stats_events_plugins-59f366855f6e4261.yaml @ None
+
+- The dpdkstats and dpdkevents plugins were removed from Barometer. These
+  plugins are still available in collectd; however, they will not be deployed by
+  Barometer. It is recommended that the DPDK telemetry plugin be used instead.
+
+
+.. _Release Notes_lakelse_Other Notes:
+
+Other Notes
+-----------
+
+.. docs/release/release-notes/notes/lakelse/add-reno-12eb20e3448b663b.yaml @ None
+
+- Add `reno <https://docs.openstack.org/reno/latest/index.html#>`_ and corresponding tox jobs (compile notes and add new notes) to make compiling release notes easier (see the sketch below).
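As a rough illustration of what those tox jobs wrap, the underlying reno commands look like the sketch below; the note slug is hypothetical, and only the ``--rel-notes-dir`` path follows the layout used in this repository:

.. code:: bash

    # create a new note file under docs/release/release-notes/notes/
    $ reno --rel-notes-dir docs/release/release-notes new my-change-summary

    # render the accumulated notes locally to check the result
    $ reno --rel-notes-dir docs/release/release-notes report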
diff --git a/docs/release/release-notes/notes/.placeholder b/docs/release/release-notes/notes/.placeholder
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/docs/release/release-notes/notes/.placeholder
diff --git a/docs/release/release-notes/notes/add-reno-12eb20e3448b663b.yaml b/docs/release/release-notes/notes/add-reno-12eb20e3448b663b.yaml
new file mode 100644
index 00000000..2456c099
--- /dev/null
+++ b/docs/release/release-notes/notes/add-reno-12eb20e3448b663b.yaml
@@ -0,0 +1,7 @@
+---
+documentation:
+ - |
+ Release notes are now automatically generated and included in the documentation using `reno <https://docs.openstack.org/reno/latest/index.html>`_.
+other:
+ - |
+    Add `reno <https://docs.openstack.org/reno/latest/index.html#>`_ and corresponding tox jobs (compile notes and add new notes) to make compiling release notes easier.
diff --git a/docs/release/release-notes/notes/add_unix_sock-e29efe16156c5c8e.yaml b/docs/release/release-notes/notes/add_unix_sock-e29efe16156c5c8e.yaml
new file mode 100644
index 00000000..7ba83afe
--- /dev/null
+++ b/docs/release/release-notes/notes/add_unix_sock-e29efe16156c5c8e.yaml
@@ -0,0 +1,11 @@
+release_summary: >
+ Added unixsock plugin to one-click install.
+ansible:
+ - |
+ Added `unixsock <https://collectd.org/documentation/manpages/collectd-unixsock.5.shtml>`_
+ plugin to one-click install, which allows the user to interact with collectd using the
+ ``collectdctl`` command in the bar-collectd-* containers.
+ The unixsock plugin is useful for debugging issues in collectd, and can
+ be used to verify that metrics are being collected without having to
+ create CSV files or log into the container.
+
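As an illustration, once the unixsock plugin is loaded, ``collectdctl`` can list and read values over the socket; this is a sketch, and the socket path is an assumption that must match the ``SocketFile`` option configured for the plugin:

.. code:: bash

    # list all value identifiers known to the running collectd instance
    $ collectdctl -s /var/run/collectd-unixsock listval

    # read a single value; the identifier below is illustrative
    $ collectdctl -s /var/run/collectd-unixsock getval <hostname>/cpu-0/percent-user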
diff --git a/docs/release/release-notes/notes/ansible-build-containers-b4a4cc9cb70f83b3.yaml b/docs/release/release-notes/notes/ansible-build-containers-b4a4cc9cb70f83b3.yaml
new file mode 100644
index 00000000..aae4b999
--- /dev/null
+++ b/docs/release/release-notes/notes/ansible-build-containers-b4a4cc9cb70f83b3.yaml
@@ -0,0 +1,11 @@
+release_summary: >
+ Add ansible playbook for building the containers locally.
+ansible:
+ - |
+ Added a playbook and role for building the collectd containers locally.
+ This automates the actions described in the docker install guide. The
+ ``barometer-collectd``, ``barometer-collectd-latest`` and the
+ ``barometer-collectd-experimental`` containers are now easier to build
+ locally. The ``barometer-collectd-6`` and
+ ``barometer-collectd-experimental`` containers can also be built with
+    arbitrary PRs applied, to aid in testing locally.
diff --git a/docs/release/release-notes/notes/anuket_containers-21b4206cb26c9975.yaml b/docs/release/release-notes/notes/anuket_containers-21b4206cb26c9975.yaml
new file mode 100644
index 00000000..75e7e4f0
--- /dev/null
+++ b/docs/release/release-notes/notes/anuket_containers-21b4206cb26c9975.yaml
@@ -0,0 +1,12 @@
+release_summary: >
+  Since the anuket dockerhub repository was created and containers are now being pushed there, instructions and build scripts have been updated to reflect this.
+docs:
+ - |
+ Docs have been updated to use anuket/ repository in dockerhub.
+ Container build instructions now use anuket/ prefix to tag images.
+containers:
+ - |
+ Containers are now pulled from anuket/ repository in dockerhub.
+ansible:
+ - |
+    Containers are now pulled from anuket/ repository in dockerhub.
diff --git a/docs/release/release-notes/notes/collectd-5-v-6-testing-cc821b32bad2794c.yaml b/docs/release/release-notes/notes/collectd-5-v-6-testing-cc821b32bad2794c.yaml
new file mode 100644
index 00000000..20013147
--- /dev/null
+++ b/docs/release/release-notes/notes/collectd-5-v-6-testing-cc821b32bad2794c.yaml
@@ -0,0 +1,10 @@
+release_summary: >
+ Testing playbooks were added to compare collectd5 vs collectd6, for the
+ purpose of helping to review new PRs by comparing the generated metrics
+ between versions.
+testing:
+ - |
+ Added a playbook to compare collectd 5 and collectd 6. The playbook uses
+ existing ansible roles to build both collectd 5 and collectd 6 container
+ images, creates a common configuration, then runs the containers and shows
+ the outputs to let the user inspect the metrics and whether they match.
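A hypothetical invocation of such a comparison playbook is sketched below; the playbook and inventory names are illustrative only and not taken from the repository:

.. code:: bash

    # illustrative only; substitute the actual playbook and inventory names
    $ ansible-playbook -i <inventory> collectd_5_vs_6_compare.yml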
diff --git a/docs/release/release-notes/notes/collectd-6-testing-flask-app-2bb0ca1326775dd8.yaml b/docs/release/release-notes/notes/collectd-6-testing-flask-app-2bb0ca1326775dd8.yaml
new file mode 100644
index 00000000..9c605876
--- /dev/null
+++ b/docs/release/release-notes/notes/collectd-6-testing-flask-app-2bb0ca1326775dd8.yaml
@@ -0,0 +1,3 @@
+containers:
+ - |
+ Add a flask app for testing collectd using metrics sent via write_http plugin.
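For context, collectd reaches such a test app through the ``write_http`` plugin; the snippet below is a sketch, and the URL, node name and port are assumptions rather than the values used by the Barometer test app:

.. code:: bash

    LoadPlugin write_http
    <Plugin write_http>
      <Node "collectd_test_app">
        URL "http://127.0.0.1:5000/collectd"
        Format "JSON"
        StoreRates false
      </Node>
    </Plugin>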
diff --git a/docs/release/release-notes/notes/remove_dpdk_stats_events_plugins-59f366855f6e4261.yaml b/docs/release/release-notes/notes/remove_dpdk_stats_events_plugins-59f366855f6e4261.yaml
new file mode 100644
index 00000000..78ab1c4c
--- /dev/null
+++ b/docs/release/release-notes/notes/remove_dpdk_stats_events_plugins-59f366855f6e4261.yaml
@@ -0,0 +1,8 @@
+---
+release_summary: >
+ Remove dpdkstats and dpdkevents from Barometer.
+deprecations:
+ - |
+    The dpdkstats and dpdkevents plugins were removed from Barometer. These
+    plugins are still available in collectd; however, they will not be deployed by
+    Barometer. It is recommended that the DPDK telemetry plugin be used instead.
diff --git a/docs/release/release-notes/notes/update-apply-pr-script-46e6d547d331c5f2.yaml b/docs/release/release-notes/notes/update-apply-pr-script-46e6d547d331c5f2.yaml
new file mode 100644
index 00000000..de1be994
--- /dev/null
+++ b/docs/release/release-notes/notes/update-apply-pr-script-46e6d547d331c5f2.yaml
@@ -0,0 +1,3 @@
+build:
+ - |
+    Update collectd_apply_pull_request.sh to rebase only if multiple changes are selected. The script will check out the PR branch if only one PR_ID is passed.
diff --git a/docs/release/release-notes/notes/update-grafana-9bee82ecfa11f54a.yaml b/docs/release/release-notes/notes/update-grafana-9bee82ecfa11f54a.yaml
new file mode 100644
index 00000000..95e2cbdb
--- /dev/null
+++ b/docs/release/release-notes/notes/update-grafana-9bee82ecfa11f54a.yaml
@@ -0,0 +1,6 @@
+containers:
+ - |
+ Grafana container was updated to support both jiffies and percent for cpu metrics.
+fixes:
+ - |
+    Update the Grafana dashboard to show metrics in both jiffies and percent, depending on what is configured.
diff --git a/docs/release/release-notes/notes/update_logparser_config-0db3d2746e6ad582.yaml b/docs/release/release-notes/notes/update_logparser_config-0db3d2746e6ad582.yaml
new file mode 100644
index 00000000..e5be3eff
--- /dev/null
+++ b/docs/release/release-notes/notes/update_logparser_config-0db3d2746e6ad582.yaml
@@ -0,0 +1,6 @@
+release_summary: >
+ Enable the Logparser plugin by default when using one-click install.
+ansible:
+ - |
+    The logparser plugin is now rendered for all flavours.
+    The Logparser plugin has been part of collectd since 5.11; however, the ansible playbooks had it marked as experimental and would not deploy it by default.
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/old-release-notes.rst
index 75a2e391..d5c1b7e5 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/old-release-notes.rst
@@ -1,9 +1,9 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-======================================================================
-Barometer Release Notes
-======================================================================
+===================
+Older Release Notes
+===================
This document provides the release notes for Euphrates release of Barometer.
diff --git a/docs/release/release-notes/unreleased.rst b/docs/release/release-notes/unreleased.rst
new file mode 100644
index 00000000..e3b0dccc
--- /dev/null
+++ b/docs/release/release-notes/unreleased.rst
@@ -0,0 +1,10 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Anuket and others.
+
+============================
+Current Series Release Notes
+============================
+
+.. release-notes::
+ :relnotessubdir: docs/release/release-notes/
diff --git a/docs/release/scenarios/index.rst b/docs/release/scenarios/index.rst
deleted file mode 100644
index 7593434a..00000000
--- a/docs/release/scenarios/index.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2017 Intel Corporation and Others
-
-=============================
-OPNFV Barometer Scenarios
-=============================
-.. This document will be used to provide a description of the scenario for an end user.
-.. You should explain the purpose of the scenario, the types of capabilities provided and
-.. the unique components that make up the scenario including how they are used.
-
-.. toctree::
- :maxdepth: 1
-
- ./os-nosdn-bar-ha/scenario.description
- ./os-nosdn-bar-noha/scenario.description
diff --git a/docs/release/scenarios/os-nosdn-bar-ha/index.rst b/docs/release/scenarios/os-nosdn-bar-ha/index.rst
deleted file mode 100644
index d1b18639..00000000
--- a/docs/release/scenarios/os-nosdn-bar-ha/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. _os-nosdn-bar-ha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-========================================
-os-nosdn-bar-ha overview and description
-========================================
-
-.. toctree::
- :numbered:
- :maxdepth: 4
-
- scenario.description.rst
diff --git a/docs/release/scenarios/os-nosdn-bar-ha/scenario.description.rst b/docs/release/scenarios/os-nosdn-bar-ha/scenario.description.rst
deleted file mode 100644
index 3f31ff0d..00000000
--- a/docs/release/scenarios/os-nosdn-bar-ha/scenario.description.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-===============================================
-OPNFV os-nosdn-bar-ha overview and description
-===============================================
-
-This document provides details of the scenario for Euphrates release of Barometer.
-
-.. contents::
- :depth: 3
- :local:
-
-Introduction
----------------
-.. In this section explain the purpose of the scenario and the types of
-.. capabilities provided
-
-This scenario has the features from the Barometer project. Collectd (a telemetry agent) is installed
-on compute nodes so that their statistics, events and alarming services can be relayed to Gnoochi and Aodh.
-These are the first steps in paving the way for Platform (NFVI) Monitoring in OPNFV.
-
-Scenario components and composition
--------------------------------------
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-This scenario deploys the High Availability OPNFV Cloud based on the
-configurations provided in ``os-nosdn-bar-ha.yaml``.
-This yaml file contains configurations and is passed as an
-argument to ``overcloud-deploy-function.sh`` script.
-This scenario deploys multiple nodes: 3 Controllers, 2 Computes.
-
-Collectd is installed on compute nodes and Openstack services runs on the controller nodes.
-
-os-nosdn-bar-ha scenario is successful when all the nodes are accessible, up and running.
-Also, verify if plugins/services are communicating with Gnocchi and Aodh on the controller nodes.
-
-Scenario usage overview
-----------------------------
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-After installation, plugins will be able to read/write the stats on/from the controller node.
-A detailed list of supported plugins along with their sample configuration can be found in the userguide.
-
-Limitations, Issues and Workarounds
----------------------------------------
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-None.
-
-References
------------------
-
-
diff --git a/docs/release/scenarios/os-nosdn-bar-noha/index.rst b/docs/release/scenarios/os-nosdn-bar-noha/index.rst
deleted file mode 100644
index 92851afa..00000000
--- a/docs/release/scenarios/os-nosdn-bar-noha/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. _os-nosdn-bar-noha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-==========================================
-os-nosdn-bar-noha overview and description
-==========================================
-
-.. toctree::
- :numbered:
- :maxdepth: 4
-
- scenario.description.rst
diff --git a/docs/release/scenarios/os-nosdn-bar-noha/scenario.description.rst b/docs/release/scenarios/os-nosdn-bar-noha/scenario.description.rst
deleted file mode 100644
index d6a1184a..00000000
--- a/docs/release/scenarios/os-nosdn-bar-noha/scenario.description.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-=================================================
-OPNFV os-nosdn-bar-noha overview and description
-=================================================
-
-This document provides details of the scenario for Euphrates release of Barometer.
-
-.. contents::
- :depth: 3
- :local:
-
-Introduction
----------------
-.. In this section explain the purpose of the scenario and the types of
-.. capabilities provided
-
-This scenario has the features from the Barometer project. Collectd (a telemetry agent) is installed
-on compute nodes so that their statistics, events and alarming services can be relayed to Gnoochi and Aodh.
-These are the first steps in paving the way for Platform (NFVI) Monitoring in OPNFV.
-
-Scenario components and composition
--------------------------------------
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-This scenario deploys the High Availability OPNFV Cloud based on the
-configurations provided in ``os-nosdn-bar-noha.yaml``.
-This yaml file contains configurations and is passed as an
-argument to ``overcloud-deploy-function.sh`` script.
-This scenario deploys multiple nodes: 1 Controller, 2 Computes.
-
-Collectd is installed on compute nodes and Openstack services runs on the controller node.
-
-os-nosdn-bar-noha scenario is successful when all the nodes are accessible, up and running.
-Also, verify if plugins/services are communicating with Gnocchi and Aodh on the controller nodes.
-
-Scenario usage overview
-----------------------------
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-After installation, plugins will be able to read/write the stats on/from the controller node.
-A detailed list of supported plugins along with their sample configuration can be found in the userguide.
-
-Limitations, Issues and Workarounds
----------------------------------------
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-None.
-
-References
------------------
-
-
diff --git a/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/scenario.description.rst b/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/scenario.description.rst
deleted file mode 100644
index f98a05ab..00000000
--- a/docs/release/scenarios/os-nosdn-kvm_ovs_dpdk_bar-ha/scenario.description.rst
+++ /dev/null
@@ -1,118 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-===================================
-OPNFV os-nosdn-kvm_ovs_dpdk_bar-ha
-===================================
-
-This document provides scenario level details for Danube of Barometer.
-
-.. contents::
- :depth: 3
- :local:
-
-Introduction
----------------
-.. In this section explain the purpose of the scenario and the types of
-.. capabilities provided
-This scenario combines the features from the following three projects in a
-single instantiation of OPNFV:
-
-- KVM4NFV
-- OVS4NFV
-- Barometer
-
-A distinguishing factor for this scenario vs other scenarios that integrate
-Open vSwitch and KVM is that collectd (a telemetry agent) is installed on
-compute nodes so that their statistics and events can be relayed to ceilometer.
-These are the first steps in paving the way for Platform (NFVI) Monitoring in
-OPNFV.
-
-For Fuel this scenario installs the latest DPDK-enabled Open vSwitch component,
-KVM4NFV latest software packages for Linux Kernel and QEMU patches for
-achieving low latency, and the collectd telemetry agent.
-
-Scenario components and composition
--------------------------------------
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-This scenario deploys the High Availability OPNFV Cloud based on the
-configurations provided in ha_nfv-kvm_ovs_bar_heat_ceilometer_scenario.yaml.
-This yaml file contains following configurations and is passed as an
-argument to deploy.py script
-
-* scenario.yaml:This configuration file defines translation between a
- short deployment scenario name(os-nosdn-kvm_ovs_dpdk_bar-ha) and an actual
- deployment scenario configuration file
- (ha_nfv-kvm_nfv-ovs-dpdk_bar_heat_ceilometer_scenario.yaml)
-
-* deployment-scenario-metadata:Contains the configuration metadata like
- title,version,created,comment.
-
-* stack-extensions:Stack extentions are opnfv added value features in form
- of a fuel-plugin.Plugins listed in stack extensions are enabled and
- configured.
-
-* dea-override-config: Used to configure the HA mode,network segmentation
- types and role to node assignments. These configurations overrides
- corresponding keys in the dea_base.yaml and dea_pod_override.yaml.
- These keys are used to deploy multiple nodes(3 controllers,2 computes)
- as mention below.
-
-* **Node 1**: This node has MongoDB and Controller roles. The controller
- node runs the Identity service, Image Service, management portions of
- Compute and Networking, Networking plug-in and the dashboard. The
- Telemetry service which was designed to support billing systems for
- OpenStack cloud resources uses a NoSQL database to store information.
- The database typically runs on the controller node.
-
-* **Node 2**: This node has Controller and Ceph-osd roles. Ceph is a
- massively scalable, open source, distributed storage system. It is
- comprised of an object store, block store and a POSIX-compliant distributed
- file system. Enabling Ceph, configures Nova to store ephemeral volumes in
- RBD, configures Glance to use the Ceph RBD backend to store images,
- configures Cinder to store volumes in Ceph RBD images and configures the
- default number of object replicas in Ceph.
-
-* **Node 3**: This node has Controller role in order to achieve high
- availability.
-
-* **Node 4**: This node has Compute role. The compute node runs the
- hypervisor portion of Compute that operates tenant virtual machines
- instances. By default, Compute uses KVM as the hypervisor. Collectd
- will be installed on this node.
-
-* **Node 5**: This node has compute role.
-
-* dha-override-config:Provides information about the VM definition and
- Network config for virtual deployment. These configurations overrides
- the pod dha definition and points to the controller,compute and
- fuel definition files.
-
-* os-nosdn-kvm_ovs_dpdk_bar-ha scenario is successful when all the 5 Nodes are
- accessible, up and running.
-
-Scenario usage overview
-----------------------------
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-After installation use of the scenario traffic on the private network will
-automatically be processed by the upgraded DPDK datapath.
-
-Limitations, Issues and Workarounds
----------------------------------------
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-References
------------------
-
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
-
diff --git a/docs/release/userguide/collectd.ves.userguide.rst b/docs/release/userguide/collectd.ves.userguide.rst
index f56acbc0..2d3760b8 100644
--- a/docs/release/userguide/collectd.ves.userguide.rst
+++ b/docs/release/userguide/collectd.ves.userguide.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
+.. (c) Anuket, Intel Corporation and others.
.. _barometer-ves-userguide:
==========================
@@ -209,7 +209,7 @@ Clone Barometer repo and start the VES application:
$ git clone https://gerrit.opnfv.org/gerrit/barometer
$ cd barometer/3rd_party/collectd-ves-app/ves_app
- $ nohup python ves_app.py --events-schema=guest.yaml --config=ves_app_config.conf > ves_app.stdout.log &
+ $ nohup python ves_app.py --events-schema=yaml/guest.yaml --config=config/ves_app_config.conf > ves_app.stdout.log &
Modify Collectd configuration file ``collectd.conf`` as following:
@@ -292,7 +292,7 @@ Clone Barometer repo and start the VES application:
$ git clone https://gerrit.opnfv.org/gerrit/barometer
$ cd barometer/3rd_party/collectd-ves-app/ves_app
- $ nohup python ves_app.py --events-schema=host.yaml --config=ves_app_config.conf > ves_app.stdout.log &
+ $ nohup python ves_app.py --events-schema=yaml/host.yaml --config=config/ves_app_config.conf > ves_app.stdout.log &
.. figure:: ves-app-host-mode.png
@@ -368,7 +368,7 @@ REST resources are of the form::
{ServerRoot}/eventListener/v{apiVersion}/{topicName}`
{ServerRoot}/eventListener/v{apiVersion}/eventBatch`
-Within the VES directory (``3rd_party/collectd-ves-app/ves_app``) there is a
+Within the VES directory (``3rd_party/collectd-ves-app/ves_app/config``) there is a
configuration file called ``ves_app_conf.conf``. The configuration options
are described below:
@@ -932,13 +932,13 @@ Limitations
definition and the format is described in the document.
-.. _collectd: http://collectd.org/
+.. _collectd: https://collectd.org/
.. _Kafka: https://kafka.apache.org/
-.. _`VES`: https://wiki.opnfv.org/display/fastpath/VES+plugin+updates
+.. _`VES`: https://wiki.anuket.io/display/HOME/VES+plugin+updates
.. _`VES shema definition`: https://gerrit.onap.org/r/gitweb?p=demo.git;a=tree;f=vnfs/VES5.0/evel/evel-test-collector/docs/att_interface_definition;hb=refs/heads/master
.. _`PyYAML documentation`: https://pyyaml.org/wiki/PyYAMLDocumentation
-.. _`collectd plugin description`: https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
-.. _`collectd data types file`: https://github.com/collectd/collectd/blob/master/src/types.db
-.. _`collectd data types description`: https://github.com/collectd/collectd/blob/master/src/types.db.pod
+.. _`collectd plugin description`: https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
+.. _`collectd data types file`: https://github.com/collectd/collectd/blob/main/src/types.db
+.. _`collectd data types description`: https://github.com/collectd/collectd/blob/main/src/types.db.pod
.. _`python regular expression syntax`: https://docs.python.org/2/library/re.html#regular-expression-syntax
.. _`Kafka collectd plugin`: https://collectd.org/wiki/index.php/Plugin:Write_Kafka
diff --git a/docs/release/userguide/feature.userguide.rst b/docs/release/userguide/feature.userguide.rst
index eeec7a2e..2750bd8d 100644
--- a/docs/release/userguide/feature.userguide.rst
+++ b/docs/release/userguide/feature.userguide.rst
@@ -1,10 +1,12 @@
+.. _feature-userguide:
+
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
+.. (c) Anuket and others
-===================================
-OPNFV Barometer User Guide
-===================================
+===========================
+Anuket Barometer User Guide
+===========================
Barometer collectd plugins description
---------------------------------------
@@ -20,11 +22,15 @@ to support thresholding and notification.
Barometer has enabled the following collectd plugins:
-* *dpdkstat plugin*: A read plugin that retrieves stats from the DPDK extended
- NIC stats API.
+* *dpdk_telemetry plugin*: A read plugin to collect dpdk interface stats and
+  application or global stats from the dpdk telemetry library. The ``dpdk_telemetry``
+  plugin provides both DPDK NIC stats and DPDK application stats.
+  This plugin doesn't deal with dpdk events.
+  The minimum dpdk version required to use this plugin is 19.08.
-* *dpdkevents plugin*: A read plugin that retrieves DPDK link status and DPDK
- forwarding cores liveliness status (DPDK Keep Alive).
+.. note::
+   The ``dpdk_telemetry`` plugin should only be used if your dpdk application
+   doesn't already have more relevant metrics available (e.g. ovs_stats).
* `gnocchi plugin`_: A write plugin that pushes the retrieved stats to
Gnocchi. It's capable of pushing any stats read through collectd to
@@ -62,12 +68,13 @@ Barometer has enabled the following collectd plugins:
from collectd and translates requested values from collectd's internal format
to SNMP format. Supports SNMP: get, getnext and walk requests.
-All the plugins above are available on the collectd master, except for the
-Gnocchi and Aodh plugins as they are Python-based plugins and only C plugins
-are accepted by the collectd community. The Gnocchi and Aodh plugins live in
-the OpenStack repositories.
+All the plugins above are available on the collectd main branch, except for
+the Gnocchi and Aodh plugins as they are Python-based plugins and only C
+plugins are accepted by the collectd community. The Gnocchi and Aodh plugins
+live in the OpenStack repositories.
-Other plugins existing as a pull request into collectd master:
+.. TODO: Update this to reflect merging of these PRs
+Other plugins existing as a pull request into collectd main:
* *Legacy/IPMI*: A read plugin that reports platform thermals, voltages,
fanspeed, current, flow, power etc. Also, the plugin monitors Intelligent
@@ -91,19 +98,16 @@ Read Plugins/application: Intel RDT plugin, virt plugin, Open vSwitch stats plug
Open vSwitch PMD stats application.
Collectd capabilities and usage
-------------------------------------
+-------------------------------
.. Describe the specific capabilities and usage for <XYZ> feature.
.. Provide enough information that a user will be able to operate the feature on a deployed scenario.
-.. note:: Plugins included in the OPNFV E release will be built-in for Apex integration
- and can be configured as shown in the examples below.
-
- The collectd plugins in OPNFV are configured with reasonable defaults, but can
- be overridden.
+The collectd plugins in Anuket are configured with reasonable defaults, but can
+be overridden.
Building all Barometer upstreamed plugins from scratch
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The plugins that have been merged to the collectd master branch can all be
+The plugins that have been merged to the collectd main branch can all be
built and configured through the barometer repository.
.. note::
@@ -136,12 +140,12 @@ Sample configuration files can be found in '/opt/collectd/etc/collectd.conf.d'
By default, `collectd_exec` user is used in the exec.conf provided in the sample
configurations directory under src/collectd in the Barometer repo. These scripts *DO NOT* create this user.
You need to create this user or modify the configuration in the sample configurations directory
- under src/collectd to use another existing non root user before running build_base_machine.sh.
+ under src/collectd to use another existing non root user before running build_base_machine.sh.
.. note::
If you are using any Open vSwitch plugins you need to run:
-.. code:: bash
+ .. code:: bash
$ sudo ovs-vsctl set-manager ptcp:6640
@@ -160,18 +164,18 @@ collectd, check out the `collectd-openstack-plugins GSG`_.
Below is the per plugin installation and configuration guide, if you only want
to install some/particular plugins.
-DPDK plugins
-^^^^^^^^^^^^^
+DPDK telemetry plugin
+^^^^^^^^^^^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
-Dependencies: DPDK (http://dpdk.org/)
+Dependencies: `DPDK <https://www.dpdk.org/>`_ (runtime), libjansson (compile-time)
-.. note:: DPDK statistics plugin requires DPDK version 16.04 or later.
+.. note:: DPDK telemetry plugin requires DPDK version 19.08 or later.
To build and install DPDK to /usr please see:
-https://github.com/collectd/collectd/blob/master/docs/BUILD.dpdkstat.md
+https://github.com/collectd/collectd/blob/main/docs/BUILD.dpdkstat.md
Building and installing collectd:
@@ -184,83 +188,35 @@ Building and installing collectd:
$ make
$ sudo make install
-.. note:: If DPDK was installed in a non standard location you will need to
- specify paths to the header files and libraries using *LIBDPDK_CPPFLAGS* and
- *LIBDPDK_LDFLAGS*. You will also need to add the DPDK library symbols to the
- shared library path using *ldconfig*. Note that this update to the shared
- library path is not persistant (i.e. it will not survive a reboot).
-
-Example of specifying custom paths to DPDK headers and libraries:
-
-.. code:: bash
-
- $ ./configure LIBDPDK_CPPFLAGS="path to DPDK header files" LIBDPDK_LDFLAGS="path to DPDK libraries"
-
This will install collectd to default folder ``/opt/collectd``. The collectd
configuration file (``collectd.conf``) can be found at ``/opt/collectd/etc``.
-To configure the dpdkstats plugin you need to modify the configuration file to
-include:
-
-.. code:: bash
-
- LoadPlugin dpdkstat
- <Plugin dpdkstat>
- Coremask "0xf"
- ProcessType "secondary"
- FilePrefix "rte"
- EnabledPortMask 0xffff
- PortName "interface1"
- PortName "interface2"
- </Plugin>
-
-To configure the dpdkevents plugin you need to modify the configuration file to
+To configure the dpdk_telemetry plugin you need to modify the configuration file to
include:
.. code:: bash
- <LoadPlugin dpdkevents>
- Interval 1
- </LoadPlugin>
-
- <Plugin "dpdkevents">
- <EAL>
- Coremask "0x1"
- MemoryChannels "4"
- FilePrefix "rte"
- </EAL>
- <Event "link_status">
- SendEventsOnUpdate false
- EnabledPortMask 0xffff
- SendNotification true
- </Event>
- <Event "keep_alive">
- SendEventsOnUpdate false
- LCoreMask "0xf"
- KeepAliveShmName "/dpdk_keepalive_shm_name"
- SendNotification true
- </Event>
+ LoadPlugin dpdk_telemetry
+ <Plugin dpdk_telemetry>
+ #ClientSocketPath "/var/run/.client"
+ #DpdkSocketPath "/var/run/dpdk/rte/telemetry"
</Plugin>
-.. note:: Currently, the DPDK library doesn’t support API to de-initialize
- the DPDK resources allocated on the initialization. It means, the collectd
- plugin will not be able to release the allocated DPDK resources
- (locks/memory/pci bindings etc.) correctly on collectd shutdown or reinitialize
- the DPDK library if primary DPDK process is restarted. The only way to release
- those resources is to terminate the process itself. For this reason, the plugin
- forks off a separate collectd process. This child process becomes a secondary
- DPDK process which can be run on specific CPU cores configured by user through
- collectd configuration file (“Coremask” EAL configuration option, the
- hexadecimal bitmask of the cores to run on).
+The plugin uses default values (as shown) for the socket paths; if you use different values,
+uncomment and update ``ClientSocketPath`` and ``DpdkSocketPath`` as required.
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
+
+.. note::
+
+ To gather metrics from a DPDK application, telemetry needs to be enabled.
+ This can be done by setting the ``CONFIG_RTE_LIBRTE_TELEMETRY=y`` config flag.
+ The application then needs to be run with the ``--telemetry`` EAL option, e.g.
+ ::
+ $dpdk/app/testpmd --telemetry -l 2,3,4 -n 4
-.. note:: dpdkstat plugin initialization time depends on read interval. It
- requires 5 read cycles to set up internal buffers and states, during that time
- no statistics are submitted. Also, if plugin is running and the number of DPDK
- ports is increased, internal buffers are resized. That requires 3 read cycles
- and no port statistics are submitted during that time.
+For more information on the ``dpdk_telemetry`` plugin, see the `Anuket wiki <https://wiki.anuket.io/display/HOME/DPDK+Telemetry+Plugin>`_.
The Address-Space Layout Randomization (ASLR) security feature in Linux should be
disabled, in order for the same hugepage memory mappings to be present in all
@@ -283,31 +239,14 @@ To fully enable ASLR:
and only when all implications of this change have been understood.
For more information on multi-process support, please see:
-http://dpdk.org/doc/guides/prog_guide/multi_proc_support.html
-
-**DPDK stats plugin limitations:**
-
-1. The DPDK primary process application should use the same version of DPDK
- that collectd DPDK plugin is using;
-
-2. L2 statistics are only supported;
-
-3. The plugin has been tested on Intel NIC’s only.
+https://doc.dpdk.org/guides/prog_guide/multi_proc_support.html
-**DPDK stats known issues:**
-
-* DPDK port visibility
-
- When network port controlled by Linux is bound to DPDK driver, the port
- will not be available in the OS. It affects the SNMP write plugin as those
- ports will not be present in standard IF-MIB. Thus, additional work is
- required to be done to support DPDK ports and statistics.
Hugepages Plugin
^^^^^^^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
Dependencies: None, but assumes hugepages are configured.
@@ -335,25 +274,18 @@ configuration file (``collectd.conf``) can be found at ``/opt/collectd/etc``.
To configure the hugepages plugin you need to modify the configuration file to
include:
-.. code:: bash
-
- LoadPlugin hugepages
- <Plugin hugepages>
- ReportPerNodeHP true
- ReportRootHP true
- ValuesPages true
- ValuesBytes false
- ValuesPercentage false
- </Plugin>
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/hugepages.conf
+ :start-at: LoadPlugin
+ :language: bash
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
Intel PMU Plugin
^^^^^^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
Dependencies:
@@ -381,7 +313,7 @@ CPU event list json file:
.. code:: bash
- $ wget https://raw.githubusercontent.com/andikleen/pmu-tools/master/event_download.py
+ $ wget https://raw.githubusercontent.com/andikleen/pmu-tools/main/event_download.py
$ python event_download.py
This will download the json files to the location: $HOME/.cache/pmu-events/. If you don't want to
@@ -404,37 +336,18 @@ configuration file (``collectd.conf``) can be found at ``/opt/collectd/etc``.
To configure the PMU plugin you need to modify the configuration file to
include:
-.. code:: bash
-
- <LoadPlugin intel_pmu>
- Interval 1
- </LoadPlugin>
- <Plugin "intel_pmu">
- ReportHardwareCacheEvents true
- ReportKernelPMUEvents true
- ReportSoftwareEvents true
- Cores ""
- </Plugin>
-
-If you want to monitor Intel CPU specific CPU events, make sure to enable the
-additional two options shown below:
-
-.. code:: bash
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/intel_pmu.conf
+ :start-at: LoadPlugin
+ :language: bash
- <Plugin intel_pmu>
- ReportHardwareCacheEvents true
- ReportKernelPMUEvents true
- ReportSoftwareEvents true
- EventList "$HOME/.cache/pmu-events/GenuineIntel-6-2D-core.json"
- HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD"
- Cores ""
- </Plugin>
+If you want to monitor Intel CPU-specific events, make sure to uncomment the
+``EventList`` and ``HardwareEvents`` options above.
.. note::
If you set XDG_CACHE_HOME to anything other than the variable above - you will need to modify
the path for the EventList configuration.
-Use "Cores" option to monitor metrics only for configured cores. If an empty string is provided
+Use ``Cores`` option to monitor metrics only for configured cores. If an empty string is provided
as value for this field default cores configuration is applied - that is all available cores
are monitored separately. To limit monitoring to cores 0-7 set the option as shown below:
@@ -443,7 +356,7 @@ are monitored separately. To limit monitoring to cores 0-7 set the option as sho
Cores "[0-7]"
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
.. note::
@@ -458,18 +371,18 @@ Intel RDT Plugin
^^^^^^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
Dependencies:
- * PQoS/Intel RDT library https://github.com/01org/intel-cmt-cat.git
- * msr kernel module
+* PQoS/Intel RDT library https://github.com/intel/intel-cmt-cat
+* msr kernel module
Building and installing PQoS/Intel RDT library:
.. code:: bash
- $ git clone https://github.com/01org/intel-cmt-cat.git
+ $ git clone https://github.com/intel/intel-cmt-cat
$ cd intel-cmt-cat
$ make
$ make install PREFIX=/usr
@@ -496,17 +409,12 @@ configuration file (``collectd.conf``) can be found at ``/opt/collectd/etc``.
To configure the RDT plugin you need to modify the configuration file to
include:
-.. code:: bash
-
- <LoadPlugin intel_rdt>
- Interval 1
- </LoadPlugin>
- <Plugin "intel_rdt">
- Cores ""
- </Plugin>
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/rdt.conf
+ :start-at: LoadPlugin
+ :language: bash
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
IPMI Plugin
^^^^^^^^^^^^
@@ -514,7 +422,7 @@ Repo: https://github.com/collectd/collectd
Branch: feat_ipmi_events, feat_ipmi_analog
-Dependencies: OpenIPMI library (http://openipmi.sourceforge.net/)
+Dependencies: `OpenIPMI library <https://openipmi.sourceforge.io/>`_
The IPMI plugin is already implemented in the latest collectd and sensors
like temperature, voltage, fanspeed, current are already supported there.
@@ -607,7 +515,7 @@ To configure the IPMI plugin you need to modify the file to include:
dispatch the values to collectd and send SEL notifications.
For more information on the IPMI plugin parameters and SEL feature configuration,
-please see: https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+please see: https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
Extended analog sensors support doesn't require additional configuration. The usual
collectd IPMI documentation can be used:
@@ -618,15 +526,15 @@ collectd IPMI documentation can be used:
IPMI documentation:
- https://www.kernel.org/doc/Documentation/IPMI.txt
-- http://www.intel.com/content/www/us/en/servers/ipmi/ipmi-second-gen-interface-spec-v2-rev1-1.html
+- https://www.intel.com/content/www/us/en/products/docs/servers/ipmi/ipmi-second-gen-interface-spec-v2-rev1-1.html
Mcelog Plugin
^^^^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
-Dependencies: mcelog
+Dependencies: `mcelog <http://mcelog.org/>`_
Start by installing mcelog.
@@ -709,21 +617,12 @@ configuration file (``collectd.conf``) can be found at ``/opt/collectd/etc``.
To configure the mcelog plugin you need to modify the configuration file to
include:
-.. code:: bash
-
- <LoadPlugin mcelog>
- Interval 1
- </LoadPlugin>
- <Plugin mcelog>
- <Memory>
- McelogClientSocket "/var/run/mcelog-client"
- PersistentNotification false
- </Memory>
- #McelogLogfile "/var/log/mcelog"
- </Plugin>
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/mcelog.conf
+ :start-at: LoadPlugin
+ :language: bash
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
Simulating a Machine Check Exception can be done in one of 3 ways:
@@ -819,15 +718,15 @@ To inject corrected memory errors:
* Check the MCE statistic: mcelog --client. Check the mcelog log for injected error details: less /var/log/mcelog.
Open vSwitch Plugins
-^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^
OvS Plugins Repo: https://github.com/collectd/collectd
-OvS Plugins Branch: master
+OvS Plugins Branch: main
OvS Events MIBs: The SNMP OVS interface link status is provided by standard
-IF-MIB (http://www.net-snmp.org/docs/mibs/IF-MIB.txt)
+`IF-MIB <http://www.net-snmp.org/docs/mibs/IF-MIB.txt>`_
-Dependencies: Open vSwitch, Yet Another JSON Library (https://github.com/lloyd/yajl)
+Dependencies: Open vSwitch, `Yet Another JSON Library <https://github.com/lloyd/yajl>`_
On Centos, install the dependencies and Open vSwitch:
@@ -836,7 +735,7 @@ On Centos, install the dependencies and Open vSwitch:
$ sudo yum install yajl-devel
Steps to install Open vSwtich can be found at
-http://docs.openvswitch.org/en/latest/intro/install/fedora/
+https://docs.openvswitch.org/en/latest/intro/install/fedora/
Start the Open vSwitch service:
@@ -856,7 +755,7 @@ Clone and install the collectd ovs plugin:
$ git clone $REPO
$ cd collectd
- $ git checkout master
+ $ git checkout main
$ ./build.sh
$ ./configure --enable-syslog --enable-logfile --enable-debug
$ make
@@ -864,47 +763,33 @@ Clone and install the collectd ovs plugin:
This will install collectd to default folder ``/opt/collectd``. The collectd
configuration file (``collectd.conf``) can be found at ``/opt/collectd/etc``.
-To configure the OVS events plugin you need to modify the configuration file to include:
-
-.. code:: bash
+To configure the OVS events plugin you need to modify the configuration file
+(uncommenting and updating values as appropriate) to include:
- <LoadPlugin ovs_events>
- Interval 1
- </LoadPlugin>
- <Plugin ovs_events>
- Port "6640"
- Address "127.0.0.1"
- Socket "/var/run/openvswitch/db.sock"
- Interfaces "br0" "veth0"
- SendNotification true
- </Plugin>
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/ovs_events.conf
+ :start-at: LoadPlugin
+ :language: bash
To configure the OVS stats plugin you need to modify the configuration file
-to include:
+(uncommenting and updating values as appropriate) to include:
-.. code:: bash
-
- <LoadPlugin ovs_stats>
- Interval 1
- </LoadPlugin>
- <Plugin ovs_stats>
- Port "6640"
- Address "127.0.0.1"
- Socket "/var/run/openvswitch/db.sock"
- Bridges "br0"
- </Plugin>
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/ovs_stats.conf
+ :start-at: LoadPlugin
+ :language: bash
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
OVS PMD stats
-^^^^^^^^^^^^^^
-Repo: https://gerrit.opnfv.org/gerrit/barometer
+^^^^^^^^^^^^^
+Repo: https://gerrit.opnfv.org/gerrit/gitweb?p=barometer.git
Prerequisites:
-1. Open vSwitch dependencies are installed.
-2. Open vSwitch service is running.
-3. Ovsdb-server manager is configured.
+
+#. Open vSwitch dependencies are installed.
+#. Open vSwitch service is running.
+#. Ovsdb-server manager is configured.
+
You can refer `Open vSwitch Plugins`_ section above for each one of them.
OVS PMD stats application is run through the exec plugin.
@@ -923,18 +808,17 @@ to include:
.. note:: Exec plugin configuration has to be changed to use appropriate user before starting collectd service.
-ovs_pmd_stat.sh calls the script for OVS PMD stats application with its argument:
+``ovs_pmd_stat.sh`` calls the script for OVS PMD stats application with its argument:
-.. code:: bash
-
- sudo python /usr/local/src/ovs_pmd_stats.py" "--socket-pid-file"
- "/var/run/openvswitch/ovs-vswitchd.pid"
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/ovs_pmd_stats.sh
+ :start-at: python
+ :language: bash
SNMP Agent Plugin
^^^^^^^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
Dependencies: NET-SNMP library
@@ -1072,7 +956,7 @@ The ``snmpwalk`` command can be used to validate the collectd configuration:
retrieved using standard IF-MIB tables.
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
For more details on AgentX subagent, please see:
http://www.net-snmp.org/tutorial/tutorial-5/toolkit/demon/
@@ -1080,12 +964,12 @@ http://www.net-snmp.org/tutorial/tutorial-5/toolkit/demon/
.. _virt-plugin:
virt plugin
-^^^^^^^^^^^^
+^^^^^^^^^^^
Repo: https://github.com/collectd/collectd
-Branch: master
+Branch: main
-Dependencies: libvirt (https://libvirt.org/), libxml2
+Dependencies: `libvirt <https://libvirt.org/>`_, libxml2
On Centos, install the dependencies:
@@ -1113,7 +997,7 @@ metrics depends on running libvirt daemon version.
.. note:: Please keep in mind that RDT metrics (part of *Performance monitoring
events*) have to be supported by hardware. For more details on hardware support,
please see:
- https://github.com/01org/intel-cmt-cat
+ https://github.com/intel/intel-cmt-cat
Additionally perf metrics **cannot** be collected if *Intel RDT* plugin is enabled.
@@ -1216,14 +1100,12 @@ statistics are disabled. They can be enabled with ``ExtraStats`` option.
</Plugin>
For more information on the plugin parameters, please see:
-https://github.com/collectd/collectd/blob/master/src/collectd.conf.pod
+https://github.com/collectd/collectd/blob/main/src/collectd.conf.pod
.. _install-collectd-as-a-service:
Installing collectd as a service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-**NOTE**: In an OPNFV installation, collectd is installed and configured as a
-service.
Collectd service scripts are available in the collectd/contrib directory.
To install collectd as a service:
@@ -1254,33 +1136,27 @@ Reload
$ sudo systemctl status collectd.service should show success
Additional useful plugins
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^
**Exec Plugin** : Can be used to show you when notifications are being
generated by calling a bash script that dumps notifications to file. (handy
-for debug). Modify /opt/collectd/etc/collectd.conf:
+for debug). Modify ``/opt/collectd/etc/collectd.conf`` to include the
+``NotificationExec`` config option, taking care to add the right directory path
+to the ``write_notification.sh`` script:
-.. code:: bash
-
- LoadPlugin exec
- <Plugin exec>
- # Exec "user:group" "/path/to/exec"
- NotificationExec "user" "<path to barometer>/barometer/src/collectd/collectd_sample_configs/write_notification.sh"
- </Plugin>
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/exec.conf
+ :start-at: LoadPlugin
+ :emphasize-lines: 6
+ :language: bash
-write_notification.sh (just writes the notification passed from exec through
-STDIN to a file (/tmp/notifications)):
-
-.. code:: bash
+``write_notification.sh`` writes the notification passed from exec through
+STDIN to a file (``/tmp/notifications``):
- #!/bin/bash
- rm -f /tmp/notifications
- while read x y
- do
- echo $x$y >> /tmp/notifications
- done
+.. literalinclude:: ../../../src/collectd/collectd_sample_configs/write_notification.sh
+ :start-at: rm -f
+ :language: bash
-output to /tmp/notifications should look like:
+The output in ``/tmp/notifications`` should look like:
.. code:: bash
@@ -1327,7 +1203,7 @@ For more information on configuring and installing OpenStack plugins for
collectd, check out the `collectd-openstack-plugins GSG`_.
Security
-^^^^^^^^^
+^^^^^^^^
* AAA – on top of collectd there are secure agents like SNMP V3, Openstack agents
etc. with their own AAA methods.
@@ -1338,7 +1214,7 @@ Security
* Ensuring that only one instance of the program is executed by collectd at any time
* Forcing the plugin to check that custom programs are never executed with superuser
- privileges.
+ privileges.
* Protection of Data in flight:
@@ -1357,14 +1233,14 @@ Security
* `CVE-2010-4336`_ fixed https://mailman.verplant.org/pipermail/collectd/2010-November/004277.html
in Version 4.10.2.
- * http://www.cvedetails.com/product/20310/Collectd-Collectd.html?vendor_id=11242
+ * https://www.cvedetails.com/product/20310/Collectd-Collectd.html?vendor_id=11242
* It's recommended to only use collectd plugins from signed packages.
References
^^^^^^^^^^^
.. [1] https://collectd.org/wiki/index.php/Naming_schema
-.. [2] https://github.com/collectd/collectd/blob/master/src/daemon/plugin.h
+.. [2] https://github.com/collectd/collectd/blob/main/src/daemon/plugin.h
.. [3] https://collectd.org/wiki/index.php/Value_list_t
.. [4] https://collectd.org/wiki/index.php/Data_set
.. [5] https://collectd.org/documentation/manpages/types.db.5.shtml
@@ -1372,10 +1248,10 @@ References
.. [7] https://collectd.org/wiki/index.php/Meta_Data_Interface
.. _Barometer OPNFV Summit demo: https://prezi.com/kjv6o8ixs6se/software-fastpath-service-quality-metrics-demo/
-.. _gnocchi plugin: https://github.com/openstack/collectd-openstack-plugins/tree/stable/ocata/
-.. _aodh plugin: https://github.com/openstack/collectd-openstack-plugins/tree/stable/ocata/
-.. _collectd-openstack-plugins GSG: https://github.com/openstack/collectd-openstack-plugins/blob/master/doc/source/GSG.rst
-.. _grafana guide: https://wiki.opnfv.org/display/fastpath/Installing+and+configuring+InfluxDB+and+Grafana+to+display+metrics+with+collectd
+.. _gnocchi plugin: https://opendev.org/x/collectd-openstack-plugins/src/branch/stable/ocata/
+.. _aodh plugin: https://opendev.org/x/collectd-openstack-plugins/src/branch/stable/ocata/
+.. _collectd-openstack-plugins GSG: https://opendev.org/x/collectd-openstack-plugins/src/branch/master/doc/source/GSG.rst
+.. _grafana guide: https://wiki.anuket.io/display/HOME/Installing+and+configuring+InfluxDB+and+Grafana+to+display+metrics+with+collectd
.. _CVE-2017-7401: https://www.cvedetails.com/cve/CVE-2017-7401/
.. _CVE-2016-6254: https://www.cvedetails.com/cve/CVE-2016-6254/
.. _CVE-2010-4336: https://www.cvedetails.com/cve/CVE-2010-4336/
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index e880f3a9..566bb692 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -2,24 +2,23 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) Intel and OPNFV
+.. (c) Intel, Anuket and others
===========================
-OPNFV Barometer User Guide
+Anuket Barometer User Guide
===========================
-.. The feature user guide should provide an OPNFV user with enough information to
-.. use the features provided by the feature project in the supported scenarios.
-.. This guide should walk a user through the usage of the features once a scenario
-.. has been deployed and is active according to the installation guide provided
-.. by the installer project.
+.. The feature user guide should provide an Anuket user with enough information
+.. to use the features provided by the feature project.
.. toctree::
:maxdepth: 1
feature.userguide
collectd.ves.userguide.rst
- docker.userguide.rst
+ installguide.docker.rst
+ installguide.oneclick.rst
+
.. The feature.userguide.rst file should contain the text for this document
.. additional documents can be added to this directory and added in the right order
.. to this file as a list below.
diff --git a/docs/release/userguide/docker.userguide.rst b/docs/release/userguide/installguide.docker.rst
index d0916cfa..9141eef6 100644
--- a/docs/release/userguide/docker.userguide.rst
+++ b/docs/release/userguide/installguide.docker.rst
@@ -1,25 +1,25 @@
+.. _barometer-docker-userguide:
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-.. _barometer-docker-userguide:
+.. (c) Anuket and others
-===================================
-OPNFV Barometer Docker User Guide
-===================================
+=====================================
+Anuket Barometer Docker Install Guide
+=====================================
.. contents::
:depth: 3
:local:
The intention of this user guide is to outline how to install and test the Barometer project's
-docker images. The `OPNFV docker hub <https://hub.docker.com/u/opnfv/?page=1>`_ contains 5 docker
+docker images. The `Anuket docker hub <https://hub.docker.com/u/anuket/>`_ contains 5 docker
images from the Barometer project:
- 1. `Collectd docker image <https://hub.docker.com/r/opnfv/barometer-collectd/>`_
- 2. `Influxdb docker image <https://hub.docker.com/r/opnfv/barometer-influxdb/>`_
- 3. `Grafana docker image <https://hub.docker.com/r/opnfv/barometer-grafana/>`_
- 4. `Kafka docker image <https://hub.docker.com/r/opnfv/barometer-kafka/>`_
- 5. `VES application docker image <https://hub.docker.com/r/opnfv/barometer-ves/>`_
+ 1. `Collectd docker image <https://hub.docker.com/r/anuket/barometer-collectd/>`_
+ 2. `Influxdb docker image <https://hub.docker.com/r/anuket/barometer-influxdb/>`_
+ 3. `Grafana docker image <https://hub.docker.com/r/anuket/barometer-grafana/>`_
+ 4. `Kafka docker image <https://hub.docker.com/r/anuket/barometer-kafka>`_
+ 5. `VES application docker image <https://hub.docker.com/r/anuket/barometer-ves/>`_
For description of images please see section `Barometer Docker Images Description`_
@@ -31,6 +31,10 @@ For steps to build and run VES and Kafka images please see section `Build and Ru
For overview of running VES application with Kafka please see the :ref:`VES Application User Guide <barometer-ves-userguide>`
+For an alternative installation method using ansible, please see the :ref:`Barometer One Click Install Guide <barometer-oneclick-userguide>`.
+
+For post-installation verification and troubleshooting, please see the :ref:`Barometer post installation guide <barometer-postinstall>`.
+
Barometer Docker Images Description
-----------------------------------
@@ -44,7 +48,7 @@ the barometer plugins.
.. note::
The Dockerfile is available in the docker/barometer-collectd directory in the barometer repo.
- The Dockerfile builds a CentOS 7 docker image.
+ The Dockerfile builds a CentOS 8 docker image.
The container MUST be run as a privileged container.
Collectd is a daemon which collects system performance statistics periodically
@@ -91,322 +95,9 @@ The Barometer project's VES application and Kafka docker images are based on a C
docker image has a dependency on `Zookeeper <https://zookeeper.apache.org/>`_. Kafka must be able to
connect and register with an instance of Zookeeper that is either running on local or remote host.
Kafka receives and stores metrics received from Collectd. VES application pulls latest metrics from Kafka
-which it normalizes into VES format for sending to a VES collector. Please see details in
+which it normalizes into VES format for sending to a VES collector. Please see details in
:ref:`VES Application User Guide <barometer-ves-userguide>`
-One Click Install with Ansible
-------------------------------
-
-Proxy for package manager on host
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. note::
- This step has to be performed only if host is behind HTTP/HTTPS proxy
-
-Proxy URL have to be set in dedicated config file
-
-1. CentOS - /etc/yum.conf
-
-.. code:: bash
-
- proxy=http://your.proxy.domain:1234
-
-2. Ubuntu - /etc/apt/apt.conf
-
-.. code:: bash
-
- Acquire::http::Proxy "http://your.proxy.domain:1234"
-
-After update of config file, apt mirrors have to be updated via 'apt-get update'
-
-.. code:: bash
-
- $ sudo apt-get update
-
-Proxy environment variables(for docker and pip)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. note::
- This step has to be performed only if host is behind HTTP/HTTPS proxy
-
-Configuring proxy for packaging system is not enough, also some proxy
-environment variables have to be set in the system before ansible scripts
-can be started.
-Barometer configures docker proxy automatically via ansible task as a part
-of 'one click install' process - user only has to provide proxy URL using common
-shell environment variables and ansible will automatically configure proxies
-for docker(to be able to fetch barometer images). Another component used by
-ansible (e.g. pip is used for downloading python dependencies) will also benefit
-from setting proxy variables properly in the system.
-
-Proxy variables used by ansible One Click Install:
- * http_proxy
- * https_proxy
- * ftp_proxy
- * no_proxy
-
-Variables mentioned above have to be visible for superuser (because most
-actions involving ansible-barometer installation require root privileges).
-Proxy variables are commonly defined in '/etc/environment' file (but any other
-place is good as long as variables can be seen by commands using 'su').
-
-Sample proxy configuration in /etc/environment:
-
-.. code:: bash
-
- http_proxy=http://your.proxy.domain:1234
- https_proxy=http://your.proxy.domain:1234
- ftp_proxy=http://your.proxy.domain:1234
- no_proxy=localhost
-
-Install Ansible
-^^^^^^^^^^^^^^^
-.. note::
- * sudo permissions or root access are required to install ansible.
- * ansible version needs to be 2.4+, because usage of import/include statements
-
-The following steps have been verified with Ansible 2.6.3 on Ubuntu 16.04 and 18.04.
-To install Ansible 2.6.3 on Ubuntu:
-
-.. code:: bash
-
- $ sudo apt-get install python
- $ sudo apt-get install python-pip
- $ sudo -H pip install 'ansible==2.6.3'
-
-The following steps have been verified with Ansible 2.6.3 on Centos 7.5.
-To install Ansible 2.6.3 on Centos:
-
-.. code:: bash
-
- $ sudo yum install python
- $ sudo yum install epel-release
- $ sudo yum install python-pip
- $ sudo -H pip install 'ansible==2.6.3'
- $ sudo yum install git
-
-.. note::
- When using multi-node-setup, please make sure that 'python' package is
- installed on all of the target nodes (ansible during 'Gathering facts'
- phase is using python2 and it may not be installed by default on some
- distributions - e.g. on Ubuntu 16.04 it has to be installed manually)
-
-Clone barometer repo
-^^^^^^^^^^^^^^^^^^^^
-
-.. code:: bash
-
- $ git clone https://gerrit.opnfv.org/gerrit/barometer
- $ cd barometer/docker/ansible
-
-Edit inventory file
-^^^^^^^^^^^^^^^^^^^
-Edit inventory file and add hosts: $barometer_dir/docker/ansible/default.inv
-
-.. code:: bash
-
- [collectd_hosts]
- localhost
-
- [collectd_hosts:vars]
- install_mcelog=true
- insert_ipmi_modules=true
-
- [influxdb_hosts]
- localhost
-
- [grafana_hosts]
- localhost
-
- [prometheus_hosts]
- #localhost
-
- [zookeeper_hosts]
- #NOTE: currently one zookeeper host is supported
- #hostname
-
- [kafka_hosts]
- #hostname
-
- [ves_hosts]
- #hostname
-
-Change localhost to different hosts where neccessary.
-Hosts for influxdb and grafana are required only for collectd_service.yml.
-Hosts for zookeeper, kafka and ves are required only for collectd_ves.yml.
-
-.. note::
- Zookeeper, Kafka and VES need to be on the same host, there is no
- support for multi node setup.
-
-To change host for kafka edit kafka_ip_addr in ./roles/config_files/vars/main.yml.
-
-Additional plugin dependencies
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-By default ansible will try to fulfill dependencies for mcelog and ipmi plugin.
-For mcelog plugin it installs mcelog daemon. For ipmi it tries to insert ipmi_devintf
-and ipmi_si kernel modules.
-This can be changed in inventory file with use of variables install_mcelog
-and insert_ipmi_modules, both variables are independent:
-
-.. code:: bash
-
- [collectd_hosts:vars]
- install_mcelog=false
- insert_ipmi_modules=false
-
-.. note::
- On Ubuntu 18.04 the deb package for mcelog daemon is not available in official
- Ubuntu repository. In that case ansible scripts will try to download, make and
- install the daemon from mcelog git repository.
-
-Configure ssh keys
-^^^^^^^^^^^^^^^^^^
-
-Generate ssh keys if not present, otherwise move onto next step.
-
-.. code:: bash
-
- $ sudo ssh-keygen
-
-Copy ssh key to all target hosts. It requires to provide root password.
-The example is for localhost.
-
-.. code:: bash
-
- $ sudo -i
- $ ssh-copy-id root@localhost
-
-Verify that key is added and password is not required to connect.
-
-.. code:: bash
-
- $ sudo ssh root@localhost
-
-.. note::
- Keys should be added to every target host and [localhost] is only used as an
- example. For multinode installation keys need to be copied for each node:
- [collectd_hostname], [influxdb_hostname] etc.
-
-Download and run Collectd+Influxdb+Grafana containers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The One Click installation features easy and scalable deployment of Collectd,
-Influxdb and Grafana containers using Ansible playbook. The following steps goes
-through more details.
-
-.. code:: bash
-
- $ sudo -H ansible-playbook -i default.inv collectd_service.yml
-
-Check the three containers are running, the output of docker ps should be similar to:
-
-.. code:: bash
-
- $ sudo docker ps
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- a033aeea180d opnfv/barometer-grafana "/run.sh" 9 days ago Up 7 minutes bar-grafana
- 1bca2e4562ab opnfv/barometer-influxdb "/entrypoint.sh in..." 9 days ago Up 7 minutes bar-influxdb
- daeeb68ad1d5 opnfv/barometer-collectd "/run_collectd.sh ..." 9 days ago Up 7 minutes bar-collectd
-
-To make some changes when a container is running run:
-
-.. code:: bash
-
- $ sudo docker exec -ti <CONTAINER ID> /bin/bash
-
-Connect to <host_ip>:3000 with a browser and log into Grafana: admin/admin.
-For short introduction please see the:
-`Grafana guide <http://docs.grafana.org/guides/getting_started/>`_.
-
-The collectd configuration files can be accessed directly on target system in '/opt/collectd/etc/collectd.conf.d'.
-It can be used for manual changes or enable/disable plugins. If configuration has been modified it is required to
-restart collectd:
-
-.. code:: bash
-
- $ sudo docker restart bar-collectd
-
-Download and run collectd+kafka+ves containers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. code:: bash
-
- $ sudo ansible-playbook -i default.inv collectd_ves.yml
-
-Check the containers are running, the output of docker ps should be similar to:
-
-.. code:: bash
-
- $ sudo docker ps
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- 29035be2dab5 zookeeper:3.4.11 "/docker-entrypoint._" 7 minutes ago Up 7 minutes bar-zookeeper
- eb8bba3c0b76 opnfv/barometer-ves "./start_ves_app.s..." 6 minutes ago Up 6 minutes bar-ves
- 86702a96a68c opnfv/barometer-kafka "/src/start_kafka.sh" 6 minutes ago Up 6 minutes bar-kafka
- daeeb68ad1d5 opnfv/barometer-collectd "/run_collectd.sh ..." 6 minutes ago Up 6 minutes bar-collectd
-
-
-To make some changes when a container is running run:
-
-.. code:: bash
-
- $ sudo docker exec -ti <CONTAINER ID> /bin/bash
-
-List of default plugins for collectd container
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. note::
- The dpdk plugins dpdkevents and dpdkstat were tested with DPDK v16.11.
-
-By default the collectd is started with default configuration which includes the followin plugins:
- * csv, contextswitch, cpu, cpufreq, df, disk, ethstat, ipc, irq, load, memory, numa, processes,
- swap, turbostat, uuid, uptime, exec, hugepages, intel_pmu, ipmi, write_kafka, logfile, mcelog,
- network, intel_rdt, rrdtool, snmp_agent, syslog, virt, ovs_stats, ovs_events, dpdkevents,
- dpdkstat
-
-Some of the plugins are loaded depending on specific system requirements and can be omitted if
-dependency is not met, this is the case for:
- * hugepages, ipmi, mcelog, intel_rdt, virt, ovs_stats, ovs_events
-
-List and description of tags used in ansible scripts
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Tags can be used to run a specific part of the configuration without running the whole playbook.
-To run a specific parts only:
-
-.. code:: bash
-
- $ sudo ansible-playbook -i default.inv collectd_service.yml --tags "syslog,cpu,uuid"
-
-To disable some parts or plugins:
-
-.. code:: bash
-
- $ sudo ansible-playbook -i default.inv collectd_service.yml --skip-tags "en_default_all,syslog,cpu,uuid"
-
-List of available tags:
-
-install_docker
- Install docker and required dependencies with package manager.
-
-add_docker_proxy
- Configure proxy file for docker service if proxy is set on host environment.
-
-rm_config_dir
- Remove collectd config files.
-
-copy_additional_configs
- Copy additional configuration files to target system. Path to additional configuration
- is stored in $barometer_dir/docker/ansible/roles/config_files/vars/main.yml as additional_configs_path.
-
-en_default_all
- Set of default read plugins: contextswitch, cpu, cpufreq, df, disk, ethstat, ipc, irq,
- load, memory, numa, processes, swap, turbostat, uptime.
-
-plugins tags
- The following tags can be used to enable/disable plugins: csv, contextswitch, cpu,
- cpufreq, df, disk, ethstat, ipc, irq, load, memory, numa, processes, swap, turbostat,
- uptime, exec, hugepages, ipmi, kafka, logfile, mcelogs, network, pmu, rdt, rrdtool,
- snmp, syslog, virt, ovs_stats, ovs_events, uuid, dpdkevents, dpdkstat.
-
Installing Docker
-----------------
.. Describe the specific capabilities and usage for <XYZ> feature.
@@ -418,7 +109,7 @@ Installing Docker
use of Ansible-Playbook.
On Ubuntu
-^^^^^^^^^^
+^^^^^^^^^
.. note::
* sudo permissions are required to install docker.
* These instructions are for Ubuntu 16.10
@@ -464,6 +155,7 @@ Replace <username> above with an appropriate user name.
Retrieving key from https://download.docker.com/linux/centos/gpg
Importing GPG key 0x621E9F35:
+.. ::
Userid : "Docker Release (CE rpm) <docker@docker.com>"
Fingerprint: 060a 61c5 1b55 8a7f 742b 77aa c52f eb6b 621e 9f35
From : https://download.docker.com/linux/centos/gpg
@@ -544,11 +236,11 @@ The output should be something like:
.. code:: bash
- Unable to find image 'hello-world:latest' locally
- latest: Pulling from library/hello-world
- 5b0f327be733: Pull complete
- Digest: sha256:07d5f7800dfe37b8c2196c7b1c524c33808ce2e0f74e7aa00e603295ca9a0972
- Status: Downloaded newer image for hello-world:latest
+ Trying to pull docker.io/library/hello-world...Getting image source signatures
+ Copying blob 0e03bdcc26d7 done
+ Copying config bf756fb1ae done
+ Writing manifest to image destination
+ Storing signatures
Hello from Docker!
This message shows that your installation appears to be working correctly.
@@ -561,11 +253,14 @@ The output should be something like:
4. The Docker daemon streamed that output to the Docker client, which sent it
to your terminal.
-To try something more ambitious, you can run an Ubuntu container with:
+ To try something more ambitious, you can run an Ubuntu container with:
+ $ docker run -it ubuntu bash
-.. code:: bash
+ Share images, automate workflows, and more with a free Docker ID:
+ https://hub.docker.com/
- $ docker run -it ubuntu bash
+ For more examples and ideas, visit:
+ https://docs.docker.com/get-started/
Build and Run Collectd Docker Image
-----------------------------------
@@ -575,22 +270,23 @@ Collectd-barometer flavors
Before starting to build and run the Collectd container, understand the available
flavors of Collectd containers:
- * barometer-collectd - stable release, based on collectd 5.8
- * barometer-collectd-master - release based on collectd 'master' branch
- * barometer-collectd-experimental - release based on collectd 'master'
- branch that also includes set of experimental(not yet merged into upstream)
- pull requests
+
+* barometer-collectd - stable release, based on collectd 5.12
+* barometer-collectd-latest - release based on collectd 'main' branch
+* barometer-collectd-experimental - release based on collectd 'main'
+ branch that can also include a set of experimental (not yet merged into
+ upstream) pull requests
.. note::
Experimental container is not tested across various OS'es and the stability
of the container can change. Usage of experimental flavor is at the user's risk.
-Stable barometer-collectd container is intended for work in production
+Stable `barometer-collectd` container is intended for work in production
environment as it is based on latest collectd official release.
-`Barometer-collectd-master` and `barometer-collectd-experimental` containers
+`barometer-collectd-latest` and `barometer-collectd-experimental` containers
can be used in order to try new collectd features.
-All flavors are located in `barometer` git repository - respective dockerfiles
-are stored in subdirectories of 'docker/' directory
+All flavors are located in `barometer` git repository - respective Dockerfiles
+are stored in subdirectories of `docker/` directory
.. code:: bash
@@ -598,7 +294,7 @@ are stored in subdirectories of 'docker/' directory
$ git clone https://gerrit.opnfv.org/gerrit/barometer
$ ls barometer/docker|grep collectd
barometer-collectd
- barometer-collectd-master
+ barometer-collectd-latest
barometer-collectd-experimental
.. note::
@@ -609,11 +305,11 @@ are stored in subdirectories of 'docker/' directory
Download the collectd docker image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you wish to use a pre-built barometer image, you can pull the barometer
-image from https://hub.docker.com/r/opnfv/barometer-collectd/
+image from `dockerhub <https://hub.docker.com/r/anuket/barometer-collectd/>`_
.. code:: bash
- $ docker pull opnfv/barometer-collectd
+ $ docker pull anuket/barometer-collectd
Build stable collectd container
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -621,7 +317,7 @@ Build stable collectd container
.. code:: bash
$ cd <BAROMETER_REPO_DIR>/docker/barometer-collectd
- $ sudo docker build -t opnfv/barometer-collectd --build-arg http_proxy=`echo $http_proxy` \
+ $ sudo docker build -t anuket/barometer-collectd --build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` --network=host -f Dockerfile .
.. note::
@@ -634,53 +330,99 @@ Check the docker images:
$ sudo docker images
-Output should contain a barometer-collectd image:
+Output should contain a ``barometer-collectd`` image:
.. code::
REPOSITORY TAG IMAGE ID CREATED SIZE
- opnfv/barometer-collectd latest 05f2a3edd96b 3 hours ago 1.2GB
+ anuket/barometer-collectd latest 39f5e0972178 2 months ago 1.28GB
centos 7 196e0ce0c9fb 4 weeks ago 197MB
centos latest 196e0ce0c9fb 4 weeks ago 197MB
hello-world latest 05a3bd381fc2 4 weeks ago 1.84kB
.. note::
- If you do not plan to use collectd-master and collectd-experimental barometer
- containers, then you can proceed directly to section `Run the collectd stable docker image`_
+ If you do not plan to use `barometer-collectd-latest` and
+ `barometer-collectd-experimental` containers, then you can proceed directly
+ to section `Run the collectd stable docker image`_
-Build collectd-master container
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Build barometer-collectd-latest container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: bash
$ cd <BAROMETER_REPO_DIR>
- $ sudo docker build -t opnfv/barometer-collectd-master \
+ $ sudo docker build -t anuket/barometer-collectd-latest \
--build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` --network=host -f \
- docker/barometer-collectd-master/Dockerfile .
+ docker/barometer-collectd-latest/Dockerfile .
.. note::
- For `barometer-collectd-master` and `barometer-collectd-experimental` containers
+ For `barometer-collectd-latest` and `barometer-collectd-experimental` containers
proxy parameters should be passed only if system is behind an HTTP or HTTPS
proxy server (same as for stable collectd container)
-Build collectd-experimental container
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Build barometer-collectd-experimental container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The barometer-collectd-experimental container uses the ``main`` branch of
+collectd, but allows the user to apply a number of pull requests, which are
+passed to docker via the ``COLLECTD_PULL_REQUESTS`` build arg, as shown in the
+example below.
+``COLLECTD_PULL_REQUESTS`` should be a comma-delimited string of pull request IDs.
.. code:: bash
$ cd <BAROMETER_REPO_DIR>
- $ sudo docker build -t opnfv/barometer-collectd-experimental \
+ $ sudo docker build -t anuket/barometer-collectd-experimental \
--build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` \
+ --build-arg COLLECTD_PULL_REQUESTS=1234,5678 \
--network=host -f docker/barometer-collectd-experimental/Dockerfile .
.. note::
- For `barometer-collectd-master` and `barometer-collectd-experimental` containers
+ For `barometer-collectd-latest` and `barometer-collectd-experimental` containers
proxy parameters should be passed only if system is behind an HTTP or HTTPS
proxy server (same as for stable collectd container)
+Build collectd-6
+^^^^^^^^^^^^^^^^
+
+The barometer-collectd-experimental Dockerfile can be used to build
+collectd-6.0, which is currently under development. In order to do this, the
+``COLLECTD_FLAVOR`` build arg can be passed to the docker build command.
+The optional ``COLLECTD_PULL_REQUESTS`` arg can be passed as well, to test
+proposed patches to collectd.
+
+.. code:: bash
+
+ $ cd <BAROMETER_REPO_DIR>
+ $ sudo docker build -t anuket/barometer-collectd-6 \
+ --build-arg COLLECTD_FLAVOR=collectd-6 \
+ --build-arg COLLECTD_PULL_REQUESTS=1234,5678 \
+ --network=host -f docker/barometer-collectd-experimental/Dockerfile .
+
+The instructions for running the collectd-6 container are the same as for the
+collectd-experimental container.
+
+There are a few useful build args that can be used to further customise the
+collectd-6 build (a combined usage example is shown after this list):
+
+* **COLLECTD_CONFIG_CMD_ARGS**
+  Useful for testing new plugins for collectd-6, as un-ported plugins are
+  disabled by default.
+  This option lets the ``./configure`` command be run with extra args,
+  e.g. ``--enable-cpu --enable-<my-newly-ported-plugin>``, so that the
+  plugin can be enabled for the PR that is being tested.
+
+* **COLLECTD_TAG**
+  This overrides the default tag selected by the flavors, and allows checking
+  out an arbitrary branch (e.g. a PR branch) instead of using the
+  ``COLLECTD_PULL_REQUESTS`` arg, which rebases each PR on top of the
+  nominal branch.
+  To check out a PR, use the following args with the docker build command:
+  ``--build-arg COLLECTD_TAG=pull/<PR_ID>/head``
+
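+As a combined example, a build that checks out a PR branch directly and
+enables an extra plugin could look like the following sketch (the PR ID
+``1234`` and the ``--enable-cpu`` flag are placeholders to adapt to the change
+being tested):
+
+.. code:: bash
+
+   $ cd <BAROMETER_REPO_DIR>
+   $ sudo docker build -t anuket/barometer-collectd-6 \
+      --build-arg COLLECTD_FLAVOR=collectd-6 \
+      --build-arg COLLECTD_TAG=pull/1234/head \
+      --build-arg COLLECTD_CONFIG_CMD_ARGS='--enable-cpu' \
+      --network=host -f docker/barometer-collectd-experimental/Dockerfile .
+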
Run the collectd stable docker image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: bash
@@ -688,7 +430,8 @@ Run the collectd stable docker image
$ cd <BAROMETER_REPO_DIR>
$ sudo docker run -ti --net=host -v \
`pwd`/src/collectd/collectd_sample_configs:/opt/collectd/etc/collectd.conf.d \
- -v /var/run:/var/run -v /tmp:/tmp --privileged opnfv/barometer-collectd
+ -v /var/run:/var/run -v /tmp:/tmp -v /sys/fs/resctrl:/sys/fs/resctrl \
+ --privileged anuket/barometer-collectd
.. note::
The docker collectd image contains configuration for all the collectd
@@ -707,15 +450,20 @@ Run the collectd stable docker image
files should be removed from shared configuration directory
(`<BAROMETER_REPO_DIR>/src/collectd/collectd_sample_configs/`) prior
to starting barometer-collectd container. By example: in case of missing
- `DPDK` functionality on the host, `dpdkstat.conf` and `dpdkevents.conf`
- should be removed.
+ `DPDK` functionality on the host, `dpdk_telemetry.conf` should be removed.
Sample configurations can be found at:
https://github.com/opnfv/barometer/tree/master/src/collectd/collectd_sample_configs
List of barometer-collectd dependencies on host for various plugins
can be found at:
- https://wiki.opnfv.org/display/fastpath/Barometer-collectd+host+dependencies
+ https://wiki.anuket.io/display/HOME/Barometer-collectd+host+dependencies
+
+ The Resource Control file system (/sys/fs/resctrl) can be bound from host to
+ container only if this directory exists on the host system. Otherwise omit
+ the '-v /sys/fs/resctrl:/sys/fs/resctrl' part in docker run command.
+ More information about resctrl can be found at:
+ https://github.com/intel/intel-cmt-cat/wiki/resctrl
Check your docker image is running
@@ -729,56 +477,70 @@ To make some changes when the container is running run:
sudo docker exec -ti <CONTAINER ID> /bin/bash
-Run the barometer-collectd-master docker image
+Run the barometer-collectd-latest docker image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Run command for `barometer-collectd-master` container is very similar to command
-used for stable container - the only differences are name of the image
-and location of the sample configuration files(as different version of collectd
-plugins requiring different configuration files)
+The run command for the ``barometer-collectd-latest`` container is very similar
+to the command used for the stable container - the only differences are the
+name of the image and the location of the sample configuration files (as
+different versions of collectd plugins require different configuration files).
.. code:: bash
$ cd <BAROMETER_REPO_DIR>
$ sudo docker run -ti --net=host -v \
- `pwd`/src/collectd/collectd_sample_configs-master:/opt/collectd/etc/collectd.conf.d \
- -v /var/run:/var/run -v /tmp:/tmp --privileged opnfv/barometer-collectd-master
+ `pwd`/src/collectd/collectd_sample_configs-latest:/opt/collectd/etc/collectd.conf.d \
+ -v /var/run:/var/run -v /tmp:/tmp -v /sys/fs/resctrl:/sys/fs/resctrl \
+ --privileged anuket/barometer-collectd-latest
.. note::
Barometer collectd docker images are sharing some directories with host
(e.g. /tmp) therefore only one of collectd barometer flavors can be run
- at a time. In other words, if you want to try `barometer-collectd-master` or
+ at a time. In other words, if you want to try `barometer-collectd-latest` or
`barometer-collectd-experimental` image, please stop instance of
`barometer-collectd(stable)` image first.
+ The Resource Control file system (/sys/fs/resctrl) can be bound from host to
+ container only if this directory exists on the host system. Otherwise omit
+ the '-v /sys/fs/resctrl:/sys/fs/resctrl' part in docker run command.
+ More information about resctrl can be found at:
+ https://github.com/intel/intel-cmt-cat/wiki/resctrl
+
Run the barometer-collectd-experimental docker image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Barometer-collectd-experimental container shares default configuration files
-with 'barometer-collectd-master' equivalent but some of experimental pull
+with its 'barometer-collectd-latest' equivalent, but some of the experimental pull
requests may require modified configuration. Additional configuration files that
are required specifically by experimental container can be found in
`docker/barometer-collectd-experimental/experimental-configs/`
directory. Content of this directory (all \*.conf files) should be copied to
-`src/collectd/collectd_sample_configs-master` directory before first run of
+``src/collectd/collectd_sample_configs-latest`` directory before first run of
experimental container.
.. code:: bash
$ cd <BAROMETER_REPO_DIR>
$ cp docker/barometer-collectd-experimental/experimental-configs/*.conf \
- src/collectd/collectd_sample_configs-master
+ src/collectd/collectd_sample_configs-latest
When configuration files are up to date for experimental container, it can be
-launched using following command (almost identical to run-command for 'master'
+launched using the following command (almost identical to the run command for the ``latest``
collectd container)
.. code:: bash
$ cd <BAROMETER_REPO_DIR>
$ sudo docker run -ti --net=host -v \
- `pwd`/src/collectd/collectd_sample_configs-master:/opt/collectd/etc/collectd.conf.d \
- -v /var/run:/var/run -v /tmp:/tmp --privileged \
- opnfv/barometer-collectd-experimental
+ `pwd`/src/collectd/collectd_sample_configs-latest:/opt/collectd/etc/collectd.conf.d \
+ -v /var/run:/var/run -v /tmp:/tmp -v /sys/fs/resctrl:/sys/fs/resctrl --privileged \
+ anuket/barometer-collectd-experimental
+
+.. note::
+ The Resource Control file system (/sys/fs/resctrl) can be bound from host to
+ container only if this directory exists on the host system. Otherwise omit
+ the '-v /sys/fs/resctrl:/sys/fs/resctrl' part in docker run command.
+ More information about resctrl can be found at:
+ https://github.com/intel/intel-cmt-cat/wiki/resctrl
Build and Run InfluxDB and Grafana docker images
@@ -809,7 +571,7 @@ volume folder been mounted. Appropriate example are given in section `Run the Gr
Download the InfluxDB and Grafana docker images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you wish to use pre-built barometer project's influxdb and grafana images, you can pull the
-images from https://hub.docker.com/r/opnfv/barometer-influxdb/ and https://hub.docker.com/r/opnfv/barometer-grafana/
+images from https://hub.docker.com/r/anuket/barometer-influxdb/ and https://hub.docker.com/r/anuket/barometer-grafana/
.. note::
If your preference is to build images locally please see sections `Build InfluxDB Docker Image`_ and
@@ -817,8 +579,8 @@ images from https://hub.docker.com/r/opnfv/barometer-influxdb/ and https://hub.d
.. code:: bash
- $ docker pull opnfv/barometer-influxdb
- $ docker pull opnfv/barometer-grafana
+ $ docker pull anuket/barometer-influxdb
+ $ docker pull anuket/barometer-grafana
.. note::
If you have pulled the pre-built barometer-influxdb and barometer-grafana images there is no
@@ -834,7 +596,7 @@ Build influxdb image from Dockerfile
.. code:: bash
$ cd barometer/docker/barometer-influxdb
- $ sudo docker build -t opnfv/barometer-influxdb --build-arg http_proxy=`echo $http_proxy` \
+ $ sudo docker build -t anuket/barometer-influxdb --build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` --network=host -f Dockerfile .
.. note::
@@ -852,7 +614,7 @@ Output should contain an influxdb image:
.. code::
REPOSITORY TAG IMAGE ID CREATED SIZE
- opnfv/barometer-influxdb latest 1e4623a59fe5 3 days ago 191MB
+ anuket/barometer-influxdb latest c5a09a117067 2 months ago 191MB
Build Grafana docker image
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -862,7 +624,7 @@ Build Grafana image from Dockerfile
.. code:: bash
$ cd barometer/docker/barometer-grafana
- $ sudo docker build -t opnfv/barometer-grafana --build-arg http_proxy=`echo $http_proxy` \
+ $ sudo docker build -t anuket/barometer-grafana --build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` -f Dockerfile .
.. note::
@@ -880,17 +642,17 @@ Output should contain an influxdb image:
.. code::
REPOSITORY TAG IMAGE ID CREATED SIZE
- opnfv/barometer-grafana latest 05f2a3edd96b 3 hours ago 1.2GB
+ anuket/barometer-grafana latest 3724ab87f0b1 2 months ago 284MB
Run the Influxdb and Grafana Images
-----------------------------------
Run the InfluxDB docker image
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: bash
$ sudo docker run -tid -v /var/lib/influxdb:/var/lib/influxdb --net=host\
- --name bar-influxdb opnfv/barometer-influxdb
+ --name bar-influxdb anuket/barometer-influxdb
Check your docker image is running
@@ -951,7 +713,7 @@ changing output location is required:
$ cd <BAROMETER_REPO_DIR>
$ sudo docker run -ti --name bar-collectd --net=host -v \
`pwd`/src/collectd/collectd_sample_configs:/opt/collectd/etc/collectd.conf.d \
- -v /var/run:/var/run -v /tmp:/tmp --privileged opnfv/barometer-collectd
+ -v /var/run:/var/run -v /tmp:/tmp --privileged anuket/barometer-collectd
Now collectd container will be sending data to InfluxDB container located on
remote Host pointed by IP configured in step 3.
@@ -966,7 +728,7 @@ Connecting to an influxdb instance running on local system and adding own custom
$ cd <BAROMETER_REPO_DIR>
$ sudo docker run -tid -v /var/lib/grafana:/var/lib/grafana \
-v ${PWD}/docker/barometer-grafana/dashboards:/opt/grafana/dashboards \
- --name bar-grafana --net=host opnfv/barometer-grafana
+ --name bar-grafana --net=host anuket/barometer-grafana
Connecting to an influxdb instance running on remote system with hostname of someserver and IP address
of 192.168.121.111
@@ -975,7 +737,7 @@ of 192.168.121.111
$ sudo docker run -tid -v /var/lib/grafana:/var/lib/grafana --net=host -e \
influxdb_host=someserver --add-host someserver:192.168.121.111 --name \
- bar-grafana opnfv/barometer-grafana
+ bar-grafana anuket/barometer-grafana
Check your docker image is running
@@ -1017,22 +779,22 @@ folowing actions have to be performed
on shared folders to not affect new setup with an old configuration.
Build and Run VES and Kafka Docker Images
-------------------------------------------
+-----------------------------------------
Download VES and Kafka docker images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you wish to use pre-built barometer project's VES and kafka images, you can pull the
-images from https://hub.docker.com/r/opnfv/barometer-ves/ and https://hub.docker.com/r/opnfv/barometer-kafka/
+images from https://hub.docker.com/r/anuket/barometer-ves/ and https://hub.docker.com/r/anuket/barometer-kafka/
.. note::
- If your preference is to build images locally please see sections `Build the Kafka Image`_ and
- `Build VES Image`_
+ If your preference is to build images locally please see sections `Build Kafka Docker Image`_ and
+ `Build VES Docker Image`_
.. code:: bash
- $ docker pull opnfv/barometer-kafka
- $ docker pull opnfv/barometer-ves
+ $ docker pull anuket/barometer-kafka
+ $ docker pull anuket/barometer-ves
.. note::
If you have pulled the pre-built images there is no requirement to complete steps outlined
@@ -1047,7 +809,7 @@ Build Kafka docker image:
.. code:: bash
$ cd barometer/docker/barometer-kafka
- $ sudo docker build -t opnfv/barometer-kafka --build-arg http_proxy=`echo $http_proxy` \
+ $ sudo docker build -t anuket/barometer-kafka --build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` -f Dockerfile .
.. note::
@@ -1065,7 +827,7 @@ Output should contain a barometer image:
.. code::
REPOSITORY TAG IMAGE ID CREATED SIZE
- opnfv/barometer-kafka latest 05f2a3edd96b 3 hours ago 1.2GB
+ anuket/barometer-kafka latest 75a0860b8d6e 2 months ago 902MB
Build VES docker image
^^^^^^^^^^^^^^^^^^^^^^
@@ -1075,7 +837,7 @@ Build VES application docker image:
.. code:: bash
$ cd barometer/docker/barometer-ves
- $ sudo docker build -t opnfv/barometer-ves --build-arg http_proxy=`echo $http_proxy` \
+ $ sudo docker build -t anuket/barometer-ves --build-arg http_proxy=`echo $http_proxy` \
--build-arg https_proxy=`echo $https_proxy` -f Dockerfile .
.. note::
@@ -1093,7 +855,7 @@ Output should contain a barometer image:
.. code::
REPOSITORY TAG IMAGE ID CREATED SIZE
- opnfv/barometer-ves latest 05f2a3edd96b 3 hours ago 1.2GB
+ anuket/barometer-ves latest 36a4a953e1b4 2 months ago 723MB
Run Kafka docker image
^^^^^^^^^^^^^^^^^^^^^^
@@ -1118,7 +880,7 @@ Run kafka docker image which connects with a zookeeper instance running on same
.. code:: bash
- $ sudo docker run -tid --net=host -p 9092:9092 opnfv/barometer-kafka
+ $ sudo docker run -tid --net=host -p 9092:9092 anuket/barometer-kafka
Run kafka docker image which connects with a zookeeper instance running on a node with IP address of
@@ -1127,7 +889,7 @@ Run kafka docker image which connects with a zookeeper instance running on a nod
.. code:: bash
$ sudo docker run -tid --net=host -p 9092:9092 --env broker_id=1 --env zookeeper_node=zookeeper --add-host \
- zookeeper:192.168.121.111 opnfv/barometer-kafka
+ zookeeper:192.168.121.111 anuket/barometer-kafka
Run VES Application docker image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1144,13 +906,13 @@ Run VES docker image with default configuration
.. code:: bash
- $ sudo docker run -tid --net=host opnfv/barometer-ves
+ $ sudo docker run -tid --net=host anuket/barometer-ves
Run VES docker image with guest.yaml files from barometer/3rd_party/collectd-ves-app/ves_app/yaml/
.. code:: bash
- $ sudo docker run -tid --net=host opnfv/barometer-ves guest.yaml
+ $ sudo docker run -tid --net=host anuket/barometer-ves guest.yaml
Run VES docker image using custom config and yaml files. In the example below the yaml/ folder contains
@@ -1159,7 +921,7 @@ file named custom.yaml
.. code:: bash
$ sudo docker run -tid --net=host -v ${PWD}/custom.config:/opt/ves/config/ves_app_config.conf \
- -v ${PWD}/yaml/:/opt/ves/yaml/ opnfv/barometer-ves custom.yaml
+ -v ${PWD}/yaml/:/opt/ves/yaml/ anuket/barometer-ves custom.yaml
Run VES Test Collector application
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1169,10 +931,10 @@ wide metrics that are collected by barometer-ves container.
Setup instructions are located in: :ref:`Setup VES Test Collector`
Build and Run DMA and Redis Docker Images
------------------------------------------------------
+-----------------------------------------
Download DMA docker images
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^
If you wish to use pre-built barometer project's DMA images, you can pull the
images from https://hub.docker.com/r/opnfv/barometer-dma/
@@ -1190,7 +952,7 @@ images from https://hub.docker.com/r/opnfv/barometer-dma/
`Run DMA Docker Image`_
Build DMA docker image
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
Build DMA docker image:
@@ -1218,7 +980,7 @@ Output should contain a barometer image:
opnfv/barometer-dma latest 2f14fbdbd498 3 hours ago 941 MB
Run Redis docker image
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
.. note::
Before running DMA, Redis must be running.
@@ -1236,10 +998,10 @@ Check your docker image is running
sudo docker ps
Run DMA docker image
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^
.. note::
-Run DMA docker image with default configuration
+ Run DMA docker image with default configuration
.. code:: bash
@@ -1275,9 +1037,9 @@ Run DMA docker image with default configuration
$ sudo ln -s ${PWD}/threshold /usr/local/bin/
References
-^^^^^^^^^^^
-.. [1] https://docs.docker.com/engine/admin/systemd/#httphttps-proxy
-.. [2] https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
+^^^^^^^^^^
+.. [1] https://docs.docker.com/config/daemon/systemd/#httphttps-proxy
+.. [2] https://docs.docker.com/engine/install/centos/#install-using-the-repository
.. [3] https://docs.docker.com/engine/userguide/
diff --git a/docs/release/userguide/installguide.oneclick.rst b/docs/release/userguide/installguide.oneclick.rst
new file mode 100644
index 00000000..78203a12
--- /dev/null
+++ b/docs/release/userguide/installguide.oneclick.rst
@@ -0,0 +1,410 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Anuket and others
+.. _barometer-oneclick-userguide:
+
+========================================
+Anuket Barometer One Click Install Guide
+========================================
+
+.. contents::
+ :depth: 3
+ :local:
+
+The intention of this user guide is to outline how to use the ansible
+playbooks for a one click installation of Barometer. A more in-depth
+installation guide is available with the
+:ref:`Docker user guide <barometer-docker-userguide>`.
+
+
+One Click Install with Ansible
+------------------------------
+
+
+Proxy for package manager on host
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. note::
+ This step has to be performed only if host is behind HTTP/HTTPS proxy
+
+The proxy URL has to be set in a dedicated config file:
+
+1. CentOS - ``/etc/yum.conf``
+
+.. code:: bash
+
+ proxy=http://your.proxy.domain:1234
+
+2. Ubuntu - ``/etc/apt/apt.conf``
+
+.. code:: bash
+
+ Acquire::http::Proxy "http://your.proxy.domain:1234"
+
+After updating the config file, apt mirrors have to be updated via
+``apt-get update``
+
+.. code:: bash
+
+ $ sudo apt-get update
+
+Proxy environment variables (for docker and pip)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. note::
+ This step has to be performed only if host is behind HTTP/HTTPS proxy
+
+Configuring the proxy for the packaging system is not enough; some proxy
+environment variables also have to be set in the system before the ansible
+scripts can be started.
+Barometer configures the docker proxy automatically via an ansible task as a
+part of the *one click install* process - the user only has to provide the
+proxy URL using the common shell environment variables and ansible will
+automatically configure proxies for docker (to be able to fetch barometer
+images). Other components used by ansible (e.g. pip, which is used for
+downloading python dependencies) will also benefit from setting the proxy
+variables properly in the system.
+
+Proxy variables used by ansible One Click Install:
+ * ``http_proxy``
+ * ``https_proxy``
+ * ``ftp_proxy``
+ * ``no_proxy``
+
+The variables mentioned above have to be visible to the superuser (because most
+actions involving the ``ansible-barometer`` installation require root privileges).
+Proxy variables are commonly defined in the ``/etc/environment`` file (but any
+other place is good as long as the variables can be seen by commands using ``su``).
+
+Sample proxy configuration in ``/etc/environment``:
+
+.. code:: bash
+
+ http_proxy=http://your.proxy.domain:1234
+ https_proxy=http://your.proxy.domain:1234
+ ftp_proxy=http://your.proxy.domain:1234
+ no_proxy=localhost
+
+Install Ansible
+^^^^^^^^^^^^^^^
+.. note::
+ * sudo permissions or root access are required to install ansible.
+ * ansible version needs to be 2.4+, because usage of import/include statements
+
+The following steps have been verified with Ansible 2.6.3 on Ubuntu 16.04 and 18.04.
+To install Ansible 2.6.3 on Ubuntu:
+
+.. code:: bash
+
+ $ sudo apt-get install python
+ $ sudo apt-get install python-pip
+ $ sudo -H pip install 'ansible==2.6.3'
+ $ sudo apt-get install git
+
+The following steps have been verified with Ansible 2.6.3 on Centos 7.5.
+To install Ansible 2.6.3 on Centos:
+
+.. code:: bash
+
+ $ sudo yum install python
+ $ sudo yum install epel-release
+ $ sudo yum install python-pip
+ $ sudo -H pip install 'ansible==2.6.3'
+ $ sudo yum install git
+
+.. note::
+ When using multi-node-setup, please make sure that ``python`` package is
+ installed on all of the target nodes (ansible during 'Gathering facts'
+ phase is using ``python2`` and it may not be installed by default on some
+ distributions - e.g. on Ubuntu 16.04 it has to be installed manually)
+
+Clone barometer repo
+^^^^^^^^^^^^^^^^^^^^
+
+.. code:: bash
+
+ $ git clone https://gerrit.opnfv.org/gerrit/barometer
+ $ cd barometer
+
+Install ansible dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To run the ansible playbooks for the one-click install, additional dependencies are needed.
+They are listed in ``requirements.yml`` and can be installed using::
+
+ $ ansible-galaxy install -r $barometer_dir/requirements.yml
+
+
+Edit inventory file
+^^^^^^^^^^^^^^^^^^^
+Edit inventory file and add hosts:
+``$barometer_dir/docker/ansible/default.inv``
+
+.. code:: bash
+
+ [collectd_hosts]
+ localhost
+
+ [collectd_hosts:vars]
+ install_mcelog=true
+ insert_ipmi_modules=true
+ #to use master or experimental container set the collectd flavor below
+ #possible values: stable|master|experimental
+ flavor=stable
+
+ [influxdb_hosts]
+ #hostname or ip must be used.
+ #using localhost will cause issues with collectd network plugin.
+ #hostname
+
+ [grafana_hosts]
+ #NOTE: As per current support, Grafana and Influxdb should be same host.
+ #hostname
+
+ [prometheus_hosts]
+ #localhost
+
+ [zookeeper_hosts]
+ #NOTE: currently one zookeeper host is supported
+ #hostname
+
+ [kafka_hosts]
+ #hostname
+
+ [ves_hosts]
+ #hostname
+
+Change localhost to different hosts where necessary.
+Hosts for influxdb and grafana are required only for ``collectd_service.yml``.
+Hosts for zookeeper, kafka and ves are required only for ``collectd_ves.yml``.
+
+.. note::
+ Zookeeper, Kafka and VES need to be on the same host, there is no
+ support for multi node setup.
+
+To change host for kafka edit ``kafka_ip_addr`` in
+``./roles/config_files/vars/main.yml``.
+
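+For example, the entry might look like this (illustrative only - use the
+address of the host that will run the Kafka container):
+
+.. code:: bash
+
+   kafka_ip_addr: 192.168.121.111
+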
+Additional plugin dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default ansible will try to fulfill dependencies for ``mcelog`` and
+``ipmi`` plugin. For ``mcelog`` plugin it installs mcelog daemon. For ipmi it
+tries to insert ``ipmi_devintf`` and ``ipmi_si`` kernel modules.
+This can be changed in inventory file with use of variables ``install_mcelog``
+and ``insert_ipmi_modules``, both variables are independent:
+
+.. code:: bash
+
+ [collectd_hosts:vars]
+ install_mcelog=false
+ insert_ipmi_modules=false
+
+.. note::
+ On Ubuntu 18.04 the deb package for mcelog daemon is not available in official
+ Ubuntu repository. In that case ansible scripts will try to download, make and
+ install the daemon from mcelog git repository.
+
+Configure ssh keys
+^^^^^^^^^^^^^^^^^^
+
+Generate ssh keys if not present, otherwise move onto next step.
+ssh keys are required for Ansible to connect to the hosts you use for the Barometer installation.
+
+.. code:: bash
+
+ $ sudo ssh-keygen
+
+Copy the ssh key to all target hosts. This requires you to provide the root password.
+The example is for ``localhost``.
+
+.. code:: bash
+
+ $ sudo -i
+ $ ssh-copy-id root@localhost
+
+Verify that key is added and password is not required to connect.
+
+.. code:: bash
+
+ $ sudo ssh root@localhost
+
+.. note::
+ Keys should be added to every target host and [localhost] is only used as an
+ example. For multinode installation keys need to be copied for each node:
+ [collectd_hostname], [influxdb_hostname] etc.
+
+Build the Collectd containers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This step is optional; if you do not wish to build the containers locally, please continue to `Download and run Collectd+Influxdb+Grafana containers`_.
+This step builds the container images locally, allowing new changes to collectd to be tested.
+This is particularly useful for the ``experimental`` flavor for testing PRs, and for building a ``collectd-6`` container.
+
+To run the playbook and build the containers, run::
+
+ sudo ansible-playbook docker/ansible/collectd_build.yml
+
+By default, all containers will be built.
+Since this can take a while, it is recommended that you choose a flavor to build using tags::
+
+ sudo ansible-playbook docker/ansible/collectd_build.yml --tags='collectd-6,latest'
+
+The available tags are:
+
+* *stable* builds the ``barometer-collectd`` image
+* *latest* builds the ``barometer-collectd-latest`` image
+* *experimental* builds the ``barometer-collectd-experimental`` container, with optional PRs
+* *collectd-6* builds the ``barometer-collectd-6`` container, with optional PR(s)
+
+* *flask_test* builds a small webapp that displays the metrics sent via the write_http plugin
+
+.. note::
+ The flask_test tag must be explicitly enabled.
+ This can be done either through the ``--tags='flask_test'`` (to build just
+ this container) or with ``--tags=all`` to build this and all the other
+ containers as well.
+
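+For example, to build only the test webapp::
+
+   sudo ansible-playbook docker/ansible/collectd_build.yml --tags='flask_test'
+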
+Download and run Collectd+Influxdb+Grafana containers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The One Click installation features easy and scalable deployment of Collectd,
+Influxdb and Grafana containers using an Ansible playbook. The following steps
+go through more details.
+
+.. code:: bash
+
+ $ sudo -H ansible-playbook -i default.inv collectd_service.yml
+
+Check that the three containers are running; the output of ``docker ps`` should be similar to:
+
+.. code:: bash
+
+ $ sudo docker ps
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ 4c2143fb6bbd anuket/barometer-grafana "/run.sh" 59 minutes ago Up 4 minutes bar-grafana
+ 5e356cb1cb04 anuket/barometer-influxdb "/entrypoint.sh infl…" 59 minutes ago Up 4 minutes bar-influxdb
+ 2ddac8db21e2 anuket/barometer-collectd "/run_collectd.sh" About an hour ago Up 4 minutes bar-collectd
+
+To make some changes when a container is running run:
+
+.. code:: bash
+
+ $ sudo docker exec -ti <CONTAINER ID> /bin/bash
+
+Connect to ``<host_ip>:3000`` with a browser and log into Grafana: admin/admin.
+For short introduction please see the:
+`Grafana guide <https://grafana.com/docs/grafana/latest/guides/getting_started/>`_.
+
+The collectd configuration files can be accessed directly on the target system
+in ``/opt/collectd/etc/collectd.conf.d``. They can be used for manual changes
+or to enable/disable plugins. If the configuration has been modified, collectd
+needs to be restarted:
+
+.. code:: bash
+
+ $ sudo docker restart bar-collectd
+
+Download and run collectd+kafka+ves containers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code:: bash
+
+ $ sudo ansible-playbook -i default.inv collectd_ves.yml
+
+Check that the containers are running; the output of ``docker ps`` should be similar to:
+
+.. code:: bash
+
+ $ sudo docker ps
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ d041d8fff849 zookeeper:3.4.11 "/docker-entrypoint.…" 2 minutes ago Up 2 minutes bar-zookeeper
+ da67b81274bc anuket/barometer-ves "./start_ves_app.sh …" 2 minutes ago Up 2 minutes bar-ves
+ 2c25e0c79f93 anuket/barometer-kafka "/src/start_kafka.sh" 2 minutes ago Up 2 minutes bar-kafka
+ b161260c90ed anuket/barometer-collectd "/run_collectd.sh" 2 minutes ago Up 2 minutes bar-collectd
+
+
+To make some changes when a container is running run:
+
+.. code:: bash
+
+ $ sudo docker exec -ti <CONTAINER ID> /bin/bash
+
+List of default plugins for collectd container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. note::
+   Since the Jerma release, the supported dpdk version is 19.11.
+
+ If you would like to use v18.11, make the following changes:
+
+ 1. Update the dpdk version to v18.11 in ``<barometer>/src/package-list.mk``
+ 2. Replace all ``common_linux`` string with ``common_linuxapp`` in ``<barometer>/src/dpdk/Makefile``
+
+ If you would like to downgrade to a version lower than v18.11, make the following changes:
+
+   1. Update the dpdk version to a version lower than v18.11 (e.g. v16.11) in ``<barometer>/src/package-list.mk``
+ 2. Replace all ``common_linux`` string with ``common_linuxapp`` in ``<barometer>/src/dpdk/Makefile``
+ 3. Change the Makefile path from ``(WORKDIR)/kernel/linux/kni/Makefile`` to ``(WORKDIR)/lib/librte_eal/linuxapp/kni/Makefile`` in ``(WORK_DIR)/src/dpdk/Makefile``.
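+
+   For example, the ``common_linux`` to ``common_linuxapp`` replacement above
+   could be sketched with ``sed`` (check the result before building; the word
+   boundary avoids touching strings that already end in ``app``):
+
+   .. code:: bash
+
+      $ sed -i 's/common_linux\b/common_linuxapp/g' <barometer>/src/dpdk/Makefile
+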
+
+By default the collectd is started with default configuration which includes
+the following plugins:
+
+* ``csv``, ``contextswitch``, ``cpu``, ``cpufreq``, ``df``, ``disk``,
+ ``ethstat``, ``ipc``, ``irq``, ``load``, ``memory``, ``numa``,
+ ``processes``, ``swap``, ``turbostat``, ``uuid``, ``uptime``, ``exec``,
+ ``hugepages``, ``intel_pmu``, ``ipmi``, ``write_kafka``, ``logfile``,
+ ``logparser``, ``mcelog``, ``network``, ``intel_rdt``, ``rrdtool``,
+ ``snmp_agent``, ``syslog``, ``virt``, ``ovs_stats``, ``ovs_events``,
+ ``dpdk_telemetry``.
+
+.. note::
+ Some of the plugins are loaded only when specific system requirements are met, and can be
+ omitted if a dependency is not satisfied; this is the case for:
+
+ * ``hugepages``, ``ipmi``, ``mcelog``, ``intel_rdt``, ``virt``, ``ovs_stats``, ``ovs_events``
+
+ For instructions on how to disable certain plugins, see the `List and description of tags used in ansible scripts`_ section.
+
+List and description of tags used in ansible scripts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Tags can be used to run a specific part of the configuration without running
+the whole playbook. To run only specific parts:
+
+.. code:: bash
+
+ $ sudo ansible-playbook -i default.inv collectd_service.yml --tags "syslog,cpu,uuid"
+
+To disable some parts or plugins:
+
+.. code:: bash
+
+ $ sudo ansible-playbook -i default.inv collectd_service.yml --skip-tags "en_default_all,syslog,cpu,uuid"
+
+List of available tags:
+
+``install_docker``
+ Install docker and the required dependencies with the package manager.
+
+``add_docker_proxy``
+ Configure a proxy file for the docker service if a proxy is set in the host environment.
+
+``rm_config_dir``
+ Remove collectd config files.
+
+``copy_additional_configs``
+ Copy additional configuration files to the target system. The path to the
+ additional configuration is stored in
+ ``$barometer_dir/docker/ansible/roles/config_files/defaults/main.yml`` as
+ ``additional_configs_path``.
+
+``en_default_all``
+ Set of default read plugins: ``contextswitch``, ``cpu``, ``cpufreq``, ``df``,
+ ``disk``, ``ethstat``, ``ipc``, ``irq``, ``load``, ``memory``, ``numa``,
+ ``processes``, ``swap``, ``turbostat``, ``uptime``.
+
+``plugins tags``
+ The following tags can be used to enable/disable plugins: ``csv``,
+ ``contextswitch``, ``cpu``, ``cpufreq``, ``df``, ``disk``, ``ethstat``,
+ ``ipc``, ``irq``, ``load``, ``memory``, ``numa``, ``processes``, ``swap``,
+ ``turbostat``, ``uptime``, ``exec``, ``hugepages``, ``ipmi``, ``kafka``,
+ ``logfile``, ``logparser``, ``mcelog``, ``network``, ``pmu``, ``rdt``,
+ ``rrdtool``, ``snmp``, ``syslog``, ``unixsock``, ``virt``, ``ovs_stats``,
+ ``ovs_events``, ``uuid``, ``dpdk_telemetry``. An example of skipping plugin
+ tags is shown below.
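+
+For example, to deploy without the ``virt`` and ``ovs_stats`` plugins, following the
+``--skip-tags`` form shown earlier in this section:
+
+.. code:: bash
+
+   $ sudo ansible-playbook -i default.inv collectd_service.yml --skip-tags "virt,ovs_stats"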
+
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 9fde2df2..dfa44583 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,3 @@
lfdocs-conf
sphinx_opnfv_theme
+reno
diff --git a/docs/testing/index.rst b/docs/testing/index.rst
index 392b39f4..f763ca64 100644
--- a/docs/testing/index.rst
+++ b/docs/testing/index.rst
@@ -1 +1,79 @@
-.. To be decided
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Anuket and others
+
+==============================
+Anuket Barometer testing guide
+==============================
+
+This document describes how to use the different tests in this repository.
+
+There are a number of tools and scripts in Barometer that can be used for testing, whether during
+development, building, code review, or regular runs in CI.
+Some of the tests are automated and cover building collectd; others cover particular plugins.
+
+.. TODO: This guide should also include how to manually verify that collectd plugins are working as expected.
+
+.. TODO: There might be some troubleshooting guide in here too.
+
+Porting collectd to version 6
+=============================
+
+There is an Ansible playbook for building and running collectd 5 and 6 together to compare the collected metrics.
+This is intended to help test porting from collectd 5 to 6, and to confirm equivalence across the versions.
+
+The playbook will::
+
+ * build the collectd-6, collectd-latest and flask app containers
+ * generate a set of collectd configs
+ * launch collectd-6 and collectd-latest with the generated configs
+ * run the flask app, which has an HTTP server that receives metrics from
+   collectd v5 and collectd v6
+ * display the received metrics from both versions of collectd
+   (collectd v5 shows PUTVAL; collectd v6 shows PUTMETRIC)
+
+To run this comparison, use the following commands::
+
+ $ cd docker/ansible/
+ $ sudo ansible-playbook -i default.inv collectd6_test.yml
+
+The playbook takes the following parameters:
+
+ * PR (optional)
+   The PR ID of an upstream collectd pull request that will be
+   passed to the collectd 6 container build.
+
+ * plugin (optional)
+   The name of the plugin that is being ported.
+   This will filter the received metrics to show only the values for that plugin.
+
+To run the playbook with these parameters, pass the extra vars to Ansible::
+
+ sudo ansible-playbook -i default.inv -e PR=<PR_ID> -e plugin=<plugin_name> collectd6_test.yml
+
+The metrics can then be viewed by inspecting the container logs or attaching to the container to view the output::
+
+ $ docker attach <webserver-container>
+ $ #OR
+ $ docker logs <webserver-container>
+
+Metrics from collectd 5 will appear preceded by ``PUTVAL``, and metrics from collectd 6 will appear preceded by ``PUTMETRIC``.
+
+::
+
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-mm-2048Kb/vmpage_number-free interval=10.000 1629466502.664:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-mm-2048Kb/vmpage_number-used interval=10.000 1629466502.664:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-mm-1048576Kb/vmpage_number-free interval=10.000 1629466502.664:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-mm-1048576Kb/vmpage_number-used interval=10.000 1629466502.664:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-node0-2048Kb/vmpage_number-free interval=10.000 1629466502.664:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-node0-2048Kb/vmpage_number-used interval=10.000 1629466502.664:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-node0-1048576Kb/vmpage_number-used interval=10.000 1629466502.665:0
+ PUTVAL fbae30cc-2f20-11b2-a85c-819293100691/hugepages-node0-1048576Kb/vmpage_number-free interval=10.000 1629466502.665:0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.807 interval=10.000 label:hugepages="mm-2048Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="free" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.807 interval=10.000 label:hugepages="mm-2048Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="used" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.808 interval=10.000 label:hugepages="mm-1048576Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="free" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.808 interval=10.000 label:hugepages="node0-2048Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="free" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.808 interval=10.000 label:hugepages="node0-2048Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="used" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.809 interval=10.000 label:hugepages="node0-1048576Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="free" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.809 interval=10.000 label:hugepages="node0-1048576Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="used" 0
+ PUTMETRIC collectd_hugepages_vmpage_number type=GAUGE time=1629466501.808 interval=10.000 label:hugepages="mm-1048576Kb" label:instance="fbae30cc-2f20-11b2-a85c-819293100691" label:type="used" 0
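+
+In addition to the ``plugin`` parameter described above, the output can be filtered
+with standard shell tools when looking for a particular plugin, for example (the
+container name and plugin name are placeholders)::
+
+   $ docker logs <webserver-container> 2>&1 | grep -i hugepages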
diff --git a/requirements.txt b/requirements.txt
index 57a2f51e..0c4b10c2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,3 +8,4 @@ toml # MIT
opnfv # Apache-2.0
functest # Apache-2.0
xtesting # Apache-2.0
+pika # BSD
diff --git a/requirements.yml b/requirements.yml
new file mode 100644
index 00000000..47720b54
--- /dev/null
+++ b/requirements.yml
@@ -0,0 +1,4 @@
+---
+- src: http://github.com/infrawatch/collectd-config-ansible-role
+ version: master
+ name: collectd_config
diff --git a/src/Makefile b/src/Makefile
index 7c6c17cc..b5494265 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -1,19 +1,20 @@
# Top Makefile to build upstream packages.
#
-# Copyright 2016 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
#
# Contributors:
@@ -34,14 +35,14 @@ with-dpdk:
export WITH_DPDK
include mk/master.mk
+# TODO(elfoley) Later on, we can re-enable build from source as a user configured option
SUBDIRS =
-ifdef WITH_DPDK
-SUBDIRS += dpdk
-endif
-SUBDIRS += libpqos
-SUBDIRS += pmu-tools
-SUBDIRS += librdkafka
+#ifdef WITH_DPDK
+#SUBDIRS += dpdk
+#endif
+#SUBDIRS += libpqos
+#SUBDIRS += pmu-tools
+#SUBDIRS += librdkafka
SUBDIRS += collectd
-SUBDIRS += collectd-openstack-plugins
include mk/make-subsys.mk
diff --git a/src/collectd-openstack-plugins/Makefile b/src/collectd-openstack-plugins/Makefile
index 96bbebb8..a6745b5f 100644
--- a/src/collectd-openstack-plugins/Makefile
+++ b/src/collectd-openstack-plugins/Makefile
@@ -1,19 +1,20 @@
# makefile to manage COLLECTD_OPENSTACK package
#
-# Copyright 2015-2017 OPNFV
+# Copyright 2015-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
#
# Contributors:
diff --git a/src/collectd/Makefile b/src/collectd/Makefile
index 8eea00fd..785614d5 100644
--- a/src/collectd/Makefile
+++ b/src/collectd/Makefile
@@ -1,30 +1,33 @@
# makefile to manage collectd package
#
-# Copyright 2016-2017 OPNFV
+# Copyright 2016-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+#
#
# Contributors:
# Aihua Li, Huawei Technologies.
# Maryam Tahhan, Intel Corporation.
+# Emma Foley, Red Hat.
include ../mk/master.mk
include ../package-list.mk
WORK_DIR = collectd
-TAG_DONE_FLAG = $(WORK_DIR)/.$(COLLECTD_TAG).done
+# $COLLECTD_TAG can contain "/", which is being replaced with "-" here
+TAG_DONE_FLAG := $(WORK_DIR)/.$(subst /,-,$(COLLECTD_TAG)).done
BUILD_CMD = ./build.sh
CONFIG_CMD =
CONFIG_CMD += ./configure
@@ -39,6 +42,183 @@ ifeq ($(XDG_CACHE_HOME_DIR),)
XDG_CACHE_HOME_DIR = $(shell echo $$HOME)
endif
+ifeq ($(COLLECTD_FLAVOR),collectd-6)
+CONFIG_CMD += --enable-cpu
+CONFIG_CMD += --enable-interface
+CONFIG_CMD += --enable-memory
+CONFIG_CMD += --enable-ping
+CONFIG_CMD += --enable-uptime
+
+CONFIG_CMD += --disable-aggregation
+CONFIG_CMD += --disable-amqp
+CONFIG_CMD += --disable-amqp1
+#CONFIG_CMD += --disable-apache
+#CONFIG_CMD += --disable-apcups
+#CONFIG_CMD += --disable-apple_sensors
+#CONFIG_CMD += --disable-aquaero
+CONFIG_CMD += --disable-barometer
+#CONFIG_CMD += --disable-battery
+#CONFIG_CMD += --disable-bind
+#CONFIG_CMD += --disable-buddyinfo
+#CONFIG_CMD += --disable-capabilities
+#CONFIG_CMD += --disable-ceph
+#CONFIG_CMD += --disable-cgroups
+#CONFIG_CMD += --disable-chrony
+CONFIG_CMD += --disable-check_uptime
+#CONFIG_CMD += --disable-connectivity
+#CONFIG_CMD += --disable-conntrack
+#CONFIG_CMD += --disable-contextswitch
+#CONFIG_CMD += --disable-cpufreq
+#CONFIG_CMD += --disable-cpusleep
+CONFIG_CMD += --disable-csv
+#CONFIG_CMD += --disable-curl
+#CONFIG_CMD += --disable-curl_json
+CONFIG_CMD += --disable-curl_xml
+#CONFIG_CMD += --disable-dbi
+#CONFIG_CMD += --disable-dcpmm
+#CONFIG_CMD += --disable-df
+#CONFIG_CMD += --disable-disk
+#CONFIG_CMD += --disable-dns
+#CONFIG_CMD += --disable-dpdkevents
+#CONFIG_CMD += --disable-dpdkstat
+#CONFIG_CMD += --disable-dpdk_telemetry
+#CONFIG_CMD += --disable-drbd
+#CONFIG_CMD += --disable-email
+#CONFIG_CMD += --disable-entropy
+#CONFIG_CMD += --disable-ethstat
+#CONFIG_CMD += --disable-exec
+#CONFIG_CMD += --disable-fhcount
+#CONFIG_CMD += --disable-filecount
+#CONFIG_CMD += --disable-fscache
+CONFIG_CMD += --disable-gmond
+#CONFIG_CMD += --disable-gps
+#CONFIG_CMD += --disable-gpu_nvidia
+CONFIG_CMD += --disable-grpc
+#CONFIG_CMD += --disable-hddtemp
+#CONFIG_CMD += --disable-hugepages
+#CONFIG_CMD += --disable-infiniband
+#CONFIG_CMD += --disable-intel_pmu
+#CONFIG_CMD += --disable-intel_rdt
+#CONFIG_CMD += --disable-ipc
+#CONFIG_CMD += --disable-ipmi
+#CONFIG_CMD += --disable-iptables
+#CONFIG_CMD += --disable-ipstats
+#CONFIG_CMD += --disable-ipvs
+#CONFIG_CMD += --disable-irq
+CONFIG_CMD += --disable-java
+#CONFIG_CMD += --disable-load
+#CONFIG_CMD += --disable-log_logstash
+#CONFIG_CMD += --disable-logfile
+#CONFIG_CMD += --disable-logparser
+#CONFIG_CMD += --disable-lpar
+CONFIG_CMD += --disable-lua
+#CONFIG_CMD += --disable-madwifi
+CONFIG_CMD += --disable-match_empty_counter
+CONFIG_CMD += --disable-match_hashed
+CONFIG_CMD += --disable-match_regex
+CONFIG_CMD += --disable-match_timediff
+CONFIG_CMD += --disable-match_value
+#CONFIG_CMD += --disable-mbmon
+#CONFIG_CMD += --disable-mcelog
+#CONFIG_CMD += --disable-md
+#CONFIG_CMD += --disable-mdevents
+#CONFIG_CMD += --disable-memcachec
+#CONFIG_CMD += --disable-memcached
+#CONFIG_CMD += --disable-mic
+CONFIG_CMD += --disable-modbus
+CONFIG_CMD += --disable-mqtt
+#CONFIG_CMD += --disable-multimeter
+#CONFIG_CMD += --disable-mysql
+#CONFIG_CMD += --disable-netapp
+#CONFIG_CMD += --disable-netlink
+#CONFIG_CMD += --disable-netstat_udp
+CONFIG_CMD += --disable-network
+#CONFIG_CMD += --disable-nfs
+#CONFIG_CMD += --disable-nginx
+#CONFIG_CMD += --disable-notify_desktop
+#CONFIG_CMD += --disable-notify_email
+#CONFIG_CMD += --disable-notify_nagios
+#CONFIG_CMD += --disable-ntpd
+#CONFIG_CMD += --disable-numa
+#CONFIG_CMD += --disable-nut
+#CONFIG_CMD += --disable-olsrd
+#CONFIG_CMD += --disable-onewire
+CONFIG_CMD += --disable-openldap
+#CONFIG_CMD += --disable-openvpn
+#CONFIG_CMD += --disable-oracle
+#CONFIG_CMD += --disable-ovs_events
+#CONFIG_CMD += --disable-ovs_stats
+#CONFIG_CMD += --disable-pcie_errors
+CONFIG_CMD += --disable-perl
+#CONFIG_CMD += --disable-pf
+#CONFIG_CMD += --disable-pinba
+CONFIG_CMD += --disable-postgresql
+#CONFIG_CMD += --disable-powerdns
+#CONFIG_CMD += --disable-processes
+#CONFIG_CMD += --disable-procevent
+#CONFIG_CMD += --disable-protocols
+CONFIG_CMD += --disable-python
+#CONFIG_CMD += --disable-redfish
+CONFIG_CMD += --disable-redis
+#CONFIG_CMD += --disable-routeros
+CONFIG_CMD += --disable-rrdcached
+CONFIG_CMD += --disable-rrdtool
+#CONFIG_CMD += --disable-sensors
+#CONFIG_CMD += --disable-serial
+#CONFIG_CMD += --disable-sigrok
+#CONFIG_CMD += --disable-slurm
+#CONFIG_CMD += --disable-smart
+CONFIG_CMD += --disable-snmp
+CONFIG_CMD += --disable-snmp_agent
+CONFIG_CMD += --disable-statsd
+#CONFIG_CMD += --disable-swap
+#CONFIG_CMD += --disable-synproxy
+#CONFIG_CMD += --disable-sysevent
+#CONFIG_CMD += --disable-syslog
+#CONFIG_CMD += --disable-table
+#CONFIG_CMD += --disable-tail
+#CONFIG_CMD += --disable-tail_csv
+#CONFIG_CMD += --disable-tape
+CONFIG_CMD += --disable-target_notification
+CONFIG_CMD += --disable-target_replace
+CONFIG_CMD += --disable-target_scale
+CONFIG_CMD += --disable-target_set
+CONFIG_CMD += --disable-target_v5upgrade
+#CONFIG_CMD += --disable-tcpconns
+#CONFIG_CMD += --disable-teamspeak2
+#CONFIG_CMD += --disable-ted
+#CONFIG_CMD += --disable-thermal
+CONFIG_CMD += --disable-threshold
+#CONFIG_CMD += --disable-tokyotyrant
+#CONFIG_CMD += --disable-turbostat
+#CONFIG_CMD += --disable-ubi
+#CONFIG_CMD += --disable-unixsock
+#CONFIG_CMD += --disable-users
+#CONFIG_CMD += --disable-uuid
+#CONFIG_CMD += --disable-varnish
+#CONFIG_CMD += --disable-virt
+#CONFIG_CMD += --disable-vmem
+#CONFIG_CMD += --disable-vserver
+#CONFIG_CMD += --disable-wireless
+CONFIG_CMD += --disable-write_graphite
+#CONFIG_CMD += --disable-write_http
+CONFIG_CMD += --disable-write_influxdb_udp
+CONFIG_CMD += --disable-write_kafka
+#CONFIG_CMD += --disable-write_log
+CONFIG_CMD += --disable-write_mongodb
+CONFIG_CMD += --disable-write_prometheus
+CONFIG_CMD += --disable-write_redis
+CONFIG_CMD += --disable-write_riemann
+CONFIG_CMD += --disable-write_sensu
+#CONFIG_CMD += --disable-write_stackdriver
+CONFIG_CMD += --disable-write_syslog
+CONFIG_CMD += --disable-write_tsdb
+#CONFIG_CMD += --disable-xencpu
+#CONFIG_CMD += --disable-zfs_arc
+#CONFIG_CMD += --disable-zone
+#CONFIG_CMD += --disable-zookeeper
+CONFIG_CMD += --enable-debug
+else
CONFIG_CMD += --enable-syslog
CONFIG_CMD += --enable-logfile
CONFIG_CMD += --with-libpqos=$(LIBPQOS_DIR)
@@ -51,10 +231,14 @@ CONFIG_CMD += --enable-write_redis
CONFIG_CMD += --disable-perl
CONFIG_CMD += --with-librdkafka=/usr
CONFIG_CMD += --disable-lvm
+endif
+
ifdef WITH_DPDK
CONFIG_CMD += LIBDPDK_CFLAGS='-mssse3'
endif
+CONFIG_CMD += $(COLLECTD_CONFIG_CMD_ARGS)
+
.PHONY: install force_install config force_make
# install depends on make
@@ -64,6 +248,7 @@ all: force_make
@echo "Finished making $(WORK_DIR)"
config $(WORK_DIR)/Makefile: $(WORK_DIR)/configure
+ @echo "Configuring with: " $(CONFIG_CMD)
$(AT)cd $(WORK_DIR); $(CONFIG_CMD)
@echo "Configure done"
@@ -120,7 +305,7 @@ $(WORK_DIR):
$(TAG_DONE_FLAG): $(WORK_DIR)
@echo "Checking out collectd from tag: $(COLLECTD_TAG)"
- $(AT)cd collectd; git checkout $(COLLECTD_TAG)
+ $(AT)cd collectd; git fetch origin $(COLLECTD_TAG); git checkout FETCH_HEAD
ifneq ($(PATCH_FILE),)
$(AT)cd $(WORK_DIR); patch -p1 < ../$(PATCH_FILE)
endif
diff --git a/src/collectd/collectd_apply_pull_request.sh b/src/collectd/collectd_apply_pull_request.sh
new file mode 100755
index 00000000..403d78dd
--- /dev/null
+++ b/src/collectd/collectd_apply_pull_request.sh
@@ -0,0 +1,50 @@
+#! /bin/bash
+# Copyright 2019-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file contains the list of pull requests to be applied on top
+# of the main branch before building the collectd included in the docker
+# collectd-experimental container.
+
+# Use this script with a COLLECTD_PULL_REQUESTS variable defined
+# for example:
+# COLLECTD_PULL_REQUESTS="3027,3028" ./collectd_apply_pull_request.sh
+
+if [ -z "$COLLECTD_PULL_REQUESTS" ];
+then
+ echo "COLLECTD_PULL_REQUESTS is unset, exiting"
+ exit
+fi
+
+IFS=', ' read -a PULL_REQUESTS <<< "$COLLECTD_PULL_REQUESTS"
+
+# during rebasing/merging git requires email & name to be set
+git config user.email "barometer-experimental@container"
+git config user.name "BarometerExperimental"
+
+# If there's a single PR listed, just check it out
+if [ "${#PULL_REQUESTS[@]}" -eq "1" ];
+then
+ echo "Checking out pull request $COLLECTD_PULL_REQUESTS"
+ git fetch origin pull/$COLLECTD_PULL_REQUESTS/head && git checkout FETCH_HEAD
+else
+# if there are multiple PRs, rebase them on top of the checked out branch
+ for PR_ID in "${PULL_REQUESTS[@]}"
+ do
+ echo "Applying pull request $PR_ID"
+ git pull --rebase origin pull/$PR_ID/head
+ done
+fi
diff --git a/docker/barometer-collectd-experimental/experimental-configs/README b/src/collectd/collectd_sample_configs-experimental/README
index e4386358..e4386358 100644
--- a/docker/barometer-collectd-experimental/experimental-configs/README
+++ b/src/collectd/collectd_sample_configs-experimental/README
diff --git a/docker/ansible/roles/config_files/templates/uuid.conf.j2 b/src/collectd/collectd_sample_configs-latest/capabilities.conf
index a9a84e71..a422b702 100644
--- a/docker/ansible/roles/config_files/templates/uuid.conf.j2
+++ b/src/collectd/collectd_sample_configs-latest/capabilities.conf
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2019 OPNFV and Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-LoadPlugin uuid
+LoadPlugin capabilities
-#<Plugin uuid>
-# UUIDFile "/etc/uuid"
-#</Plugin>
+<Plugin capabilities>
+ Port "9564"
+</Plugin>
diff --git a/src/collectd/collectd_sample_configs-master/csv.conf b/src/collectd/collectd_sample_configs-latest/csv.conf
index 99a8498d..99a8498d 100644
--- a/src/collectd/collectd_sample_configs-master/csv.conf
+++ b/src/collectd/collectd_sample_configs-latest/csv.conf
diff --git a/src/collectd/collectd_sample_configs-master/default_plugins.conf b/src/collectd/collectd_sample_configs-latest/default_plugins.conf
index c96b0076..c96b0076 100644
--- a/src/collectd/collectd_sample_configs-master/default_plugins.conf
+++ b/src/collectd/collectd_sample_configs-latest/default_plugins.conf
diff --git a/src/collectd/collectd_sample_configs-master/exec.conf b/src/collectd/collectd_sample_configs-latest/exec.conf
index 0a291bdb..0a291bdb 100644
--- a/src/collectd/collectd_sample_configs-master/exec.conf
+++ b/src/collectd/collectd_sample_configs-latest/exec.conf
diff --git a/src/collectd/collectd_sample_configs-master/hugepages.conf b/src/collectd/collectd_sample_configs-latest/hugepages.conf
index 97cd2d17..97cd2d17 100644
--- a/src/collectd/collectd_sample_configs-master/hugepages.conf
+++ b/src/collectd/collectd_sample_configs-latest/hugepages.conf
diff --git a/src/collectd/collectd_sample_configs-master/intel_pmu.conf b/src/collectd/collectd_sample_configs-latest/intel_pmu.conf
index 959fb8a1..959fb8a1 100644
--- a/src/collectd/collectd_sample_configs-master/intel_pmu.conf
+++ b/src/collectd/collectd_sample_configs-latest/intel_pmu.conf
diff --git a/src/collectd/collectd_sample_configs-master/kafka.conf b/src/collectd/collectd_sample_configs-latest/kafka.conf
index f81e87fd..f81e87fd 100644
--- a/src/collectd/collectd_sample_configs-master/kafka.conf
+++ b/src/collectd/collectd_sample_configs-latest/kafka.conf
diff --git a/docker/barometer-collectd-experimental/experimental-configs/logparser.conf b/src/collectd/collectd_sample_configs-latest/logparser.conf
index 3802768b..1f1a725b 100644
--- a/docker/barometer-collectd-experimental/experimental-configs/logparser.conf
+++ b/src/collectd/collectd_sample_configs-latest/logparser.conf
@@ -1,16 +1,17 @@
-# Copyright 2019 OPNFV
+# Copyright 2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
LoadPlugin logparser
diff --git a/src/collectd/collectd_sample_configs-master/mcelog.conf b/src/collectd/collectd_sample_configs-latest/mcelog.conf
index 633a3bcd..633a3bcd 100644
--- a/src/collectd/collectd_sample_configs-master/mcelog.conf
+++ b/src/collectd/collectd_sample_configs-latest/mcelog.conf
diff --git a/src/collectd/collectd_sample_configs-master/network.conf b/src/collectd/collectd_sample_configs-latest/network.conf
index 4309ed86..4309ed86 100644
--- a/src/collectd/collectd_sample_configs-master/network.conf
+++ b/src/collectd/collectd_sample_configs-latest/network.conf
diff --git a/src/collectd/collectd_sample_configs-master/ovs_events.conf b/src/collectd/collectd_sample_configs-latest/ovs_events.conf
index 250e1e43..250e1e43 100644
--- a/src/collectd/collectd_sample_configs-master/ovs_events.conf
+++ b/src/collectd/collectd_sample_configs-latest/ovs_events.conf
diff --git a/src/collectd/collectd_sample_configs-master/ovs_pmd_stats.sh b/src/collectd/collectd_sample_configs-latest/ovs_pmd_stats.sh
index 0517717f..78e72047 100755
--- a/src/collectd/collectd_sample_configs-master/ovs_pmd_stats.sh
+++ b/src/collectd/collectd_sample_configs-latest/ovs_pmd_stats.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,4 +12,5 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
sudo python /usr/local/src/ovs_pmd_stats.py --socket-pid-file /var/run/openvswitch/ovs-vswitchd.pid
diff --git a/src/collectd/collectd_sample_configs-master/ovs_stats.conf b/src/collectd/collectd_sample_configs-latest/ovs_stats.conf
index 2a6d2fb2..2a6d2fb2 100644
--- a/src/collectd/collectd_sample_configs-master/ovs_stats.conf
+++ b/src/collectd/collectd_sample_configs-latest/ovs_stats.conf
diff --git a/src/collectd/collectd_sample_configs-master/pcie_errors.conf b/src/collectd/collectd_sample_configs-latest/pcie_errors.conf
index de9b7533..de9b7533 100644
--- a/src/collectd/collectd_sample_configs-master/pcie_errors.conf
+++ b/src/collectd/collectd_sample_configs-latest/pcie_errors.conf
diff --git a/src/collectd/collectd_sample_configs-master/prometheus.conf b/src/collectd/collectd_sample_configs-latest/prometheus.conf
index 13ef328e..13ef328e 100644
--- a/src/collectd/collectd_sample_configs-master/prometheus.conf
+++ b/src/collectd/collectd_sample_configs-latest/prometheus.conf
diff --git a/src/collectd/collectd_sample_configs-master/rdt.conf b/src/collectd/collectd_sample_configs-latest/rdt.conf
index ae983dc0..ae983dc0 100644
--- a/src/collectd/collectd_sample_configs-master/rdt.conf
+++ b/src/collectd/collectd_sample_configs-latest/rdt.conf
diff --git a/src/collectd/collectd_sample_configs-master/snmp_agent.conf b/src/collectd/collectd_sample_configs-latest/snmp_agent.conf
index 7cbde816..7cbde816 100644
--- a/src/collectd/collectd_sample_configs-master/snmp_agent.conf
+++ b/src/collectd/collectd_sample_configs-latest/snmp_agent.conf
diff --git a/src/collectd/collectd_sample_configs-master/virt.conf b/src/collectd/collectd_sample_configs-latest/virt.conf
index 88229e3c..88229e3c 100644
--- a/src/collectd/collectd_sample_configs-master/virt.conf
+++ b/src/collectd/collectd_sample_configs-latest/virt.conf
diff --git a/src/collectd/collectd_sample_configs-master/write_notification.sh b/src/collectd/collectd_sample_configs-latest/write_notification.sh
index ed6ed9e2..47ae9a83 100755
--- a/src/collectd/collectd_sample_configs-master/write_notification.sh
+++ b/src/collectd/collectd_sample_configs-latest/write_notification.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
rm -f /tmp/notifications
while read x y
do
diff --git a/src/collectd/collectd_sample_configs-master/dpdkevents.conf b/src/collectd/collectd_sample_configs-master/dpdkevents.conf
deleted file mode 100644
index fdb6f3db..00000000
--- a/src/collectd/collectd_sample_configs-master/dpdkevents.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2017 OPNFV
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-<LoadPlugin dpdkevents>
- Interval 1
-</LoadPlugin>
-
-<Plugin "dpdkevents">
- <EAL>
-# Coremask "0x1"
-# MemoryChannels "4"
-# FilePrefix "rte"
- </EAL>
- <Event "link_status">
- SendEventsOnUpdate false
- EnabledPortMask 0xffff
- SendNotification true
- </Event>
- <Event "keep_alive">
- SendEventsOnUpdate false
- LCoreMask "0xf"
- KeepAliveShmName "/dpdk_keepalive_shm_name"
- SendNotification true
- </Event>
-</Plugin>
diff --git a/src/collectd/collectd_sample_configs-master/dpdkstat.conf b/src/collectd/collectd_sample_configs-master/dpdkstat.conf
deleted file mode 100644
index 59906d4e..00000000
--- a/src/collectd/collectd_sample_configs-master/dpdkstat.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2017 OPNFV
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-LoadPlugin dpdkstat
-
-<Plugin dpdkstat>
-# Coremask "0xf"
-# ProcessType "secondary"
-# FilePrefix "rte"
- EnabledPortMask 0xffff
-# PortName "interface1"
-# PortName "interface2"
-</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/network.conf.j2 b/src/collectd/collectd_sample_configs/capabilities.conf
index c89a18eb..a422b702 100644
--- a/docker/ansible/roles/config_files/templates/network.conf.j2
+++ b/src/collectd/collectd_sample_configs/capabilities.conf
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+# Copyright 2019 OPNFV and Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-LoadPlugin network
+LoadPlugin capabilities
-#this is auto-filled using ansible script
-<Plugin network>
+<Plugin capabilities>
+ Port "9564"
</Plugin>
diff --git a/docker/ansible/roles/config_files/templates/rdt.conf.j2 b/src/collectd/collectd_sample_configs/dpdk_telemetry.conf
index 0e1afcfc..3bc0dd15 100644
--- a/docker/ansible/roles/config_files/templates/rdt.conf.j2
+++ b/src/collectd/collectd_sample_configs/dpdk_telemetry.conf
@@ -1,4 +1,4 @@
-#Copyright 2018 OPNFV and Intel Corporation
+#Copyright 2020 OPNFV and Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-<LoadPlugin intel_rdt>
- Interval {{ rdt_interval }}
+<LoadPlugin dpdk_telemetry>
+ Interval 1
</LoadPlugin>
-<Plugin intel_rdt>
- Cores ""
+<Plugin dpdk_telemetry>
+ ClientSocketPath "/var/run/.client"
+ DpdkSocketPath "/var/run/dpdk/rte/telemetry"
</Plugin>
-
diff --git a/src/collectd/collectd_sample_configs/dpdkevents.conf b/src/collectd/collectd_sample_configs/dpdkevents.conf
deleted file mode 100644
index fdb6f3db..00000000
--- a/src/collectd/collectd_sample_configs/dpdkevents.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2017 OPNFV
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-<LoadPlugin dpdkevents>
- Interval 1
-</LoadPlugin>
-
-<Plugin "dpdkevents">
- <EAL>
-# Coremask "0x1"
-# MemoryChannels "4"
-# FilePrefix "rte"
- </EAL>
- <Event "link_status">
- SendEventsOnUpdate false
- EnabledPortMask 0xffff
- SendNotification true
- </Event>
- <Event "keep_alive">
- SendEventsOnUpdate false
- LCoreMask "0xf"
- KeepAliveShmName "/dpdk_keepalive_shm_name"
- SendNotification true
- </Event>
-</Plugin>
diff --git a/src/collectd/collectd_sample_configs/dpdkstat.conf b/src/collectd/collectd_sample_configs/dpdkstat.conf
deleted file mode 100644
index 919e6e6e..00000000
--- a/src/collectd/collectd_sample_configs/dpdkstat.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2017 OPNFV
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-LoadPlugin dpdkstat
-
-<Plugin dpdkstat>
-# Coremask "0xf"
-# ProcessType "secondary"
-# FilePrefix "rte"
- EnabledPortMask 0xffff
-# PortName "interface1"
-# PortName "interface2"
-</Plugin>
-
diff --git a/src/collectd/collectd_sample_configs/intel_pmu.conf b/src/collectd/collectd_sample_configs/intel_pmu.conf
index 959fb8a1..c4beee0c 100644
--- a/src/collectd/collectd_sample_configs/intel_pmu.conf
+++ b/src/collectd/collectd_sample_configs/intel_pmu.conf
@@ -1,4 +1,4 @@
-# Copyright 2017 OPNFV
+# Copyright 2017-21 Anuket and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,13 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-LoadPlugin intel_pmu
+
+<LoadPlugin intel_pmu>
+ Interval 1
+</LoadPlugin>
<Plugin intel_pmu>
ReportHardwareCacheEvents true
ReportKernelPMUEvents true
ReportSoftwareEvents true
-# EventList "/var/cache/pmu/GenuineIntel-6-2D-core.json"
+# EventList "$HOME/.cache/pmu-events/GenuineIntel-6-2D-core.json"
# HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD"
- Cores "[0-4]"
+ Cores ""
</Plugin>
diff --git a/src/collectd/collectd_sample_configs/logparser.conf b/src/collectd/collectd_sample_configs/logparser.conf
new file mode 100644
index 00000000..1f1a725b
--- /dev/null
+++ b/src/collectd/collectd_sample_configs/logparser.conf
@@ -0,0 +1,75 @@
+# Copyright 2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LoadPlugin logparser
+
+<Plugin logparser>
+ <Logfile "/var/log/syslog">
+ FirstFullRead false
+ <Message "pcie_errors">
+ DefaultType "pcie_error"
+ DefaultSeverity "warning"
+ <Match "aer error">
+ Regex "AER:.*error received"
+ SubmatchIdx -1
+ </Match>
+ <Match "incident time">
+ Regex "(... .. ..:..:..) .* pcieport.*AER"
+ SubmatchIdx 1
+ IsMandatory false
+ </Match>
+ <Match "root port">
+ Regex "pcieport (.*): AER:"
+ SubmatchIdx 1
+ IsMandatory true
+ </Match>
+ <Match "device">
+ PluginInstance true
+ Regex " ([0-9a-fA-F:\\.]*): PCIe Bus Error"
+ SubmatchIdx 1
+ IsMandatory false
+ </Match>
+ <Match "severity_mandatory">
+ Regex "severity="
+ SubMatchIdx -1
+ </Match>
+ <Match "nonfatal">
+ Regex "severity=.*\\([nN]on-[fF]atal"
+ TypeInstance "non_fatal"
+ IsMandatory false
+ </Match>
+ <Match "fatal">
+ Regex "severity=.*\\([fF]atal"
+ Severity "failure"
+ TypeInstance "fatal"
+ IsMandatory false
+ </Match>
+ <Match "corrected">
+ Regex "severity=Corrected"
+ TypeInstance "correctable"
+ IsMandatory false
+ </Match>
+ <Match "error type">
+ Regex "type=(.*),"
+ SubmatchIdx 1
+ IsMandatory false
+ </Match>
+ <Match "id">
+ Regex ", id=(.*)"
+ SubmatchIdx 1
+ </Match>
+ </Message>
+ </Logfile>
+</Plugin>
diff --git a/src/collectd/collectd_sample_configs/ovs_pmd_stats.sh b/src/collectd/collectd_sample_configs/ovs_pmd_stats.sh
index 0517717f..d3e83d82 100755
--- a/src/collectd/collectd_sample_configs/ovs_pmd_stats.sh
+++ b/src/collectd/collectd_sample_configs/ovs_pmd_stats.sh
@@ -1,15 +1,16 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
sudo python /usr/local/src/ovs_pmd_stats.py --socket-pid-file /var/run/openvswitch/ovs-vswitchd.pid
diff --git a/src/collectd/collectd_sample_configs/snmp_agent.conf b/src/collectd/collectd_sample_configs/snmp_agent.conf
index a4511a94..7452ccae 100644
--- a/src/collectd/collectd_sample_configs/snmp_agent.conf
+++ b/src/collectd/collectd_sample_configs/snmp_agent.conf
@@ -17,7 +17,6 @@ LoadPlugin snmp_agent
<Table "pmuTable">
IndexOID "INTEL-PMU-MIB::pmuGroupIndex"
<Data "pmuGroupDescr">
- Instance true
Plugin "intel_pmu"
OIDs "INTEL-PMU-MIB::pmuGroupDescr"
</Data>
@@ -279,7 +278,6 @@ LoadPlugin snmp_agent
IndexOID "INTEL-RDT-MIB::rdtGroupIndex"
SizeOID "INTEL-RDT-MIB::rdtGroupNumber"
<Data "rdtGroupDescr">
- Instance true
Plugin "intel_rdt"
OIDs "INTEL-RDT-MIB::rdtGroupDescr"
</Data>
@@ -311,7 +309,6 @@ LoadPlugin snmp_agent
<Table "mcelogTable">
IndexOID "INTEL-MCELOG-MIB::memoryGroupIndex"
<Data "memoryGroupDescr">
- Instance true
Plugin "mcelog"
OIDs "INTEL-MCELOG-MIB::memoryGroupDescr"
</Data>
@@ -343,7 +340,6 @@ LoadPlugin snmp_agent
# Hugepages
<Table "hugepagesTable">
<Data "hugepagesNode">
- Instance true
Plugin "hugepages"
OIDs "INTEL-HUGEPAGES-MIB::hugepagesNode"
</Data>
diff --git a/src/collectd/collectd_sample_configs/write_notification.sh b/src/collectd/collectd_sample_configs/write_notification.sh
index ed6ed9e2..47ae9a83 100755
--- a/src/collectd/collectd_sample_configs/write_notification.sh
+++ b/src/collectd/collectd_sample_configs/write_notification.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
rm -f /tmp/notifications
while read x y
do
diff --git a/src/collectd/include_config.sh b/src/collectd/include_config.sh
index 4f91a453..f8db0dcf 100755
--- a/src/collectd/include_config.sh
+++ b/src/collectd/include_config.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +15,7 @@
# Config file options are changing between releases so we have to store both
# configurations variants and choose correct one for target collectd version
+#
if [ -z "$1" ]; then
echo "Error! Please sample configs variant name as a param!"\
diff --git a/src/collectd/ovs_pmd_stats_config.sh b/src/collectd/ovs_pmd_stats_config.sh
index bdb0615d..5b6f490e 100755
--- a/src/collectd/ovs_pmd_stats_config.sh
+++ b/src/collectd/ovs_pmd_stats_config.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
export CURRENT_DIR=$(pwd)
diff --git a/src/collectd/snmp_mib_config.sh b/src/collectd/snmp_mib_config.sh
index a2c44db1..a1ff677f 100755
--- a/src/collectd/snmp_mib_config.sh
+++ b/src/collectd/snmp_mib_config.sh
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 2017 OPNFV
+# Copyright 2017-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
export CURRENT_DIR=$(pwd)
diff --git a/src/dpdk/Makefile b/src/dpdk/Makefile
index 40fa4820..44da38c9 100644
--- a/src/dpdk/Makefile
+++ b/src/dpdk/Makefile
@@ -1,19 +1,20 @@
# makefile to manage dpdk package
#
-# Copyright 2015-2017 OPNFV
+# Copyright 2015-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
#
# Contributors:
@@ -21,6 +22,7 @@
# Martin Klozik, Intel Corporation.
# Christian Trautman, Red Hat Inc.
# Maryam Tahhan, Intel Corporation.
+# Jabir Kanhira Kadavathu, Intel Corporation.
include ../mk/master.mk
include ../package-list.mk
@@ -33,14 +35,14 @@ TAG_DONE_FLAG = $(WORK_DIR)/.$(DPDK_TAG).tag.done
# VHOST configuration options are stored in different files based on DPDK version
# v1.2.3r0-v1.6.0r2 - configuration inside config/defconfig_x86_64-default-linuxapp-gcc
-# v1.7.0-rc1-v2.2.0 - configuration inside config/common_linuxapp
-# v16 and newer - configuration split between config/common_linuxapp and config/common_base
+# v1.7.0-rc1-v2.2.0 - configuration inside config/common_linux
+# v16 and newer - configuration split between config/common_linux and config/common_base
DPDK_TAG_MAJOR = $(shell echo $(DPDK_TAG) | cut -d. -f1)
DPDK_TAG_MINOR = $(shell echo $(DPDK_TAG) | cut -d. -f2)
ifeq ($(DPDK_TAG_MAJOR),v1)
ifeq ($(DPDK_TAG_MINOR), $(filter $(DPDK_TAG_MINOR), 7 8))
DPDK_TARGET = x86_64-native-linuxapp-gcc
- CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linuxapp
+ CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linux
else
DPDK_TARGET = x86_64-default-linuxapp-gcc
CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/defconfig_x86_64-default-linuxapp-gcc
@@ -48,11 +50,11 @@ endif
else
ifeq ($(DPDK_TAG_MAJOR),v2)
DPDK_TARGET = x86_64-native-linuxapp-gcc
- CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linuxapp
+ CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linux
else
DPDK_TARGET = x86_64-native-linuxapp-gcc
CONFIG_FILE_BASE = $(WORK_DIR)/config/common_base
- CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linuxapp
+ CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linux
endif
endif
@@ -85,13 +87,13 @@ endif
# CentOS 7.3 specific config changes to compile
ifeq ($(ID),"centos")
ifeq ($(VERSION_ID),"7")
- $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/lib/librte_eal/linuxapp/kni/Makefile
+ $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/kernel/linux/kni/Makefile
endif
endif
# RHEL 7.3 specific config changes to compile
ifeq ($(ID),"rhel")
ifeq ($(VERSION_ID),"7.3")
- $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/lib/librte_eal/linuxapp/kni/Makefile
+ $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/kernel/linux/kni/Makefile
endif
endif
$(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_KNI=./CONFIG_RTE_LIBRTE_KNI=n/g' $(CONFIG_FILE_LINUXAPP)
diff --git a/src/libpqos/Makefile b/src/libpqos/Makefile
index a32a4ab8..a64afd2a 100644
--- a/src/libpqos/Makefile
+++ b/src/libpqos/Makefile
@@ -1,19 +1,20 @@
# makefile to manage collectd package
#
-# Copyright 2016 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
#
# Contributors:
@@ -40,7 +41,9 @@ INSTALL_TARGET = force_install force_make
force_make: $(WORK_DIR)
$(AT)cd $(WORK_DIR) && git pull $(LIBPQOS_URL) $(LIBPQOS_TAG)
@echo "git pull done"
- $(AT)$(MAKE) -C $(WORK_DIR) $(MORE_MAKE_FLAGS)
+ $(AT)$(MAKE) -C $(WORK_DIR)/lib $(MORE_MAKE_FLAGS)
+ $(AT)$(MAKE) -C $(WORK_DIR)/pqos $(MORE_MAKE_FLAGS)
+ $(AT)$(MAKE) -C $(WORK_DIR)/rdtset $(MORE_MAKE_FLAGS)
@echo "Make done"
force_install:
diff --git a/src/librdkafka/Makefile b/src/librdkafka/Makefile
index ea5e34e2..5769e27d 100644
--- a/src/librdkafka/Makefile
+++ b/src/librdkafka/Makefile
@@ -1,17 +1,18 @@
# makefile to manage collectd package
-# Copyright 2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
#
# Contributors:
diff --git a/src/package-list.mk b/src/package-list.mk
index 47b90bde..b27cd575 100644
--- a/src/package-list.mk
+++ b/src/package-list.mk
@@ -6,7 +6,7 @@
# dpdk section
# DPDK_URL ?= git://dpdk.org/dpdk
DPDK_URL ?= http://dpdk.org/git/dpdk
-DPDK_TAG ?= v16.11
+DPDK_TAG ?= v19.11
LIBPQOS_URL ?= https://github.com/01org/intel-cmt-cat.git
LIBPQOS_TAG ?= master
@@ -15,31 +15,40 @@ PMUTOOLS_URL ?= https://github.com/andikleen/pmu-tools
PMUTOOLS_TAG ?= master
KAFKA_URL ?= https://github.com/edenhill/librdkafka.git
-KAFKA_TAG ?= v0.9.5
+KAFKA_TAG ?= v1.5.2
# collectd section
COLLECTD_URL ?= https://github.com/collectd/collectd
-# there are 3 collectd flavors:
+# there are 4 collectd flavors:
# -"stable" - based on stable collectd release
-# -"master" - development version, based on master branch
-# -"experimental" - it is based on master branch as above and includes
+# -"latest" - development version, based on main branch
+# -"experimental" - it is based on main branch as above and includes
# set pull requests with experimental features
+# -"collectd-6" - based on the collectd 6.0 branch
ifeq ($(COLLECTD_FLAVOR), stable)
-# using latest stable release
- COLLECTD_TAG ?= collectd-5.8
+# using the most recent stable release
+ COLLECTD_TAG ?= collectd-5.12
SAMPLE_CONF_VARIANT_NAME = collectd_sample_configs
-else
-# 'master' and 'experimental' collectd flavors are both using
-# code from master branch
- COLLECTD_TAG ?= master
- SAMPLE_CONF_VARIANT_NAME = collectd_sample_configs-master
+endif
+ifeq ($(COLLECTD_FLAVOR), latest)
+# collectd code from main branch
+ COLLECTD_TAG ?= main
+ SAMPLE_CONF_VARIANT_NAME = collectd_sample_configs-latest
+endif
ifeq ($(COLLECTD_FLAVOR), experimental)
# 'experimental' flavor is using additional Pull Requests that
-# are put on top of master release
+# are put on top of main release
+ COLLECTD_TAG ?= main
+ SAMPLE_CONF_VARIANT_NAME = collectd_sample_configs-latest
COLLECTD_USE_EXPERIMENTAL_PR ?= y
endif #end of experimental-branch handling
-endif
+ifeq ($(COLLECTD_FLAVOR), collectd-6)
+# 'collectd-6' flavor is using collectd-6.0 branch
+ COLLECTD_TAG ?= collectd-6.0
+ SAMPLE_CONF_VARIANT_NAME = collectd_sample_configs-latest
+ COLLECTD_USE_EXPERIMENTAL_PR ?= y
+endif #end of collectd-6.0-branch handling
-COLLECTD_OPENSTACK_URL ?= https://github.com/openstack/collectd-openstack-plugins
-COLLECTD_OPENSTACK_TAG ?= stable/pike
+@echo "Using COLLECTD_TAG: $(COLLECTD_TAG)"
+@echo "Using SAMPLE_CONF_VARIANT_NAME: $(SAMPLE_CONF_VARIANT_NAME)"
diff --git a/src/pmu-tools/Makefile b/src/pmu-tools/Makefile
index ab0d8170..c0fc3030 100644
--- a/src/pmu-tools/Makefile
+++ b/src/pmu-tools/Makefile
@@ -1,19 +1,20 @@
# makefile to manage collectd package
#
-# Copyright 2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
#
# Contributors:
diff --git a/systems/centos/7/build_base_machine.sh b/systems/centos/7/build_base_machine.sh
index 35e65e6d..346160fa 100755
--- a/systems/centos/7/build_base_machine.sh
+++ b/systems/centos/7/build_base_machine.sh
@@ -2,13 +2,14 @@
#
# Build a base machine for CentOS distro
#
-# Copyright 2017-2018 OPNFV
+# Copyright 2017-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,6 +17,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+#
# Contributors:
# Aihua Li, Huawei Technologies.
# Martin Klozik, Intel Corporation.
@@ -31,8 +33,8 @@ yum-builddep -y collectd
# Install required packages
yum -y install $(echo "
-kernel-devel
-kernel-headers
+#kernel-devel
+#kernel-headers
make
gcc
gcc-c++
@@ -42,7 +44,8 @@ flex
bison
libtool
pkg-config
-git
+git-core
+sudo
rpm-build
libcap-devel
xfsprogs-devel
@@ -55,10 +58,18 @@ mcelog
wget
net-snmp-devel
hiredis-devel
+libmicrohttpd-devel
+jansson-devel
+libatasmart-devel
+librdkafka-devel
+yajl-devel
+protobuf-c-devel
+rrdtool-devel
+intel-cat-cmt
+dpdk-18.11
+qpid-proton-c-devel
-# install epel release required for git-review
-epel-release
libvirt-python
-python2-pip
+python3-pip
numactl-devel
" | grep -v ^#)
diff --git a/systems/centos/8/build_base_machine.sh b/systems/centos/8/build_base_machine.sh
new file mode 100755
index 00000000..bb3caa12
--- /dev/null
+++ b/systems/centos/8/build_base_machine.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# Build a base machine for CentOS distro
+#
+# Copyright 2017-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# Contributors:
+# Aihua Li, Huawei Technologies.
+# Martin Klozik, Intel Corporation.
+# Maryam Tahhan, Intel Corporation.
+# Emma Foley, Red Hat.
+# Synchronize package index files
+dnf -y update
+
+# For collectd
+dnf install -y yum-utils
+dnf install -y centos-release-opstools
+
+# For CentOS 8, a lot of the dependencies are from PowerTools repo
+dnf install -y 'dnf-command(config-manager)' && dnf config-manager --set-enabled powertools
+
+# Use collectd.spec from centos-opstools to install deps since
+# ``dnf builddep -y collectd`` isn't finding collectd in centos-opstools
+dnf builddep -y https://raw.githubusercontent.com/centos-opstools/collectd/master/collectd.spec
+
+# Install required packages
+dnf -y install $(echo "
+
+make
+gcc
+gcc-c++
+autoconf
+automake
+flex
+bison
+libtool
+pkg-config
+git-core
+sudo
+rpm-build
+which
+libcap-devel
+xfsprogs-devel
+iptables-devel
+libmemcached-devel
+gtk2-devel
+libvirt-devel
+libvirt-daemon
+mcelog
+wget
+net-snmp-devel
+hiredis-devel
+libmicrohttpd-devel
+jansson-devel
+libpcap-devel
+lua-devel
+OpenIPMI-devel
+libmnl-devel
+librabbitmq-devel
+iproute-static
+libatasmart-devel
+librdkafka-devel
+yajl-devel
+protobuf-c-devel
+rrdtool-devel
+dpdk-20.11
+qpid-proton-c-devel
+
+# ping plugin for collectd-6
+liboping-devel
+
+python3-pip
+python36-devel
+numactl-devel
+intel-cmt-cat
+intel-cmt-cat-devel
+" | grep -v ^#)
diff --git a/systems/fedora/22/build_base_machine.sh b/systems/fedora/22/build_base_machine.sh
index c0c61462..2d749cfb 100755
--- a/systems/fedora/22/build_base_machine.sh
+++ b/systems/fedora/22/build_base_machine.sh
@@ -2,13 +2,13 @@
#
# Build a base machine for Fedora 22
#
-# Copyright 2015 OPNFV, Intel Corporation.
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,6 +16,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+#
# Contributors:
# Aihua Li, Huawei Technologies.
# Martin Klozik, Intel Corporation.
diff --git a/systems/rhel/7/build_base_machine.sh b/systems/rhel/7/build_base_machine.sh
index 037f87ad..ac9de4f5 100755
--- a/systems/rhel/7/build_base_machine.sh
+++ b/systems/rhel/7/build_base_machine.sh
@@ -2,13 +2,14 @@
#
# Build a base machine for RHEL distro
#
-# Copyright 2017 OPNFV
+# Copyright 2016-2021 Intel Corporation, Anuket and others.
+# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,10 +17,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+#
# Contributors:
# Aihua Li, Huawei Technologies.
# Martin Klozik, Intel Corporation.
# Maryam Tahhan, Intel Corporation.
+# Emma Foley, Red Hat.
# Synchronize package index files
yum -y update
@@ -42,7 +45,8 @@ flex
bison
libtool
pkg-config
-git
+git-core
+sudo
rpm-build
libcap-devel
xfsprogs-devel
diff --git a/systems/ubuntu/16.04/build_base_machine.sh b/systems/ubuntu/16.04/build_base_machine.sh
index 7ea42404..d04d587e 100755
--- a/systems/ubuntu/16.04/build_base_machine.sh
+++ b/systems/ubuntu/16.04/build_base_machine.sh
@@ -1,21 +1,27 @@
#!/bin/bash
-# Copyright 2016-2017 OPNFV
+# Copyright 2016-2019 Intel Corporation and OPNFV. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
+
set -eux
-apt-get -y install build-essential dh-autoreconf fakeroot devscripts dpkg-dev git wget
-apt-get -y install \
+export DEBIAN_FRONTEND=noninteractive
+
+apt-get -yq update
+apt-get -yq install build-essential dh-autoreconf fakeroot devscripts dpkg-dev git-core wget sudo
+
+apt-get -yq install \
debhelper dpkg-dev po-debconf dh-systemd dh-strip-nondeterminism \
bison flex autotools-dev libltdl-dev pkg-config \
iptables-dev \
diff --git a/tox.ini b/tox.ini
index 69aa1893..6f02ca0b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,6 +6,7 @@ envlist =
skipsdist = true
[testenv:docs]
+basepython = python3
deps = -rdocs/requirements.txt
commands =
sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
@@ -13,5 +14,16 @@ commands =
whitelist_externals = echo
[testenv:docs-linkcheck]
+basepython = python3
deps = -rdocs/requirements.txt
commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
+
+[testenv:docs-releasenotes]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands = reno --rel-notes-dir docs/release/release-notes/ report
+
+[testenv:docs-create-note]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands = reno --rel-notes-dir docs/release/release-notes/ new {posargs}
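
The two new tox environments wrap reno for release-note handling: docs-create-note scaffolds a new note, passing {posargs} through as the note slug to `reno new`, while docs-releasenotes renders a report from the notes under docs/release/release-notes/. A minimal usage sketch, with a hypothetical slug:

    # create a new release note stub (slug is illustrative)
    tox -e docs-create-note -- fix-ovs-events-plugin
    # render the release-notes report
    tox -e docs-releasenotes
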