Diffstat (limited to 'tools/collectors')
-rwxr-xr-x  tools/collectors/cadvisor/__init__.py  |  17
-rw-r--r--  tools/collectors/cadvisor/cadvisor.py  | 218
-rw-r--r--  tools/collectors/collectd/collectd.py  |  35
-rwxr-xr-x  tools/collectors/multicmd/__init__.py  |  17
-rw-r--r--  tools/collectors/multicmd/multicmd.py  | 138
5 files changed, 422 insertions(+), 3 deletions(-)
diff --git a/tools/collectors/cadvisor/__init__.py b/tools/collectors/cadvisor/__init__.py
new file mode 100755
index 00000000..235ab875
--- /dev/null
+++ b/tools/collectors/cadvisor/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for cAdvisor as a collector
+"""
diff --git a/tools/collectors/cadvisor/cadvisor.py b/tools/collectors/cadvisor/cadvisor.py
new file mode 100644
index 00000000..de48cecd
--- /dev/null
+++ b/tools/collectors/cadvisor/cadvisor.py
@@ -0,0 +1,218 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects container metrics from cAdvisor.
+Sends metrics to InfluxDB and also stores results locally.
+"""
+
+import subprocess
+import logging
+import os
+from collections import OrderedDict
+
+from tools.collectors.collector import collector
+from tools import tasks
+from conf import settings
+
+
+
+# Inherit from collector.ICollector.
+class Cadvisor(collector.ICollector):
+ """A collector of container metrics based on cAdvisor
+
+ It starts cAdvisor and collects metrics.
+ """
+
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize collection of statistics
+ """
+ self._logger = logging.getLogger(__name__)
+ self.resultsdir = results_dir
+ self.testname = test_name
+ self._pid = 0
+ self._results = OrderedDict()
+ self._log = os.path.join(results_dir,
+ settings.getValue('LOG_FILE_CADVISOR') +
+ '_' + test_name + '.log')
+ self._logfile = 0
+
+
+ def start(self):
+ """
+ Starts collection of statistics by cAdvisor and stores them
+ into:
+ 1. A log file in the test results directory
+ 2. The InfluxDB results container
+ """
+
+ # CMD options for cAdvisor
+ cmd = ['sudo', '/opt/cadvisor/cadvisor',
+ '-storage_driver='+settings.getValue('CADVISOR_STORAGE_DRIVER'),
+ '-storage_driver_host='+settings.getValue('CADVISOR_STORAGE_HOST'),
+ '-storage_driver_db='+settings.getValue('CADVISOR_DRIVER_DB'),
+ '-housekeeping_interval=0.5s',
+ '-storage_driver_buffer_duration=1s'
+ ]
+
+ self._logfile = open(self._log, 'a')
+
+ self._pid = subprocess.Popen(map(os.path.expanduser, cmd), stdout=self._logfile, bufsize=0)
+ self._logger.info('Starting cAdvisor')
+
+
+
+ def stop(self):
+ """
+ Stops collection of metrics by cAdvisor and stores a statistics
+ summary for each monitored container in the self._results dictionary
+ """
+ try:
+ subprocess.check_output(["pidof", "cadvisor"])
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'cadvisor'],
+ self._logger, 'Stopping cAdvisor', True)
+ except subprocess.CalledProcessError:
+ self._logger.error('Failed to stop cAdvisor; the process may not exist')
+
+
+ self._logfile.close()
+ self._logger.info('cAdvisor log available at %s', self._log)
+
+ containers = settings.getValue('CADVISOR_CONTAINERS')
+ self._results = cadvisor_log_result(self._log, containers)
+
+
+ def get_results(self):
+ """Returns collected statistics.
+ """
+ return self._results
+
+ def print_results(self):
+ """Logs collected statistics.
+ """
+ for cnt in self._results:
+ logging.info("Container: %s", cnt)
+ for (key, value) in self._results[cnt].items():
+
+ postfix = ''
+
+ if key == 'cpu_cumulative_usage':
+ key = 'CPU_usage'
+ value = round(float(value) / 1000000000, 4)
+ postfix = '%'
+
+ if key in ['memory_usage', 'memory_working_set']:
+ value = round(float(value) / 1024 / 1024, 4)
+ postfix = 'MB'
+
+ if key in ['rx_bytes', 'tx_bytes']:
+ value = round(float(value) / 1024 / 1024, 4)
+ postfix = 'mBps'
+
+ logging.info(" Statistic: %s Value: %s %s",
+ str(key), str(value), postfix)
+
+
+def cadvisor_log_result(filename, containers):
+ """
+ Processes cAdvisor logfile and returns average results
+
+ :param filename: Name of cadvisor logfile
+ :param containers: List of container names
+
+ :returns: Result as average stats of Containers
+ """
+ result = OrderedDict()
+ previous = OrderedDict()
+ logfile = open(filename, 'r')
+ with logfile:
+ # for every line
+ for _, line in enumerate(logfile):
+ # skip lines having root '/' metrics
+ if line[0:7] == 'cName=/':
+ continue
+
+ # parse line into OrderedDict
+ tmp_res = parse_line(line)
+
+ cnt = tmp_res['cName']
+
+ # skip if cnt is not in container list
+ if cnt not in containers:
+ continue
+
+ # add metrics to result
+ if cnt not in result:
+ result[cnt] = tmp_res
+ previous[cnt] = tmp_res
+ result[cnt]['count'] = 1
+ else:
+ for field in tmp_res:
+
+ if field in ['rx_errors', 'tx_errors', 'memory_usage', 'memory_working_set']:
+ val = float(tmp_res[field])
+ elif field in ['cpu_cumulative_usage', 'rx_bytes', 'tx_bytes']:
+ val = float(tmp_res[field]) - float(previous[cnt][field])
+ else:
+ # discard remaining fields
+ try:
+ result[cnt].pop(field)
+ except KeyError:
+ continue
+ continue
+
+ result[cnt][field] = float(result[cnt][field]) + val
+
+ result[cnt]['count'] += 1
+ previous[cnt] = tmp_res
+
+ # calculate average results for containers
+ result = calculate_average(result)
+ return result
+
+
+def calculate_average(results):
+ """
+ Calculates average for container stats
+ """
+ for cnt in results:
+ for field in results[cnt]:
+ if field != 'count':
+ val = float(results[cnt][field])/results[cnt]['count']
+ results[cnt][field] = '{0:.2f}'.format(val)
+
+ results[cnt].pop('count')
+ # sort results
+ results[cnt] = OrderedDict(sorted(results[cnt].items()))
+
+ return results
+
+
+def parse_line(line):
+ """
+ Reads single line from cAdvisor logfile
+
+ :param line: single line as str
+
+ :returns: OrderedDict of line read
+ """
+ tmp_res = OrderedDict()
+ # split line into array of "key=value" metrics
+ metrics = line.split()
+ for metric in metrics:
+ key, value = metric.split('=')
+ tmp_res[key] = value
+
+ return tmp_res
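
For reference, a minimal, self-contained sketch of the parsing and aggregation idea behind parse_line() and cadvisor_log_result() above. The sample log lines and values are hypothetical and show only a few of the key=value metrics a real cAdvisor log line carries; cumulative counters are differenced between consecutive samples, while gauges such as memory are averaged.

    from collections import OrderedDict

    def parse_line(line):
        """Split a cAdvisor log line of whitespace-separated key=value pairs."""
        result = OrderedDict()
        for metric in line.split():
            key, value = metric.split('=')
            result[key] = value
        return result

    # Hypothetical samples; a real log line contains many more fields.
    samples = [
        'cName=vnf0 cpu_cumulative_usage=1000000000 memory_usage=1048576 rx_bytes=2048',
        'cName=vnf0 cpu_cumulative_usage=3000000000 memory_usage=3145728 rx_bytes=6144',
    ]
    first, second = (parse_line(line) for line in samples)

    # Counters (cpu_cumulative_usage, rx_bytes, tx_bytes): delta between samples.
    cpu_seconds = (float(second['cpu_cumulative_usage']) -
                   float(first['cpu_cumulative_usage'])) / 1e9
    # Gauges (memory_usage, memory_working_set): plain average across samples.
    avg_mem_mb = (float(first['memory_usage']) +
                  float(second['memory_usage'])) / 2 / 1024 / 1024
    print(cpu_seconds, avg_mem_mb)   # -> 2.0 2.0
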
diff --git a/tools/collectors/collectd/collectd.py b/tools/collectors/collectd/collectd.py
index 700aef47..5e996d3a 100644
--- a/tools/collectors/collectd/collectd.py
+++ b/tools/collectors/collectd/collectd.py
@@ -20,6 +20,7 @@ Plot the values of the stored samples once the test is completed
import copy
import csv
+import glob
import logging
import multiprocessing
import os
@@ -30,6 +31,7 @@ import matplotlib.pyplot as plt
import numpy as np
import tools.collectors.collectd.collectd_bucky as cb
from tools.collectors.collector import collector
+from tools import tasks
from conf import settings
# The y-lables. Keys in this dictionary are used as y-labels.
@@ -49,6 +51,7 @@ def get_label(sample):
return label
return None
+
def plot_graphs(dict_of_arrays):
"""
Plot the values
@@ -194,6 +197,7 @@ class Receiver(multiprocessing.Process):
val = self.pd_dict[sample[1]]
val.append((sample[2], sample[3]))
self.pd_dict[sample[1]] = val
+ logging.debug("COLLECTD %s", ' '.join(str(p) for p in sample))
def stop(self):
"""
@@ -216,13 +220,27 @@ class Collectd(collector.ICollector):
"""
Initialize collection of statistics
"""
- self._log = os.path.join(results_dir,
- settings.getValue('LOG_FILE_COLLECTD') +
- '_' + test_name + '.log')
+ self.logger = logging.getLogger(__name__)
+ self.resultsdir = results_dir
+ self.testname = test_name
self.results = {}
self.sample_dict = multiprocessing.Manager().dict()
self.control = multiprocessing.Value('b', False)
self.receiver = Receiver(self.sample_dict, self.control)
+ self.cleanup_metrics()
+ # Assumption: collectd is installed at /opt/collectd
+ # and is configured to write CSV output to /tmp/csv
+ self.pid = tasks.run_background_task(
+ ['sudo', '/opt/collectd/sbin/collectd'],
+ self.logger, 'Starting Collectd')
+
+ def cleanup_metrics(self):
+ """
+ Clean up the old or archived metrics
+ """
+ for name in glob.glob(os.path.join('/tmp/csv/', '*')):
+ tasks.run_task(['sudo', 'rm', '-rf', name], self.logger,
+ 'Cleaning up Metrics', True)
def start(self):
"""
@@ -235,6 +253,11 @@ class Collectd(collector.ICollector):
"""
Stop receiving samples
"""
+ tasks.terminate_task_subtree(self.pid, logger=self.logger)
+ # At times collectd fails to terminate fully;
+ # killing the process by name helps as well.
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'collectd'],
+ self.logger, 'Stopping Collectd', True)
self.control.value = True
self.receiver.stop()
self.receiver.server.join(5)
@@ -244,6 +267,12 @@ class Collectd(collector.ICollector):
if self.receiver.is_alive():
self.receiver.terminate()
self.results = copy.deepcopy(self.sample_dict)
+ # Back up the collectd metrics for this test into a tarball
+ filename = ('/tmp/collectd-' + settings.getValue('LOG_TIMESTAMP') +
+ '.tar.gz')
+ tasks.run_task(['sudo', 'tar', '-czvf', filename, '/tmp/csv/'],
+ self.logger, 'Archiving Metrics', True)
+ self.cleanup_metrics()
def get_results(self):
"""
diff --git a/tools/collectors/multicmd/__init__.py b/tools/collectors/multicmd/__init__.py
new file mode 100755
index 00000000..2ae2340f
--- /dev/null
+++ b/tools/collectors/multicmd/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2019 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for multi-commands as a collector
+"""
diff --git a/tools/collectors/multicmd/multicmd.py b/tools/collectors/multicmd/multicmd.py
new file mode 100644
index 00000000..275a0693
--- /dev/null
+++ b/tools/collectors/multicmd/multicmd.py
@@ -0,0 +1,138 @@
+# Copyright 2019 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects information using various command line tools.
+"""
+
+#from tools.collectors.collector import collector
+import glob
+import logging
+import os
+from collections import OrderedDict
+from tools import tasks
+from tools.collectors.collector import collector
+from conf import settings
+
+class MultiCmd(collector.ICollector):
+ """ Multiple command-line controllers
+ collectd, prox, crond, filebeat
+ """
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize collectors
+ """
+ self.prox_home = settings.getValue('MC_PROX_HOME')
+ self.collectd_cmd = settings.getValue('MC_COLLECTD_CMD')
+ self.collectd_csv = settings.getValue('MC_COLLECTD_CSV')
+ self.prox_out = settings.getValue('MC_PROX_OUT')
+ self.prox_cmd = settings.getValue('MC_PROX_CMD')
+ self.cron_out = settings.getValue('MC_CRON_OUT')
+ self.logger = logging.getLogger(__name__)
+ self.results_dir = results_dir
+ self.collectd_pid = 0
+ self.prox_pid = 0
+ self.cleanup_collectd_metrics()
+ self.logger.debug('%s', 'Multicmd data for ' + str(test_name))
+ # There should not be a file named 'stop' in the prox_home folder,
+ # else PROX will start and stop immediately. This is a hack to
+ # control prox-runrapid, which by default runs for a specified duration.
+ filename = os.path.join(self.prox_home, 'stop')
+ if os.path.exists(filename):
+ tasks.run_task(['sudo', 'rm', filename],
+ self.logger, 'deleting stop')
+ self.results = OrderedDict()
+
+ def cleanup_collectd_metrics(self):
+ """
+ Clean up the old or archived metrics
+ """
+ for name in glob.glob(os.path.join(self.collectd_csv, '*')):
+ tasks.run_task(['sudo', 'rm', '-rf', name], self.logger,
+ 'Cleaning up Metrics', True)
+
+ def start(self):
+ # Command-1: Start Collectd
+ self.collectd_pid = tasks.run_background_task(
+ ['sudo', self.collectd_cmd],
+ self.logger, 'Starting Collectd')
+
+ # Command-2: Start PROX
+ working_dir = os.getcwd()
+ if os.path.exists(self.prox_home):
+ os.chdir(self.prox_home)
+ self.prox_pid = tasks.run_background_task(['sudo', self.prox_cmd,
+ '--test', 'irq',
+ '--env', 'irq'],
+ self.logger,
+ 'Start PROX')
+ os.chdir(working_dir)
+ # Command-3: Start CROND
+ tasks.run_task(['sudo', 'systemctl', 'start', 'crond'],
+ self.logger, 'Starting CROND', True)
+
+ # command-4: BEATS
+ tasks.run_task(['sudo', 'systemctl', 'start', 'filebeat'],
+ self.logger, 'Starting BEATS', True)
+
+ def stop(self):
+ """
+ Stop all commands
+ """
+ # Command-1: COLLECTD
+ tasks.terminate_task_subtree(self.collectd_pid, logger=self.logger)
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'collectd'],
+ self.logger, 'Stopping Collectd', True)
+
+ # Back up the collectd metrics for this test into the results folder
+ # results_dir = os.path.join(settings.getValue('RESULTS_PATH'), '/')
+ tasks.run_task(['sudo', 'cp', '-r', self.collectd_csv,
+ self.results_dir], self.logger,
+ 'Copying Collectd Results File', True)
+ self.cleanup_collectd_metrics()
+
+ # Command-2: PROX
+ filename = os.path.join(self.prox_home, 'stop')
+ if os.path.exists(self.prox_home):
+ tasks.run_task(['sudo', 'touch', filename],
+ self.logger, 'Stopping PROX', True)
+
+ outfile = os.path.join(self.prox_home, self.prox_out)
+ if os.path.exists(outfile):
+ tasks.run_task(['sudo', 'mv', outfile, self.results_dir],
+ self.logger, 'Moving PROX-OUT file', True)
+
+ # Command-3: CROND
+ tasks.run_task(['sudo', 'systemctl', 'stop', 'crond'],
+ self.logger, 'Stopping CROND', True)
+ if os.path.exists(self.cron_out):
+ tasks.run_task(['sudo', 'mv', self.cron_out, self.results_dir],
+ self.logger, 'Move Cron Logs', True)
+
+ # Command-4: BEATS
+ tasks.run_task(['sudo', 'systemctl', 'stop', 'filebeat'],
+ self.logger, 'Stopping BEATS', True)
+
+ def get_results(self):
+ """
+ Return results
+ """
+ return self.results
+
+ def print_results(self):
+ """
+ Print results
+ """
+ logging.info("Multicmd Output is not collected by VSPERF")
+ logging.info("Please refer to corresponding command's output")