-rw-r--r--  qtip/collector/calculator.py       38
-rw-r--r--  qtip/collector/parser/grep.py      74
-rw-r--r--  qtip/collector/parser/regex.yaml   85
-rw-r--r--  qtip/driver/ansible_driver.py      50
-rw-r--r--  qtip/runner/runner.py             105
5 files changed, 332 insertions, 20 deletions
diff --git a/qtip/collector/calculator.py b/qtip/collector/calculator.py
new file mode 100644
index 00000000..c3d961b3
--- /dev/null
+++ b/qtip/collector/calculator.py
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from operator import add
+
+from qtip.util.logger import QtipLogger
+
+logger = QtipLogger('calculator').get
+
+
+def dpi_calculator(samples):
+ try:
+ float_pps = map(lambda x: float(x), samples['pps'])
+ float_bps = map(lambda x: float(x), samples['bps'])
+ sum_dpi_pps = reduce(add,
+ map(lambda x: x / 1000 if x > 100 else x, float_pps))
+ sum_dpi_bps = reduce(add,
+ map(lambda x: x / 1000 if x > 100 else x, float_bps))
+
+ return {'pps': round(sum_dpi_pps / 10, 3), 'bps': round(sum_dpi_bps / 10, 3)}
+ except Exception as error:
+ logger.error(error)
+ return {'pps': None, 'bps': None}
+
+
+def calculate_cpu_usage(cpu_idle):
+ try:
+ cpu_usage = round((100.0 - float(cpu_idle)), 3)
+ return '{0}%'.format(str(cpu_usage))
+ except Exception as error:
+ logger.error(error)
+ return None
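
A minimal usage sketch for the new calculator helpers, with made-up sample values (the real input comes from the grep parser; ten samples per metric are assumed here, matching the divide-by-ten averaging in dpi_calculator):

    # Illustrative only: the sample lists mimic what the parser collects.
    from qtip.collector import calculator

    samples = {'pps': ['3.52', '3.48', '3.55', '3.49', '3.51',
                       '3.50', '3.47', '3.53', '3.52', '3.50'],
               'bps': ['9.32', '9.28', '9.35', '9.29', '9.31',
                       '9.30', '9.27', '9.33', '9.32', '9.30']}
    print(calculator.dpi_calculator(samples))      # {'pps': 3.507, 'bps': 9.307} (key order may vary)
    print(calculator.calculate_cpu_usage('97.5'))  # '2.5%'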
diff --git a/qtip/collector/parser/grep.py b/qtip/collector/parser/grep.py
index f74ce403..44edb5a1 100644
--- a/qtip/collector/parser/grep.py
+++ b/qtip/collector/parser/grep.py
@@ -7,12 +7,20 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
+from collections import defaultdict
+from os import path
import re
+import yaml
-from qtip.base.constant import BaseProp
from qtip.base import BaseActor
+from qtip.base.constant import BaseProp
+from qtip.collector import calculator
+from qtip.util.logger import QtipLogger
+
+logger = QtipLogger('grep').get
+
+REGEX_FILE = path.join(path.dirname(__file__), 'regex.yaml')
class GrepProp(BaseProp):
@@ -32,3 +40,65 @@ def grep_in_file(filename, regex):
with open(filename, 'r') as f:
return filter(lambda x: x is not None,
re.finditer(regex, f.read(), re.MULTILINE))
+
+
+def _parse_logfile(config, paths):
+ captured = {}
+ for regex_rules_by_file in config:
+ filename = \
+ '{0}/{1}'.format(paths, regex_rules_by_file[GrepProp.FILENAME])
+ for regex in regex_rules_by_file['grep']:
+ matches = grep_in_file(filename, regex)
+ for item in matches:
+ logger.debug(item.groupdict())
+ if len(matches) > 1:
+ temp_dict = defaultdict(list)
+ for item in [match.groupdict() for match in matches]:
+ for key in item:
+ temp_dict[key].append(item[key])
+ captured.update(temp_dict)
+ elif len(matches) == 1:
+ captured.update(matches[0].groupdict())
+ else:
+ logger.error("Nothing is matched from {0}".format(filename))
+ return captured
+
+
+# TODO: Hardcoded for Danube; it will be removed in the future.
+def parse_sysinfo(config, result_dir):
+ sysinfo = _parse_logfile(config, result_dir)
+ if "cpu_idle" in sysinfo:
+ sysinfo['cpu_usage'] = \
+ calculator.calculate_cpu_usage(sysinfo['cpu_idle'])
+ sysinfo.pop('cpu_idle')
+ return sysinfo
+
+
+# TODO: Hardcoded for Danube; it will be removed in the future.
+def parse_test_result(benchmark, config, result_dir):
+ test_result = _parse_logfile(config, result_dir)
+ if benchmark == 'dpi':
+ return calculator.dpi_calculator(test_result)
+ if benchmark == 'dhrystone' or benchmark == 'whetstone':
+ return {'total_cpus': test_result['total_cpus'],
+ 'single_cpu': {'num': test_result['single_cpu'],
+ 'score': test_result['score'][0]},
+ 'multi_cpus': {'num': test_result['multi_cpus'],
+ 'score': test_result['score'][1]}}
+ return test_result
+
+
+# TODO: Hardcoded for Danube; it will be removed in the future.
+def parse_benchmark_result(result_dir):
+ regex_config = yaml.safe_load(open(REGEX_FILE))
+ benchmark = result_dir.split('/')[-1]
+ result = {'name': benchmark}
+
+ test_result = \
+ parse_test_result(benchmark, regex_config[benchmark], result_dir)
+ result['results'] = test_result.copy()
+
+ sysinfo = parse_sysinfo(regex_config['sysinfo'], result_dir)
+ result['sysinfo'] = sysinfo.copy()
+
+ return result
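
A hedged usage sketch for the new parsing entry point; the directory layout (one directory per benchmark under a per-node result directory, holding the grepped log files) is an assumption based on how runner.py walks the results, and the values are illustrative:

    # Hypothetical path; the last path component selects the regex block.
    from qtip.collector.parser import grep

    result = grep.parse_benchmark_result('/tmp/qtip/2017-03-01-10-00/node-1/dpi')
    # result ~= {'name': 'dpi',
    #            'results': {'pps': 3.507, 'bps': 9.307},
    #            'sysinfo': {'hostname': '...', 'cpu_usage': '2.5%', ...}}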
diff --git a/qtip/collector/parser/regex.yaml b/qtip/collector/parser/regex.yaml
new file mode 100644
index 00000000..397f8973
--- /dev/null
+++ b/qtip/collector/parser/regex.yaml
@@ -0,0 +1,85 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+dhrystone:
+ - filename: dhrystone
+ grep:
+ - '^(?P<total_cpus>\d+)\sCPUs in system; running 1 parallel copy of tests$'
+ - '.+\srunning (?P<single_cpu>\d+) parallel copy of tests$'
+ - '.+\srunning (?P<multi_cpus>\d+) parallel copies of tests$'
+ - '^System Benchmarks Index Score \(Partial Only\)\s+(?P<score>\d+\.\d)$'
+whetstone:
+ - filename: whetstone
+ grep:
+ - '^(?P<total_cpus>\d+)\sCPUs in system; running 1 parallel copy of tests$'
+ - '.+\srunning (?P<single_cpu>\d+) parallel copy of tests$'
+ - '.+\srunning (?P<multi_cpus>\d+) parallel copies of tests$'
+ - '^System Benchmarks Index Score \(Partial Only\)\s+(?P<score>\d+\.\d)$'
+dpi:
+ - filename: dpi_dump.txt
+ grep:
+ - |-
+ ^\s+nDPI throughput:.+?(?P<pps>\d+.\d+)\sM\spps.+
+ ?(?P<bps>\d+.\d+)\sGb\/sec
+ramspeed:
+ - filename: Intmem
+ grep:
+ - '^INTEGER\s+BatchRun\s+Copy:\s+?(?P<integer_copy>\d+\.\d+)\sMB/s$'
+ - '^INTEGER\s+BatchRun\s+Scale:\s+?(?P<integer_scale>\d+\.\d+)\sMB/s$'
+ - '^INTEGER\s+BatchRun\s+Add:\s+?(?P<integer_add>\d+\.\d+)\sMB/s$'
+ - '^INTEGER\s+BatchRun\s+Triad:\s+?(?P<integer_triad>\d+\.\d+)\sMB/s$'
+ - '^INTEGER\s+BatchRun\s+AVERAGE:\s+?(?P<integer_average>\d+\.\d+)\sMB/s$'
+ - filename: Floatmem
+ grep:
+ - '^FL-POINT\s+BatchRun\s+Copy:\s+?(?P<float_copy>\d+\.\d+)\sMB/s$'
+ - '^FL-POINT\s+BatchRun\s+Scale:\s+?(?P<float_scale>\d+\.\d+)\sMB/s$'
+ - '^FL-POINT\s+BatchRun\s+Add:\s+?(?P<float_add>\d+\.\d+)\sMB/s$'
+ - '^FL-POINT\s+BatchRun\s+Triad:\s+?(?P<float_triad>\d+\.\d+)\sMB/s$'
+ - '^FL-POINT\s+BatchRun\s+AVERAGE:\s+?(?P<float_average>\d+\.\d+)\sMB/s$'
+ssl:
+ - filename: RSA_dump
+ grep:
+ - |-
+ ^rsa\s+512\sbits\s.+
+ ?(?P<rsa_sign_512>\d+\.\d)\s+
+ ?(?P<rsa_verify_512>\d+\.\d)$
+ - |-
+ ^rsa\s+1024\sbits\s.+
+ ?(?P<rsa_sign_1024>\d+\.\d)\s+
+ ?(?P<rsa_verify_1024>\d+\.\d)$
+ - |-
+ ^rsa\s+2048\sbits\s.+
+ ?(?P<rsa_sign_2048>\d+\.\d)\s+
+ ?(?P<rsa_verify_2048>\d+\.\d)$
+ - |-
+ ^rsa\s+4096\sbits\s.+
+ ?(?P<rsa_sign_4096>\d+\.\d)\s+
+ ?(?P<rsa_verify_4096>\d+\.\d)$
+ - filename: AES-128-CBC_dump
+ grep:
+ - |-
+ ^aes-128-cbc\s+
+ ?(?P<aes_128_cbc_16_bytes>\d+\.\w+)\s+
+ ?(?P<aes_128_cbc_64_bytes>\d+\.\w+)\s+
+ ?(?P<aes_128_cbc_256_bytes>\d+\.\w+)\s+
+ ?(?P<aes_128_cbc_1024_bytes>\d+\.\w+)\s+
+ ?(?P<aes_128_cbc_8192_bytes>\d+\.\w+)$
+sysinfo:
+ - filename: top.log
+ grep:
+ - 'Cpu\(s\):.+?(?P<cpu_idle>\d+\.\d)\sid'
+ - filename: inxi.log
+ grep:
+ - '.+\s+Host:\s+(?P<hostname>.+)\sKernel'
+ - '.+\sMemory:\s+(?P<memory>.+MB)\s'
+ - '^CPU\(s\):\s+(?P<cpu>.+)'
+ - '.+\sDistro:\s+(?P<os>.+)'
+ - '.+\sKernel:\s+(?P<kernel>.+)\sConsole'
+ - '.+\s+HDD Total Size:\s+(?P<disk>.+)\s'
+ - '.+\sproduct:\s+(?P<product>.+)\sv'
\ No newline at end of file
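
Each entry is applied with re.MULTILINE and named groups; when a group name matches more than once, grep.py collects the values into a list. A quick sanity check of the ramspeed pattern against an illustrative output line (not captured from a real run):

    import re

    line = 'INTEGER   BatchRun   Copy:   9123.45 MB/s'
    pattern = r'^INTEGER\s+BatchRun\s+Copy:\s+?(?P<integer_copy>\d+\.\d+)\sMB/s$'
    print(re.search(pattern, line, re.MULTILINE).groupdict())
    # {'integer_copy': '9123.45'}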
diff --git a/qtip/driver/ansible_driver.py b/qtip/driver/ansible_driver.py
index 1cd7918d..356c39b7 100644
--- a/qtip/driver/ansible_driver.py
+++ b/qtip/driver/ansible_driver.py
@@ -9,6 +9,7 @@
from collections import defaultdict
from os import path
+from operator import add
from qtip.driver.ansible_api import AnsibleApi
from qtip.util.env import AnsibleEnvSetup
@@ -43,7 +44,7 @@ class AnsibleDriver(object):
logger.info("Starting to setup test environment...")
self.env.setup(self.config)
self.env_setup_flag = True
- logger("Done!")
+ logger.info("Setup test enviroment, Done!")
def run(self, metric_list, **kwargs):
if 'args' in self.config:
@@ -52,10 +53,9 @@ class AnsibleDriver(object):
extra_vars = kwargs
logger.info("extra_var: {0}".format(extra_vars))
- # TODO zhihui: will add a new property named "tool" for metrics, hardcode it now.
tool_to_metrics = defaultdict(list)
for metric in metric_list:
- if metric in ['dhrystone', 'whetstone']:
+ if metric == 'dhrystone' or metric == 'whetstone':
tool_to_metrics['unixbench'].append(metric)
extra_vars[metric] = True
elif metric == 'ssl':
@@ -63,23 +63,37 @@ class AnsibleDriver(object):
else:
tool_to_metrics[metric].append(metric)
- ansible_api = AnsibleApi()
- map(lambda tool: self._run_metric(ansible_api, tool,
- tool_to_metrics[tool], extra_vars),
- tool_to_metrics)
+ result_list = map(lambda tool: self._run_metric(tool,
+ tool_to_metrics[tool],
+ extra_vars),
+ tool_to_metrics)
+ return False not in result_list
- def _run_metric(self, ansible_api, tool, metrics, extra_vars):
+ def _run_metric(self, tool, metrics, extra_vars):
logger.info('Using {0} to measure metrics {1}'.format(tool, metrics))
- for metric in metrics:
- extra_vars[metric] = True
+ setup_pbook = "{0}/{1}/setup.yaml".format(PLAYBOOK_DIR, tool)
+ run_pbook = "{0}/{1}/run.yaml".format(PLAYBOOK_DIR, tool)
+ clean_pbook = "{0}/{1}/clean.yaml".format(PLAYBOOK_DIR, tool)
+
+ if self._run_ansible_playbook(setup_pbook, extra_vars):
+ self._run_ansible_playbook(run_pbook, extra_vars)
+ else:
+ logger.error("{0} is failed.".format(setup_pbook))
+
+ return self._run_ansible_playbook(clean_pbook, extra_vars)
- logger.debug("extra_vars: {0}".format(extra_vars))
+ def _run_ansible_playbook(self, pbook, extra_vars):
+ ansible_api = AnsibleApi()
+ logger.debug("Run {0} with extra_vars: {1}".format(pbook, extra_vars))
+ ansible_api.execute_playbook(pbook, self.env.hostfile,
+ self.env.keypair['private'], extra_vars)
+ playbook_stats = ansible_api.get_detail_playbook_stats()
+ logger.debug("playbook_stat: {0}".format(playbook_stats))
+ return self.is_pass(playbook_stats)
- for item in ['setup', 'run', 'clean']:
- pbook = "{0}/{1}/{2}.yaml".format(PLAYBOOK_DIR, tool, item)
- logger.debug("Start to run {0}".format(pbook))
- ansible_api.execute_playbook(pbook, self.env.hostfile,
- self.env.keypair['private'], extra_vars)
- playbook_stat = ansible_api.get_detail_playbook_stats()
- logger.debug("playbook_stat: {0}".format(playbook_stat))
+ @staticmethod
+ def is_pass(stats):
+ return 0 == reduce(add,
+ map(lambda x: x[1]['failures'] + x[1]['unreachable'],
+ stats))
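
is_pass() relies on the shape of get_detail_playbook_stats(); the (host, counters) pairs below are an assumption inferred from the lambda, not the documented AnsibleApi contract:

    # Hypothetical stats: one host clean, one host with a single failure.
    from qtip.driver.ansible_driver import AnsibleDriver

    stats = [('node-1', {'ok': 12, 'changed': 3, 'failures': 0,
                         'unreachable': 0, 'skipped': 1}),
             ('node-2', {'ok': 12, 'changed': 3, 'failures': 1,
                         'unreachable': 0, 'skipped': 1})]
    print(AnsibleDriver.is_pass(stats))  # False: node-2 reports one failure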
diff --git a/qtip/runner/runner.py b/qtip/runner/runner.py
new file mode 100644
index 00000000..47795bc3
--- /dev/null
+++ b/qtip/runner/runner.py
@@ -0,0 +1,105 @@
+##############################################################################
+# Copyright (c) 2017 ZTE corp. and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import argparse
+import json
+import os
+from os import path
+import sys
+import time
+
+from qtip.collector.parser import grep
+from qtip.driver.ansible_driver import AnsibleDriver
+from qtip.util.logger import QtipLogger
+
+logger = QtipLogger('runner').get
+
+ALL_BENCHMARKS = ['dpi', 'ramspeed', 'ssl', 'dhrystone', 'whetstone']
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-d', '--dest', required=True,
+ help='the destination where results will be stored.')
+ parser.add_argument('-b', '--benchmark', required=True, action='append',
+ help='the benchmark you want to execute.')
+ return parser.parse_args(args)
+
+
+def run_benchmark(result_dir, benchmarks):
+ if not path.isdir(result_dir):
+ os.makedirs(result_dir)
+ driver = AnsibleDriver({'args': {'result_dir': result_dir}})
+ driver.pre_run()
+ return driver.run(benchmarks)
+
+
+def generate_report(result_dir, start_time, stop_time):
+ output = {
+ "plan_name": "compute_qpi",
+ "start_time": start_time,
+ "stop_time": stop_time,
+ "sut": []
+ }
+ output.update(parse_result(result_dir))
+ output.update({'stop_time': stop_time})
+ with open('{0}/result.json'.format(result_dir), 'w+') as f:
+ json.dump(output, f, indent=4, sort_keys=True)
+
+
+def parse_result(result_dir):
+ sut_template = {'sut': []}
+ nodes_list = os.listdir(result_dir)
+ for node in nodes_list:
+ node_output_template = {
+ 'name': node,
+ 'type': 'baremetal',
+ 'qpis': []
+ }
+ qpi_result = {'name': 'compute_qpi', 'benchmarks': []}
+ for benchmark in os.listdir('{0}/{1}'.format(result_dir, node)):
+ benchmark_result = \
+ grep.parse_benchmark_result(
+ '{0}/{1}/{2}'.format(result_dir, node, benchmark))
+ qpi_result['benchmarks'].append(benchmark_result)
+ node_output_template['qpis'].append(qpi_result)
+ sut_template['sut'].append(node_output_template)
+ return sut_template
+
+
+def main(args=sys.argv[1:]):
+ args = parse_args(args)
+
+ if not path.isdir(str(args.dest)):
+ logger.error("The destination {0} you give doesn't exist. "
+ "Please check!".format(args.dest))
+ sys.exit(1)
+
+ if args.benchmark == ['all']:
+ args.benchmark = ALL_BENCHMARKS
+ elif len(set(args.benchmark).difference(ALL_BENCHMARKS)) != 0:
+ logger.error("Please check benchmarks name. The supported benchmarks are"
+ "{0}".format(ALL_BENCHMARKS))
+ logger.info("Start to run benchmark test: {0}.".format(args.benchmark))
+
+ start_time = time.strftime("%Y-%m-%d-%H-%M")
+ logger.info("start_time: {0}".format(start_time))
+ if not args.dest.endswith('/'):
+ args.dest += '/'
+ result_dir = args.dest + start_time
+ ansible_result = run_benchmark(result_dir, args.benchmark)
+ stop_time = time.strftime("%Y-%m-%d-%H-%M")
+ logger.info("stop_time: {0}".format(stop_time))
+ if not ansible_result:
+ logger.error("Bechmarks run failed. Cann't generate any report.")
+ sys.exit(1)
+ generate_report(result_dir, start_time, stop_time)
+
+
+if __name__ == "__main__":
+ main()
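
A hedged invocation sketch for the new runner; the destination path is hypothetical and must already exist, and '-b all' expands to ALL_BENCHMARKS:

    from qtip.runner import runner

    # Results land in <dest>/<start-time>/ and a result.json summary is
    # written there once all benchmarks finish.
    runner.main(['-d', '/tmp/qtip-results', '-b', 'dpi', '-b', 'ssl'])
    # or run everything:
    runner.main(['-d', '/tmp/qtip-results', '-b', 'all'])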