From 478cd02a9219f7c8b49d8529d1f809a04399ad0d Mon Sep 17 00:00:00 2001
From: "wu.zhihui" <wu.zhihui1@zte.com.cn>
Date: Sat, 11 Mar 2017 00:53:00 +0800
Subject: Implement the workflow of compute qpi

Local testing passed.
The result is written to result.json in the result directory.
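
The generated result.json roughly follows this layout (the values shown
are illustrative placeholders):

    {
        "plan_name": "compute_qpi",
        "start_time": "<start time>",
        "stop_time": "<stop time>",
        "sut": [
            {"name": "<node>", "type": "baremetal",
             "qpis": [{"name": "compute_qpi",
                       "benchmarks": ["<parsed benchmark results>"]}]}
        ]
    }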

usage: runner.py [-h] -d DEST -b BENCHMARK

optional arguments:
  -d DEST, --dest DEST  the destination where results will be stored.
  -b BENCHMARK, --benchmark BENCHMARK
                        the benchmark you want to execute.
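
Example invocations (the destination path is illustrative):

  python runner.py -d /tmp/qtip/results -b dhrystone -b whetstone
  python runner.py -d /tmp/qtip/results -b all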

Change-Id: Ic3a70c65a5aa045bf9df34ce4d14957a7a1b3dcf
Signed-off-by: wu.zhihui <wu.zhihui1@zte.com.cn>
---
 qtip/driver/ansible_driver.py |  50 ++++++++++++--------
 qtip/runner/runner.py         | 105 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+), 18 deletions(-)
 create mode 100644 qtip/runner/runner.py

diff --git a/qtip/driver/ansible_driver.py b/qtip/driver/ansible_driver.py
index 1cd7918d..356c39b7 100644
--- a/qtip/driver/ansible_driver.py
+++ b/qtip/driver/ansible_driver.py
@@ -9,6 +9,7 @@
 
 from collections import defaultdict
 from os import path
+from operator import add
 
 from qtip.driver.ansible_api import AnsibleApi
 from qtip.util.env import AnsibleEnvSetup
@@ -43,7 +44,7 @@ class AnsibleDriver(object):
             logger.info("Starting to setup test environment...")
             self.env.setup(self.config)
             self.env_setup_flag = True
-            logger("Done!")
+            logger.info("Setup test enviroment, Done!")
 
     def run(self, metric_list, **kwargs):
         if 'args' in self.config:
@@ -52,10 +53,9 @@ class AnsibleDriver(object):
             extra_vars = kwargs
         logger.info("extra_var: {0}".format(extra_vars))
 
-        # TODO zhihui: will add a new property named "tool" for metrics, hardcode it now.
         tool_to_metrics = defaultdict(list)
         for metric in metric_list:
-            if metric in ['dhrystone', 'whetstone']:
+            if metric == 'dhrystone' or metric == 'whetstone':
                 tool_to_metrics['unixbench'].append(metric)
                 extra_vars[metric] = True
             elif metric == 'ssl':
@@ -63,23 +63,37 @@ class AnsibleDriver(object):
             else:
                 tool_to_metrics[metric].append(metric)
 
-        ansible_api = AnsibleApi()
-        map(lambda tool: self._run_metric(ansible_api, tool,
-                                          tool_to_metrics[tool], extra_vars),
-            tool_to_metrics)
+        result_list = map(lambda tool: self._run_metric(tool,
+                                                        tool_to_metrics[tool],
+                                                        extra_vars),
+                          tool_to_metrics)
+        return False not in result_list
 
-    def _run_metric(self, ansible_api, tool, metrics, extra_vars):
+    def _run_metric(self, tool, metrics, extra_vars):
         logger.info('Using {0} to measure metrics {1}'.format(tool, metrics))
 
-        for metric in metrics:
-            extra_vars[metric] = True
+        setup_pbook = "{0}/{1}/setup.yaml".format(PLAYBOOK_DIR, tool)
+        run_pbook = "{0}/{1}/run.yaml".format(PLAYBOOK_DIR, tool)
+        clean_pbook = "{0}/{1}/clean.yaml".format(PLAYBOOK_DIR, tool)
+
+        if self._run_ansible_playbook(setup_pbook, extra_vars):
+            self._run_ansible_playbook(run_pbook, extra_vars)
+        else:
+            logger.error("{0} is failed.".format(setup_pbook))
+
+        return self._run_ansible_playbook(clean_pbook, extra_vars)
 
-        logger.debug("extra_vars: {0}".format(extra_vars))
+    def _run_ansible_playbook(self, pbook, extra_vars):
+        ansible_api = AnsibleApi()
+        logger.debug("Run {0} with extra_vars: {1}".format(pbook, extra_vars))
+        ansible_api.execute_playbook(pbook, self.env.hostfile,
+                                     self.env.keypair['private'], extra_vars)
+        playbook_stats = ansible_api.get_detail_playbook_stats()
+        logger.debug("playbook_stat: {0}".format(playbook_stats))
+        return self.is_pass(playbook_stats)
 
-        for item in ['setup', 'run', 'clean']:
-            pbook = "{0}/{1}/{2}.yaml".format(PLAYBOOK_DIR, tool, item)
-            logger.debug("Start to run {0}".format(pbook))
-            ansible_api.execute_playbook(pbook, self.env.hostfile,
-                                         self.env.keypair['private'], extra_vars)
-            playbook_stat = ansible_api.get_detail_playbook_stats()
-            logger.debug("playbook_stat: {0}".format(playbook_stat))
+    @staticmethod
+    def is_pass(stats):
+        return 0 == reduce(add,
+                           map(lambda x: x[1]['failures'] + x[1]['unreachable'],
+                               stats))
diff --git a/qtip/runner/runner.py b/qtip/runner/runner.py
new file mode 100644
index 00000000..47795bc3
--- /dev/null
+++ b/qtip/runner/runner.py
@@ -0,0 +1,105 @@
+##############################################################################
+# Copyright (c) 2017 ZTE corp. and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import argparse
+import json
+import os
+from os import path
+import sys
+import time
+
+from qtip.collector.parser import grep
+from qtip.driver.ansible_driver import AnsibleDriver
+from qtip.util.logger import QtipLogger
+
+logger = QtipLogger('runner').get
+
+ALL_BENCHMARKS = ['dpi', 'ramspeed', 'ssl', 'dhrystone', 'whetstone']
+
+
+def parse_args(args):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-d', '--dest', required=True,
+                        help='the destination where results will be stored.')
+    parser.add_argument('-b', '--benchmark', required=True, action='append',
+                        help='the benchmark you want to execute.')
+    return parser.parse_args(args)
+
+
+def run_benchmark(result_dir, benchmarks):
+    if not path.isdir(result_dir):
+        os.makedirs(result_dir)
+    driver = AnsibleDriver({'args': {'result_dir': result_dir}})
+    driver.pre_run()
+    return driver.run(benchmarks)
+
+
+def generate_report(result_dir, start_time, stop_time):
+    output = {
+        "plan_name": "compute_qpi",
+        "start_time": start_time,
+        "stop_time": stop_time,
+        "sut": []
+    }
+    output.update(parse_result(result_dir))
+    with open('{0}/result.json'.format(result_dir), 'w+') as f:
+        json.dump(output, f, indent=4, sort_keys=True)
+
+
+def parse_result(result_dir):
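+    # Assumed layout: one sub-directory per tested node under result_dir,
+    # each holding one log file per benchmark.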
+    sut_template = {'sut': []}
+    nodes_list = os.listdir(result_dir)
+    for node in nodes_list:
+        node_output_template = {
+            'name': node,
+            'type': 'baremetal',
+            'qpis': []
+        }
+        qpi_result = {'name': 'compute_qpi', 'benchmarks': []}
+        for benchmark in os.listdir('{0}/{1}'.format(result_dir, node)):
+            benchmark_result = \
+                grep.parse_benchmark_result(
+                    '{0}/{1}/{2}'.format(result_dir, node, benchmark))
+            qpi_result['benchmarks'].append(benchmark_result)
+        node_output_template['qpis'].append(qpi_result)
+        sut_template['sut'].append(node_output_template)
+    return sut_template
+
+
+def main(args=sys.argv[1:]):
+    args = parse_args(args)
+
+    if not path.isdir(args.dest):
+        logger.error("The given destination {0} doesn't exist. "
+                     "Please check it!".format(args.dest))
+        sys.exit(1)
+
+    if args.benchmark == ['all']:
+        args.benchmark = ALL_BENCHMARKS
+    elif len(set(args.benchmark).difference(ALL_BENCHMARKS)) != 0:
+        logger.error("Please check the benchmark names. The supported "
+                     "benchmarks are: {0}".format(ALL_BENCHMARKS))
+        sys.exit(1)
+    logger.info("Starting to run benchmark test: {0}.".format(args.benchmark))
+
+    start_time = time.strftime("%Y-%m-%d-%H-%M")
+    logger.info("start_time: {0}".format(start_time))
+    result_dir = path.join(args.dest, start_time)
+    ansible_result = run_benchmark(result_dir, args.benchmark)
+    stop_time = time.strftime("%Y-%m-%d-%H-%M")
+    logger.info("stop_time: {0}".format(stop_time))
+    if not ansible_result:
+        logger.error("Bechmarks run failed. Cann't generate any report.")
+        sys.exit(1)
+    generate_report(result_dir, start_time, stop_time)
+
+
+if __name__ == "__main__":
+    main()
-- 
cgit