From c285f1b57e6b13847b43561c3db09c3bdb32d8ff Mon Sep 17 00:00:00 2001
From: "wu.zhihui"
Date: Sat, 11 Mar 2017 00:53:00 +0800
Subject: Implement the workflow of computing QPI

Local test passes. The result is written to report.json.

usage: runner.py [-h] -d DEST -b BENCHMARK

optional arguments:
  -d DEST, --dest DEST  the destination where results will be stored.
  -b BENCHMARK, --benchmark BENCHMARK
                        the benchmark you want to execute.

Change-Id: Ic3a70c65a5aa045bf9df34ce4d14957a7a1b3dcf
Signed-off-by: wu.zhihui
(cherry picked from commit 478cd02a9219f7c8b49d8529d1f809a04399ad0d)
---
 qtip/driver/ansible_driver.py | 50 +++++++++++++++++++++++++++----------------
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/qtip/driver/ansible_driver.py b/qtip/driver/ansible_driver.py
index 1cd7918d..356c39b7 100644
--- a/qtip/driver/ansible_driver.py
+++ b/qtip/driver/ansible_driver.py
@@ -9,6 +9,7 @@
 from collections import defaultdict
 from os import path
+from operator import add
 
 from qtip.driver.ansible_api import AnsibleApi
 from qtip.util.env import AnsibleEnvSetup
 
@@ -43,7 +44,7 @@ class AnsibleDriver(object):
         logger.info("Starting to setup test environment...")
         self.env.setup(self.config)
         self.env_setup_flag = True
-        logger("Done!")
+        logger.info("Setup test environment. Done!")
 
     def run(self, metric_list, **kwargs):
         if 'args' in self.config:
@@ -52,10 +53,9 @@ class AnsibleDriver(object):
             extra_vars = kwargs
         logger.info("extra_var: {0}".format(extra_vars))
 
-        # TODO zhihui: will add a new property named "tool" for metrics, hardcode it now.
         tool_to_metrics = defaultdict(list)
         for metric in metric_list:
-            if metric in ['dhrystone', 'whetstone']:
+            if metric == 'dhrystone' or metric == 'whetstone':
                 tool_to_metrics['unixbench'].append(metric)
                 extra_vars[metric] = True
             elif metric == 'ssl':
@@ -63,23 +63,37 @@ class AnsibleDriver(object):
             else:
                 tool_to_metrics[metric].append(metric)
 
-        ansible_api = AnsibleApi()
-        map(lambda tool: self._run_metric(ansible_api, tool,
-                                          tool_to_metrics[tool], extra_vars),
-            tool_to_metrics)
+        result_list = map(lambda tool: self._run_metric(tool,
+                                                        tool_to_metrics[tool],
+                                                        extra_vars),
+                          tool_to_metrics)
+        return False not in result_list
 
-    def _run_metric(self, ansible_api, tool, metrics, extra_vars):
+    def _run_metric(self, tool, metrics, extra_vars):
         logger.info('Using {0} to measure metrics {1}'.format(tool, metrics))
-        for metric in metrics:
-            extra_vars[metric] = True
+        setup_pbook = "{0}/{1}/setup.yaml".format(PLAYBOOK_DIR, tool)
+        run_pbook = "{0}/{1}/run.yaml".format(PLAYBOOK_DIR, tool)
+        clean_pbook = "{0}/{1}/clean.yaml".format(PLAYBOOK_DIR, tool)
+
+        if self._run_ansible_playbook(setup_pbook, extra_vars):
+            self._run_ansible_playbook(run_pbook, extra_vars)
+        else:
+            logger.error("{0} failed.".format(setup_pbook))
+
+        return self._run_ansible_playbook(clean_pbook, extra_vars)
 
-        logger.debug("extra_vars: {0}".format(extra_vars))
+    def _run_ansible_playbook(self, pbook, extra_vars):
+        ansible_api = AnsibleApi()
+        logger.debug("Run {0} with extra_vars: {1}".format(pbook, extra_vars))
+        ansible_api.execute_playbook(pbook, self.env.hostfile,
+                                     self.env.keypair['private'], extra_vars)
+        playbook_stats = ansible_api.get_detail_playbook_stats()
+        logger.debug("playbook_stats: {0}".format(playbook_stats))
+        return self.is_pass(playbook_stats)
 
-        for item in ['setup', 'run', 'clean']:
-            pbook = "{0}/{1}/{2}.yaml".format(PLAYBOOK_DIR, tool, item)
-            logger.debug("Start to run {0}".format(pbook))
-            ansible_api.execute_playbook(pbook, self.env.hostfile,
-                                         self.env.keypair['private'], extra_vars)
-            playbook_stat = ansible_api.get_detail_playbook_stats()
-            logger.debug("playbook_stat: {0}".format(playbook_stat))
+    @staticmethod
+    def is_pass(stats):
+        return 0 == reduce(add,
+                           map(lambda x: x[1]['failures'] + x[1]['unreachable'],
+                               stats))
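
From the usage text in the commit message, a typical invocation would look like
this (the destination path is illustrative; the report.json result is expected
under the given DEST):

    $ python runner.py -d /tmp/qtip-results -b dhrystone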
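
The metric-to-tool grouping in run() can be exercised on its own. A minimal
sketch matching the loop above; note the 'openssl' bucket for the 'ssl' branch
is an assumption, since that branch's body lies outside the diff context:

    from collections import defaultdict

    def group_metrics(metric_list, extra_vars):
        # dhrystone and whetstone are both UnixBench workloads, so they share
        # one tool bucket and are toggled via extra_vars; any other metric is
        # assumed to map to a tool of the same name.
        tool_to_metrics = defaultdict(list)
        for metric in metric_list:
            if metric == 'dhrystone' or metric == 'whetstone':
                tool_to_metrics['unixbench'].append(metric)
                extra_vars[metric] = True
            elif metric == 'ssl':
                tool_to_metrics['openssl'].append(metric)  # assumed bucket name
            else:
                tool_to_metrics[metric].append(metric)
        return tool_to_metrics

    extra_vars = {}
    print(dict(group_metrics(['dhrystone', 'whetstone', 'ssl'], extra_vars)))
    # {'unixbench': ['dhrystone', 'whetstone'], 'openssl': ['ssl']}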
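
The pass/fail reduction in is_pass() assumes get_detail_playbook_stats()
returns (host, summary) pairs carrying Ansible's per-host recap counters; a
playbook passes only when no host reports failed or unreachable tasks. A
standalone sketch with hypothetical stats (the bare reduce in the patch implies
Python 2, where it is a builtin; Python 3 needs the functools import shown
here):

    from operator import add
    from functools import reduce  # builtin in Python 2

    # Hypothetical stats, shaped like Ansible's per-host recap counters.
    stats = [
        ('node-1', {'ok': 7, 'changed': 2, 'failures': 0, 'unreachable': 0}),
        ('node-2', {'ok': 7, 'changed': 2, 'failures': 1, 'unreachable': 0}),
    ]

    def is_pass(stats):
        # Sum failures + unreachable across all hosts; zero means pass.
        return 0 == reduce(add,
                           map(lambda x: x[1]['failures'] + x[1]['unreachable'],
                               stats))

    print(is_pass(stats))  # False: node-2 reported one failed task

Two behaviors worth noting: reduce() raises TypeError on an empty stats list
(an initializer of 0 would guard that), and _run_metric() always runs
clean.yaml and returns only its result, so a failing run.yaml after a
successful setup is not reflected in run()'s aggregate.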