Diffstat (limited to 'qtip')
-rw-r--r--  qtip/ansible_library/plugins/action/calculate.py  85
1 file changed, 85 insertions(+), 0 deletions(-)
diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
new file mode 100644
index 00000000..f88729b7
--- /dev/null
+++ b/qtip/ansible_library/plugins/action/calculate.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from numpy import mean
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False):
+ return result
+
+ spec = self._task.args.get('spec')
+ metrics = self._task.args.get('metrics')
+
+ return calc_qpi(spec, metrics)
+
+
+def calc_qpi(qpi_spec, metrics):
+
+ display.vv("calculate QPI {}".format(qpi_spec['name']))
+ display.vvv("spec: {}".format(qpi_spec))
+ display.vvv("metrics: {}".format(metrics))
+
+ section_results = [{'name': s['name'], 'result': calc_section(s, metrics)}
+ for s in qpi_spec['sections']]
+ # TODO(yujunz): use formula in spec
+ qpi_score = mean([r['result']['score'] for r in section_results])
+ return {
+ 'spec': qpi_spec,
+ 'score': qpi_score,
+ 'section_results': section_results,
+ 'metrics': metrics
+ }
+
+
+def calc_section(section_spec, metrics):
+
+ display.vv("calculate section {}".format(section_spec['name']))
+ display.vvv("spec: {}".format(section_spec))
+ display.vvv("metrics: {}".format(metrics))
+
+ metric_results = [{'name': m['name'], 'result': calc_metric(m, metrics[m['name']])}
+ for m in section_spec['metrics']]
+ # TODO(yujunz): use formula in spec
+ section_score = mean([r['result']['score'] for r in metric_results])
+ return {
+ 'score': section_score,
+ 'metric_results': metric_results
+ }
+
+
+def calc_metric(metric_spec, metrics):
+
+ display.vv("calculate metric {}".format(metric_spec['name']))
+ display.vvv("spec: {}".format(metric_spec))
+ display.vvv("metrics: {}".format(metrics))
+
+ # TODO(yujunz): use formula in spec
+ # TODO(yujunz): convert metric to float in collector
+ workload_results = [{'name': w['name'], 'score': mean([float(m) for m in metrics[w['name']]]) / w['baseline']}
+ for w in metric_spec['workloads']]
+ metric_score = mean([r['score'] for r in workload_results])
+ return {
+ 'score': metric_score,
+ 'workload_results': workload_results
+ }
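For reference, the scoring logic added above can be exercised outside Ansible. The sketch below is illustrative only: the spec and metrics values are hypothetical, and it assumes the module is importable as qtip.ansible_library.plugins.action.calculate with ansible and numpy installed.

from qtip.ansible_library.plugins.action.calculate import calc_qpi

# Hypothetical QPI spec: one section with one metric and two workloads.
spec = {
    'name': 'compute',
    'sections': [{
        'name': 'arithmetic',
        'metrics': [{
            'name': 'dhrystone',
            'workloads': [
                {'name': 'single_cpu', 'baseline': 3000.0},
                {'name': 'multi_cpu', 'baseline': 6000.0},
            ],
        }],
    }],
}

# Hypothetical collected metrics, keyed by metric name, then workload name.
# Values are strings because the collector does not yet convert them to float
# (see the TODO in calc_metric).
metrics = {
    'dhrystone': {
        'single_cpu': ['1500'],  # mean 1500 / baseline 3000 -> 0.5
        'multi_cpu': ['3000'],   # mean 3000 / baseline 6000 -> 0.5
    },
}

result = calc_qpi(spec, metrics)
print(result['score'])  # 0.5

Each workload score is mean(measured values) / baseline; metric, section and QPI scores are then plain arithmetic means of the level below, pending the formula support noted in the TODOs.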