author      zhihui wu <wu.zhihui1@zte.com.cn>    2017-06-02 01:26:19 +0000
committer   Gerrit Code Review <gerrit@opnfv.org>    2017-06-02 01:26:19 +0000
commit      aa27f390c9ad6817d2682c4f59594f43c69d614f (patch)
tree        3f484c0efcc28880b387d6ca305925bc0430c796 /qtip
parent      10e89733d84e6bdcd8427c34ce4452aa69cf5df0 (diff)
parent      70d8b5c5dbe78d0adb9cdb80afc93e7eb9ac4ca1 (diff)
Merge "metrics baseline in compute-baseline.json"
Diffstat (limited to 'qtip')
-rw-r--r--    qtip/ansible_library/plugins/action/calculate.py    87
1 file changed, 64 insertions(+), 23 deletions(-)
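
Before reading the diff: judging from how the patched calc_qpi(), calc_section() and calc_metric() index into their baseline arguments, the compute-baseline.json named in the merge subject would need roughly the shape sketched below. This is an illustrative guess only; the keys (score, sections, metrics, workloads, name, baseline) are the ones the code reads, while the concrete section, metric and workload names and values are placeholders.

# Sketch of the baseline structure implied by calculate.py; not the real
# compute-baseline.json. Only the keys are taken from the code below.
baseline = {
    'score': 2048,                  # qpi_baseline['score'] scales the final QPI
    'sections': [                   # matched by 'name' in calc_qpi()
        {
            'name': 'example-section',
            'metrics': [            # matched by 'name' in calc_section()
                {
                    'name': 'example-metric',
                    'workloads': [  # matched by 'name' in calc_metric()
                        {'name': 'example-workload', 'baseline': '2 GB'},
                    ],
                },
            ],
        },
    ],
}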
diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
index 077d863c..383be580 100644
--- a/qtip/ansible_library/plugins/action/calculate.py
+++ b/qtip/ansible_library/plugins/action/calculate.py
@@ -9,18 +9,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import humanfriendly
import json
import numbers
-from numpy import mean
-import yaml
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
+from asq.initiators import query
+import humanfriendly
+from numpy import mean
+import yaml
from qtip.util.export_to import export_to_file
-
display = Display()
@@ -45,22 +45,51 @@ class ActionModule(ActionBase):
metrics[metric] = json.load(f)
dest = self._task.args.get('dest')
- return calc_qpi(spec, metrics, dest=dest)
+ baseline_file = self._task.args.get('baseline')
+ if baseline_file is not None:
+ with open(baseline_file) as f:
+ baseline = json.load(f)
+ return calc_qpi(spec, metrics, baseline, dest=dest)
+ else:
+ return save_as_baseline(spec, metrics, dest=dest)
+# TODO(wuzhihui): It would be more reasonable to move this function into collect.py.
+# For now, metrics data is not easily collected from collect.py.
@export_to_file
-def calc_qpi(qpi_spec, metrics):
+def save_as_baseline(qpi_spec, metrics):
+ display.vv("save {} metrics as baseline".format(qpi_spec['name']))
+ display.vvv("spec: {}".format(qpi_spec))
+ display.vvv("metrics: {}".format(metrics))
+ return {
+ 'name': qpi_spec['name'],
+ 'score': 2048,
+ 'description': qpi_spec['description'],
+ 'details': {
+ 'metrics': metrics,
+ 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+ 'baseline': ""
+ }
+ }
+
+
+@export_to_file
+def calc_qpi(qpi_spec, metrics, qpi_baseline):
display.vv("calculate QPI {}".format(qpi_spec['name']))
display.vvv("spec: {}".format(qpi_spec))
display.vvv("metrics: {}".format(metrics))
+ display.vvv("baseline: {}".format(qpi_baseline))
- section_results = [calc_section(s, metrics)
- for s in qpi_spec['sections']]
+ section_results = []
+ for s in qpi_spec['sections']:
+ s_baseline = query(qpi_baseline['sections']).first(
+ lambda section: section['name'] == s['name'])
+ section_results.append(calc_section(s, metrics, s_baseline))
# TODO(yujunz): use formula in spec
- standard_score = 2048
- qpi_score = int(mean([r['score'] for r in section_results]) * standard_score)
+ qpi_score = int(
+ mean([r['score'] for r in section_results]) * qpi_baseline['score'])
results = {
'score': qpi_score,
@@ -69,21 +98,26 @@ def calc_qpi(qpi_spec, metrics):
'children': section_results,
'details': {
'metrics': metrics,
- 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml"
+ 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+ 'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json"
}
}
return results
-def calc_section(section_spec, metrics):
-
+def calc_section(section_spec, metrics, section_baseline):
display.vv("calculate section {}".format(section_spec['name']))
display.vvv("spec: {}".format(section_spec))
display.vvv("metrics: {}".format(metrics))
+ display.vvv("baseline: {}".format(section_baseline))
+
+ metric_results = []
+ for m in section_spec['metrics']:
+ m_baseline = query(section_baseline['metrics']).first(
+ lambda metric: metric['name'] == m['name'])
+ metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
- metric_results = [calc_metric(m, metrics[m['name']])
- for m in section_spec['metrics']]
# TODO(yujunz): use formula in spec
section_score = mean([r['score'] for r in metric_results])
return {
@@ -94,17 +128,23 @@ def calc_section(section_spec, metrics):
}
-def calc_metric(metric_spec, metrics):
-
+def calc_metric(metric_spec, metrics, metric_baseline):
display.vv("calculate metric {}".format(metric_spec['name']))
display.vvv("spec: {}".format(metric_spec))
display.vvv("metrics: {}".format(metrics))
+    display.vvv("baseline: {}".format(metric_baseline))
# TODO(yujunz): use formula in spec
- workload_results = [{'name': w['name'],
- 'description': 'workload',
- 'score': calc_score(metrics[w['name']], w['baseline'])}
- for w in metric_spec['workloads']]
+ workload_results = []
+ for w in metric_spec['workloads']:
+        w_baseline = query(metric_baseline['workloads']).first(
+ lambda workload: workload['name'] == w['name'])
+ workload_results.append({
+ 'name': w['name'],
+ 'description': 'workload',
+ 'score': calc_score(metrics[w['name']], w_baseline['baseline'])
+ })
+
metric_score = mean([r['score'] for r in workload_results])
return {
'score': metric_score,
@@ -118,5 +158,6 @@ def calc_score(metrics, baseline):
if not isinstance(baseline, numbers.Number):
baseline = humanfriendly.parse_size(baseline)
- return mean([m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
- for m in metrics]) / baseline
+ return mean(
+ [m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
+ for m in metrics]) / baseline
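
For reference, the two building blocks the patch leans on can be tried outside Ansible. The sketch below is a toy, not part of the commit: it mirrors the by-name baseline lookup done with asq's query(...).first(...) and the arithmetic of calc_score(), i.e. the mean of the measured values divided by the baseline, with human-readable sizes normalised through humanfriendly.parse_size(). The workload names and figures are invented.

import numbers

import humanfriendly
from asq.initiators import query
from numpy import mean

# Hypothetical baseline entries; only the {'name': ..., 'baseline': ...} shape
# comes from the diff above.
workload_baselines = [
    {'name': 'dhrystone', 'baseline': 3500},
    {'name': 'dpi', 'baseline': '2 GB'},
]

# By-name lookup, mirroring query(...).first(lambda ...) in calc_qpi(),
# calc_section() and calc_metric().
w_baseline = query(workload_baselines).first(lambda w: w['name'] == 'dpi')


def score(measured, baseline):
    # Same arithmetic as the plugin's calc_score(): normalise sizes such as
    # '2 GB' to plain numbers, then divide the mean measurement by the baseline.
    if not isinstance(baseline, numbers.Number):
        baseline = humanfriendly.parse_size(baseline)
    return mean([m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
                 for m in measured]) / baseline


print(score(['1 GB', '3 GB'], w_baseline['baseline']))  # 1.0

A per-workload score of 1.0 means the measurement sits exactly at the baseline; metric and section scores are means of these ratios, and the final QPI is that mean scaled by qpi_baseline['score'] (2048 in the sketch above).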