#!/usr/bin/python
##############################################################################
# Copyright (c) 2017 ZTE Corporation
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
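"""Ansible action plugin to calculate QPI (QTIP Performance Index) scores.

Loads a QPI spec (YAML) and collected metrics (JSON), then aggregates
workload scores into metric, section and overall QPI scores.
"""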
import humanfriendly
import json
import numbers
from numpy import mean
import yaml

from ansible.plugins.action import ActionBase
from ansible.utils.display import Display

from qtip.util.export_to import export_to_file

display = Display()


class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
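        """Entry point invoked by Ansible when the action runs.

        Task args used below: 'spec' (path to the QPI spec YAML),
        'metrics' (mapping from metric name to a JSON result file) and
        'dest' (output path, consumed by the export_to_file decorator,
        since calc_qpi itself takes no such parameter).
        """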
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        if result.get('skipped', False):
            return result

        # Load the QPI spec (YAML).
        with open(self._task.args.get('spec')) as stream:
            spec = yaml.safe_load(stream)

        # Load each metric result file (JSON), keyed by metric name.
        metrics_files = self._task.args.get('metrics')
        metrics = {}
        for metric, filename in metrics_files.items():
            with open(filename) as f:
                metrics[metric] = json.load(f)

        dest = self._task.args.get('dest')
        return calc_qpi(spec, metrics, dest=dest)


@export_to_file
def calc_qpi(qpi_spec, metrics):
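    """Aggregate section results into an overall QPI result.

    The overall score is the mean of all section scores scaled by a
    fixed standard score of 2048 (a placeholder until the formula is
    taken from the spec, per the TODO below).
    """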
display.vv("calculate QPI {}".format(qpi_spec['name']))
display.vvv("spec: {}".format(qpi_spec))
display.vvv("metrics: {}".format(metrics))
section_results = [{'name': s['name'], 'result': calc_section(s, metrics)}
for s in qpi_spec['sections']]
# TODO(yujunz): use formula in spec
    standard_score = 2048
    qpi_score = int(mean([r['result']['score'] for r in section_results])
                    * standard_score)
    results = {
        'spec': qpi_spec,
        'score': qpi_score,
        'section_results': section_results,
        'metrics': metrics
    }
    return results


def calc_section(section_spec, metrics):
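    """Score a section as the mean of its metric scores."""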
display.vv("calculate section {}".format(section_spec['name']))
display.vvv("spec: {}".format(section_spec))
display.vvv("metrics: {}".format(metrics))
    metric_results = [{'name': m['name'],
                       'result': calc_metric(m, metrics[m['name']])}
                      for m in section_spec['metrics']]
# TODO(yujunz): use formula in spec
section_score = mean([r['result']['score'] for r in metric_results])
return {
'score': section_score,
'metric_results': metric_results
    }


def calc_metric(metric_spec, metrics):
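    """Score a metric as the mean of its workload scores."""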
display.vv("calculate metric {}".format(metric_spec['name']))
display.vvv("spec: {}".format(metric_spec))
display.vvv("metrics: {}".format(metrics))
# TODO(yujunz): use formula in spec
    workload_results = [{'name': w['name'],
                         'score': calc_score(metrics[w['name']],
                                             w['baseline'])}
                        for w in metric_spec['workloads']]
metric_score = mean([r['score'] for r in workload_results])
return {
'score': metric_score,
'workload_results': workload_results
    }


def calc_score(metrics, baseline):
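    """Normalize the mean of the measured values against the baseline.

    Measured values and baseline may be plain numbers or human-readable
    size strings such as '10 GB', which are converted with
    humanfriendly.parse_size() before averaging.
    """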
if not isinstance(baseline, numbers.Number):
baseline = humanfriendly.parse_size(baseline)
return mean([m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
for m in metrics]) / baseline