author    zhihui wu <wu.zhihui1@zte.com.cn>    2017-06-02 01:26:19 +0000
committer Gerrit Code Review <gerrit@opnfv.org>    2017-06-02 01:26:19 +0000
commit    aa27f390c9ad6817d2682c4f59594f43c69d614f (patch)
tree      3f484c0efcc28880b387d6ca305925bc0430c796
parent    10e89733d84e6bdcd8427c34ce4452aa69cf5df0 (diff)
parent    70d8b5c5dbe78d0adb9cdb80afc93e7eb9ac4ca1 (diff)
Merge "metrics baseline in compute-baseline.json"
-rw-r--r--  qtip/ansible_library/plugins/action/calculate.py             | 87
-rw-r--r--  requirements.txt                                             |  3
-rw-r--r--  resources/ansible_roles/qtip/tasks/calculate.yml             |  1
-rw-r--r--  tests/unit/ansible_library/plugins/action/calculate_test.py  | 57
4 files changed, 117 insertions(+), 31 deletions(-)
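This merge replaces the hard-coded standard score with a baseline file (resources/QPI/compute-baseline.json) that calc_qpi scores runs against. Judging from the qpi_baseline fixtures added in calculate_test.py below, a baseline roughly takes the following shape; this is an illustrative sketch inferred from the tests, not the actual file contents:

# Shape inferred from the unit-test fixtures below -- illustrative only.
compute_baseline = {
    "name": "compute-baseline",
    "description": "The baseline for compute QPI",
    "score": 2048,                    # reference score the section means are scaled by
    "sections": [{
        "name": "ssl",
        "metrics": [{
            "name": "ssl_rsa",
            "workloads": [
                {"name": "rsa_sign", "baseline": "500"},   # strings are parsed by humanfriendly
                {"name": "rsa_verify", "baseline": 600},
            ],
        }],
    }],
}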
diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
index 077d863c..383be580 100644
--- a/qtip/ansible_library/plugins/action/calculate.py
+++ b/qtip/ansible_library/plugins/action/calculate.py
@@ -9,18 +9,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import humanfriendly
import json
import numbers
-from numpy import mean
-import yaml
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
+from asq.initiators import query
+import humanfriendly
+from numpy import mean
+import yaml
from qtip.util.export_to import export_to_file
-
display = Display()
@@ -45,22 +45,51 @@ class ActionModule(ActionBase):
metrics[metric] = json.load(f)
dest = self._task.args.get('dest')
- return calc_qpi(spec, metrics, dest=dest)
+ baseline_file = self._task.args.get('baseline')
+ if baseline_file is not None:
+ with open(baseline_file) as f:
+ baseline = json.load(f)
+ return calc_qpi(spec, metrics, baseline, dest=dest)
+ else:
+ return save_as_baseline(spec, metrics, dest=dest)
+# TODO(wuzhihui): It is more reasonable to put this function into collect.py.
+# For now, metrics data cannot easily be collected from collect.py.
@export_to_file
-def calc_qpi(qpi_spec, metrics):
+def save_as_baseline(qpi_spec, metrics):
+ display.vv("save {} metrics as baseline".format(qpi_spec['name']))
+ display.vvv("spec: {}".format(qpi_spec))
+ display.vvv("metrics: {}".format(metrics))
+ return {
+ 'name': qpi_spec['name'],
+ 'score': 2048,
+ 'description': qpi_spec['description'],
+ 'details': {
+ 'metrics': metrics,
+ 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+ 'baseline': ""
+ }
+ }
+
+
+@export_to_file
+def calc_qpi(qpi_spec, metrics, qpi_baseline):
display.vv("calculate QPI {}".format(qpi_spec['name']))
display.vvv("spec: {}".format(qpi_spec))
display.vvv("metrics: {}".format(metrics))
+ display.vvv("baseline: {}".format(qpi_baseline))
- section_results = [calc_section(s, metrics)
- for s in qpi_spec['sections']]
+ section_results = []
+ for s in qpi_spec['sections']:
+ s_baseline = query(qpi_baseline['sections']).first(
+ lambda section: section['name'] == s['name'])
+ section_results.append(calc_section(s, metrics, s_baseline))
# TODO(yujunz): use formula in spec
- standard_score = 2048
- qpi_score = int(mean([r['score'] for r in section_results]) * standard_score)
+ qpi_score = int(
+ mean([r['score'] for r in section_results]) * qpi_baseline['score'])
results = {
'score': qpi_score,
@@ -69,21 +98,26 @@ def calc_qpi(qpi_spec, metrics):
'children': section_results,
'details': {
'metrics': metrics,
- 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml"
+ 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+ 'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json"
}
}
return results
-def calc_section(section_spec, metrics):
-
+def calc_section(section_spec, metrics, section_baseline):
display.vv("calculate section {}".format(section_spec['name']))
display.vvv("spec: {}".format(section_spec))
display.vvv("metrics: {}".format(metrics))
+ display.vvv("baseline: {}".format(section_baseline))
+
+ metric_results = []
+ for m in section_spec['metrics']:
+ m_baseline = query(section_baseline['metrics']).first(
+ lambda metric: metric['name'] == m['name'])
+ metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
- metric_results = [calc_metric(m, metrics[m['name']])
- for m in section_spec['metrics']]
# TODO(yujunz): use formula in spec
section_score = mean([r['score'] for r in metric_results])
return {
@@ -94,17 +128,23 @@ def calc_section(section_spec, metrics):
}
-def calc_metric(metric_spec, metrics):
-
+def calc_metric(metric_spec, metrics, metric_baseline):
display.vv("calculate metric {}".format(metric_spec['name']))
display.vvv("spec: {}".format(metric_spec))
display.vvv("metrics: {}".format(metrics))
+ display.vvv("baseline: {}".format(metric_basline))
# TODO(yujunz): use formula in spec
- workload_results = [{'name': w['name'],
- 'description': 'workload',
- 'score': calc_score(metrics[w['name']], w['baseline'])}
- for w in metric_spec['workloads']]
+ workload_results = []
+ for w in metric_spec['workloads']:
+        w_baseline = query(metric_baseline['workloads']).first(
+ lambda workload: workload['name'] == w['name'])
+ workload_results.append({
+ 'name': w['name'],
+ 'description': 'workload',
+ 'score': calc_score(metrics[w['name']], w_baseline['baseline'])
+ })
+
metric_score = mean([r['score'] for r in workload_results])
return {
'score': metric_score,
@@ -118,5 +158,6 @@ def calc_score(metrics, baseline):
if not isinstance(baseline, numbers.Number):
baseline = humanfriendly.parse_size(baseline)
- return mean([m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
- for m in metrics]) / baseline
+ return mean(
+ [m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
+ for m in metrics]) / baseline
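calc_score itself is only re-wrapped here: it still averages the measured values, parsing human-readable sizes with humanfriendly, and divides by the (equally parsed) baseline. A quick worked example, assuming the module imports the way calculate_test.py uses it and humanfriendly's default decimal units:

from qtip.ansible_library.plugins.action import calculate  # import path assumed from the repo layout

# parse_size('1.2 KB') == 1200 and parse_size('1 KB') == 1000,
# so the score is mean([1200, 1000]) / 1000, i.e. roughly 1.1
score = calculate.calc_score(['1.2 KB', 1000], '1 KB')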
diff --git a/requirements.txt b/requirements.txt
index e601d10d..b9d0e881 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,5 @@ pbr
prettytable
six
PyYAML
-humanfriendly
\ No newline at end of file
+humanfriendly
+asq
\ No newline at end of file
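asq is the new dependency behind the baseline lookups: query(iterable).first(predicate) returns the first element satisfying the predicate. A minimal sketch of the pattern used in calc_qpi, calc_section and calc_metric:

from asq.initiators import query

sections = [{"name": "ssl"}, {"name": "memory"}]
ssl = query(sections).first(lambda s: s["name"] == "ssl")
# ssl == {"name": "ssl"}; first() raises ValueError when nothing matches,
# so a spec section that is missing from the baseline fails loudly.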
diff --git a/resources/ansible_roles/qtip/tasks/calculate.yml b/resources/ansible_roles/qtip/tasks/calculate.yml
index 63fec7fd..61e96faf 100644
--- a/resources/ansible_roles/qtip/tasks/calculate.yml
+++ b/resources/ansible_roles/qtip/tasks/calculate.yml
@@ -19,5 +19,6 @@
floatmem: "{{ qtip_results }}/memory/float-metrics.json"
arithmetic: "{{ qtip_results }}/arithmetic/metrics.json"
spec: "{{ qtip_resources }}/QPI/compute.yaml"
+ baseline: "{{ qtip_resources }}/QPI/compute-baseline.json"
dest: "{{ qtip_results }}/compute.json"
delegate_to: localhost
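The new baseline argument makes the role dual-purpose: when it is set (as above) the plugin loads the JSON and scores the run with calc_qpi; when it is omitted, the plugin records the collected metrics as a candidate baseline via save_as_baseline. Roughly, in terms of the functions added in calculate.py (a sketch with fixture-shaped arguments, not a verbatim call site):

# scored run: each section/metric/workload in the spec is looked up in the baseline
result = calculate.calc_qpi(qpi_spec, metrics, qpi_baseline, dest="compute.json")

# baseline run: no scoring; metrics are written out with the fixed reference score 2048
new_baseline = calculate.save_as_baseline(qpi_spec, metrics, dest="compute-baseline.json")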
diff --git a/tests/unit/ansible_library/plugins/action/calculate_test.py b/tests/unit/ansible_library/plugins/action/calculate_test.py
index fae59821..80a07206 100644
--- a/tests/unit/ansible_library/plugins/action/calculate_test.py
+++ b/tests/unit/ansible_library/plugins/action/calculate_test.py
@@ -42,7 +42,7 @@ def section_spec(metric_spec):
}
-@pytest.fixture
+@pytest.fixture()
def qpi_spec(section_spec):
return {
"name": "compute",
@@ -52,6 +52,42 @@ def qpi_spec(section_spec):
@pytest.fixture()
+def rsa_sign_baseline():
+ return {'name': 'rsa_sign', 'baseline': '500'}
+
+
+@pytest.fixture()
+def rsa_verify_baseline():
+ return {"name": "rsa_verify", "baseline": 600}
+
+
+@pytest.fixture()
+def metric_baseline(rsa_sign_baseline, rsa_verify_baseline):
+ return {
+ "name": "ssl_rsa",
+ "workloads": [rsa_sign_baseline, rsa_verify_baseline]
+ }
+
+
+@pytest.fixture()
+def section_baseline(metric_baseline):
+ return {
+ "name": "ssl",
+ "metrics": [metric_baseline]
+ }
+
+
+@pytest.fixture()
+def qpi_baseline(section_baseline):
+ return {
+ "name": "compute-baseline",
+ "description": "The baseline for compute QPI",
+ "score": 2048,
+ "sections": [section_baseline]
+ }
+
+
+@pytest.fixture()
def metric_result():
return {'score': 1.0,
'name': 'ssl_rsa',
@@ -76,19 +112,26 @@ def qpi_result(section_result, metrics):
'children': [section_result],
'details': {
'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+ 'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
'metrics': metrics}}
-def test_calc_metric(metric_spec, metrics, metric_result):
- assert calculate.calc_metric(metric_spec, metrics['ssl_rsa']) == metric_result
+def test_calc_metric(metric_spec, metrics, metric_baseline, metric_result):
+ assert calculate.calc_metric(metric_spec,
+ metrics['ssl_rsa'],
+ metric_baseline) == metric_result
-def test_calc_section(section_spec, metrics, section_result):
- assert calculate.calc_section(section_spec, metrics) == section_result
+def test_calc_section(section_spec, metrics, section_baseline, section_result):
+ assert calculate.calc_section(section_spec,
+ metrics,
+ section_baseline) == section_result
-def test_calc_qpi(qpi_spec, metrics, qpi_result):
- assert calculate.calc_qpi(qpi_spec, metrics) == qpi_result
+def test_calc_qpi(qpi_spec, metrics, qpi_baseline, qpi_result):
+ assert calculate.calc_qpi(qpi_spec,
+ metrics,
+ qpi_baseline) == qpi_result
@pytest.mark.parametrize('metrics, baseline, expected', [