 qtip/ansible_library/plugins/action/calculate.py            | 79 ++++++++++
 tests/unit/ansible_library/plugins/action/calculate_test.py | 87 ++++++++++
 2 files changed, 166 insertions(+), 0 deletions(-)
diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
new file mode 100644
index 00000000..030c4cde
--- /dev/null
+++ b/qtip/ansible_library/plugins/action/calculate.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from numpy import mean
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
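+    """calculate QTIP Performance Index (QPI) from collected metrics"""
+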
+    def run(self, tmp=None, task_vars=None):
+
+        if task_vars is None:
+            task_vars = dict()
+
+        result = super(ActionModule, self).run(tmp, task_vars)
+
+        if result.get('skipped', False):
+            return result
+
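+        # QPI spec and collected metrics are passed in as task arguments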
+        spec = self._task.args.get('spec')
+        metrics = self._task.args.get('metrics')
+
+        return calc_qpi(spec, metrics)
+
+
+def calc_qpi(qpi_spec, metrics):
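+    """calculate QPI score from a QPI spec and collected metrics
+
+    The overall QPI score is the arithmetic mean of its section scores.
+    """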
+    section_results = [{'name': s['name'], 'result': calc_section(s, metrics)}
+                       for s in qpi_spec['sections']]
+    # TODO(yujunz): use formula in spec
+    qpi_score = mean([r['result']['score'] for r in section_results])
+    return {
+        'spec': qpi_spec,
+        'score': qpi_score,
+        'section_results': section_results,
+        'metrics': metrics
+    }
+
+
+def calc_section(section_spec, metrics):
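+    """calculate section score as the arithmetic mean of its metric scores"""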
+    metric_results = [{'name': m['name'], 'result': calc_metric(m, metrics[m['name']])}
+                      for m in section_spec['metrics']]
+    # TODO(yujunz): use formula in spec
+    section_score = mean([r['result']['score'] for r in metric_results])
+    return {
+        'score': section_score,
+        'metric_results': metric_results
+    }
+
+
+def calc_metric(metric_spec, metrics):
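+    """calculate metric score from workload samples
+
+    Each workload scores mean(samples) / baseline; the metric score is the
+    arithmetic mean of the workload scores.
+    """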
+    # TODO(yujunz): use formula in spec
+    workload_results = [{'name': w['name'], 'score': mean(metrics[w['name']]) / w['baseline']}
+                        for w in metric_spec['workloads']]
+    metric_score = mean([r['score'] for r in workload_results])
+    return {
+        'score': metric_score,
+        'workload_results': workload_results
+    }
diff --git a/tests/unit/ansible_library/plugins/action/calculate_test.py b/tests/unit/ansible_library/plugins/action/calculate_test.py
new file mode 100644
index 00000000..ae163102
--- /dev/null
+++ b/tests/unit/ansible_library/plugins/action/calculate_test.py
@@ -0,0 +1,87 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import pytest
+
+from qtip.ansible_library.plugins.action import calculate
+
+
+@pytest.fixture()
+def metrics():
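+    # collected samples, keyed by metric name then workload name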
+    return {
+        "ssl_rsa": {
+            "rsa_sign": [500],
+            "rsa_verify": [600]
+        }
+    }
+
+
+@pytest.fixture()
+def metric_spec():
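+    # baselines equal the sampled means, so every score evaluates to 1.0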
+    return {
+        "name": "ssl_rsa",
+        "workloads": [
+            {"name": "rsa_sign", "baseline": 500},
+            {"name": "rsa_verify", "baseline": 600}
+        ]
+    }
+
+
+@pytest.fixture()
+def section_spec(metric_spec):
+    return {
+        "name": "ssl",
+        "description": "cryptography and SSL/TLS performance",
+        "metrics": [metric_spec]
+    }
+
+
+@pytest.fixture()
+def qpi_spec(section_spec):
+    return {
+        "description": "QTIP Performance Index of compute",
+        "name": "compute",
+        "sections": [section_spec]
+    }
+
+
+@pytest.fixture()
+def metric_result():
+    return {'score': 1.0,
+            'workload_results': [
+                {'name': 'rsa_sign', 'score': 1.0},
+                {'name': 'rsa_verify', 'score': 1.0}]}
+
+
+@pytest.fixture()
+def section_result(metric_result):
+    return {'score': 1.0,
+            'metric_results': [{'name': 'ssl_rsa', 'result': metric_result}]}
+
+
+@pytest.fixture()
+def qpi_result(qpi_spec, section_result, metrics):
+    return {'score': 1.0,
+            'spec': qpi_spec,
+            'metrics': metrics,
+            'section_results': [{'name': 'ssl', 'result': section_result}]}
+
+
+def test_calc_metric(metric_spec, metrics, metric_result):
+    assert calculate.calc_metric(metric_spec, metrics['ssl_rsa']) == metric_result
+
+
+def test_calc_section(section_spec, metrics, section_result):
+    assert calculate.calc_section(section_spec, metrics) == section_result
+
+
+def test_calc_qpi(qpi_spec, metrics, qpi_result):
+    assert calculate.calc_qpi(qpi_spec, metrics) == qpi_result