author | Taseer <taseer94@gmail.com> | 2017-06-20 22:30:23 +0500
committer | Taseer <taseer94@gmail.com> | 2017-06-26 23:17:19 +0500
commit | 024cf82595afef0a1da16b6c6604c6cb52d41dfb (patch)
tree | cb29d78772f97e1f9924c749a92093995f3b9f5c /tests/unit/ansible_library/plugins/action/calculate_test.py
parent | 26c2a8f2d556a8777d443b02d931e7df7fec8f6c (diff)
Refactor output format
- Merge scores and results in qpi.json
- The metrics: {} section will be removed entirely as a consequence
- Make the baseline file use the same format as the spec
Change-Id: Id0cc487002a38c51736de07f3759fd78d42a0b9c
Signed-off-by: Taseer Ahmed <taseer94@gmail.com>
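
For orientation, here is a minimal sketch of the top-level result entry before and after this change. The field names are taken from the test fixtures in the diff below; how the runner actually serialises qpi.json is an assumption here, not verified against the code.

```python
# Sketch only; field names come from the test fixtures below, while the exact
# qpi.json layout written by the runner is assumed.

# Before: sections nested under 'children', spec/baseline URLs and the raw
# metrics dump tucked away under 'details'.
old_qpi = {
    "name": "compute",
    "score": 2048,
    "system_info": {},
    "children": [],      # section results
    "details": {
        "spec": "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
        "baseline": "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
        "metrics": {},   # dropped entirely by this change
    },
}

# After: an explicit 'sections' key, spec/baseline promoted to the top level,
# and (inside each section's metrics) every workload carries its raw 'result'
# next to its 'score'.
new_qpi = {
    "name": "compute",
    "score": 2048,
    "system_info": {},
    "sections": [],      # each section holds 'metrics', which hold 'workloads'
    "spec": "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
    "baseline": "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
}
```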
Diffstat (limited to 'tests/unit/ansible_library/plugins/action/calculate_test.py')
-rw-r--r-- | tests/unit/ansible_library/plugins/action/calculate_test.py | 28
1 file changed, 15 insertions, 13 deletions
diff --git a/tests/unit/ansible_library/plugins/action/calculate_test.py b/tests/unit/ansible_library/plugins/action/calculate_test.py
index a83e1286..2a0b6a7c 100644
--- a/tests/unit/ansible_library/plugins/action/calculate_test.py
+++ b/tests/unit/ansible_library/plugins/action/calculate_test.py
@@ -27,8 +27,8 @@ def metric_spec():
     return {
         "name": "ssl_rsa",
         "workloads": [
-            {"name": "rsa_sign", "baseline": 500},
-            {"name": "rsa_verify", "baseline": 600}
+            {"name": "rsa_sign"},
+            {"name": "rsa_verify"}
         ]
     }

@@ -92,8 +92,10 @@ def metric_result():
     return {'score': 1.0,
             'name': 'ssl_rsa',
             'description': 'metric',
-            'children': [{'description': 'workload', 'name': 'rsa_sign', 'score': 1.0},
-                         {'description': 'workload', 'name': 'rsa_verify', 'score': 1.0}]}
+            'workloads': [{'description': 'workload', 'name': 'rsa_sign',
+                           'score': 1.0, 'result': 500},
+                          {'description': 'workload', 'name': 'rsa_verify',
+                           'score': 1.0, 'result': 600}]}


 @pytest.fixture()
@@ -101,7 +103,7 @@ def section_result(metric_result):
     return {'score': 1.0,
             'name': 'ssl',
             'description': 'cryptography and SSL/TLS performance',
-            'children': [metric_result]}
+            'metrics': [metric_result]}


 @pytest.fixture()
@@ -119,16 +121,15 @@ def info():


 @pytest.fixture()
-def qpi_result(section_result, metrics, info):
+def qpi_result(section_result, info):
     return {'score': 2048,
             'name': 'compute',
             'description': 'QTIP Performance Index of compute',
             'system_info': info,
-            'children': [section_result],
-            'details': {
-                'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
-                'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
-                'metrics': metrics}}
+            'sections': [section_result],
+            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+            'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
+            }


 def test_calc_metric(metric_spec, metrics, metric_baseline, metric_result):
@@ -143,10 +144,11 @@ def test_calc_section(section_spec, metrics, section_baseline, section_result):
                                   section_baseline) == section_result


-def test_calc_qpi(qpi_spec, metrics, qpi_baseline, info, qpi_result):
+def test_calc_qpi(qpi_spec, metrics, qpi_baseline, qpi_result, section_spec, info):
+    section_spec['score'] = 1.0
     assert calculate.calc_qpi(qpi_spec, metrics,
-                              qpi_baseline, info) == qpi_result
+                              info, qpi_baseline) == qpi_result


 @pytest.mark.parametrize('metrics, baseline, expected', [
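
To make the "merge scores and results in qpi.json" idea concrete, below is a small self-contained sketch, not the project's calculate module. It assumes a workload score is simply result divided by baseline, which is consistent with the fixtures above (a result of 500 against a baseline of 500 gives a score of 1.0); the helper name merge_workload_scores is invented for this example.

```python
# Illustrative sketch only; not qtip's calculate module.
# Assumption: score = result / baseline, consistent with the fixtures above.


def merge_workload_scores(workload_specs, results, baselines):
    """Attach both the raw result and its score to each workload entry."""
    merged = []
    for spec in workload_specs:
        name = spec["name"]
        result = results[name]
        merged.append({
            "name": name,
            "description": "workload",
            "result": result,
            "score": result / baselines[name],
        })
    return merged


if __name__ == "__main__":
    specs = [{"name": "rsa_sign"}, {"name": "rsa_verify"}]  # spec no longer carries baselines
    results = {"rsa_sign": 500, "rsa_verify": 600}           # measured values
    baselines = {"rsa_sign": 500, "rsa_verify": 600}         # e.g. from compute-baseline.json
    print(merge_workload_scores(specs, results, baselines))
    # Both workloads score 1.0 and keep their raw result, matching metric_result above.
```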