-rw-r--r--  qtip/ansible_library/plugins/action/calculate.py             | 110
-rw-r--r--  tests/unit/ansible_library/plugins/action/calculate_test.py  |  28
2 files changed, 67 insertions(+), 71 deletions(-)
diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
index 0c3ab7db..33cda1bb 100644
--- a/qtip/ansible_library/plugins/action/calculate.py
+++ b/qtip/ansible_library/plugins/action/calculate.py
@@ -54,111 +54,105 @@ class ActionModule(ActionBase):
         if baseline_file is not None:
             with open(baseline_file) as f:
                 baseline = json.load(f)
-            return calc_qpi(spec, metrics, baseline, sysinfo, dest=dest)
+            return calc_qpi(spec, metrics, sysinfo, baseline, dest=dest)
         else:
-            return save_as_baseline(spec, metrics, sysinfo, dest=dest)
+            return calc_qpi(spec, metrics, sysinfo, None, dest=dest)
 
 
-# TODO(wuzhihui): It is more reasonable to put this function into collect.py.
-# For now metrics data is not easy to be collected from collect.py.
 @export_to_file
-def save_as_baseline(qpi_spec, metrics, sysinfo):
-    display.vv("save {} metrics as baseline".format(qpi_spec['name']))
-    display.vvv("spec: {}".format(qpi_spec))
-    display.vvv("metrics: {}".format(metrics))
-
-    return {
-        'name': qpi_spec['name'],
-        'score': 2048,
-        'description': qpi_spec['description'],
-        'system_info': sysinfo,
-        'details': {
-            'metrics': metrics,
-            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
-            'baseline': ""
-        }
-    }
-
-
-@export_to_file
-def calc_qpi(qpi_spec, metrics, qpi_baseline, sysinfo):
+def calc_qpi(qpi_spec, metrics, sysinfo, qpi_baseline):
     display.vv("calculate QPI {}".format(qpi_spec['name']))
     display.vvv("spec: {}".format(qpi_spec))
     display.vvv("metrics: {}".format(metrics))
     display.vvv("baseline: {}".format(qpi_baseline))
 
     section_results = []
-    for s in qpi_spec['sections']:
-        s_baseline = query(qpi_baseline['sections']).first(
-            lambda section: section['name'] == s['name'])
-        section_results.append(calc_section(s, metrics, s_baseline))
-
-    # TODO(yujunz): use formula in spec
-    qpi_score = int(
-        mean([r['score'] for r in section_results]) * qpi_baseline['score'])
+    qpi_score = 0
+    if qpi_baseline:
+        for s in qpi_spec['sections']:
+            s_baseline = query(qpi_baseline['sections']).first(
+                lambda section: section['name'] == s['name'])
+            section_results.append(calc_section(s, metrics, s_baseline))
+
+        # TODO(yujunz): use formula in spec
+        qpi_score = int(
+            mean([r['score'] for r in section_results]) * qpi_baseline['score'])
+    else:
+        for s in qpi_spec['sections']:
+            section_results.append(calc_section(s, metrics))
 
     results = {
         'score': qpi_score,
         'name': qpi_spec['name'],
         'description': qpi_spec['description'],
         'system_info': sysinfo,
-        'children': section_results,
-        'details': {
-            'metrics': metrics,
-            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
-            'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json"
-        }
+        'sections': section_results,
+        'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+        'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json"
     }
 
     return results
 
 
-def calc_section(section_spec, metrics, section_baseline):
+def calc_section(section_spec, metrics, section_baseline=None):
     display.vv("calculate section {}".format(section_spec['name']))
     display.vvv("spec: {}".format(section_spec))
     display.vvv("metrics: {}".format(metrics))
     display.vvv("baseline: {}".format(section_baseline))
 
     metric_results = []
-    for m in section_spec['metrics']:
-        m_baseline = query(section_baseline['metrics']).first(
-            lambda metric: metric['name'] == m['name'])
-        metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
+    section_score = 0
+    if section_baseline:
+        for m in section_spec['metrics']:
+            m_baseline = query(section_baseline['metrics']).first(
+                lambda metric: metric['name'] == m['name'])
+            metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
+        section_score = mean([r['score'] for r in metric_results])
+    else:
+        for m in section_spec['metrics']:
+            metric_results.append(calc_metric(m, metrics[m['name']]))
 
     # TODO(yujunz): use formula in spec
-    section_score = mean([r['score'] for r in metric_results])
 
     return {
         'score': section_score,
         'name': section_spec['name'],
         'description': section_spec.get('description', 'section'),
-        'children': metric_results
+        'metrics': metric_results
     }
 
 
-def calc_metric(metric_spec, metrics, metric_basline):
+def calc_metric(metric_spec, metrics, metric_baseline=None):
     display.vv("calculate metric {}".format(metric_spec['name']))
     display.vvv("spec: {}".format(metric_spec))
     display.vvv("metrics: {}".format(metrics))
-    display.vvv("baseline: {}".format(metric_basline))
+    display.vvv("baseline: {}".format(metric_baseline))
 
     # TODO(yujunz): use formula in spec
     workload_results = []
-    for w in metric_spec['workloads']:
-        w_baseline = query(metric_basline['workloads']).first(
-            lambda workload: workload['name'] == w['name'])
-        workload_results.append({
-            'name': w['name'],
-            'description': 'workload',
-            'score': calc_score(metrics[w['name']], w_baseline['baseline'])
-        })
-
-    metric_score = mean([r['score'] for r in workload_results])
+    metric_score = 0
+    if metric_baseline:
+        for w in metric_spec['workloads']:
+            w_baseline = query(metric_baseline['workloads']).first(
+                lambda workload: workload['name'] == w['name'])
+            workload_results.append({
+                'name': w['name'],
+                'description': 'workload',
+                'score': calc_score(metrics[w['name']], w_baseline['baseline']),
+                'result': metrics[w['name']][0]
+            })
+        metric_score = mean([r['score'] for r in workload_results])
+    else:
+        for w in metric_spec['workloads']:
+            workload_results.append({
+                'name': w['name'],
+                'baseline': metrics[w['name']][0]
+            })
 
     return {
         'score': metric_score,
         'name': metric_spec['name'],
         'description': metric_spec.get('description', 'metric'),
-        'children': workload_results
+        'workloads': workload_results
     }
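After this change calc_qpi() is the single entry point for both kinds of run: given a baseline it scores against it, given None it takes over the removed save_as_baseline() role and records the measured values as a baseline candidate. A minimal sketch of the two call paths (not part of the patch): the spec and sample values are borrowed from the unit-test fixtures below, while the sysinfo dict, file names and import path are invented for illustration, and it assumes the export_to_file decorator honours the dest keyword exactly as ActionModule.run() uses it.

    import json

    from qtip.ansible_library.plugins.action.calculate import calc_qpi

    qpi_spec = {
        'name': 'compute',
        'description': 'QTIP Performance Index of compute',
        'sections': [{
            'name': 'ssl',
            'metrics': [{
                'name': 'ssl_rsa',
                'workloads': [{'name': 'rsa_sign'}, {'name': 'rsa_verify'}],
            }],
        }],
    }
    # measured samples, keyed by metric name, then by workload name
    metrics = {'ssl_rsa': {'rsa_sign': [500], 'rsa_verify': [600]}}
    sysinfo = {'hostname': 'node-1'}  # placeholder system info

    # Scoring run: load a baseline document, as ActionModule.run() does.
    with open('compute-baseline.json') as f:  # hypothetical path
        baseline = json.load(f)
    scored = calc_qpi(qpi_spec, metrics, sysinfo, baseline, dest='qpi-result.json')

    # Baseline-collection run: same entry point with baseline=None; the
    # score stays 0 and each workload's first sample is recorded as its
    # future 'baseline' value.
    candidate = calc_qpi(qpi_spec, metrics, sysinfo, None, dest='baseline-candidate.json')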
diff --git a/tests/unit/ansible_library/plugins/action/calculate_test.py b/tests/unit/ansible_library/plugins/action/calculate_test.py
index a83e1286..2a0b6a7c 100644
--- a/tests/unit/ansible_library/plugins/action/calculate_test.py
+++ b/tests/unit/ansible_library/plugins/action/calculate_test.py
@@ -27,8 +27,8 @@ def metric_spec():
     return {
         "name": "ssl_rsa",
         "workloads": [
-            {"name": "rsa_sign", "baseline": 500},
-            {"name": "rsa_verify", "baseline": 600}
+            {"name": "rsa_sign"},
+            {"name": "rsa_verify"}
         ]
     }
 
@@ -92,8 +92,10 @@ def metric_result():
     return {'score': 1.0,
             'name': 'ssl_rsa',
             'description': 'metric',
-            'children': [{'description': 'workload', 'name': 'rsa_sign', 'score': 1.0},
-                         {'description': 'workload', 'name': 'rsa_verify', 'score': 1.0}]}
+            'workloads': [{'description': 'workload', 'name': 'rsa_sign',
+                           'score': 1.0, 'result': 500},
+                          {'description': 'workload', 'name': 'rsa_verify',
+                           'score': 1.0, 'result': 600}]}
 
 
 @pytest.fixture()
@@ -101,7 +103,7 @@ def section_result(metric_result):
     return {'score': 1.0,
             'name': 'ssl',
             'description': 'cryptography and SSL/TLS performance',
-            'children': [metric_result]}
+            'metrics': [metric_result]}
 
 
 @pytest.fixture()
@@ -119,16 +121,15 @@ def info():
 
 
 @pytest.fixture()
-def qpi_result(section_result, metrics, info):
+def qpi_result(section_result, info):
     return {'score': 2048,
             'name': 'compute',
             'description': 'QTIP Performance Index of compute',
             'system_info': info,
-            'children': [section_result],
-            'details': {
-                'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
-                'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
-                'metrics': metrics}}
+            'sections': [section_result],
+            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+            'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
+            }
 
 
 def test_calc_metric(metric_spec, metrics, metric_baseline, metric_result):
@@ -143,10 +144,11 @@ def test_calc_section(section_spec, metrics, section_baseline, section_result):
     assert calculate.calc_section(section_spec, metrics,
                                   section_baseline) == section_result
 
 
-def test_calc_qpi(qpi_spec, metrics, qpi_baseline, info, qpi_result):
+def test_calc_qpi(qpi_spec, metrics, qpi_baseline, qpi_result, section_spec, info):
+    section_spec['score'] = 1.0
     assert calculate.calc_qpi(qpi_spec, metrics,
-                              qpi_baseline, info) == qpi_result
+                              info, qpi_baseline) == qpi_result
 
 
 @pytest.mark.parametrize('metrics, baseline, expected', [
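The updated tests still exercise only the baseline-driven paths. A possible companion test for the new no-baseline branch of calc_metric (my sketch, not part of the patch; the inline metrics dict mirrors the sample values used by the fixtures above rather than assuming the shape of the unchanged metrics fixture):

    def test_calc_metric_without_baseline(metric_spec):
        metrics = {'rsa_sign': [500], 'rsa_verify': [600]}  # workload -> samples
        expected = {'score': 0,
                    'name': 'ssl_rsa',
                    'description': 'metric',
                    'workloads': [{'name': 'rsa_sign', 'baseline': 500},
                                  {'name': 'rsa_verify', 'baseline': 600}]}
        assert calculate.calc_metric(metric_spec, metrics) == expected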