From ab72f9c49cabd945d35c946dce4158ea8e228e2a Mon Sep 17 00:00:00 2001
From: Yujun Zhang
Date: Sat, 13 May 2017 21:52:37 +0800
Subject: Implement sunburst badge for QPI

Change-Id: Iccdec7b0ac223a38c846f73adc6bd0e53db3723b
Signed-off-by: Yujun Zhang
---
 qtip/ansible_library/plugins/action/aggregate.py |  12 +-
 qtip/ansible_library/plugins/action/calculate.py |  30 +-
 resources/QPI/compute.yaml                       |   1 -
 resources/ansible_roles/qtip/tasks/aggregate.yml |   7 +-
 resources/template/qpi.html.j2                   | 323 +++++++++++++++++++++
 tests/data/results/expected.json                 |  16 +-
 .../plugins/action/calculate_test.py             |  22 +-
 7 files changed, 384 insertions(+), 27 deletions(-)
 create mode 100644 resources/template/qpi.html.j2

diff --git a/qtip/ansible_library/plugins/action/aggregate.py b/qtip/ansible_library/plugins/action/aggregate.py
index f1451e06..36ea0ef1 100644
--- a/qtip/ansible_library/plugins/action/aggregate.py
+++ b/qtip/ansible_library/plugins/action/aggregate.py
@@ -42,9 +42,15 @@ class ActionModule(ActionBase):
 # aggregate QPI results
 @export_to_file
 def aggregate(hosts, basepath, src):
-    host_results = [{'host': host, 'result': json.load(open(os.path.join(basepath, host, src)))} for host in hosts]
-    score = int(mean([r['result']['score'] for r in host_results]))
+    host_results = []
+    for host in hosts:
+        host_result = json.load(open(os.path.join(basepath, host, src)))
+        host_result['name'] = host
+        host_results.append(host_result)
+    score = int(mean([r['score'] for r in host_results]))
     return {
         'score': score,
-        'host_results': host_results
+        'name': 'compute',
+        'description': 'POD Compute QPI',
+        'children': host_results
     }
diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
index 8d5fa1f7..d50222fe 100644
--- a/qtip/ansible_library/plugins/action/calculate.py
+++ b/qtip/ansible_library/plugins/action/calculate.py
@@ -55,18 +55,22 @@ def calc_qpi(qpi_spec, metrics):
     display.vvv("spec: {}".format(qpi_spec))
     display.vvv("metrics: {}".format(metrics))
 
-    section_results = [{'name': s['name'], 'result': calc_section(s, metrics)}
+    section_results = [calc_section(s, metrics)
                        for s in qpi_spec['sections']]
 
     # TODO(yujunz): use formula in spec
     standard_score = 2048
-    qpi_score = int(mean([r['result']['score'] for r in section_results]) * standard_score)
+    qpi_score = int(mean([r['score'] for r in section_results]) * standard_score)
 
     results = {
-        'spec': qpi_spec,
         'score': qpi_score,
-        'section_results': section_results,
-        'metrics': metrics
+        'name': qpi_spec['name'],
+        'description': qpi_spec['description'],
+        'children': section_results,
+        'details': {
+            'metrics': metrics,
+            'spec': qpi_spec
+        }
     }
 
     return results
@@ -78,13 +82,15 @@ def calc_section(section_spec, metrics):
     display.vvv("spec: {}".format(section_spec))
     display.vvv("metrics: {}".format(metrics))
 
-    metric_results = [{'name': m['name'], 'result': calc_metric(m, metrics[m['name']])}
+    metric_results = [calc_metric(m, metrics[m['name']])
                       for m in section_spec['metrics']]
     # TODO(yujunz): use formula in spec
-    section_score = mean([r['result']['score'] for r in metric_results])
+    section_score = mean([r['score'] for r in metric_results])
     return {
         'score': section_score,
-        'metric_results': metric_results
+        'name': section_spec['name'],
+        'description': section_spec.get('description', 'section'),
+        'children': metric_results
     }
 
 
@@ -95,12 +101,16 @@ def calc_metric(metric_spec, metrics):
     display.vvv("metrics: {}".format(metrics))
 
     # TODO(yujunz): use formula in spec
-    workload_results = [{'name': w['name'], 'score': calc_score(metrics[w['name']], w['baseline'])}
+    workload_results = [{'name': w['name'],
+                         'description': 'workload',
+                         'score': calc_score(metrics[w['name']], w['baseline'])}
                         for w in metric_spec['workloads']]
     metric_score = mean([r['score'] for r in workload_results])
     return {
         'score': metric_score,
-        'workload_results': workload_results
+        'name': metric_spec['name'],
+        'description': metric_spec.get('description', 'metric'),
+        'children': workload_results
     }
 
 
diff --git a/resources/QPI/compute.yaml b/resources/QPI/compute.yaml
index e69a463e..775f5c96 100644
--- a/resources/QPI/compute.yaml
+++ b/resources/QPI/compute.yaml
@@ -18,7 +18,6 @@ sections: # split based on different application
         formual: geometric mean
         workloads:
           - name: rsa_sign_512
-            description: RSA signature 512 bits
             baseline: 14982.3
           - name: rsa_verify_512
             baseline: 180619.2
diff --git a/resources/ansible_roles/qtip/tasks/aggregate.yml b/resources/ansible_roles/qtip/tasks/aggregate.yml
index 9ecdc700..904fc5d6 100644
--- a/resources/ansible_roles/qtip/tasks/aggregate.yml
+++ b/resources/ansible_roles/qtip/tasks/aggregate.yml
@@ -14,5 +14,10 @@
     group: compute
     basepath: "{{ qtip_results }}/current"
     src: "compute.json"
-    dest: "{{ pod_name }}-qpi.json"
+    dest: "qpi.json"
   register: pod_result
+
+- name: generating HTML report
+  template:
+    src: "{{ qtip_resources }}/template/qpi.html.j2"
+    dest: "{{ qtip_results }}/current/index.html"
diff --git a/resources/template/qpi.html.j2 b/resources/template/qpi.html.j2
new file mode 100644
index 00000000..3515676a
--- /dev/null
+++ b/resources/template/qpi.html.j2
@@ -0,0 +1,323 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tests/data/results/expected.json b/tests/data/results/expected.json
index a495d999..e77200d4 100644
--- a/tests/data/results/expected.json
+++ b/tests/data/results/expected.json
@@ -1,7 +1,15 @@
 {
   "score": 150,
-  "host_results": [
-    {"host": "host1", "result": {"score": 100}},
-    {"host": "host2", "result": {"score": 200}}
-  ]
+  "children": [
+    {
+      "name": "host1",
+      "score": 100
+    },
+    {
+      "name": "host2",
+      "score": 200
+    }
+  ],
+  "description": "POD Compute QPI",
+  "name": "compute"
 }
diff --git a/tests/unit/ansible_library/plugins/action/calculate_test.py b/tests/unit/ansible_library/plugins/action/calculate_test.py
index 68a03e2a..31d72120 100644
--- a/tests/unit/ansible_library/plugins/action/calculate_test.py
+++ b/tests/unit/ansible_library/plugins/action/calculate_test.py
@@ -45,8 +45,8 @@ def section_spec(metric_spec):
 @pytest.fixture
 def qpi_spec(section_spec):
     return {
-        "description": "QTIP Performance Index of compute",
         "name": "compute",
+        "description": "QTIP Performance Index of compute",
         "sections": [section_spec]
     }
 
@@ -54,23 +54,29 @@ def qpi_spec(section_spec):
 @pytest.fixture()
 def metric_result():
     return {'score': 1.0,
-            'workload_results': [
-                {'name': 'rsa_sign', 'score': 1.0},
-                {'name': 'rsa_verify', 'score': 1.0}]}
+            'name': 'ssl_rsa',
+            'description': 'metric',
+            'children': [{'description': 'workload', 'name': 'rsa_sign', 'score': 1.0},
+                         {'description': 'workload', 'name': 'rsa_verify', 'score': 1.0}]}
 
 
 @pytest.fixture()
 def section_result(metric_result):
     return {'score': 1.0,
-            'metric_results': [{'name': 'ssl_rsa', 'result': metric_result}]}
+            'name': 'ssl',
+            'description': 'cryptography and SSL/TLS performance',
+            'children': [metric_result]}
 
 
 @pytest.fixture()
 def qpi_result(qpi_spec, section_result, metrics):
     return {'score': 2048,
-            'spec': qpi_spec,
-            'metrics': metrics,
-            'section_results': [{'name': 'ssl', 'result': section_result}]}
+            'name': 'compute',
+            'description': 'QTIP Performance Index of compute',
+            'children': [section_result],
+            'details': {
+                'spec': qpi_spec,
+                'metrics': metrics}}
 
 
 def test_calc_metric(metric_spec, metrics, metric_result):
-- 
cgit 1.2.3-korg
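
For reference, the refactoring above gives aggregate() and the calc_qpi/calc_section/calc_metric helpers a single output shape -- a node carrying 'name', 'description', 'score' and 'children' -- so POD, QPI, section, metric and workload results nest into one tree that the new qpi.html.j2 sunburst template is meant to visualize. The sketch below is illustrative only and is not part of the patch: the tree literal mirrors tests/data/results/expected.json, and the walk() helper is a hypothetical traversal, not a function from the QTIP codebase.

# Illustrative sketch (not part of the patch): the unified QPI result tree and a
# hypothetical depth-first walk, the same order in which a sunburst chart lays
# out its arcs from the centre outwards.
pod_result = {
    'name': 'compute',
    'description': 'POD Compute QPI',
    'score': 150,
    'children': [
        {'name': 'host1', 'score': 100},
        {'name': 'host2', 'score': 200},
    ],
}


def walk(node, depth=0):
    """Print each node indented by its depth in the tree."""
    print('{}{}: {}'.format('  ' * depth, node['name'], node['score']))
    for child in node.get('children', []):
        walk(child, depth + 1)


walk(pod_result)
# compute: 150
#   host1: 100
#   host2: 200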