path: root/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
#!/usr/bin/python
#
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
#
# This script is used to build dashboard-ready json results.
# It may be used for all the test cases of the Bottlenecks project:
# each new test case needs a new method format_<Test_case>_for_dashboard(results).
# v0.1: basic example with methods for Rubbos.
#
import os
import requests
import json


def get_bottlenecks_cases():
    """
    get the list of the supported test cases
    TODO: update the list when adding a new test case for the dashboard
    """
    return ["rubbos", "tu1", "tu3"]


def check_bottlenecks_case_exist(case):
    """
    check if the testcase exists
    if the test case is not defined or not declared in the list
    return False
    """
    bottlenecks_cases = get_bottlenecks_cases()

    if case is None or case not in bottlenecks_cases:
        return False
    else:
        return True


def format_bottlenecks_for_dashboard(case, results):
    """
    generic method calling the method corresponding to the test case
    check that the testcase is properly declared first
    then build the call to the specific method
    """
    if check_bottlenecks_case_exist(case):
        cmd = "format_" + case + "_for_dashboard(results)"
        res = eval(cmd)
    else:
        res = []
        print "Test cases not declared"
    return res
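# Example usage of the dispatcher above, assuming `results` was fetched from
# the result collection DB (see _get_results below for a local-test fetch):
#     dashboard_data = format_bottlenecks_for_dashboard("rubbos", results)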


def format_rubbos_for_dashboard(results):
    """
    Post processing for the Rubbos test case
    """
    test_data = [{'description': 'Rubbos results'}]

    # Graph 1:
    # ********************************
    new_element = []
    for each_result in results:
        throughput_data = [record['throughput'] for record in each_result['details']]
        new_element.append({'x': each_result['creation_date'],
                            'y': max(throughput_data)})

    test_data.append({'name': "Rubbos max throughput",
                      'info': {'type': "graph",
                               'xlabel': 'time',
                               'ylabel': 'maximal throughput'},
                      'data_set': new_element})
    return test_data
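# A minimal sample input for the Rubbos formatter above, with the record
# shape inferred from the code (the values themselves are hypothetical):
_SAMPLE_RUBBOS_RESULTS = [
    {'creation_date': '2015-09-01',
     'details': [{'throughput': 100}, {'throughput': 150}]},
]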

def format_rubbos_probe_for_dashboard(results):
    """
    Post processing for the latest run of the Rubbos test case
    """
    test_data = [{'description': 'Rubbos results'}]

    element = []
    latest_result = results[-1]["details"]
    # sort numerically: the keys are client numbers stored as strings
    for key in sorted(latest_result, key=int):
        throughput = latest_result[key]["throughput"]
        client_num = int(key)
        element.append({'x': client_num,
                        'y': throughput})
    # graph
    test_data.append({'name': "Rubbos throughput vs client number",
                      'info': {'type': "graph",
                               'xlabel': 'client number',
                               'ylabel': 'throughput'},
                      'data_set': element})

    return test_data
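# Unlike format_rubbos_for_dashboard, the probe variant above expects the
# 'details' of the latest record to be a dict keyed by client number, e.g.
# (hypothetical values):
#     {'100': {'throughput': 250}, '200': {'throughput': 480}}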

def format_tu1_for_dashboard(results):
    test_data = [{'description': 'Tu-1 performance result'}]
    line_element = []
    bar_element = {}
    last_result = results[-1]["details"]
    # sort numerically: the keys are packet sizes stored as strings
    for key in sorted(last_result, key=int):
        bandwidth = last_result[key]["Bandwidth"]
        pktsize = int(key)
        line_element.append({'x': pktsize,
                             'y': bandwidth * 1000})
        bar_element[key] = bandwidth * 1000
    # graph1, line
    test_data.append({'name': "VM2VM max single directional throughput",
                      'info': {'type': "graph",
                               'xlabel': 'pktsize',
                               'ylabel': 'bandwidth(kpps)'},
                      'data_set': line_element})
    # graph2, bar
    test_data.append({'name': "VM2VM max single directional throughput",
                      'info': {"type": "bar"},
                      'data_set': bar_element})
    return test_data


def format_tu3_for_dashboard(results):
    test_data = [{'description': 'Tu-3 performance result'}]
    new_element = []
    bar_element = {}
    last_result = results[-1]["details"]
    # sort numerically: the keys are packet sizes stored as strings
    for key in sorted(last_result, key=int):
        bandwidth = last_result[key]["Bandwidth"]
        pktsize = int(key)
        new_element.append({'x': pktsize,
                            'y': bandwidth * 1000})
        bar_element[key] = bandwidth * 1000
    # graph1, line
    test_data.append({'name': "VM2VM max bidirectional throughput",
                      'info': {'type': "graph",
                               'xlabel': 'pktsize',
                               'ylabel': 'bandwidth(kpps)'},
                      'data_set': new_element})
    # graph2, bar
    test_data.append({'name': "VM2VM max bidirectional throughput",
                      'info': {"type": "bar"},
                      'data_set': bar_element})
    return test_data
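# A minimal sample input for the tu1/tu3 formatters above, assuming 'details'
# maps a packet size (as a string) to a record with a 'Bandwidth' field; the
# *1000 scaling with a 'kpps' label suggests Mpps, but that is an inference
# (the values are hypothetical):
_SAMPLE_TU_RESULTS = [
    {'details': {'64': {'Bandwidth': 0.46}, '128': {'Bandwidth': 0.35}}},
]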


############################  For local test  ################################

def _read_sample_output(filename):
    curr_path = os.path.dirname(os.path.abspath(__file__))
    output = os.path.join(curr_path, filename)
    with open(output) as f:
        sample_output = f.read()

    result = json.loads(sample_output)
    return result


# Copied from functest/testcases/Dashboard/dashboard_utils.py,
# with some minor modifications for local test.
def _get_results(db_url, test_criteria):
    test_project = test_criteria["project"]
    testcase = test_criteria["testcase"]

    # Build headers
    headers = {'Content-Type': 'application/json'}

    # Build the request URL
    url = db_url + "/results?project=" + test_project + "&case=" + testcase

    # Send the request to the test DB
    response = requests.get(url, headers=headers)

    # Parse the response as a json object
    data = json.loads(response.text)

    # Extract the results
    return data['test_results']
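# For example, with the values used in _test below:
#     _get_results("http://213.77.62.197",
#                  {"project": "bottlenecks", "testcase": "rubbos"})
# issues GET http://213.77.62.197/results?project=bottlenecks&case=rubbos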


def _test():
    db_url = "http://213.77.62.197"
    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "rubbos"})
    test_result = format_rubbos_probe_for_dashboard(results)
    print json.dumps(test_result, indent=4)

    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu1"})
    #results = _read_sample_output("sample")
    #print json.dumps(results, indent=4)
    test_result = format_tu1_for_dashboard(results)
    print json.dumps(test_result, indent=4)
    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu3"})
    test_result = format_tu3_for_dashboard(results)
    print json.dumps(test_result, indent=4)


if __name__ == '__main__':
    _test()