Diffstat (limited to 'utils/test/reporting')
-rw-r--r--  utils/test/reporting/default.css                 |  56
-rw-r--r--  utils/test/reporting/img/weather-clear.png       | bin 0 -> 1560 bytes
-rw-r--r--  utils/test/reporting/img/weather-few-clouds.png  | bin 0 -> 1927 bytes
-rw-r--r--  utils/test/reporting/img/weather-overcast.png    | bin 0 -> 1588 bytes
-rw-r--r--  utils/test/reporting/img/weather-storm.png       | bin 0 -> 2137 bytes
-rw-r--r--  utils/test/reporting/index-status-tmpl.html      |  92
-rw-r--r--  utils/test/reporting/index-tempest-tmpl.html     |  90
-rw-r--r--  utils/test/reporting/index-vims-tmpl.html        |  91
-rw-r--r--  utils/test/reporting/index.html                  |  52
-rw-r--r--  utils/test/reporting/reporting-status.py         | 178
-rw-r--r--  utils/test/reporting/reporting-tempest.py        |  99
-rw-r--r--  utils/test/reporting/reporting-vims.py           |  83
12 files changed, 741 insertions, 0 deletions
diff --git a/utils/test/reporting/default.css b/utils/test/reporting/default.css
new file mode 100644
index 000000000..0e330e965
--- /dev/null
+++ b/utils/test/reporting/default.css
@@ -0,0 +1,56 @@
+.panel-header-item {
+    position: relative;
+    display: inline-block;
+    padding-left: 17px;
+    padding-right: 17px;
+}
+
+.panel-pod-name {
+    margin-top: 10px;
+    margin-right: 27px;
+    float:right;
+    padding: 6px;
+}
+
+.panel-default > .panel-heading .badge {
+    background-color: #007e88;
+    position: relative;
+    display: inline-block;
+}
+
+.panel-default > .panel-heading .progress-bar {
+    height: 100%;
+    position: absolute;
+    left: 0;
+    top: 0;
+    width: 100%;
+    background-color: #0095a2
+}
+.panel-default > .panel-heading h4 {
+    color: white;
+}
+
+.panel-default > .panel-heading {
+    background-color: #00ADBB;
+    overflow: hidden;
+    position: relative;
+    width: 100%;
+}
+
+th{
+    text-align: center;
+}
+
+td{
+    text-align: center;
+}
+
+.tr-danger {
+    background-color: #177870;
+    color: white;
+}
+
+.btn-more {
+    color: white;
+    background-color: #0095a2;
+}
\ No newline at end of file
diff --git a/utils/test/reporting/img/weather-clear.png b/utils/test/reporting/img/weather-clear.png
Binary files differ
new file mode 100644
index 000000000..a0d967750
--- /dev/null
+++ b/utils/test/reporting/img/weather-clear.png
diff --git a/utils/test/reporting/img/weather-few-clouds.png b/utils/test/reporting/img/weather-few-clouds.png
Binary files differ
new file mode 100644
index 000000000..acfa78398
--- /dev/null
+++ b/utils/test/reporting/img/weather-few-clouds.png
diff --git a/utils/test/reporting/img/weather-overcast.png b/utils/test/reporting/img/weather-overcast.png
Binary files differ
new file mode 100644
index 000000000..4296246d0
--- /dev/null
+++ b/utils/test/reporting/img/weather-overcast.png
diff --git a/utils/test/reporting/img/weather-storm.png b/utils/test/reporting/img/weather-storm.png
Binary files differ
new file mode 100644
index 000000000..956f0e20f
--- /dev/null
+++ b/utils/test/reporting/img/weather-storm.png
diff --git a/utils/test/reporting/index-status-tmpl.html b/utils/test/reporting/index-status-tmpl.html
new file mode 100644
index 000000000..130ecd5c1
--- /dev/null
+++ b/utils/test/reporting/index-status-tmpl.html
@@ -0,0 +1,92 @@
+ <html>
+ <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+ </head>
+ <body>
+ <div class="container">
+   <div class="masthead">
+     <h3 class="text-muted">Functest status page</h3>
+     <nav>
+       <ul class="nav nav-justified">
+         <li class="active"><a href="index.html">Home</a></li>
+         <li><a href="index-status-apex.html">Apex</a></li>
+         <li><a href="index-status-compass.html">Compass</a></li>
+         <li><a href="index-status-fuel.html">Fuel</a></li>
+         <li><a href="index-status-joid.html">Joid</a></li>
+       </ul>
+     </nav>
+   </div>
+   <div class="row">
+     <div class="col-md-1"></div>
+     <div class="col-md-10">
+       <div class="page-header">
+         <h2>{{installer}}</h2>
+       </div>
+
+       <div class="scenario-overview">
+         <div class="panel-heading"><h4><b>Scenarios run over the last 7 days</b></h4></div>
+         <table class="table">
+           <tr>
+             <th width="80%">Scenario</th>
+             <th width="20%">Iteration</th>
+           </tr>
+           {% for scenario,iteration in scenario_stats.iteritems() -%}
+             <tr class="tr-ok">
+               <td>{{scenario}}</td>
+               <td>{{iteration}}</td>
+             </tr>
+           {%- endfor %}
+         </table>
+       </div>
+
+
+
+       {% for scenario, iteration in scenario_stats.iteritems() -%}
+         <div class="scenario-part">
+           <div class="page-header">
+             <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario}}</b></h3>
+           </div>
+           <div class="panel panel-default">
+             <div class="panel-heading">
+               <span class="panel-header-item">
+               </span>
+             </div>
+             <table class="table">
+               <tr>
+                 {% for test in items[scenario] -%}
+                   <th>{{test.getName() }}</th>
+                 {%- endfor %}
+               </tr>
+               <tr class="tr-weather-weather">
+                 {% for test in items[scenario] -%}
+                   {% if test.getCriteria() > 3 -%}
+                     <td><img src="./img/weather-clear.png"></td>
+                   {%- elif test.getCriteria() > 2 -%}
+                     <td><img src="./img/weather-few-clouds.png"></td>
+                   {%- elif test.getCriteria() > 1 -%}
+                     <td><img src="./img/weather-overcast.png"></td>
+                   {%- else -%}
+                     <td><img src="./img/weather-storm.png"></td>
+                   {%- endif %}
+                 {%- endfor %}
+               </tr>
+             </table>
+           </div>
+         </div>
+       {%- endfor %}
+     </div>
+     <div class="col-md-1"></div>
+   </div>
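The template above is rendered by reporting-status.py later in this patch. As a minimal sketch (not part of the patch, assuming Python 2, the jinja2 package and that it runs from the reporting directory so the template file is found), this is the context it expects: scenario_stats mapping scenario names to iteration counts, items mapping scenario names to objects exposing getName()/getCriteria(), and installer. FakeTest and the scenario name are illustrative.

import jinja2

class FakeTest(object):
    # Stand-in for the TestCase class defined in reporting-status.py
    def __init__(self, name, criteria):
        self.name = name
        self.criteria = criteria
    def getName(self):
        return self.name
    def getCriteria(self):
        return self.criteria

env = jinja2.Environment(loader=jinja2.FileSystemLoader("."))
template = env.get_template("index-status-tmpl.html")
html = template.render(installer="fuel",
                       scenario_stats={"os-odl_l2-nofeature-ha": 12},
                       items={"os-odl_l2-nofeature-ha": [FakeTest("Tempest", 4),
                                                         FakeTest("vPing", 2)]})
with open("preview-status.html", "wb") as fh:
    fh.write(html)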
diff --git a/utils/test/reporting/index-tempest-tmpl.html b/utils/test/reporting/index-tempest-tmpl.html
new file mode 100644
index 000000000..be0b79734
--- /dev/null
+++ b/utils/test/reporting/index-tempest-tmpl.html
@@ -0,0 +1,90 @@
+ <html>
+ <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+ </head>
+ <body>
+ <div class="container">
+   <div class="masthead">
+     <h3 class="text-muted">Tempest status page</h3>
+     <nav>
+       <ul class="nav nav-justified">
+         <li class="active"><a href="index.html">Home</a></li>
+         <li><a href="index-tempest-apex.html">Apex</a></li>
+         <li><a href="index-tempest-compass.html">Compass</a></li>
+         <li><a href="index-tempest-fuel.html">Fuel</a></li>
+         <li><a href="index-tempest-joid.html">Joid</a></li>
+       </ul>
+     </nav>
+   </div>
+   <div class="row">
+     <div class="col-md-1"></div>
+     <div class="col-md-10">
+       <div class="page-header">
+         <h2>{{installer}}</h2>
+       </div>
+       {% for scenario_name, results in scenario_results.iteritems() -%}
+         <div class="scenario-part">
+           <div class="page-header">
+             <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
+           </div>
+           {% for result in results -%}
+             {% if loop.index > 2 -%}
+               <div class="panel panel-default" hidden>
+             {%- else -%}
+               <div class="panel panel-default">
+             {%- endif %}
+             <div class="panel-heading">
+               <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+               <span class="panel-header-item">
+                 <h4><b>{{result.creation_date}}</b></h4>
+               </span>
+               <span class="badge panel-pod-name">{{result.pod_name}}</span>
+             </div>
+             <table class="table">
+               <tr>
+                 <th width="20%">Item</th>
+                 <th width="10%">Result</th>
+                 <th width="10%">Status</th>
+                 <th width="60%">Errors</th>
+               </tr>
+               {% for item in items -%}
+                 {% if item in result.details.keys() -%}
+                   {% if result.criteria[item] -%}
+                     <tr class="tr-ok">
+                       <td>{{item}}</td>
+                       <td>{{result.details[item]}}</td>
+                       <td><span class="glyphicon glyphicon-ok"></td>
+                       <td>{{result.errors[item]}}</td>
+                     </tr>
+                   {%- else -%}
+                     <tr class="tr-danger">
+                       <td>{{item}}</td>
+                       <td>{{result.details[item]}}</td>
+                       <td><span class="glyphicon glyphicon-remove"></td>
+                       <td>{{result.errors[item]}}</td>
+                     </tr>
+                   {%- endif %}
+                 {%- endif %}
+               {%- endfor %}
+             </table>
+             </div>
+           {%- endfor %}
+           <button type="button" class="btn btn-more">More than two</button>
+         </div>
+       {%- endfor %}
+     </div>
+     <div class="col-md-1"></div>
+   </div>
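One behaviour worth noting in this template (and the vIMS one below): only the two most recent runs per scenario are fully visible; from the third onwards the panel is emitted with the hidden attribute and only revealed by the ".btn-more" button. A small self-contained check, as a sketch under the same assumptions as above (Python 2, jinja2, run from the reporting directory); the fake run dictionary is purely illustrative.

import jinja2

fake_run = {'creation_date': "2016-01-01 00:00:00", 'pod_name': "pod-x",
            'pr_step_ok': 100,
            'details': {'tests': 210, 'Success rate': "95%", 'duration': 1200},
            'criteria': {'tests': True, 'Success rate': True, 'duration': True},
            'errors': {'tests': "", 'Success rate': "", 'duration': ""}}

env = jinja2.Environment(loader=jinja2.FileSystemLoader("."))
template = env.get_template("index-tempest-tmpl.html")
html = template.render(scenario_results={"os-nosdn-nofeature-ha": [fake_run] * 5},
                       items=["tests", "Success rate", "duration"],
                       installer="apex")
# five runs -> panels 3, 4 and 5 carry the 'hidden' attribute
assert html.count('class="panel panel-default" hidden') == 3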
diff --git a/utils/test/reporting/index-vims-tmpl.html b/utils/test/reporting/index-vims-tmpl.html
new file mode 100644
index 000000000..8858182c1
--- /dev/null
+++ b/utils/test/reporting/index-vims-tmpl.html
@@ -0,0 +1,91 @@
+ <html>
+ <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+ </head>
+ <body>
+ <div class="container">
+   <div class="masthead">
+     <h3 class="text-muted">vIMS status page</h3>
+     <nav>
+       <ul class="nav nav-justified">
+         <li class="active"><a href="index.html">Home</a></li>
+         <li><a href="index-vims-fuel.html">Fuel</a></li>
+         <li><a href="index-vims-compass.html">Compass</a></li>
+         <li><a href="index-vims-joid.html">JOID</a></li>
+         <li><a href="index-vims-apex.html">APEX</a></li>
+       </ul>
+     </nav>
+   </div>
+   <div class="row">
+     <div class="col-md-1"></div>
+     <div class="col-md-10">
+       <div class="page-header">
+         <h2>{{installer}}</h2>
+       </div>
+       {% for scenario_name, results in scenario_results.iteritems() -%}
+         <div class="scenario-part">
+           <div class="page-header">
+             <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
+           </div>
+           {% for result in results -%}
+             {% if loop.index > 2 -%}
+               <div class="panel panel-default" hidden>
+             {%- else -%}
+               <div class="panel panel-default">
+             {%- endif %}
+             <div class="panel-heading">
+               <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+               <span class="panel-header-item">
+                 <h4><b>{{result.creation_date}}</b></h4>
+               </span>
+               <span class="badge panel-pod-name">{{result.pod_name}}</span>
+             </div>
+             <table class="table">
+               <tr>
+                 <th width="20%">Step</th>
+                 <th width="10%">Status</th>
+                 <th width="10%">Duration</th>
+                 <th width="60%">Result</th>
+               </tr>
+               {% for step_od_name in step_order -%}
+                 {% if step_od_name in result.details.keys() -%}
+                   {% set step_result = result.details[step_od_name] -%}
+                   {% if step_result.duration != 0 -%}
+                     <tr class="tr-ok">
+                       <td>{{step_od_name}}</td>
+                       <td><span class="glyphicon glyphicon-ok"></td>
+                       <td><b>{{step_result.duration_display}}</b></td>
+                       <td>{{step_result.result}}</td>
+                     </tr>
+                   {%- else -%}
+                     <tr class="tr-danger">
+                       <td>{{step_od_name}}</td>
+                       <td><span class="glyphicon glyphicon-remove"></td>
+                       <td><b>0s</b></td>
+                       <td>{{step_result.result}}</td>
+                     </tr>
+                   {%- endif %}
+                 {%- endif %}
+               {%- endfor %}
+             </table>
+             </div>
+           {%- endfor %}
+           <button type="button" class="btn btn-more">More than two</button>
+         </div>
+       {%- endfor %}
+     </div>
+     <div class="col-md-1"></div>
+   </div>
diff --git a/utils/test/reporting/index.html b/utils/test/reporting/index.html
new file mode 100644
index 000000000..af4033567
--- /dev/null
+++ b/utils/test/reporting/index.html
@@ -0,0 +1,52 @@
+ <html>
+ <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+ </head>
+ <body>
+ <div class="container">
+   <div class="masthead">
+     <h3 class="text-muted">Functest reporting page</h3>
+     <nav>
+       <ul class="nav nav-justified">
+         <li class="active"><a href="#">Home</a></li>
+         <li><a href="./index-status-apex.html">Status</a></li>
+         <li><a href="./index-tempest-apex.html">Tempest</a></li>
+         <li><a href="./index-vims-apex.html">vIMS</a></li>
+       </ul>
+     </nav>
+   </div>
+   <div class="row">
+     <div class="col-md-1"></div>
+     <div class="col-md-10">
+       <div class="page-main">
+         <h2>Functest</h2>
+         This project develops test suites that cover functional test cases in OPNFV.
+         <br>The test suites are integrated in the continuous integration (CI) framework and used to evaluate/validate scenarios.
+         <br> Weekly meeting: every Tuesday 8 AM UTC
+         <br> IRC channel: #opnfv-testperf
+
+         <br>
+         <h2>Useful Links</h2>
+         <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
+         <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
+         <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
+         <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
+         <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
+
+       </div>
+     </div>
+     <div class="col-md-1"></div>
+   </div>
diff --git a/utils/test/reporting/reporting-status.py b/utils/test/reporting/reporting-status.py
new file mode 100644
index 000000000..b27af4b14
--- /dev/null
+++ b/utils/test/reporting/reporting-status.py
@@ -0,0 +1,178 @@
+from urllib2 import Request, urlopen, URLError
+import urllib2
+import json
+import jinja2
+import os
+import random
+
+
+class TestCase(object):
+    def __init__(self, name, project, criteria=-1):
+        self.name = name
+        self.project = project
+        self.criteria = criteria
+
+    def getName(self):
+        return self.name
+
+    def getProject(self):
+        return self.project
+
+    def getCriteria(self):
+        return self.criteria
+
+    def setCriteria(self, criteria):
+        self.criteria = criteria
+
+
+def getApiResults(case, installer):
+    case = case.getName()
+
+    # to remove proxy (to be removed at the end, for local tests only)
+    # proxy_handler = urllib2.ProxyHandler({})
+    # opener = urllib2.build_opener(proxy_handler)
+    # urllib2.install_opener(opener)
+    url = "http://testresults.opnfv.org/testapi/results?case=" + case + "&period=30&installer=" + installer
+    # url = "http://127.0.0.1:8000/results?case=" + case + "&period=30&installer=" + installer
+    request = Request(url)
+
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'No kittez. Got an error code:', e
+
+    return results
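Note that getApiResults() still evaluates results when the HTTP request fails, which raises an UnboundLocalError. A slightly more defensive variant is sketched below; it is an assumption, not part of the patch, and it takes the case name as a plain string rather than a TestCase object. It reuses only the imports already present in this file.

def get_api_results_safe(case_name, installer):
    # Same testapi endpoint as getApiResults() above
    url = ("http://testresults.opnfv.org/testapi/results?case=" + case_name +
           "&period=30&installer=" + installer)
    try:
        return json.loads(urlopen(Request(url)).read())
    except URLError, e:
        print 'No kittez. Got an error code:', e
        # empty payload so callers can keep their "is not None" checks
        return {'test_results': None}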
+
+
+def getScenarios(case, installer):
+
+    results = getApiResults(case, installer)
+    test_results = results['test_results']
+
+    if test_results is not None:
+        test_results.reverse()
+
+        scenario_results = {}
+
+        for r in test_results:
+            # Retrieve all the scenarios per installer
+            if not r['version'] in scenario_results.keys():
+                scenario_results[r['version']] = []
+            scenario_results[r['version']].append(r)
+
+    return scenario_results
+
+
+def getScenarioStats(scenario_results):
+    scenario_stats = {}
+    for k, v in scenario_results.iteritems():
+        scenario_stats[k] = len(v)
+
+    return scenario_stats
+
+
+def getResult(testCase, installer):
+
+    # retrieve raw results
+    results = getApiResults(testCase, installer)
+    # let's concentrate on test results only
+    test_results = results['test_results']
+
+    # if results found, analyze them
+    if test_results is not None:
+        test_results.reverse()
+
+        scenario_results = {}
+
+        for r in test_results:
+            if not r['version'] in scenario_results.keys():
+                scenario_results[r['version']] = []
+            scenario_results[r['version']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            # For each scenario, we build a result object to deal with
+            # results, criteria and error handling
+            for result in scenario_results[s]:
+                result["creation_date"] = result["creation_date"].split(".")[0]
+
+    # Cannot be fully generic
+    # need to look for specific criteria case by case
+    # TODO add a criteria passed/failed in DB??
+    # TODO result["Success_criteria"] = result["success_criteria"]
+    # meanwhile just random....
+    # and consider the last random arbitrarily
+    # 4 levels for the results
+    # 3: 4+ consecutive runs passing the success criteria
+    # 2: <4 successful consecutive runs but passing the criteria
+    # 1: close to passing the success criteria
+    # 0: 0% success, not passing
+    #
+
+    return int(random.random()*4)+1
+
+# ******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+
+# as the criteria are all different, we shall use a common way to indicate
+# the criteria
+# 100 = 100% = all the tests must be OK
+# 90 = 90% = all the tests must be above a 90% success rate
+# TODO harmonize success criteria
+# some criteria could be the duration, the success rate, the packet loss,...
+# to be done case by case
+# TODO create TestCriteria Object
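The comments above describe four result levels, but getResult() still returns a random placeholder. A deterministic sketch along those lines is shown below; run_passed is a hypothetical predicate (the patch notes that a passed/failed criterion is not yet stored in the DB), and the returned 1-4 range matches the thresholds used by the weather icons in index-status-tmpl.html.

def score_from_runs(runs, run_passed):
    # Look at the four most recent runs for one scenario
    recent = [run_passed(r) for r in runs[0:4]]
    if len(recent) >= 4 and all(recent):
        return 4   # 4 consecutive runs passing the criteria
    elif recent and all(recent):
        return 3   # passing, but fewer than 4 runs available
    elif any(recent):
        return 2   # close to passing
    else:
        return 1   # not passing at all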
+
+
+installers = ["apex", "compass", "fuel", "joid"]
+# init just Tempest to get the scenarios, as all the scenarios run Tempest
+tempest = TestCase("Tempest", "functest", -1)
+
+for installer in installers:
+
+    scenario_results = getScenarios(tempest, installer)
+    scenario_stats = getScenarioStats(scenario_results)
+
+    items = {}
+
+    for s, s_result in scenario_results.items():
+
+        vPing = TestCase("vPing", "functest")
+        vPing_userdata = TestCase("vPing_userdata", "functest")
+        tempest = TestCase("Tempest", "functest")
+        rally = TestCase("Rally", "functest")
+        odl = TestCase("ODL", "functest")
+        onos = TestCase("ONOS", "functest")
+        ovno = TestCase("OVNO", "functest")
+        vIMS = TestCase("vIMS", "functest")
+        doctor = TestCase("doctor-notification", "doctor")
+        promise = TestCase("promise", "promise")
+        odl_vpn = TestCase("ODL VPN Service tests", "sdnvpn")
+        bgpvpn_api = TestCase("OpenStack Neutron BGPVPN API extension tests",
+                              "sdnvpn")
+        testCases = [vPing, vPing_userdata, tempest, rally, odl, onos, vIMS,
+                     doctor, promise]
+
+        for testCase in testCases:
+            result = getResult(testCase, installer)
+            testCase.setCriteria(result)
+            # print "case %s (%s) = %s " % (testCase.getName(), s, result)
+        items[s] = testCases
+
+    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+    templateEnv = jinja2.Environment(loader=templateLoader)
+
+    TEMPLATE_FILE = "index-status-tmpl.html"
+    template = templateEnv.get_template(TEMPLATE_FILE)
+
+    outputText = template.render(scenario_stats=scenario_stats,
+                                 items=items,
+                                 installer=installer)
+
+    with open("index-status-" + installer + ".html", "wb") as fh:
+        fh.write(outputText)
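The "reverse, group by scenario, keep the five most recent runs" block above is repeated in reporting-tempest.py and reporting-vims.py below. A shared helper along these lines (a sketch, not part of the patch) would remove the duplication:

def group_by_scenario(test_results, keep=5):
    # Most recent results first, grouped by the 'version' (scenario) field
    ordered = list(reversed(test_results))
    scenario_results = {}
    for r in ordered:
        scenario_results.setdefault(r['version'], []).append(r)
    return dict((s, runs[0:keep]) for s, runs in scenario_results.items())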
diff --git a/utils/test/reporting/reporting-tempest.py b/utils/test/reporting/reporting-tempest.py
new file mode 100644
index 000000000..944b42809
--- /dev/null
+++ b/utils/test/reporting/reporting-tempest.py
@@ -0,0 +1,99 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+import os
+
+installers = ["apex", "compass", "fuel", "joid"]
+items = ["tests", "Success rate", "duration"]
+
+for installer in installers:
+    # we consider the Tempest results of the last 7 days
+    url = "http://testresults.opnfv.org/testapi/results?case=Tempest"
+    request = Request(url + '&period=7&installer=' + installer)
+
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'No kittez. Got an error code:', e
+
+    test_results = results['test_results']
+    test_results.reverse()
+
+    scenario_results = {}
+    criteria = {}
+    errors = {}
+
+    for r in test_results:
+        # Retrieve all the scenarios per installer
+        if not r['version'] in scenario_results.keys():
+            scenario_results[r['version']] = []
+        scenario_results[r['version']].append(r)
+
+    for s, s_result in scenario_results.items():
+        scenario_results[s] = s_result[0:5]
+        # For each scenario, we build a result object to deal with
+        # results, criteria and error handling
+        for result in scenario_results[s]:
+            result["creation_date"] = result["creation_date"].split(".")[0]
+
+            # retrieve results
+            # ****************
+            nb_tests_run = result['details']['tests']
+            if nb_tests_run != 0:
+                success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests'])
+            else:
+                success_rate = 0
+
+            result['details']["tests"] = nb_tests_run
+            result['details']["Success rate"] = str(success_rate) + "%"
+
+            # Criteria management
+            # *******************
+            crit_tests = False
+            crit_rate = False
+            crit_time = False
+
+            # Expect that at least 200 tests are run
+            if nb_tests_run >= 200:
+                crit_tests = True
+
+            # Expect at least a 90% success rate
+            if success_rate >= 90:
+                crit_rate = True
+
+            # Expect the suite duration to stay under 45 minutes
+            if result['details']['duration'] < 2700:
+                crit_time = True
+
+            result['criteria'] = {'tests': crit_tests,
+                                  'Success rate': crit_rate,
+                                  'duration': crit_time}
+
+            # error management
+            # ****************
+
+            # TODO get information from the artifact based on the build tag
+            # to identify errors of the associated run
+            # build tag needed to wget errors on the artifacts
+            # the idea is to list the tests in error and provide the link
+            # towards the complete artifact
+            # another option would be to put the errors in the DB
+            # (in the detail section)...
+            result['errors'] = {'tests': "",
+                                'Success rate': "",
+                                'duration': ""}
+
+    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+    templateEnv = jinja2.Environment(loader=templateLoader)
+
+    TEMPLATE_FILE = "index-tempest-tmpl.html"
+    template = templateEnv.get_template(TEMPLATE_FILE)
+
+    outputText = template.render(scenario_results=scenario_results,
+                                 items=items,
+                                 installer=installer)
+
+    with open("index-tempest-" + installer + ".html", "wb") as fh:
+        fh.write(outputText)
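The three Tempest thresholds used above (at least 200 tests run, at least a 90% success rate, duration under 45 minutes) could also be expressed as data rather than inline ifs; a sketch with illustrative names, returning the same {item: bool} shape stored in result['criteria']:

TEMPEST_CRITERIA = {
    'tests': lambda details, rate: int(details['tests']) >= 200,
    'Success rate': lambda details, rate: rate >= 90,
    'duration': lambda details, rate: details['duration'] < 2700,  # 45 min
}

def evaluate_tempest_criteria(details, success_rate):
    # Evaluate every criterion against one run's 'details' dict
    return dict((name, check(details, success_rate))
                for name, check in TEMPEST_CRITERIA.items())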
diff --git a/utils/test/reporting/reporting-vims.py b/utils/test/reporting/reporting-vims.py
new file mode 100644
index 000000000..cf43f3ebc
--- /dev/null
+++ b/utils/test/reporting/reporting-vims.py
@@ -0,0 +1,83 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+import os
+
+def sig_test_format(sig_test):
+    nbPassed = 0
+    nbFailures = 0
+    nbSkipped = 0
+    for data_test in sig_test:
+        if data_test['result'] == "Passed":
+            nbPassed += 1
+        elif data_test['result'] == "Failed":
+            nbFailures += 1
+        elif data_test['result'] == "Skipped":
+            nbSkipped += 1
+    total_sig_test_result = {}
+    total_sig_test_result['passed'] = nbPassed
+    total_sig_test_result['failures'] = nbFailures
+    total_sig_test_result['skipped'] = nbSkipped
+    return total_sig_test_result
+
+installers = ["fuel", "compass", "joid", "apex"]
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+
+for installer in installers:
+    request = Request('http://testresults.opnfv.org/testapi/results?case=vIMS&installer=' + installer)
+
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'No kittez. Got an error code:', e
+
+    test_results = results['test_results']
+    test_results.reverse()
+
+    scenario_results = {}
+    for r in test_results:
+        if not r['version'] in scenario_results.keys():
+            scenario_results[r['version']] = []
+        scenario_results[r['version']].append(r)
+
+    for s, s_result in scenario_results.items():
+        scenario_results[s] = s_result[0:5]
+        for result in scenario_results[s]:
+            result["creation_date"] = result["creation_date"].split(".")[0]
+            sig_test = result['details']['sig_test']['result']
+            if not sig_test == "" and isinstance(sig_test, list):
+                format_result = sig_test_format(sig_test)
+                if format_result['failures'] > format_result['passed']:
+                    result['details']['sig_test']['duration'] = 0
+                result['details']['sig_test']['result'] = format_result
+            nb_step_ok = 0
+            nb_step = len(result['details'])
+
+            for step_name, step_result in result['details'].items():
+                if step_result['duration'] != 0:
+                    nb_step_ok += 1
+                m, s = divmod(step_result['duration'], 60)
+                m_display = ""
+                if int(m) != 0:
+                    m_display += str(int(m)) + "m "
+                step_result['duration_display'] = m_display + str(int(s)) + "s"
+
+            result['pr_step_ok'] = 0
+            if nb_step != 0:
+                result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
+
+
+    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+    templateEnv = jinja2.Environment(loader=templateLoader)
+
+    TEMPLATE_FILE = "index-vims-tmpl.html"
+    template = templateEnv.get_template(TEMPLATE_FILE)
+
+    outputText = template.render(scenario_results=scenario_results,
+                                 step_order=step_order,
+                                 installer=installer)
+
+    with open("index-vims-" + installer + ".html", "wb") as fh:
+        fh.write(outputText)
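A short worked example of sig_test_format() and of the duration formatting used in the step loop above, assuming the list-of-dicts shape the function expects (printed key order may vary):

sample = [{'result': "Passed"}, {'result': "Passed"},
          {'result': "Failed"}, {'result': "Skipped"}]
print sig_test_format(sample)
# -> {'passed': 2, 'failures': 1, 'skipped': 1}

# duration formatting, as done for each step: 754 seconds -> "12m 34s"
m, s = divmod(754, 60)
print (str(int(m)) + "m " if int(m) != 0 else "") + str(int(s)) + "s"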