 scripts/create_kibana_dashboards.py | 824
 scripts/kibana_cleanup.py           |  43
 scripts/mongo_to_elasticsearch.py   | 448
 scripts/shared_utils.py             |  36
 4 files changed, 1351 insertions(+), 0 deletions(-)
diff --git a/scripts/create_kibana_dashboards.py b/scripts/create_kibana_dashboards.py
new file mode 100644
index 0000000..252ce21
--- /dev/null
+++ b/scripts/create_kibana_dashboards.py
@@ -0,0 +1,824 @@
+#! /usr/bin/env python
+import logging
+import argparse
+import shared_utils
+import json
+import urlparse
+
+logger = logging.getLogger('create_kibana_dashboards')
+logger.setLevel(logging.DEBUG)
+file_handler = logging.FileHandler('/var/log/create_kibana_dashboards.log')
+file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
+logger.addHandler(file_handler)
+
+_installers = {'fuel', 'apex', 'compass', 'joid'}
+
+# see class VisualizationState for details on format
+_testcases = [
+ ('functest', 'Tempest',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "Tempest duration",
+ "test_family": "VIM"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.tests"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.failures"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "Tempest nr of tests/failures",
+ "test_family": "VIM"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.success_percentage"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "Tempest success percentage",
+ "test_family": "VIM"
+ }
+ }
+ ]
+ ),
+
+ ('functest', 'Rally',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "Rally duration",
+ "test_family": "VIM"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.tests"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "Rally nr of tests",
+ "test_family": "VIM"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.success_percentage"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "Rally success percentage",
+ "test_family": "VIM"
+ }
+ }
+ ]
+ ),
+
+ ('functest', 'vPing',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "vPing duration",
+ "test_family": "VIM"
+ }
+ }
+ ]
+ ),
+
+ ('functest', 'vPing_userdata',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "vPing_userdata duration",
+ "test_family": "VIM"
+ }
+ }
+ ]
+ ),
+
+ ('functest', 'ODL',
+ [
+ {
+ "metrics": [
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.tests"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.failures"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "ODL nr of tests/failures",
+ "test_family": "Controller"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.success_percentage"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "ODL success percentage",
+ "test_family": "Controller"
+ }
+ }
+ ]
+ ),
+
+ ('functest', 'ONOS',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.FUNCvirNet.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "ONOS FUNCvirNet duration",
+ "test_family": "Controller"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.FUNCvirNet.tests"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.FUNCvirNet.failures"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "ONOS FUNCvirNet nr of tests/failures",
+ "test_family": "Controller"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.FUNCvirNetL3.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "ONOS FUNCvirNetL3 duration",
+ "test_family": "Controller"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.FUNCvirNetL3.tests"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.FUNCvirNetL3.failures"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "ONOS FUNCvirNetL3 nr of tests/failures",
+ "test_family": "Controller"
+ }
+ }
+ ]
+ ),
+
+ ('functest', 'vIMS',
+ [
+ {
+ "metrics": [
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.sig_test.tests"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.sig_test.failures"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.sig_test.passed"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.sig_test.skipped"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "vIMS nr of tests/failures/passed/skipped",
+ "test_family": "Features"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.vIMS.duration"
+ }
+ },
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.orchestrator.duration"
+ }
+ },
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.sig_test.duration"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "vIMS/ochestrator/test duration",
+ "test_family": "Features"
+ }
+ }
+ ]
+ ),
+
+ ('promise', 'promise',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "promise duration",
+ "test_family": "Features"
+ }
+ },
+
+ {
+ "metrics": [
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.tests"
+ }
+ },
+ {
+ "type": "sum",
+ "params": {
+ "field": "details.failures"
+ }
+ }
+ ],
+ "type": "histogram",
+ "metadata": {
+ "label": "promise nr of tests/failures",
+ "test_family": "Features"
+ }
+ }
+ ]
+ ),
+
+ ('doctor', 'doctor-notification',
+ [
+ {
+ "metrics": [
+ {
+ "type": "avg",
+ "params": {
+ "field": "details.duration"
+ }
+ }
+ ],
+ "type": "line",
+ "metadata": {
+ "label": "doctor-notification duration",
+ "test_family": "Features"
+ }
+ }
+ ]
+ )
+]
+
+
+class KibanaDashboard(dict):
+ def __init__(self, project_name, case_name, installer, pod, versions, visualization_detail):
+ super(KibanaDashboard, self).__init__()
+ self.project_name = project_name
+ self.case_name = case_name
+ self.installer = installer
+ self.pod = pod
+ self.versions = versions
+ self.visualization_detail = visualization_detail
+ self._visualization_title = None
+ self._kibana_visualizations = []
+ self._kibana_dashboard = None
+ self._create_visualizations()
+ self._create()
+
+ def _create_visualizations(self):
+ for version in self.versions:
+ self._kibana_visualizations.append(KibanaVisualization(self.project_name,
+ self.case_name,
+ self.installer,
+ self.pod,
+ version,
+ self.visualization_detail))
+
+ self._visualization_title = self._kibana_visualizations[0].vis_state_title
+
+ def _publish_visualizations(self):
+ for visualization in self._kibana_visualizations:
+ url = urlparse.urljoin(base_elastic_url, '/.kibana/visualization/{}'.format(visualization.id))
+ logger.debug("publishing visualization '{}'".format(url))
+ shared_utils.publish_json(visualization, es_user, es_passwd, url)
+
+ def _construct_panels(self):
+ size_x = 6
+ size_y = 3
+ max_columns = 7
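+        # panels sit on Kibana's 12-unit grid: each one is 6 wide and 3 tall,
+        # i.e. two per row; once 'column' passes max_columns the next panel
+        # wraps onto a new row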
+ column = 1
+ row = 1
+ panel_index = 1
+ panels_json = []
+ for visualization in self._kibana_visualizations:
+ panels_json.append({
+ "id": visualization.id,
+ "type": 'visualization',
+ "panelIndex": panel_index,
+ "size_x": size_x,
+ "size_y": size_y,
+ "col": column,
+ "row": row
+ })
+ panel_index += 1
+ column += size_x
+ if column > max_columns:
+ column = 1
+ row += size_y
+ return json.dumps(panels_json, separators=(',', ':'))
+
+ def _create(self):
+ self['title'] = '{} {} {} {} {}'.format(self.project_name,
+ self.case_name,
+ self.installer,
+ self._visualization_title,
+ self.pod)
+ self.id = self['title'].replace(' ', '-').replace('/', '-')
+
+ self['hits'] = 0
+ self['description'] = "Kibana dashboard for project_name '{}', case_name '{}', installer '{}', data '{}' and" \
+ " pod '{}'".format(self.project_name,
+ self.case_name,
+ self.installer,
+ self._visualization_title,
+ self.pod)
+ self['panelsJSON'] = self._construct_panels()
+ self['optionsJSON'] = json.dumps({
+ "darkTheme": False
+ },
+ separators=(',', ':'))
+ self['uiStateJSON'] = "{}"
+ self['version'] = 1
+ self['timeRestore'] = False
+ self['kibanaSavedObjectMeta'] = {
+ 'searchSourceJSON': json.dumps({
+ "filter": [
+ {
+ "query": {
+ "query_string": {
+ "query": "*",
+ "analyze_wildcard": True
+ }
+ }
+ }
+ ]
+ },
+ separators=(',', ':'))
+ }
+ self['metadata'] = self.visualization_detail['metadata']
+
+ def _publish(self):
+ url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
+ logger.debug("publishing dashboard '{}'".format(url))
+ shared_utils.publish_json(self, es_user, es_passwd, url)
+
+ def publish(self):
+ self._publish_visualizations()
+ self._publish()
+
+
+class KibanaSearchSourceJSON(dict):
+ """
+ "filter": [
+ {"match": {"installer": {"query": installer, "type": "phrase"}}},
+ {"match": {"project_name": {"query": project_name, "type": "phrase"}}},
+ {"match": {"case_name": {"query": case_name, "type": "phrase"}}}
+ ]
+ """
+
+ def __init__(self, project_name, case_name, installer, pod, version):
+ super(KibanaSearchSourceJSON, self).__init__()
+ self["filter"] = [
+ {"match": {"project_name": {"query": project_name, "type": "phrase"}}},
+ {"match": {"case_name": {"query": case_name, "type": "phrase"}}},
+ {"match": {"installer": {"query": installer, "type": "phrase"}}},
+ {"match": {"version": {"query": version, "type": "phrase"}}}
+ ]
+ if pod != 'all':
+ self["filter"].append({"match": {"pod_name": {"query": pod, "type": "phrase"}}})
+
+
+class VisualizationState(dict):
+ def __init__(self, input_dict):
+ """
+ dict structure:
+ {
+ "metrics":
+ [
+ {
+ "type": type, # default sum
+ "params": {
+ "field": field # mandatory, no default
+ },
+ {metric2}
+ ],
+ "segments":
+ [
+ {
+ "type": type, # default date_histogram
+ "params": {
+ "field": field # default creation_date
+ },
+ {segment2}
+ ],
+ "type": type, # default area
+ "mode": mode, # default grouped for type 'histogram', stacked for other types
+ "metadata": {
+ "label": "Tempest duration",# mandatory, no default
+ "test_family": "VIM" # mandatory, no default
+ }
+ }
+
+ default modes:
+ type histogram: grouped
+ type area: stacked
+
+ :param input_dict:
+ :return:
+ """
+ super(VisualizationState, self).__init__()
+ metrics = input_dict['metrics']
+ segments = [] if 'segments' not in input_dict else input_dict['segments']
+
+ graph_type = 'area' if 'type' not in input_dict else input_dict['type']
+ self['type'] = graph_type
+
+ if 'mode' not in input_dict:
+ if graph_type == 'histogram':
+ mode = 'grouped'
+ else:
+ # default
+ mode = 'stacked'
+ else:
+ mode = input_dict['mode']
+ self['params'] = {
+ "shareYAxis": True,
+ "addTooltip": True,
+ "addLegend": True,
+ "smoothLines": False,
+ "scale": "linear",
+ "interpolate": "linear",
+ "mode": mode,
+ "times": [],
+ "addTimeMarker": False,
+ "defaultYExtents": False,
+ "setYExtents": False,
+ "yAxis": {}
+ }
+
+ self['aggs'] = []
+
+ i = 1
+ for metric in metrics:
+ self['aggs'].append({
+ "id": str(i),
+ "type": 'sum' if 'type' not in metric else metric['type'],
+ "schema": "metric",
+ "params": {
+ "field": metric['params']['field']
+ }
+ })
+ i += 1
+
+ if len(segments) > 0:
+ for segment in segments:
+ self['aggs'].append({
+ "id": str(i),
+ "type": 'date_histogram' if 'type' not in segment else segment['type'],
+ "schema": "metric",
+ "params": {
+ "field": "creation_date" if ('params' not in segment or 'field' not in segment['params'])
+ else segment['params']['field'],
+ "interval": "auto",
+ "customInterval": "2h",
+ "min_doc_count": 1,
+ "extended_bounds": {}
+ }
+ })
+ i += 1
+ else:
+ self['aggs'].append({
+ "id": str(i),
+ "type": 'date_histogram',
+ "schema": "segment",
+ "params": {
+ "field": "creation_date",
+ "interval": "auto",
+ "customInterval": "2h",
+ "min_doc_count": 1,
+ "extended_bounds": {}
+ }
+ })
+
+ self['listeners'] = {}
+ self['title'] = ' '.join(['{} {}'.format(x['type'], x['params']['field']) for x in self['aggs']
+ if x['schema'] == 'metric'])
+
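+# A minimal sketch (illustrative, not executed) of how one _testcases entry is
+# turned into a visualization state; the title is derived from the metric aggs
+# and a default date_histogram segment on 'creation_date' is appended:
+#
+#   vis_state = VisualizationState({
+#       "metrics": [{"type": "avg", "params": {"field": "details.duration"}}],
+#       "type": "line",
+#       "metadata": {"label": "Tempest duration", "test_family": "VIM"}
+#   })
+#   assert vis_state['title'] == 'avg details.duration'
+#   assert vis_state['aggs'][-1]['type'] == 'date_histogram'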
+
+class KibanaVisualization(dict):
+ def __init__(self, project_name, case_name, installer, pod, version, detail):
+ """
+ We need two things
+ 1. filter created from
+ project_name
+ case_name
+ installer
+ pod
+ version
+ 2. visualization state
+ field for y axis (metric) with type (avg, sum, etc.)
+ field for x axis (segment) with type (date_histogram)
+
+ :return:
+ """
+ super(KibanaVisualization, self).__init__()
+ vis_state = VisualizationState(detail)
+ self.vis_state_title = vis_state['title']
+ self['title'] = '{} {} {} {} {} {}'.format(project_name,
+ case_name,
+ self.vis_state_title,
+ installer,
+ pod,
+ version)
+ self.id = self['title'].replace(' ', '-').replace('/', '-')
+ self['visState'] = json.dumps(vis_state, separators=(',', ':'))
+ self['uiStateJSON'] = "{}"
+ self['description'] = "Kibana visualization for project_name '{}', case_name '{}', data '{}', installer '{}'," \
+ " pod '{}' and version '{}'".format(project_name,
+ case_name,
+ self.vis_state_title,
+ installer,
+ pod,
+ version)
+ self['version'] = 1
+ self['kibanaSavedObjectMeta'] = {"searchSourceJSON": json.dumps(KibanaSearchSourceJSON(project_name,
+ case_name,
+ installer,
+ pod,
+ version),
+ separators=(',', ':'))}
+
+
+def _get_pods_and_versions(project_name, case_name, installer):
+ query_json = json.JSONEncoder().encode({
+ "query": {
+ "bool": {
+ "must": [
+ {"match_all": {}}
+ ],
+ "filter": [
+ {"match": {"installer": {"query": installer, "type": "phrase"}}},
+ {"match": {"project_name": {"query": project_name, "type": "phrase"}}},
+ {"match": {"case_name": {"query": case_name, "type": "phrase"}}}
+ ]
+ }
+ }
+ })
+
+ elastic_data = shared_utils.get_elastic_data(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
+ es_user, es_passwd, query_json)
+
+ pods_and_versions = {}
+
+ for data in elastic_data:
+ pod = data['pod_name']
+ if pod in pods_and_versions:
+ pods_and_versions[pod].add(data['version'])
+ else:
+ pods_and_versions[pod] = {data['version']}
+
+ if 'all' in pods_and_versions:
+ pods_and_versions['all'].add(data['version'])
+ else:
+ pods_and_versions['all'] = {data['version']}
+
+ return pods_and_versions
+
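+# Shape of the returned mapping (hypothetical pod/version names); the 'all'
+# pseudo-pod accumulates every version seen across pods:
+#
+#   {'ericsson-pod1': {'arno', 'brahmaputra'},
+#    'opnfv-jump-2': {'brahmaputra'},
+#    'all': {'arno', 'brahmaputra'}}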
+
+def construct_dashboards():
+ """
+ iterate over testcase and installer
+ 1. get available pods for each testcase/installer pair
+ 2. get available version for each testcase/installer/pod tuple
+ 3. construct KibanaInput and append
+
+ :return: list of KibanaDashboards
+ """
+ kibana_dashboards = []
+ for project_name, case_name, visualization_details in _testcases:
+ for installer in _installers:
+ pods_and_versions = _get_pods_and_versions(project_name, case_name, installer)
+ for visualization_detail in visualization_details:
+ for pod, versions in pods_and_versions.iteritems():
+ kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, versions,
+ visualization_detail))
+ return kibana_dashboards
+
+
+def generate_js_inputs(js_file_path, kibana_url, dashboards):
+ js_dict = {}
+ for dashboard in dashboards:
+ dashboard_meta = dashboard['metadata']
+ test_family = dashboard_meta['test_family']
+ test_label = dashboard_meta['label']
+
+ if test_family not in js_dict:
+ js_dict[test_family] = {}
+
+ js_test_family = js_dict[test_family]
+
+ if test_label not in js_test_family:
+ js_test_family[test_label] = {}
+
+ js_test_label = js_test_family[test_label]
+
+ if dashboard.installer not in js_test_label:
+ js_test_label[dashboard.installer] = {}
+
+ js_installer = js_test_label[dashboard.installer]
+ js_installer[dashboard.pod] = kibana_url + '#/dashboard/' + dashboard.id
+
+    with open(js_file_path, 'w+') as js_file_fdesc:
+        js_file_fdesc.write('var kibana_dashboard_links = ')
+        # json.dumps yields a valid JavaScript object literal; str() would leak
+        # Python 2 unicode prefixes (u'...') into the generated file
+        js_file_fdesc.write(json.dumps(js_dict))
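+
+# The generated conf.js then holds a single assignment, e.g. (hypothetical
+# dashboard id):
+#
+#   var kibana_dashboard_links = {"VIM": {"Tempest duration": {"fuel": {"all":
+#       "https://testresults.opnfv.org/kibana/app/kibana#/dashboard/<dashboard-id>"}}}}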
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Create Kibana dashboards from data in elasticsearch')
+ parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
+ help='the url of elasticsearch, defaults to http://localhost:9200')
+ parser.add_argument('-js', '--generate_js_inputs', action='store_true',
+ help='Use this argument to generate javascript inputs for kibana landing page')
+ parser.add_argument('--js_path', default='/usr/share/nginx/html/kibana_dashboards/conf.js',
+ help='Path of javascript file with inputs for kibana landing page')
+ parser.add_argument('-k', '--kibana_url', default='https://testresults.opnfv.org/kibana/app/kibana',
+ help='The url of kibana for javascript inputs')
+
+ parser.add_argument('-u', '--elasticsearch-username',
+ help='the username for elasticsearch')
+
+ parser.add_argument('-p', '--elasticsearch-password',
+ help='the password for elasticsearch')
+
+ args = parser.parse_args()
+ base_elastic_url = args.elasticsearch_url
+ generate_inputs = args.generate_js_inputs
+ input_file_path = args.js_path
+ kibana_url = args.kibana_url
+ es_user = args.elasticsearch_username
+ es_passwd = args.elasticsearch_password
+
+ dashboards = construct_dashboards()
+
+ for kibana_dashboard in dashboards:
+ kibana_dashboard.publish()
+
+ if generate_inputs:
+ generate_js_inputs(input_file_path, kibana_url, dashboards)
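+
+# Typical invocation (hypothetical credentials):
+#   python create_kibana_dashboards.py -e http://localhost:9200 -u admin -p secret -js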
diff --git a/scripts/kibana_cleanup.py b/scripts/kibana_cleanup.py
new file mode 100644
index 0000000..2cd52af
--- /dev/null
+++ b/scripts/kibana_cleanup.py
@@ -0,0 +1,43 @@
+#! /usr/bin/env python
+import logging
+import argparse
+import shared_utils
+import json
+import urlparse
+
+logger = logging.getLogger('clear_kibana')
+logger.setLevel(logging.DEBUG)
+file_handler = logging.FileHandler('/var/log/kibana_cleanup.log')
+file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
+logger.addHandler(file_handler)
+
+
+def delete_all(url, es_user, es_passwd):
+ ids = shared_utils.get_elastic_data(url, es_user, es_passwd, body=None, field='_id')
+ for id in ids:
+ del_url = '/'.join([url, id])
+ shared_utils.delete_request(del_url, es_user, es_passwd)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Delete saved kibana searches, visualizations and dashboards')
+ parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
+ help='the url of elasticsearch, defaults to http://localhost:9200')
+
+ parser.add_argument('-u', '--elasticsearch-username',
+ help='the username for elasticsearch')
+
+ parser.add_argument('-p', '--elasticsearch-password',
+ help='the password for elasticsearch')
+
+ args = parser.parse_args()
+ base_elastic_url = args.elasticsearch_url
+ es_user = args.elasticsearch_username
+ es_passwd = args.elasticsearch_password
+
+ urls = (urlparse.urljoin(base_elastic_url, '/.kibana/visualization'),
+ urlparse.urljoin(base_elastic_url, '/.kibana/dashboard'),
+ urlparse.urljoin(base_elastic_url, '/.kibana/search'))
+
+ for url in urls:
+ delete_all(url, es_user, es_passwd)
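+
+# Typical invocation (hypothetical credentials):
+#   python kibana_cleanup.py -e http://localhost:9200 -u admin -p secret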
diff --git a/scripts/mongo_to_elasticsearch.py b/scripts/mongo_to_elasticsearch.py
new file mode 100644
index 0000000..51b6913
--- /dev/null
+++ b/scripts/mongo_to_elasticsearch.py
@@ -0,0 +1,448 @@
+#! /usr/bin/env python
+import logging
+import argparse
+import shared_utils
+import json
+import urlparse
+import uuid
+import os
+import subprocess
+import datetime
+
+logger = logging.getLogger('mongo_to_elasticsearch')
+logger.setLevel(logging.DEBUG)
+file_handler = logging.FileHandler('/var/log/mongo_to_elasticsearch.log')
+file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
+logger.addHandler(file_handler)
+
+
+def _get_dicts_from_list(dict_list, keys):
+ dicts = []
+ for dictionary in dict_list:
+ # iterate over dictionaries in input list
+ if keys == set(dictionary.keys()):
+ # check the dictionary structure
+ dicts.append(dictionary)
+ return dicts
+
+
+def _get_results_from_list_of_dicts(list_of_dict_statuses, dict_indexes, expected_results=None):
+ test_results = {}
+ for test_status in list_of_dict_statuses:
+ status = test_status
+ for index in dict_indexes:
+ status = status[index]
+ if status in test_results:
+ test_results[status] += 1
+ else:
+ test_results[status] = 1
+
+ if expected_results is not None:
+ for expected_result in expected_results:
+ if expected_result not in test_results:
+ test_results[expected_result] = 0
+
+ return test_results
+
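+# Example (illustrative): counting ODL-style statuses nested under
+# 'test_status':
+#
+#   _get_results_from_list_of_dicts(
+#       [{'test_status': {'@status': 'PASS'}},
+#        {'test_status': {'@status': 'PASS'}}],
+#       ('test_status', '@status'), ('PASS', 'FAIL'))
+#   -> {'PASS': 2, 'FAIL': 0}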
+
+def _convert_duration(duration):
+    if isinstance(duration, basestring) and ':' in duration:
+ hours, minutes, seconds = duration.split(":")
+ int_duration = 3600 * int(hours) + 60 * int(minutes) + float(seconds)
+ else:
+ int_duration = duration
+ return int_duration
+
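+# Example: _convert_duration('1:02:03.50') -> 3723.5; numeric inputs
+# (e.g. 42) pass through unchanged.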
+
+def modify_functest_tempest(testcase):
+ if modify_default_entry(testcase):
+ testcase_details = testcase['details']
+ testcase_tests = float(testcase_details['tests'])
+ testcase_failures = float(testcase_details['failures'])
+ if testcase_tests != 0:
+ testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests
+ else:
+ testcase_details['success_percentage'] = 0
+ return True
+ else:
+ return False
+
+
+def modify_functest_vims(testcase):
+ """
+ Structure:
+ details.sig_test.result.[{result}]
+ details.sig_test.duration
+ details.vIMS.duration
+ details.orchestrator.duration
+
+ Find data for these fields
+ -> details.sig_test.duration
+ -> details.sig_test.tests
+ -> details.sig_test.failures
+ -> details.sig_test.passed
+ -> details.sig_test.skipped
+ -> details.vIMS.duration
+ -> details.orchestrator.duration
+ """
+ testcase_details = testcase['details']
+ sig_test_results = _get_dicts_from_list(testcase_details['sig_test']['result'],
+ {'duration', 'result', 'name', 'error'})
+ if len(sig_test_results) < 1:
+ logger.info("No 'result' from 'sig_test' found in vIMS details, skipping")
+ return False
+ else:
+ test_results = _get_results_from_list_of_dicts(sig_test_results, ('result',), ('Passed', 'Skipped', 'Failed'))
+ passed = test_results['Passed']
+ skipped = test_results['Skipped']
+ failures = test_results['Failed']
+ all_tests = passed + skipped + failures
+ testcase['details'] = {
+ 'sig_test': {
+ 'duration': testcase_details['sig_test']['duration'],
+ 'tests': all_tests,
+ 'failures': failures,
+ 'passed': passed,
+ 'skipped': skipped
+ },
+ 'vIMS': {
+ 'duration': testcase_details['vIMS']['duration']
+ },
+ 'orchestrator': {
+ 'duration': testcase_details['orchestrator']['duration']
+ }
+ }
+ return True
+
+
+def modify_functest_onos(testcase):
+ """
+ Structure:
+ details.FUNCvirNet.duration
+ details.FUNCvirNet.status.[{Case result}]
+ details.FUNCvirNetL3.duration
+ details.FUNCvirNetL3.status.[{Case result}]
+
+ Find data for these fields
+ -> details.FUNCvirNet.duration
+ -> details.FUNCvirNet.tests
+ -> details.FUNCvirNet.failures
+ -> details.FUNCvirNetL3.duration
+ -> details.FUNCvirNetL3.tests
+ -> details.FUNCvirNetL3.failures
+ """
+ testcase_details = testcase['details']
+
+ funcvirnet_details = testcase_details['FUNCvirNet']['status']
+ funcvirnet_statuses = _get_dicts_from_list(funcvirnet_details, {'Case result', 'Case name:'})
+
+ funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status']
+ funcvirnetl3_statuses = _get_dicts_from_list(funcvirnetl3_details, {'Case result', 'Case name:'})
+
+    if len(funcvirnet_statuses) < 1:
+ logger.info("No results found in 'FUNCvirNet' part of ONOS results")
+ return False
+    elif len(funcvirnetl3_statuses) < 1:
+ logger.info("No results found in 'FUNCvirNetL3' part of ONOS results")
+ return False
+ else:
+ funcvirnet_results = _get_results_from_list_of_dicts(funcvirnet_statuses,
+ ('Case result',), ('PASS', 'FAIL'))
+ funcvirnetl3_results = _get_results_from_list_of_dicts(funcvirnetl3_statuses,
+ ('Case result',), ('PASS', 'FAIL'))
+
+ funcvirnet_passed = funcvirnet_results['PASS']
+ funcvirnet_failed = funcvirnet_results['FAIL']
+ funcvirnet_all = funcvirnet_passed + funcvirnet_failed
+
+ funcvirnetl3_passed = funcvirnetl3_results['PASS']
+ funcvirnetl3_failed = funcvirnetl3_results['FAIL']
+ funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed
+
+ testcase_details['FUNCvirNet'] = {
+ 'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']),
+ 'tests': funcvirnet_all,
+ 'failures': funcvirnet_failed
+ }
+
+ testcase_details['FUNCvirNetL3'] = {
+ 'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']),
+ 'tests': funcvirnetl3_all,
+ 'failures': funcvirnetl3_failed
+ }
+
+ return True
+
+
+def modify_functest_rally(testcase):
+ """
+ Structure:
+ details.[{summary.duration}]
+ details.[{summary.nb success}]
+ details.[{summary.nb tests}]
+
+ Find data for these fields
+ -> details.duration
+ -> details.tests
+ -> details.success_percentage
+ """
+ summaries = _get_dicts_from_list(testcase['details'], {'summary'})
+
+ if len(summaries) != 1:
+ logger.info("Found zero or more than one 'summaries' in Rally details, skipping")
+ return False
+ else:
+ summary = summaries[0]['summary']
+ testcase['details'] = {
+ 'duration': summary['duration'],
+ 'tests': summary['nb tests'],
+ 'success_percentage': summary['nb success']
+ }
+ return True
+
+
+def modify_functest_odl(testcase):
+ """
+ Structure:
+ details.details.[{test_status.@status}]
+
+ Find data for these fields
+ -> details.tests
+ -> details.failures
+ -> details.success_percentage?
+ """
+ test_statuses = _get_dicts_from_list(testcase['details']['details'], {'test_status', 'test_doc', 'test_name'})
+ if len(test_statuses) < 1:
+ logger.info("No 'test_status' found in ODL details, skipping")
+ return False
+ else:
+ test_results = _get_results_from_list_of_dicts(test_statuses, ('test_status', '@status'), ('PASS', 'FAIL'))
+
+ passed_tests = test_results['PASS']
+ failed_tests = test_results['FAIL']
+ all_tests = passed_tests + failed_tests
+
+ testcase['details'] = {
+ 'tests': all_tests,
+ 'failures': failed_tests,
+ 'success_percentage': 100 * passed_tests / float(all_tests)
+ }
+ return True
+
+
+def modify_default_entry(testcase):
+ """
+ Look for these and leave any of those:
+ details.duration
+ details.tests
+ details.failures
+
+ If none are present, then return False
+ """
+ found = False
+ testcase_details = testcase['details']
+ fields = ['duration', 'tests', 'failures']
+ if isinstance(testcase_details, dict):
+ for key, value in testcase_details.items():
+ if key in fields:
+ found = True
+ if key == 'duration':
+ testcase_details[key] = _convert_duration(value)
+ else:
+ del testcase_details[key]
+
+ return found
+
+
+def _fix_date(date_string):
+ if isinstance(date_string, dict):
+ return date_string['$date']
+ else:
+ return date_string[:-3].replace(' ', 'T') + 'Z'
+
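+# Example (assuming the plain-string form carries a two-digit fraction):
+#   _fix_date('2016-01-22 13:12:10.00') -> '2016-01-22T13:12:10Z'
+#   _fix_date({'$date': '2016-01-22T13:12:10Z'}) -> '2016-01-22T13:12:10Z'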
+
+def verify_mongo_entry(testcase):
+ """
+ Mandatory fields:
+ installer
+ pod_name
+ version
+ case_name
+ date
+ project
+ details
+
+ these fields must be present and must NOT be None
+
+ Optional fields:
+ description
+
+ these fields will be preserved if the are NOT None
+ """
+ mandatory_fields = ['installer',
+ 'pod_name',
+ 'version',
+ 'case_name',
+ 'project_name',
+ 'details']
+ mandatory_fields_to_modify = {'creation_date': _fix_date}
+ if '_id' in testcase:
+ mongo_id = testcase['_id']
+ else:
+ mongo_id = None
+ optional_fields = ['description']
+ for key, value in testcase.items():
+ if key in mandatory_fields:
+ if value is None:
+ # empty mandatory field, invalid input
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing value"
+ " for mandatory field '{}'".format(mongo_id, key))
+ return False
+ else:
+ mandatory_fields.remove(key)
+ elif key in mandatory_fields_to_modify:
+ if value is None:
+ # empty mandatory field, invalid input
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing value"
+ " for mandatory field '{}'".format(mongo_id, key))
+ return False
+ else:
+ testcase[key] = mandatory_fields_to_modify[key](value)
+ del mandatory_fields_to_modify[key]
+ elif key in optional_fields:
+ if value is None:
+ # empty optional field, remove
+ del testcase[key]
+ optional_fields.remove(key)
+ else:
+ # unknown field
+ del testcase[key]
+
+ if len(mandatory_fields) > 0:
+ # some mandatory fields are missing
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
+ " mandatory field(s) '{}'".format(mongo_id, mandatory_fields))
+ return False
+ else:
+ return True
+
+
+def modify_mongo_entry(testcase):
+ # 1. verify and identify the testcase
+ # 2. if modification is implemented, then use that
+ # 3. if not, try to use default
+ # 4. if 2 or 3 is successful, return True, otherwise return False
+ if verify_mongo_entry(testcase):
+ project = testcase['project_name']
+ case_name = testcase['case_name']
+ if project == 'functest':
+ if case_name == 'Rally':
+ return modify_functest_rally(testcase)
+ elif case_name == 'ODL':
+ return modify_functest_odl(testcase)
+ elif case_name == 'ONOS':
+ return modify_functest_onos(testcase)
+ elif case_name == 'vIMS':
+ return modify_functest_vims(testcase)
+ elif case_name == 'Tempest':
+ return modify_functest_tempest(testcase)
+ return modify_default_entry(testcase)
+ else:
+ return False
+
+
+def publish_mongo_data(output_destination):
+ tmp_filename = 'mongo-{}.log'.format(uuid.uuid4())
+ try:
+ subprocess.check_call(['mongoexport', '--db', 'test_results_collection', '-c', 'test_results', '--out',
+ tmp_filename])
+ with open(tmp_filename) as fobj:
+ for mongo_json_line in fobj:
+ test_result = json.loads(mongo_json_line)
+ if modify_mongo_entry(test_result):
+                    shared_utils.publish_json(test_result, es_user, es_passwd, output_destination)
+ finally:
+ if os.path.exists(tmp_filename):
+ os.remove(tmp_filename)
+
+
+def get_mongo_data(days):
+ past_time = datetime.datetime.today() - datetime.timedelta(days=days)
+ mongo_json_lines = subprocess.check_output(['mongoexport', '--db', 'test_results_collection', '-c', 'test_results',
+ '--query', '{{"creation_date":{{$gt:"{}"}}}}'
+ .format(past_time)]).splitlines()
+
+ mongo_data = []
+ for mongo_json_line in mongo_json_lines:
+ test_result = json.loads(mongo_json_line)
+ if modify_mongo_entry(test_result):
+ # if the modification could be applied, append the modified result
+ mongo_data.append(test_result)
+ return mongo_data
+
+
+def publish_difference(mongo_data, elastic_data, output_destination, es_user, es_passwd):
+ for elastic_entry in elastic_data:
+ if elastic_entry in mongo_data:
+ mongo_data.remove(elastic_entry)
+
+ logger.info('number of parsed test results: {}'.format(len(mongo_data)))
+
+ for parsed_test_result in mongo_data:
+ shared_utils.publish_json(parsed_test_result, es_user, es_passwd, output_destination)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Modify and filter mongo json data for elasticsearch')
+ parser.add_argument('-od', '--output-destination',
+ default='elasticsearch',
+ choices=('elasticsearch', 'stdout'),
+ help='defaults to elasticsearch')
+
+    parser.add_argument('-ml', '--merge-latest', default=0, type=int, metavar='N',
+                        help='get entries at most N days old from mongodb and'
+                             ' parse those that are not already in elasticsearch.'
+                             ' If not given, everything is fetched from mongodb (the default)')
+
+ parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
+ help='the url of elasticsearch, defaults to http://localhost:9200')
+
+ parser.add_argument('-u', '--elasticsearch-username',
+ help='the username for elasticsearch')
+
+ parser.add_argument('-p', '--elasticsearch-password',
+ help='the password for elasticsearch')
+
+ parser.add_argument('-m', '--mongodb-url', default='http://localhost:8082',
+ help='the url of mongodb, defaults to http://localhost:8082')
+
+ args = parser.parse_args()
+ base_elastic_url = urlparse.urljoin(args.elasticsearch_url, '/test_results/mongo2elastic')
+ output_destination = args.output_destination
+ days = args.merge_latest
+ es_user = args.elasticsearch_username
+ es_passwd = args.elasticsearch_password
+
+ if output_destination == 'elasticsearch':
+ output_destination = base_elastic_url
+
+ # parsed_test_results will be printed/sent to elasticsearch
+    if days == 0:
+        # export everything from mongo and publish it
+        publish_mongo_data(output_destination)
+ elif days > 0:
+ body = '''{{
+ "query" : {{
+ "range" : {{
+ "creation_date" : {{
+ "gte" : "now-{}d"
+ }}
+ }}
+ }}
+}}'''.format(days)
+ elastic_data = shared_utils.get_elastic_data(base_elastic_url, es_user, es_passwd, body)
+ logger.info('number of hits in elasticsearch for now-{}d: {}'.format(days, len(elastic_data)))
+ mongo_data = get_mongo_data(days)
+ publish_difference(mongo_data, elastic_data, output_destination, es_user, es_passwd)
+ else:
+        raise Exception("'--merge-latest' must be non-negative")
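+
+# Typical invocations (hypothetical credentials):
+#   python mongo_to_elasticsearch.py                            # full export
+#   python mongo_to_elasticsearch.py -ml 7 -u admin -p secret   # last 7 days only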
diff --git a/scripts/shared_utils.py b/scripts/shared_utils.py
new file mode 100644
index 0000000..899f844
--- /dev/null
+++ b/scripts/shared_utils.py
@@ -0,0 +1,36 @@
+import urllib3
+import json
+http = urllib3.PoolManager()
+
+
+def delete_request(url, username, password, body=None):
+ headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+ http.request('DELETE', url, headers=headers, body=body)
+
+
+def publish_json(json_obj, username, password, output_destination):
+    json_dump = json.dumps(json_obj)
+ if output_destination == 'stdout':
+ print json_dump
+ else:
+ headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+ http.request('POST', output_destination, headers=headers, body=json_dump)
+
+
+def _get_nr_of_hits(elastic_json):
+ return elastic_json['hits']['total']
+
+
+def get_elastic_data(elastic_url, username, password, body, field='_source'):
+ # 1. get the number of results
+ headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+ elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size=0', headers=headers, body=body).data)
+ nr_of_hits = _get_nr_of_hits(elastic_json)
+
+ # 2. get all results
+    elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size={}'.format(nr_of_hits),
+                                           headers=headers, body=body).data)
+
+ elastic_data = []
+ for hit in elastic_json['hits']['hits']:
+ elastic_data.append(hit[field])
+ return elastic_data
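+
+# Usage sketch (hypothetical url/credentials), e.g. fetching all saved
+# dashboard ids from the .kibana index:
+#
+#   ids = get_elastic_data('http://localhost:9200/.kibana/dashboard',
+#                          'admin', 'secret', body=None, field='_id')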