author     SerenaFeng <feng.xiaowei@zte.com.cn>  2016-09-22 16:15:58 +0800
committer  SerenaFeng <feng.xiaowei@zte.com.cn>  2016-09-22 16:15:58 +0800
commit     39d1381bb4b7d6041a8f05b198db911c9e583da5 (patch)
tree       49faaca654d96fc9f47a7702ae9ae90cd985af2e /dashboard
parent     71edb3b92cbbf4a4f648dc4442efea29b76a157e (diff)
rebuild directory structure of Kibana dashboard
JIRA: FUNCTEST-465

Change-Id: Icecd350b2f67105c8aaa9d71fd76d24827515545
Signed-off-by: SerenaFeng <feng.xiaowei@zte.com.cn>
Diffstat (limited to 'dashboard')
-rw-r--r--  dashboard/backup-db.sh                          33
-rw-r--r--  dashboard/dashboard/__init__.py                  0
-rw-r--r--  dashboard/dashboard/common/__init__.py           0
-rw-r--r--  dashboard/dashboard/common/elastic_access.py    64
-rw-r--r--  dashboard/dashboard/common/logger_utils.py      65
-rw-r--r--  dashboard/dashboard/conf/__init__.py             0
-rw-r--r--  dashboard/dashboard/conf/config.py              88
-rw-r--r--  dashboard/dashboard/conf/testcases.py           24
-rw-r--r--  dashboard/dashboard/elastic2kibana/__init__.py   0
-rw-r--r--  dashboard/dashboard/elastic2kibana/main.py     373
-rw-r--r--  dashboard/dashboard/elastic2kibana_main.py       4
-rw-r--r--  dashboard/dashboard/functest/__init__.py         0
-rw-r--r--  dashboard/dashboard/functest/testcases.yaml    138
-rw-r--r--  dashboard/dashboard/mongo2elastic/__init__.py    0
-rw-r--r--  dashboard/dashboard/mongo2elastic/format.py    186
-rw-r--r--  dashboard/dashboard/mongo2elastic/main.py      243
-rw-r--r--  dashboard/dashboard/mongo2elastic_main.py        4
-rw-r--r--  dashboard/etc/config.ini                        14
-rw-r--r--  dashboard/kibana_cleanup.py                     41
19 files changed, 1277 insertions, 0 deletions
diff --git a/dashboard/backup-db.sh b/dashboard/backup-db.sh
new file mode 100644
index 0000000..35c3fbe
--- /dev/null
+++ b/dashboard/backup-db.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Orange and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+echo "Backup Test collection DB"
+now=$(date +"%m_%d_%Y_%H_%M_%S")
+echo $now
+echo " ------------- "
+TARGET_DIR=./$now
+TEST_RESULT_DB_BACKUP="test_collection_db.${now}.tar.gz"
+
+echo "Create Directory for backup"
+mkdir -p $TARGET_DIR
+
+echo "Export results"
+mongoexport --db test_results_collection -c results --out $TARGET_DIR/backup-results.json
+echo "Export test cases"
+mongoexport --db test_results_collection -c testcases --out $TARGET_DIR/backup-cases.json
+echo "Export projects"
+mongoexport --db test_results_collection -c projects --out $TARGET_DIR/backup-projects.json
+echo "Export pods"
+mongoexport --db test_results_collection -c pods --out $TARGET_DIR/backup-pod.json
+
+echo "Create tar.gz"
+#tar -cvzf $TEST_RESULT_DB_BACKUP $TARGET_DIR
+
+echo "Delete temp directory"
+#rm -Rf $TARGET_DIR
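
A matching restore sketch for the files this script produces (assuming a default mongod on localhost; <timestamp> stands for whatever directory the run created):

    mongoimport --db test_results_collection -c results --file ./<timestamp>/backup-results.json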
diff --git a/dashboard/dashboard/__init__.py b/dashboard/dashboard/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dashboard/dashboard/__init__.py
diff --git a/dashboard/dashboard/common/__init__.py b/dashboard/dashboard/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dashboard/dashboard/common/__init__.py
diff --git a/dashboard/dashboard/common/elastic_access.py b/dashboard/dashboard/common/elastic_access.py
new file mode 100644
index 0000000..e90a17f
--- /dev/null
+++ b/dashboard/dashboard/common/elastic_access.py
@@ -0,0 +1,64 @@
+import json
+
+import urllib3
+
+http = urllib3.PoolManager()
+
+
+def delete_request(url, creds, body=None):
+ headers = urllib3.make_headers(basic_auth=creds)
+ http.request('DELETE', url, headers=headers, body=body)
+
+
+def publish_json(json_obj, creds, to):
+    json_dump = json.dumps(json_obj)
+ if to == 'stdout':
+ print json_dump
+ return 200, None
+ else:
+ headers = urllib3.make_headers(basic_auth=creds)
+ result = http.request('POST', to, headers=headers, body=json_dump)
+ return result.status, result.data
+
+
+def _get_nr_of_hits(elastic_json):
+ return elastic_json['hits']['total']
+
+
+def get_elastic_docs(elastic_url, creds, body=None, field='_source'):
+
+ # 1. get the number of results
+ headers = urllib3.make_headers(basic_auth=creds)
+ elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size=0', headers=headers, body=body).data)
+ print elastic_json
+ nr_of_hits = _get_nr_of_hits(elastic_json)
+
+ # 2. get all results
+ elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size={}'.format(nr_of_hits), headers=headers, body=body).data)
+
+ elastic_docs = []
+ for hit in elastic_json['hits']['hits']:
+ elastic_docs.append(hit[field])
+ return elastic_docs
+
+
+def get_elastic_docs_by_days(elastic_url, creds, days):
+ if days == 0:
+ body = '''{
+ "query": {
+ "match_all": {}
+ }
+ }'''
+ elif days > 0:
+ body = '''{{
+ "query" : {{
+ "range" : {{
+ "start_date" : {{
+ "gte" : "now-{}d"
+ }}
+ }}
+ }}
+ }}'''.format(days)
+ else:
+ raise Exception('Update days must be non-negative')
+ return get_elastic_docs(elastic_url, creds, body)
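
A minimal usage sketch for elastic_access (Python 2; assumes an unauthenticated Elasticsearch on localhost and the index path used elsewhere in this change):

    from common import elastic_access

    creds = None  # or 'user:password' for basic auth
    index_url = 'http://localhost:9200/test_results/mongo2elastic'

    # dry run: passing 'stdout' as the destination prints instead of POSTing
    status, data = elastic_access.publish_json({'case_name': 'vping_ssh'}, creds, 'stdout')

    # fetch every document stored during the last 7 days
    docs = elastic_access.get_elastic_docs_by_days(index_url, creds, 7)
    print len(docs)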
diff --git a/dashboard/dashboard/common/logger_utils.py b/dashboard/dashboard/common/logger_utils.py
new file mode 100644
index 0000000..1830808
--- /dev/null
+++ b/dashboard/dashboard/common/logger_utils.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Logging levels:
+# Level Numeric value
+# CRITICAL 50
+# ERROR 40
+# WARNING 30
+# INFO 20
+# DEBUG 10
+# NOTSET 0
+#
+# Usage:
+# import logger_utils
+# logger = logger_utils.Logger("script_name").get
+# logger.info("message to be shown with - INFO - ")
+# logger.debug("message to be shown with - DEBUG -")
+
+import logging
+import os
+
+
+class Logger(object):
+ file_path = '/var/log'
+ formatter = logging.Formatter('%(asctime)s - %(name)s - '
+ '%(levelname)s - %(message)s')
+
+ def __init__(self, logger_name):
+
+ IF_DEBUG = os.getenv('IF_DEBUG')
+
+ self.logger_name = logger_name
+ self.logger = logging.getLogger(logger_name)
+ self.logger.propagate = 0
+ self.logger.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ ch.setFormatter(self.formatter)
+ if IF_DEBUG is not None and IF_DEBUG.lower() == "true":
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+ self.logger.addHandler(ch)
+
+ hdlr = logging.FileHandler('%s/%s.log' % (self.file_path, logger_name))
+ hdlr.setFormatter(self.formatter)
+ hdlr.setLevel(logging.DEBUG)
+ self.logger.addHandler(hdlr)
+
+ @property
+ def get(self):
+ return self.logger
+
+
+class DashboardLogger(Logger):
+ file_path = '/var/log/kibana_dashboard'
+
+ def __init__(self, logger_name):
+ super(DashboardLogger, self).__init__(logger_name)
+
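
Usage sketch (the get property returns the configured logging.Logger; /var/log/kibana_dashboard must exist and be writable):

    from common import logger_utils

    logger = logger_utils.DashboardLogger('elastic2kibana').get
    logger.info("message to be shown with - INFO -")
    logger.debug("message to be shown with - DEBUG -")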
diff --git a/dashboard/dashboard/conf/__init__.py b/dashboard/dashboard/conf/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dashboard/dashboard/conf/__init__.py
diff --git a/dashboard/dashboard/conf/config.py b/dashboard/dashboard/conf/config.py
new file mode 100644
index 0000000..2e0f1ca
--- /dev/null
+++ b/dashboard/dashboard/conf/config.py
@@ -0,0 +1,88 @@
+#! /usr/bin/env python
+
+from ConfigParser import SafeConfigParser, NoOptionError
+
+
+class ParseError(Exception):
+ """
+ Custom exception class for config file
+ """
+
+ def __init__(self, message):
+ self.msg = message
+
+ def __str__(self):
+ return 'error parsing config file : %s' % self.msg
+
+
+class APIConfig:
+ """
+ The purpose of this class is to load values correctly from the config file.
+ Each key is declared as an attribute in __init__() and linked in parse()
+ """
+
+ def __init__(self):
+ self._default_config_location = "../etc/config.ini"
+ self.elastic_url = 'http://localhost:9200'
+ self.elastic_creds = None
+ self.destination = 'elasticsearch'
+ self.kibana_url = None
+ self.is_js = True
+ self.js_path = None
+
+ def _get_str_parameter(self, section, param):
+ try:
+ return self._parser.get(section, param)
+ except NoOptionError:
+ raise ParseError("[%s.%s] parameter not found" % (section, param))
+
+ def _get_int_parameter(self, section, param):
+ try:
+ return int(self._get_str_parameter(section, param))
+ except ValueError:
+ raise ParseError("[%s.%s] not an int" % (section, param))
+
+ def _get_bool_parameter(self, section, param):
+ result = self._get_str_parameter(section, param)
+ if str(result).lower() == 'true':
+ return True
+ if str(result).lower() == 'false':
+ return False
+
+ raise ParseError(
+ "[%s.%s : %s] not a boolean" % (section, param, result))
+
+ @staticmethod
+ def parse(config_location=None):
+ obj = APIConfig()
+
+ if config_location is None:
+ config_location = obj._default_config_location
+
+        obj._parser = SafeConfigParser()
+        # read() returns the list of files successfully parsed; the parser
+        # object itself is always truthy, so test the return value instead
+        if not obj._parser.read(config_location):
+            raise ParseError("%s not found" % config_location)
+
+ # Linking attributes to keys from file with their sections
+ obj.elastic_url = obj._get_str_parameter("elastic", "url")
+ obj.elastic_creds = obj._get_str_parameter("elastic", "creds")
+ obj.destination = obj._get_str_parameter("output", "destination")
+ obj.kibana_url = obj._get_str_parameter("kibana", "url")
+ obj.is_js = obj._get_bool_parameter("kibana", "js")
+ obj.js_path = obj._get_str_parameter("kibana", "js_path")
+
+ return obj
+
+ def __str__(self):
+ return "elastic_url = %s \n" \
+ "elastic_creds = %s \n" \
+ "destination = %s \n" \
+ "kibana_url = %s \n" \
+ "is_js = %s \n" \
+ "js_path = %s \n" % (self.elastic_url,
+ self.elastic_creds,
+ self.destination,
+ self.kibana_url,
+ self.is_js,
+ self.js_path)
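
A short sketch of loading the shipped config (Python 2; run from dashboard/dashboard so the default relative path ../etc/config.ini resolves):

    from conf.config import APIConfig

    CONF = APIConfig.parse()  # falls back to ../etc/config.ini
    print CONF.elastic_url    # http://localhost:9200
    print CONF.is_js          # True
    print CONF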
diff --git a/dashboard/dashboard/conf/testcases.py b/dashboard/dashboard/conf/testcases.py
new file mode 100644
index 0000000..e120987
--- /dev/null
+++ b/dashboard/dashboard/conf/testcases.py
@@ -0,0 +1,24 @@
+import yaml
+
+
+with open('./functest/testcases.yaml') as f:
+    testcases_yaml = yaml.safe_load(f)
+
+
+def compose_format(fmt):
+ return 'format_' + fmt.strip()
+
+
+def get_format(project, case):
+ testcases = testcases_yaml.get(project)
+ if isinstance(testcases, list):
+ for case_dict in testcases:
+ if case_dict['name'] == case:
+ return compose_format(case_dict['format'])
+ return None
+
+
+if __name__ == '__main__':
+ fmt = get_format('functest', 'vping_ssh')
+ print fmt
diff --git a/dashboard/dashboard/elastic2kibana/__init__.py b/dashboard/dashboard/elastic2kibana/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dashboard/dashboard/elastic2kibana/__init__.py
diff --git a/dashboard/dashboard/elastic2kibana/main.py b/dashboard/dashboard/elastic2kibana/main.py
new file mode 100644
index 0000000..c1cbc30
--- /dev/null
+++ b/dashboard/dashboard/elastic2kibana/main.py
@@ -0,0 +1,373 @@
+#! /usr/bin/env python
+import json
+import urlparse
+
+import argparse
+
+from common import logger_utils, elastic_access
+from conf import testcases
+from conf.config import APIConfig
+
+logger = logger_utils.DashboardLogger('elastic2kibana').get
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--config-file",
+ dest='config_file',
+ help="Config file location")
+
+args = parser.parse_args()
+CONF = APIConfig().parse(args.config_file)
+base_elastic_url = CONF.elastic_url
+generate_inputs = CONF.is_js
+input_file_path = CONF.js_path
+kibana_url = CONF.kibana_url
+es_creds = CONF.elastic_creds
+
+_installers = {'fuel', 'apex', 'compass', 'joid'}
+
+
+class KibanaDashboard(dict):
+ def __init__(self, project_name, case_name, family, installer, pod, scenarios, visualization):
+ super(KibanaDashboard, self).__init__()
+ self.project_name = project_name
+ self.case_name = case_name
+ self.family = family
+ self.installer = installer
+ self.pod = pod
+ self.scenarios = scenarios
+ self.visualization = visualization
+ self._visualization_title = None
+ self._kibana_visualizations = []
+ self._kibana_dashboard = None
+ self._create_visualizations()
+ self._create()
+
+ def _create_visualizations(self):
+ for scenario in self.scenarios:
+ self._kibana_visualizations.append(KibanaVisualization(self.project_name,
+ self.case_name,
+ self.installer,
+ self.pod,
+ scenario,
+ self.visualization))
+
+ self._visualization_title = self._kibana_visualizations[0].vis_state_title
+
+ def _publish_visualizations(self):
+ for visualization in self._kibana_visualizations:
+ url = urlparse.urljoin(base_elastic_url, '/.kibana/visualization/{}'.format(visualization.id))
+ logger.debug("publishing visualization '{}'".format(url))
+ elastic_access.publish_json(visualization, es_creds, url)
+
+ def _construct_panels(self):
+ size_x = 6
+ size_y = 3
+ max_columns = 7
+ column = 1
+ row = 1
+ panel_index = 1
+ panels_json = []
+ for visualization in self._kibana_visualizations:
+ panels_json.append({
+ "id": visualization.id,
+ "type": 'visualization',
+ "panelIndex": panel_index,
+ "size_x": size_x,
+ "size_y": size_y,
+ "col": column,
+ "row": row
+ })
+ panel_index += 1
+ column += size_x
+ if column > max_columns:
+ column = 1
+ row += size_y
+ return json.dumps(panels_json, separators=(',', ':'))
+
+ def _create(self):
+ self['title'] = '{} {} {} {} {}'.format(self.project_name,
+ self.case_name,
+ self.installer,
+ self._visualization_title,
+ self.pod)
+ self.id = self['title'].replace(' ', '-').replace('/', '-')
+
+ self['hits'] = 0
+ self['description'] = "Kibana dashboard for project_name '{}', case_name '{}', installer '{}', data '{}' and" \
+ " pod '{}'".format(self.project_name,
+ self.case_name,
+ self.installer,
+ self._visualization_title,
+ self.pod)
+ self['panelsJSON'] = self._construct_panels()
+ self['optionsJSON'] = json.dumps({
+ "darkTheme": False
+ },
+ separators=(',', ':'))
+ self['uiStateJSON'] = "{}"
+ self['scenario'] = 1
+ self['timeRestore'] = False
+ self['kibanaSavedObjectMeta'] = {
+ 'searchSourceJSON': json.dumps({
+ "filter": [
+ {
+ "query": {
+ "query_string": {
+ "query": "*",
+ "analyze_wildcard": True
+ }
+ }
+ }
+ ]
+ },
+ separators=(',', ':'))
+ }
+
+ label = self.case_name
+ if 'label' in self.visualization:
+ label += " %s" % self.visualization.get('label')
+ label += " %s" % self.visualization.get('name')
+ self['metadata'] = {
+ "label": label,
+ "test_family": self.family
+ }
+
+ def _publish(self):
+ url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
+ logger.debug("publishing dashboard '{}'".format(url))
+ elastic_access.publish_json(self, es_creds, url)
+
+ def publish(self):
+ self._publish_visualizations()
+ self._publish()
+
+
+class KibanaSearchSourceJSON(dict):
+ """
+ "filter": [
+ {"match": {"installer": {"query": installer, "type": "phrase"}}},
+ {"match": {"project_name": {"query": project_name, "type": "phrase"}}},
+ {"match": {"case_name": {"query": case_name, "type": "phrase"}}}
+ ]
+ """
+
+ def __init__(self, project_name, case_name, installer, pod, scenario):
+ super(KibanaSearchSourceJSON, self).__init__()
+ self["filter"] = [
+ {"match": {"project_name": {"query": project_name, "type": "phrase"}}},
+ {"match": {"case_name": {"query": case_name, "type": "phrase"}}},
+ {"match": {"installer": {"query": installer, "type": "phrase"}}},
+ {"match": {"scenario": {"query": scenario, "type": "phrase"}}}
+ ]
+ if pod != 'all':
+ self["filter"].append({"match": {"pod_name": {"query": pod, "type": "phrase"}}})
+
+
+class VisualizationState(dict):
+ def __init__(self, visualization):
+ super(VisualizationState, self).__init__()
+ name = visualization.get('name')
+ fields = visualization.get('fields')
+
+ if name == 'tests_failures':
+ mode = 'grouped'
+ metric_type = 'sum'
+ self['type'] = 'histogram'
+ else:
+ # duration or success_percentage
+ mode = 'stacked'
+ metric_type = 'avg'
+ self['type'] = 'line'
+
+ self['params'] = {
+ "shareYAxis": True,
+ "addTooltip": True,
+ "addLegend": True,
+ "smoothLines": False,
+ "scale": "linear",
+ "interpolate": "linear",
+ "mode": mode,
+ "times": [],
+ "addTimeMarker": False,
+ "defaultYExtents": False,
+ "setYExtents": False,
+ "yAxis": {}
+ }
+
+ self['aggs'] = []
+
+ i = 1
+ for field in fields:
+ self['aggs'].append({
+ "id": str(i),
+ "type": metric_type,
+ "schema": "metric",
+ "params": {
+ "field": field.get('field')
+ }
+ })
+ i += 1
+
+ self['aggs'].append({
+ "id": str(i),
+ "type": 'date_histogram',
+ "schema": "segment",
+ "params": {
+ "field": "start_date",
+ "interval": "auto",
+ "customInterval": "2h",
+ "min_doc_count": 1,
+ "extended_bounds": {}
+ }
+ })
+
+ self['listeners'] = {}
+ self['title'] = ' '.join(['{} {}'.format(x['type'], x['params']['field']) for x in self['aggs']
+ if x['schema'] == 'metric'])
+
+
+class KibanaVisualization(dict):
+ def __init__(self, project_name, case_name, installer, pod, scenario, visualization):
+ """
+ We need two things
+ 1. filter created from
+ project_name
+ case_name
+ installer
+ pod
+ scenario
+ 2. visualization state
+ field for y axis (metric) with type (avg, sum, etc.)
+ field for x axis (segment) with type (date_histogram)
+
+ :return:
+ """
+ super(KibanaVisualization, self).__init__()
+ vis_state = VisualizationState(visualization)
+ self.vis_state_title = vis_state['title']
+ self['title'] = '{} {} {} {} {} {}'.format(project_name,
+ case_name,
+ self.vis_state_title,
+ installer,
+ pod,
+ scenario)
+ self.id = self['title'].replace(' ', '-').replace('/', '-')
+ self['visState'] = json.dumps(vis_state, separators=(',', ':'))
+ self['uiStateJSON'] = "{}"
+ self['description'] = "Kibana visualization for project_name '{}', case_name '{}', data '{}', installer '{}'," \
+ " pod '{}' and scenario '{}'".format(project_name,
+ case_name,
+ self.vis_state_title,
+ installer,
+ pod,
+ scenario)
+ self['scenario'] = 1
+ self['kibanaSavedObjectMeta'] = {"searchSourceJSON": json.dumps(KibanaSearchSourceJSON(project_name,
+ case_name,
+ installer,
+ pod,
+ scenario),
+ separators=(',', ':'))}
+
+
+def _get_pods_and_scenarios(project_name, case_name, installer):
+ query_json = json.JSONEncoder().encode({
+ "query": {
+ "bool": {
+ "must": [
+ {"match_all": {}}
+ ],
+ "filter": [
+ {"match": {"installer": {"query": installer, "type": "phrase"}}},
+ {"match": {"project_name": {"query": project_name, "type": "phrase"}}},
+ {"match": {"case_name": {"query": case_name, "type": "phrase"}}}
+ ]
+ }
+ }
+ })
+
+ elastic_data = elastic_access.get_elastic_docs(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
+ es_creds, query_json)
+
+ pods_and_scenarios = {}
+
+ for data in elastic_data:
+ pod = data['pod_name']
+ if pod in pods_and_scenarios:
+ pods_and_scenarios[pod].add(data['scenario'])
+ else:
+ pods_and_scenarios[pod] = {data['scenario']}
+
+ if 'all' in pods_and_scenarios:
+ pods_and_scenarios['all'].add(data['scenario'])
+ else:
+ pods_and_scenarios['all'] = {data['scenario']}
+
+ return pods_and_scenarios
+
+
+def construct_dashboards():
+ """
+ iterate over testcase and installer
+ 1. get available pods for each testcase/installer pair
+ 2. get available scenario for each testcase/installer/pod tuple
+ 3. construct KibanaInput and append
+
+ :return: list of KibanaDashboards
+ """
+ kibana_dashboards = []
+ for project, case_dicts in testcases.testcases_yaml.items():
+ for case in case_dicts:
+ case_name = case.get('name')
+ visualizations = case.get('visualizations')
+ family = case.get('test_family')
+ for installer in _installers:
+ pods_and_scenarios = _get_pods_and_scenarios(project, case_name, installer)
+ for visualization in visualizations:
+ for pod, scenarios in pods_and_scenarios.iteritems():
+ kibana_dashboards.append(KibanaDashboard(project,
+ case_name,
+ family,
+ installer,
+ pod,
+ scenarios,
+ visualization))
+ return kibana_dashboards
+
+
+def generate_js_inputs(js_file_path, kibana_url, dashboards):
+ js_dict = {}
+ for dashboard in dashboards:
+ dashboard_meta = dashboard['metadata']
+ test_family = dashboard_meta['test_family']
+ test_label = dashboard_meta['label']
+
+ if test_family not in js_dict:
+ js_dict[test_family] = {}
+
+ js_test_family = js_dict[test_family]
+
+ if test_label not in js_test_family:
+ js_test_family[test_label] = {}
+
+ js_test_label = js_test_family[test_label]
+
+ if dashboard.installer not in js_test_label:
+ js_test_label[dashboard.installer] = {}
+
+ js_installer = js_test_label[dashboard.installer]
+ js_installer[dashboard.pod] = kibana_url + '#/dashboard/' + dashboard.id
+
+ with open(js_file_path, 'w+') as js_file_fdesc:
+ js_file_fdesc.write('var kibana_dashboard_links = ')
+ js_file_fdesc.write(str(js_dict).replace("u'", "'"))
+
+
+def main():
+ dashboards = construct_dashboards()
+
+ for kibana_dashboard in dashboards:
+ kibana_dashboard.publish()
+
+ if generate_inputs:
+ generate_js_inputs(input_file_path, kibana_url, dashboards)
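
For reference, a sketch of the naming scheme (hypothetical values): dashboard and visualization ids are their titles with spaces and slashes replaced by dashes, and generate_js_inputs publishes the resulting links under kibana_url:

    kibana_url = 'http://localhost:5601/app/kibana'  # hypothetical
    title = '{} {} {} {} {}'.format('functest', 'vping_ssh', 'fuel',
                                    'avg details.duration', 'all')
    dashboard_id = title.replace(' ', '-').replace('/', '-')
    print kibana_url + '#/dashboard/' + dashboard_id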
diff --git a/dashboard/dashboard/elastic2kibana_main.py b/dashboard/dashboard/elastic2kibana_main.py
new file mode 100644
index 0000000..3ec27cb
--- /dev/null
+++ b/dashboard/dashboard/elastic2kibana_main.py
@@ -0,0 +1,4 @@
+from elastic2kibana.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/dashboard/dashboard/functest/__init__.py b/dashboard/dashboard/functest/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dashboard/dashboard/functest/__init__.py
diff --git a/dashboard/dashboard/functest/testcases.yaml b/dashboard/dashboard/functest/testcases.yaml
new file mode 100644
index 0000000..9c33d2e
--- /dev/null
+++ b/dashboard/dashboard/functest/testcases.yaml
@@ -0,0 +1,138 @@
+functest:
+ -
+ name: tempest_smoke_serial
+ format: normal
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ - field: details.failures
+ -
+ name: success_percentage
+ fields:
+ - field: details.success_percentage
+ -
+ name: rally_sanity
+ test_family: VIM
+ format: rally
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ -
+ name: success_percentage
+ fields:
+ - field: details.success_percentage
+ -
+ name: vping_ssh
+ format: normal
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: vping_userdata
+ format: normal
+ test_family: VIM
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: odl
+ test_family: Controller
+ format: odl
+ visualizations:
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ - field: details.failures
+ -
+ name: success_percentage
+ fields:
+ - field: details.success_percentage
+ -
+ name: onos
+ format: onos
+ test_family: Controller
+ visualizations:
+ -
+ name: duration
+ label: FUNCvirNet
+ fields:
+ - field: details.FUNCvirNet.duration
+ -
+ name: duration
+ label: FUNCvirNetL3
+ fields:
+ - field: details.FUNCvirNetL3.duration
+ -
+ name: tests_failures
+ label: FUNCvirNet
+ fields:
+ - field: details.FUNCvirNet.tests
+ - field: details.FUNCvirNet.failures
+ -
+ name: tests_failures
+ label: FUNCvirNetL3
+ fields:
+ - field: details.FUNCvirNetL3.tests
+ - field: details.FUNCvirNetL3.failures
+ -
+ name: vims
+ format: vims
+ test_family: Features
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.vIMS.duration
+ - field: details.orchestrator.duration
+ - field: details.sig_test.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.sig_test.tests
+ - field: details.sig_test.failures
+ - field: details.sig_test.passed
+ - field: details.sig_test.skipped
+promise:
+ -
+ name: promise
+ format: normal
+ test_family: Features
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
+ -
+ name: tests_failures
+ fields:
+ - field: details.tests
+ - field: details.failures
+doctor:
+ -
+ name: doctor-notification
+ test_family: Features
+ format: normal
+ visualizations:
+ -
+ name: duration
+ fields:
+ - field: details.duration
diff --git a/dashboard/dashboard/mongo2elastic/__init__.py b/dashboard/dashboard/mongo2elastic/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dashboard/dashboard/mongo2elastic/__init__.py
diff --git a/dashboard/dashboard/mongo2elastic/format.py b/dashboard/dashboard/mongo2elastic/format.py
new file mode 100644
index 0000000..ef485ba
--- /dev/null
+++ b/dashboard/dashboard/mongo2elastic/format.py
@@ -0,0 +1,186 @@
+#! /usr/bin/env python
+
+
+def _convert_value(value):
+ return value if value != '' else 0
+
+
+def _convert_duration(duration):
+    if isinstance(duration, basestring) and ':' in duration:
+ hours, minutes, seconds = duration.split(":")
+ hours = _convert_value(hours)
+ minutes = _convert_value(minutes)
+ seconds = _convert_value(seconds)
+ int_duration = 3600 * int(hours) + 60 * int(minutes) + float(seconds)
+ else:
+ int_duration = duration
+ return int_duration
+
+
+def format_normal(testcase):
+    """
+    Keep only these fields in details and drop the rest:
+        details.duration
+        details.tests
+        details.failures
+
+    Return False if none of them is present.
+    """
+ found = False
+ testcase_details = testcase['details']
+ fields = ['duration', 'tests', 'failures']
+ if isinstance(testcase_details, dict):
+ for key, value in testcase_details.items():
+ if key in fields:
+ found = True
+ if key == 'duration':
+ testcase_details[key] = _convert_duration(value)
+ else:
+ del testcase_details[key]
+
+ if 'tests' in testcase_details and 'failures' in testcase_details:
+ testcase_tests = float(testcase_details['tests'])
+ testcase_failures = float(testcase_details['failures'])
+ if testcase_tests != 0:
+ testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests
+ else:
+ testcase_details['success_percentage'] = 0
+
+ return found
+
+
+def format_rally(testcase):
+ """
+ Structure:
+ details.[{summary.duration}]
+ details.[{summary.nb success}]
+ details.[{summary.nb tests}]
+
+ Find data for these fields
+ -> details.duration
+ -> details.tests
+ -> details.success_percentage
+ """
+ details = testcase['details']
+ summary = None
+ for item in details:
+ if 'summary' in item:
+ summary = item['summary']
+
+ if not summary:
+ return False
+
+ testcase['details'] = {
+ 'duration': summary['duration'],
+ 'tests': summary['nb tests'],
+ 'success_percentage': summary['nb success']
+ }
+ return True
+
+
+def _get_statistics(orig_data, stat_fields, stat_values=None):
+ test_results = {}
+ for stat_data in orig_data:
+ for field in stat_fields:
+ stat_value = stat_data[field]
+ if stat_value in test_results:
+ test_results[stat_value] += 1
+ else:
+ test_results[stat_value] = 1
+
+ if stat_values is not None:
+ for stat_value in stat_values:
+ if stat_value not in test_results:
+ test_results[stat_value] = 0
+
+ return test_results
+
+
+def format_onos(testcase):
+ """
+ Structure:
+ details.FUNCvirNet.duration
+ details.FUNCvirNet.status.[{Case result}]
+ details.FUNCvirNetL3.duration
+ details.FUNCvirNetL3.status.[{Case result}]
+
+ Find data for these fields
+ -> details.FUNCvirNet.duration
+ -> details.FUNCvirNet.tests
+ -> details.FUNCvirNet.failures
+ -> details.FUNCvirNetL3.duration
+ -> details.FUNCvirNetL3.tests
+ -> details.FUNCvirNetL3.failures
+ """
+ testcase_details = testcase['details']
+
+ if 'FUNCvirNet' not in testcase_details or 'FUNCvirNetL3' not in testcase_details:
+ return False
+
+ funcvirnet_details = testcase_details['FUNCvirNet']['status']
+ funcvirnet_stats = _get_statistics(funcvirnet_details, ('Case result',), ('PASS', 'FAIL'))
+ funcvirnet_passed = funcvirnet_stats['PASS']
+ funcvirnet_failed = funcvirnet_stats['FAIL']
+ funcvirnet_all = funcvirnet_passed + funcvirnet_failed
+
+ funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status']
+ funcvirnetl3_stats = _get_statistics(funcvirnetl3_details, ('Case result',), ('PASS', 'FAIL'))
+ funcvirnetl3_passed = funcvirnetl3_stats['PASS']
+ funcvirnetl3_failed = funcvirnetl3_stats['FAIL']
+ funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed
+
+ testcase_details['FUNCvirNet'] = {
+ 'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']),
+ 'tests': funcvirnet_all,
+ 'failures': funcvirnet_failed
+ }
+ testcase_details['FUNCvirNetL3'] = {
+ 'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']),
+ 'tests': funcvirnetl3_all,
+ 'failures': funcvirnetl3_failed
+ }
+ return True
+
+
+def format_vims(testcase):
+ """
+ Structure:
+ details.sig_test.result.[{result}]
+ details.sig_test.duration
+ details.vIMS.duration
+ details.orchestrator.duration
+
+ Find data for these fields
+ -> details.sig_test.duration
+ -> details.sig_test.tests
+ -> details.sig_test.failures
+ -> details.sig_test.passed
+ -> details.sig_test.skipped
+ -> details.vIMS.duration
+ -> details.orchestrator.duration
+ """
+ testcase_details = testcase['details']
+ test_results = _get_statistics(testcase_details['sig_test']['result'],
+ ('result',),
+ ('Passed', 'Skipped', 'Failed'))
+ passed = test_results['Passed']
+ skipped = test_results['Skipped']
+ failures = test_results['Failed']
+ all_tests = passed + skipped + failures
+ testcase['details'] = {
+ 'sig_test': {
+ 'duration': testcase_details['sig_test']['duration'],
+ 'tests': all_tests,
+ 'failures': failures,
+ 'passed': passed,
+ 'skipped': skipped
+ },
+ 'vIMS': {
+ 'duration': testcase_details['vIMS']['duration']
+ },
+ 'orchestrator': {
+ 'duration': testcase_details['orchestrator']['duration']
+ }
+ }
+ return True
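
A quick check of the normal formatter (Python 2; sample dict shaped like a functest record):

    from mongo2elastic import format as fmt

    testcase = {'details': {'duration': '0:01:30', 'tests': 10,
                            'failures': 2, 'extra': 'dropped'}}
    print fmt.format_normal(testcase)                # True
    print testcase['details']['duration']            # 90.0
    print testcase['details']['success_percentage']  # 80.0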
diff --git a/dashboard/dashboard/mongo2elastic/main.py b/dashboard/dashboard/mongo2elastic/main.py
new file mode 100644
index 0000000..25b5320
--- /dev/null
+++ b/dashboard/dashboard/mongo2elastic/main.py
@@ -0,0 +1,243 @@
+#! /usr/bin/env python
+
+import datetime
+import json
+import os
+import subprocess
+import traceback
+import urlparse
+import uuid
+
+import argparse
+
+from common import logger_utils, elastic_access
+from conf import testcases
+from conf.config import APIConfig
+from mongo2elastic import format
+
+logger = logger_utils.DashboardLogger('mongo2elastic').get
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--config-file",
+ dest='config_file',
+ help="Config file location")
+parser.add_argument('-ld', '--latest-days',
+ default=0,
+ type=int,
+ metavar='N',
+                    help='fetch entries at most N days old from mongodb and'
+                         ' parse those that are not already in elasticsearch.'
+                         ' If omitted, everything in mongodb is fetched (the default)')
+
+args = parser.parse_args()
+CONF = APIConfig().parse(args.config_file)
+
+
+tmp_docs_file = './mongo-{}.json'.format(uuid.uuid4())
+
+
+class DocumentPublisher:
+
+ def __init__(self, doc, fmt, exist_docs, creds, to):
+ self.doc = doc
+ self.fmt = fmt
+ self.creds = creds
+ self.exist_docs = exist_docs
+ self.to = to
+ self.is_formatted = True
+
+ def format(self):
+ try:
+ if self._verify_document() and self.fmt:
+ self.is_formatted = vars(format)[self.fmt](self.doc)
+ else:
+ self.is_formatted = False
+ except Exception:
+ logger.error("Fail in format testcase[%s]\nerror message: %s" %
+ (self.doc, traceback.format_exc()))
+ self.is_formatted = False
+        return self
+
+ def publish(self):
+ if self.is_formatted and self.doc not in self.exist_docs:
+ self._publish()
+
+ def _publish(self):
+ status, data = elastic_access.publish_json(self.doc, self.creds, self.to)
+ if status > 300:
+ logger.error('Publish record[{}] failed, due to [{}]'
+ .format(self.doc, json.loads(data)['error']['reason']))
+
+ def _fix_date(self, date_string):
+ if isinstance(date_string, dict):
+ return date_string['$date']
+ else:
+ return date_string[:-3].replace(' ', 'T') + 'Z'
+
+ def _verify_document(self):
+ """
+ Mandatory fields:
+ installer
+ pod_name
+ version
+ case_name
+ date
+ project
+ details
+
+ these fields must be present and must NOT be None
+
+ Optional fields:
+ description
+
+    these fields will be preserved if they are NOT None
+ """
+ mandatory_fields = ['installer',
+ 'pod_name',
+ 'version',
+ 'case_name',
+ 'project_name',
+ 'details']
+ mandatory_fields_to_modify = {'start_date': self._fix_date}
+ fields_to_swap_or_add = {'scenario': 'version'}
+ if '_id' in self.doc:
+ mongo_id = self.doc['_id']
+ else:
+ mongo_id = None
+ optional_fields = ['description']
+ for key, value in self.doc.items():
+ if key in mandatory_fields:
+ if value is None:
+ # empty mandatory field, invalid input
+                logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing a value"
+ " for mandatory field '{}'".format(mongo_id, key))
+ return False
+ else:
+ mandatory_fields.remove(key)
+ elif key in mandatory_fields_to_modify:
+ if value is None:
+ # empty mandatory field, invalid input
+                logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing a value"
+ " for mandatory field '{}'".format(mongo_id, key))
+ return False
+ else:
+ self.doc[key] = mandatory_fields_to_modify[key](value)
+ del mandatory_fields_to_modify[key]
+ elif key in fields_to_swap_or_add:
+ if value is None:
+ swapped_key = fields_to_swap_or_add[key]
+ swapped_value = self.doc[swapped_key]
+ logger.info("Swapping field '{}' with value None for '{}' with value '{}'.".format(key, swapped_key,
+ swapped_value))
+ self.doc[key] = swapped_value
+ del fields_to_swap_or_add[key]
+ else:
+ del fields_to_swap_or_add[key]
+ elif key in optional_fields:
+ if value is None:
+ # empty optional field, remove
+ del self.doc[key]
+ optional_fields.remove(key)
+ else:
+ # unknown field
+ del self.doc[key]
+
+ if len(mandatory_fields) > 0:
+ # some mandatory fields are missing
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
+ " mandatory field(s) '{}'".format(mongo_id, mandatory_fields))
+ return False
+ elif len(mandatory_fields_to_modify) > 0:
+ # some mandatory fields are missing
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
+ " mandatory field(s) '{}'".format(mongo_id, mandatory_fields_to_modify.keys()))
+ return False
+ else:
+ if len(fields_to_swap_or_add) > 0:
+ for key, swap_key in fields_to_swap_or_add.iteritems():
+ self.doc[key] = self.doc[swap_key]
+
+ return True
+
+
+class DocumentsPublisher:
+
+ def __init__(self, project, case, fmt, days, elastic_url, creds, to):
+ self.project = project
+ self.case = case
+ self.fmt = fmt
+ self.days = days
+ self.elastic_url = elastic_url
+ self.creds = creds
+ self.to = to
+ self.existed_docs = []
+
+ def export(self):
+ if self.days > 0:
+ past_time = datetime.datetime.today() - datetime.timedelta(days=self.days)
+ query = '''{{
+ "project_name": "{}",
+ "case_name": "{}",
+ "start_date": {{"$gt" : "{}"}}
+ }}'''.format(self.project, self.case, past_time)
+ else:
+ query = '''{{
+ "project_name": "{}",
+ "case_name": "{}"
+ }}'''.format(self.project, self.case)
+ cmd = ['mongoexport',
+ '--db', 'test_results_collection',
+ '--collection', 'results',
+ '--query', '{}'.format(query),
+ '--out', '{}'.format(tmp_docs_file)]
+ try:
+ subprocess.check_call(cmd)
+ return self
+ except Exception, err:
+ logger.error("export mongodb failed: %s" % err)
+ self._remove()
+ exit(-1)
+
+ def get_existed_docs(self):
+ self.existed_docs = elastic_access.get_elastic_docs_by_days(self.elastic_url, self.creds, self.days)
+ return self
+
+ def publish(self):
+ try:
+ with open(tmp_docs_file) as fdocs:
+ for doc_line in fdocs:
+ DocumentPublisher(json.loads(doc_line),
+ self.fmt,
+ self.existed_docs,
+ self.creds,
+ self.to).format().publish()
+ finally:
+ self._remove()
+
+ def _remove(self):
+ if os.path.exists(tmp_docs_file):
+ os.remove(tmp_docs_file)
+
+
+def main():
+ base_elastic_url = urlparse.urljoin(CONF.elastic_url, '/test_results/mongo2elastic')
+ to = CONF.destination
+ days = args.latest_days
+ es_creds = CONF.elastic_creds
+
+ if to == 'elasticsearch':
+ to = base_elastic_url
+
+ for project, case_dicts in testcases.testcases_yaml.items():
+ for case_dict in case_dicts:
+ case = case_dict.get('name')
+ fmt = testcases.compose_format(case_dict.get('format'))
+ DocumentsPublisher(project,
+ case,
+ fmt,
+ days,
+ base_elastic_url,
+ es_creds,
+ to).export().get_existed_docs().publish()
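
For reference, a sketch of the mongoexport query built for --latest-days 7 (hypothetical project and case; Python 2):

    import datetime

    days = 7
    past_time = datetime.datetime.today() - datetime.timedelta(days=days)
    query = '''{{
        "project_name": "{}",
        "case_name": "{}",
        "start_date": {{"$gt" : "{}"}}
    }}'''.format('functest', 'vping_ssh', past_time)
    print query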
diff --git a/dashboard/dashboard/mongo2elastic_main.py b/dashboard/dashboard/mongo2elastic_main.py
new file mode 100644
index 0000000..141d8f3
--- /dev/null
+++ b/dashboard/dashboard/mongo2elastic_main.py
@@ -0,0 +1,4 @@
+from mongo2elastic.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/dashboard/etc/config.ini b/dashboard/etc/config.ini
new file mode 100644
index 0000000..b94ac7b
--- /dev/null
+++ b/dashboard/etc/config.ini
@@ -0,0 +1,14 @@
+# to add a new parameter in the config file,
+# the CONF object in config.ini must be updated
+[elastic]
+url = http://localhost:9200
+creds =
+
+[output]
+# elasticsearch or console
+destination = elasticsearch
+
+[kibana]
+url = http://10.63.243.17:5601/app/kibana
+js = true
+js_path = /usr/share/nginx/html/kibana_dashboards/conf.js
diff --git a/dashboard/kibana_cleanup.py b/dashboard/kibana_cleanup.py
new file mode 100644
index 0000000..9ce4994
--- /dev/null
+++ b/dashboard/kibana_cleanup.py
@@ -0,0 +1,41 @@
+#! /usr/bin/env python
+import logging
+import urlparse
+
+import argparse
+
+from dashboard.common import elastic_access
+
+logger = logging.getLogger('clear_kibana')
+logger.setLevel(logging.DEBUG)
+file_handler = logging.FileHandler('/var/log/{}.log'.format('clear_kibana'))
+file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
+logger.addHandler(file_handler)
+
+
+def delete_all(url, es_creds):
+ ids = elastic_access.get_elastic_docs(url, es_creds, body=None, field='_id')
+    for doc_id in ids:
+        del_url = '/'.join([url, doc_id])
+        elastic_access.delete_request(del_url, es_creds)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Delete saved kibana searches, visualizations and dashboards')
+ parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
+ help='the url of elasticsearch, defaults to http://localhost:9200')
+
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username with password for elasticsearch in format username:password')
+
+ args = parser.parse_args()
+ base_elastic_url = args.elasticsearch_url
+ es_creds = args.elasticsearch_username
+
+ urls = (urlparse.urljoin(base_elastic_url, '/.kibana/visualization'),
+ urlparse.urljoin(base_elastic_url, '/.kibana/dashboard'),
+ urlparse.urljoin(base_elastic_url, '/.kibana/search'))
+
+ for url in urls:
+ delete_all(url, es_creds)
+
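
Invocation sketch (both flags are optional; the credentials shown are hypothetical, and /var/log must be writable for the script's log file):

    python kibana_cleanup.py -e http://localhost:9200 -u admin:changeme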