Diffstat (limited to 'utils/test')
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py     2
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py      2
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py    2
-rw-r--r--  utils/test/scripts/create_kibana_dashboards.py                                  110
-rw-r--r--  utils/test/scripts/kibana_cleanup.py                                             21
-rw-r--r--  utils/test/scripts/mongo_to_elasticsearch.py                                     71
-rw-r--r--  utils/test/scripts/shared_utils.py                                               13
7 files changed, 118 insertions(+), 103 deletions(-)
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py
index 171ab7695..1e9a97230 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py
@@ -38,7 +38,7 @@ class ProjectCLHandler(GenericProjectHandler):
@param body: project to be created
@type body: L{ProjectCreateRequest}
@in body: body
- @rtype: L{Project}
+ @rtype: L{CreateResponse}
@return 200: project is created.
@raise 403: project already exists
@raise 400: body or name not provided
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
index fe2d71e5e..56bed6c81 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
@@ -134,7 +134,7 @@ class ResultsCLHandler(GenericResultHandler):
@param body: result to be created
@type body: L{ResultCreateRequest}
@in body: body
- @rtype: L{TestResult}
+ @rtype: L{CreateResponse}
@return 200: result is created.
@raise 404: pod/project/testcase not exist
@raise 400: body/pod_name/project_name/case_name not provided
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py
index b4f9db96d..253aa6649 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py
@@ -41,7 +41,7 @@ class TestcaseCLHandler(GenericTestcaseHandler):
@param body: testcase to be created
@type body: L{TestcaseCreateRequest}
@in body: body
- @rtype: L{Testcase}
+ @rtype: L{CreateResponse}
@return 200: testcase is created in this project.
@raise 403: project not exist
or testcase already exists in this project
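The three docstring fixes above align the declared @rtype with what these POST handlers actually return: a CreateResponse rather than the stored resource itself. A hedged sketch of that response body (the href field name is assumed from the opnfv_testapi models, which this diff does not show):

# Hypothetical illustration only -- the field name and URL are assumptions,
# since models.py is not part of this diff.
import json

create_response = {'href': 'http://localhost:8000/api/v1/projects/functest'}
print(json.dumps(create_response))  # what a successful POST would return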
diff --git a/utils/test/scripts/create_kibana_dashboards.py b/utils/test/scripts/create_kibana_dashboards.py
index 252ce2138..73f4ed971 100644
--- a/utils/test/scripts/create_kibana_dashboards.py
+++ b/utils/test/scripts/create_kibana_dashboards.py
@@ -7,7 +7,7 @@ import urlparse
logger = logging.getLogger('create_kibana_dashboards')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format(__name__))
+file_handler = logging.FileHandler('/var/log/{}.log'.format('create_kibana_dashboards'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
@@ -15,7 +15,7 @@ _installers = {'fuel', 'apex', 'compass', 'joid'}
# see class VisualizationState for details on format
_testcases = [
- ('functest', 'Tempest',
+ ('functest', 'tempest_smoke_serial',
[
{
"metrics": [
@@ -28,7 +28,7 @@ _testcases = [
],
"type": "line",
"metadata": {
- "label": "Tempest duration",
+ "label": "tempest_smoke_serial duration",
"test_family": "VIM"
}
},
@@ -50,7 +50,7 @@ _testcases = [
],
"type": "histogram",
"metadata": {
- "label": "Tempest nr of tests/failures",
+ "label": "tempest_smoke_serial nr of tests/failures",
"test_family": "VIM"
}
},
@@ -66,14 +66,14 @@ _testcases = [
],
"type": "line",
"metadata": {
- "label": "Tempest success percentage",
+ "label": "tempest_smoke_serial success percentage",
"test_family": "VIM"
}
}
]
),
- ('functest', 'Rally',
+ ('functest', 'rally_sanity',
[
{
"metrics": [
@@ -86,7 +86,7 @@ _testcases = [
],
"type": "line",
"metadata": {
- "label": "Rally duration",
+ "label": "rally_sanity duration",
"test_family": "VIM"
}
},
@@ -102,7 +102,7 @@ _testcases = [
],
"type": "histogram",
"metadata": {
- "label": "Rally nr of tests",
+ "label": "rally_sanity nr of tests",
"test_family": "VIM"
}
},
@@ -118,14 +118,14 @@ _testcases = [
],
"type": "line",
"metadata": {
- "label": "Rally success percentage",
+ "label": "rally_sanity success percentage",
"test_family": "VIM"
}
}
]
),
- ('functest', 'vPing',
+ ('functest', 'vping_ssh',
[
{
"metrics": [
@@ -145,7 +145,7 @@ _testcases = [
]
),
- ('functest', 'vPing_userdata',
+ ('functest', 'vping_userdata',
[
{
"metrics": [
@@ -165,7 +165,7 @@ _testcases = [
]
),
- ('functest', 'ODL',
+ ('functest', 'odl',
[
{
"metrics": [
@@ -207,7 +207,7 @@ _testcases = [
]
),
- ('functest', 'ONOS',
+ ('functest', 'onos',
[
{
"metrics": [
@@ -287,7 +287,7 @@ _testcases = [
]
),
- ('functest', 'vIMS',
+ ('functest', 'vims',
[
{
"metrics": [
@@ -418,13 +418,13 @@ _testcases = [
class KibanaDashboard(dict):
- def __init__(self, project_name, case_name, installer, pod, versions, visualization_detail):
+ def __init__(self, project_name, case_name, installer, pod, scenarios, visualization_detail):
super(KibanaDashboard, self).__init__()
self.project_name = project_name
self.case_name = case_name
self.installer = installer
self.pod = pod
- self.versions = versions
+ self.scenarios = scenarios
self.visualization_detail = visualization_detail
self._visualization_title = None
self._kibana_visualizations = []
@@ -433,12 +433,12 @@ class KibanaDashboard(dict):
self._create()
def _create_visualizations(self):
- for version in self.versions:
+ for scenario in self.scenarios:
self._kibana_visualizations.append(KibanaVisualization(self.project_name,
self.case_name,
self.installer,
self.pod,
- version,
+ scenario,
self.visualization_detail))
self._visualization_title = self._kibana_visualizations[0].vis_state_title
@@ -447,7 +447,7 @@ class KibanaDashboard(dict):
for visualization in self._kibana_visualizations:
url = urlparse.urljoin(base_elastic_url, '/.kibana/visualization/{}'.format(visualization.id))
logger.debug("publishing visualization '{}'".format(url))
- shared_utils.publish_json(visualization, es_user, es_passwd, url)
+ shared_utils.publish_json(visualization, es_creds, url)
def _construct_panels(self):
size_x = 6
@@ -495,7 +495,7 @@ class KibanaDashboard(dict):
},
separators=(',', ':'))
self['uiStateJSON'] = "{}"
self['version'] = 1
self['timeRestore'] = False
self['kibanaSavedObjectMeta'] = {
'searchSourceJSON': json.dumps({
@@ -517,7 +517,7 @@ class KibanaDashboard(dict):
def _publish(self):
url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
logger.debug("publishing dashboard '{}'".format(url))
- shared_utils.publish_json(self, es_user, es_passwd, url)
+ shared_utils.publish_json(self, es_creds, url)
def publish(self):
self._publish_visualizations()
@@ -533,13 +533,13 @@ class KibanaSearchSourceJSON(dict):
]
"""
- def __init__(self, project_name, case_name, installer, pod, version):
+ def __init__(self, project_name, case_name, installer, pod, scenario):
super(KibanaSearchSourceJSON, self).__init__()
self["filter"] = [
{"match": {"project_name": {"query": project_name, "type": "phrase"}}},
{"match": {"case_name": {"query": case_name, "type": "phrase"}}},
{"match": {"installer": {"query": installer, "type": "phrase"}}},
- {"match": {"version": {"query": version, "type": "phrase"}}}
+ {"match": {"scenario": {"query": scenario, "type": "phrase"}}}
]
if pod != 'all':
self["filter"].append({"match": {"pod_name": {"query": pod, "type": "phrase"}}})
@@ -564,14 +564,14 @@ class VisualizationState(dict):
{
"type": type, # default date_histogram
"params": {
- "field": field # default creation_date
+ "field": field # default start_date
},
{segment2}
],
"type": type, # default area
"mode": mode, # default grouped for type 'histogram', stacked for other types
"metadata": {
- "label": "Tempest duration",# mandatory, no default
+ "label": "tempest_smoke_serial duration",# mandatory, no default
"test_family": "VIM" # mandatory, no default
}
}
@@ -634,7 +634,7 @@ class VisualizationState(dict):
"type": 'date_histogram' if 'type' not in segment else segment['type'],
"schema": "metric",
"params": {
- "field": "creation_date" if ('params' not in segment or 'field' not in segment['params'])
+ "field": "start_date" if ('params' not in segment or 'field' not in segment['params'])
else segment['params']['field'],
"interval": "auto",
"customInterval": "2h",
@@ -649,7 +649,7 @@ class VisualizationState(dict):
"type": 'date_histogram',
"schema": "segment",
"params": {
- "field": "creation_date",
+ "field": "start_date",
"interval": "auto",
"customInterval": "2h",
"min_doc_count": 1,
@@ -663,7 +663,7 @@ class VisualizationState(dict):
class KibanaVisualization(dict):
- def __init__(self, project_name, case_name, installer, pod, version, detail):
+ def __init__(self, project_name, case_name, installer, pod, scenario, detail):
"""
We need two things
1. filter created from
@@ -671,7 +671,7 @@ class KibanaVisualization(dict):
case_name
installer
pod
- version
+ scenario
2. visualization state
field for y axis (metric) with type (avg, sum, etc.)
field for x axis (segment) with type (date_histogram)
@@ -686,27 +686,27 @@ class KibanaVisualization(dict):
self.vis_state_title,
installer,
pod,
- version)
+ scenario)
self.id = self['title'].replace(' ', '-').replace('/', '-')
self['visState'] = json.dumps(vis_state, separators=(',', ':'))
self['uiStateJSON'] = "{}"
self['description'] = "Kibana visualization for project_name '{}', case_name '{}', data '{}', installer '{}'," \
- " pod '{}' and version '{}'".format(project_name,
+ " pod '{}' and scenario '{}'".format(project_name,
case_name,
self.vis_state_title,
installer,
pod,
- version)
+ scenario)
self['version'] = 1
self['kibanaSavedObjectMeta'] = {"searchSourceJSON": json.dumps(KibanaSearchSourceJSON(project_name,
case_name,
installer,
pod,
- version),
+ scenario),
separators=(',', ':'))}
-def _get_pods_and_versions(project_name, case_name, installer):
+def _get_pods_and_scenarios(project_name, case_name, installer):
query_json = json.JSONEncoder().encode({
"query": {
"bool": {
@@ -723,30 +723,30 @@ def _get_pods_and_versions(project_name, case_name, installer):
})
elastic_data = shared_utils.get_elastic_data(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
- es_user, es_passwd, query_json)
+ es_creds, query_json)
- pods_and_versions = {}
+ pods_and_scenarios = {}
for data in elastic_data:
pod = data['pod_name']
- if pod in pods_and_versions:
- pods_and_versions[pod].add(data['version'])
+ if pod in pods_and_scenarios:
+ pods_and_scenarios[pod].add(data['scenario'])
else:
- pods_and_versions[pod] = {data['version']}
+ pods_and_scenarios[pod] = {data['scenario']}
- if 'all' in pods_and_versions:
- pods_and_versions['all'].add(data['version'])
+ if 'all' in pods_and_scenarios:
+ pods_and_scenarios['all'].add(data['scenario'])
else:
- pods_and_versions['all'] = {data['version']}
+ pods_and_scenarios['all'] = {data['scenario']}
- return pods_and_versions
+ return pods_and_scenarios
def construct_dashboards():
"""
iterate over testcase and installer
1. get available pods for each testcase/installer pair
- 2. get available version for each testcase/installer/pod tuple
+ 2. get available scenario for each testcase/installer/pod tuple
3. construct KibanaInput and append
:return: list of KibanaDashboards
@@ -754,10 +754,10 @@ def construct_dashboards():
kibana_dashboards = []
for project_name, case_name, visualization_details in _testcases:
for installer in _installers:
- pods_and_versions = _get_pods_and_versions(project_name, case_name, installer)
+ pods_and_scenarios = _get_pods_and_scenarios(project_name, case_name, installer)
for visualization_detail in visualization_details:
- for pod, versions in pods_and_versions.iteritems():
- kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, versions,
+ for pod, scenarios in pods_and_scenarios.iteritems():
+ kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, scenarios,
visualization_detail))
return kibana_dashboards
@@ -794,26 +794,25 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create Kibana dashboards from data in elasticsearch')
parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
help='the url of elasticsearch, defaults to http://localhost:9200')
+
parser.add_argument('-js', '--generate_js_inputs', action='store_true',
help='Use this argument to generate javascript inputs for kibana landing page')
+
parser.add_argument('--js_path', default='/usr/share/nginx/html/kibana_dashboards/conf.js',
help='Path of javascript file with inputs for kibana landing page')
+
parser.add_argument('-k', '--kibana_url', default='https://testresults.opnfv.org/kibana/app/kibana',
help='The url of kibana for javascript inputs')
- parser.add_argument('-u', '--elasticsearch-username',
- help='the username for elasticsearch')
-
- parser.add_argument('-p', '--elasticsearch-password',
- help='the password for elasticsearch')
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username and password for elasticsearch, in the format username:password')
args = parser.parse_args()
base_elastic_url = args.elasticsearch_url
generate_inputs = args.generate_js_inputs
input_file_path = args.js_path
kibana_url = args.kibana_url
- es_user = args.elasticsearch_username
- es_passwd = args.elasticsearch_password
+ es_creds = args.elasticsearch_username
dashboards = construct_dashboards()
@@ -822,3 +821,4 @@ if __name__ == '__main__':
if generate_inputs:
generate_js_inputs(input_file_path, kibana_url, dashboards)
+
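To make the version-to-scenario switch concrete, the following condensed, self-contained sketch (with made-up pod and scenario values) shows the aggregation that _get_pods_and_scenarios() performs on the elasticsearch hits:

# Made-up sample hits; the real data comes from /test_results/mongo2elastic.
elastic_data = [
    {'pod_name': 'pod-1', 'scenario': 'os-odl_l2-nofeature-ha'},
    {'pod_name': 'pod-1', 'scenario': 'os-nosdn-nofeature-ha'},
    {'pod_name': 'pod-2', 'scenario': 'os-nosdn-nofeature-ha'},
]

pods_and_scenarios = {}
for data in elastic_data:
    # one scenario set per pod, plus a synthetic 'all' pod spanning every pod
    pods_and_scenarios.setdefault(data['pod_name'], set()).add(data['scenario'])
    pods_and_scenarios.setdefault('all', set()).add(data['scenario'])

# -> {'pod-1': {2 scenarios}, 'pod-2': {1 scenario}, 'all': {2 scenarios}};
# a KibanaDashboard is then built per (pod, scenarios, visualization_detail).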
diff --git a/utils/test/scripts/kibana_cleanup.py b/utils/test/scripts/kibana_cleanup.py
index 2cd52af67..e8d452af2 100644
--- a/utils/test/scripts/kibana_cleanup.py
+++ b/utils/test/scripts/kibana_cleanup.py
@@ -7,16 +7,16 @@ import urlparse
logger = logging.getLogger('clear_kibana')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format(__name__))
+file_handler = logging.FileHandler('/var/log/{}.log'.format('clear_kibana'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
-def delete_all(url, es_user, es_passwd):
- ids = shared_utils.get_elastic_data(url, es_user, es_passwd, body=None, field='_id')
+def delete_all(url, es_creds):
+ ids = shared_utils.get_elastic_data(url, es_creds, body=None, field='_id')
for id in ids:
del_url = '/'.join([url, id])
- shared_utils.delete_request(del_url, es_user, es_passwd)
+ shared_utils.delete_request(del_url, es_creds)
if __name__ == '__main__':
@@ -24,20 +24,17 @@ if __name__ == '__main__':
parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
help='the url of elasticsearch, defaults to http://localhost:9200')
- parser.add_argument('-u', '--elasticsearch-username',
- help='the username for elasticsearch')
-
- parser.add_argument('-p', '--elasticsearch-password',
- help='the password for elasticsearch')
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username and password for elasticsearch, in the format username:password')
args = parser.parse_args()
base_elastic_url = args.elasticsearch_url
- es_user = args.elasticsearch_username
- es_passwd = args.elasticsearch_password
+ es_creds = args.elasticsearch_username
urls = (urlparse.urljoin(base_elastic_url, '/.kibana/visualization'),
urlparse.urljoin(base_elastic_url, '/.kibana/dashboard'),
urlparse.urljoin(base_elastic_url, '/.kibana/search'))
for url in urls:
- delete_all(url, es_user, es_passwd)
+ delete_all(url, es_creds)
+
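The cleanup loop above ultimately issues plain urllib3 DELETE requests; a minimal sketch of one such call, with a hypothetical document id and credentials:

import urllib3

http = urllib3.PoolManager()
headers = urllib3.make_headers(basic_auth='admin:secret')  # hypothetical creds
# with es_creds=None, make_headers() returns {} and the request is unauthenticated
http.request('DELETE',
             'http://localhost:9200/.kibana/dashboard/some-dashboard-id',
             headers=headers)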
diff --git a/utils/test/scripts/mongo_to_elasticsearch.py b/utils/test/scripts/mongo_to_elasticsearch.py
index 8c36d3007..a569ac61c 100644
--- a/utils/test/scripts/mongo_to_elasticsearch.py
+++ b/utils/test/scripts/mongo_to_elasticsearch.py
@@ -11,7 +11,7 @@ import datetime
logger = logging.getLogger('mongo_to_elasticsearch')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format(__name__))
+file_handler = logging.FileHandler('/var/log/{}.log'.format('mongo_to_elasticsearch'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
@@ -21,7 +21,7 @@ def _get_dicts_from_list(testcase, dict_list, keys):
for dictionary in dict_list:
# iterate over dictionaries in input list
if not isinstance(dictionary, dict):
- logger.info("Skipping non-dict details testcase [{}]".format(testcase))
+ logger.info("Skipping non-dict details testcase '{}'".format(testcase))
continue
if keys == set(dictionary.keys()):
# check the dictionary structure
@@ -143,6 +143,9 @@ def modify_functest_onos(testcase):
"""
testcase_details = testcase['details']
+ if 'FUNCvirNet' not in testcase_details:
+ return modify_default_entry(testcase)
+
funcvirnet_details = testcase_details['FUNCvirNet']['status']
funcvirnet_statuses = _get_dicts_from_list(testcase, funcvirnet_details, {'Case result', 'Case name:'})
@@ -238,6 +241,7 @@ def modify_functest_odl(testcase):
'failures': failed_tests,
'success_percentage': 100 * passed_tests / float(all_tests)
}
+ logger.debug("Modified odl testcase: '{}'".format(json.dumps(testcase, indent=2)))
return True
@@ -296,7 +300,8 @@ def verify_mongo_entry(testcase):
'case_name',
'project_name',
'details']
- mandatory_fields_to_modify = {'creation_date': _fix_date}
+ mandatory_fields_to_modify = {'start_date': _fix_date}
+ fields_to_swap_or_add = {'scenario': 'version'}
if '_id' in testcase:
mongo_id = testcase['_id']
else:
@@ -320,6 +325,15 @@ def verify_mongo_entry(testcase):
else:
testcase[key] = mandatory_fields_to_modify[key](value)
del mandatory_fields_to_modify[key]
+ elif key in fields_to_swap_or_add:
+ if value is None:
+ swapped_key = fields_to_swap_or_add[key]
+ swapped_value = testcase[swapped_key]
+ logger.info("Swapping field '{}' with value None for '{}' with value '{}'.".format(key, swapped_key, swapped_value))
+ testcase[key] = swapped_value
+ del fields_to_swap_or_add[key]
+ else:
+ del fields_to_swap_or_add[key]
elif key in optional_fields:
if value is None:
# empty optional field, remove
@@ -334,7 +348,16 @@ def verify_mongo_entry(testcase):
logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
" mandatory field(s) '{}'".format(mongo_id, mandatory_fields))
return False
+ elif len(mandatory_fields_to_modify) > 0:
+ # some mandatory fields are missing
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
+ " mandatory field(s) '{}'".format(mongo_id, mandatory_fields_to_modify.keys()))
+ return False
else:
+ if len(fields_to_swap_or_add) > 0:
+ for key, swap_key in fields_to_swap_or_add.iteritems():
+ testcase[key] = testcase[swap_key]
+
return True
@@ -346,16 +369,17 @@ def modify_mongo_entry(testcase):
if verify_mongo_entry(testcase):
project = testcase['project_name']
case_name = testcase['case_name']
+ logger.info("Processing mongo test case '{}'".format(case_name))
if project == 'functest':
- if case_name == 'Rally':
+ if case_name == 'rally_sanity':
return modify_functest_rally(testcase)
- elif case_name == 'ODL':
+ elif case_name.lower() == 'odl':
return modify_functest_odl(testcase)
- elif case_name == 'ONOS':
+ elif case_name.lower() == 'onos':
return modify_functest_onos(testcase)
- elif case_name == 'vIMS':
+ elif case_name.lower() == 'vims':
return modify_functest_vims(testcase)
- elif case_name == 'Tempest':
+ elif case_name == 'tempest_smoke_serial':
return modify_functest_tempest(testcase)
return modify_default_entry(testcase)
else:
@@ -371,7 +395,7 @@ def publish_mongo_data(output_destination):
for mongo_json_line in fobj:
test_result = json.loads(mongo_json_line)
if modify_mongo_entry(test_result):
- shared_utils.publish_json(test_result, es_user, es_passwd, output_destination)
+ shared_utils.publish_json(test_result, es_creds, output_destination)
finally:
if os.path.exists(tmp_filename):
os.remove(tmp_filename)
@@ -380,7 +404,7 @@ def publish_mongo_data(output_destination):
def get_mongo_data(days):
past_time = datetime.datetime.today() - datetime.timedelta(days=days)
mongo_json_lines = subprocess.check_output(['mongoexport', '--db', 'test_results_collection', '-c', 'results',
- '--query', '{{"creation_date":{{$gt:"{}"}}}}'
+ '--query', '{{"start_date":{{$gt:"{}"}}}}'
.format(past_time)]).splitlines()
mongo_data = []
@@ -392,7 +416,7 @@ def get_mongo_data(days):
return mongo_data
-def publish_difference(mongo_data, elastic_data, output_destination, es_user, es_passwd):
+def publish_difference(mongo_data, elastic_data, output_destination, es_creds):
for elastic_entry in elastic_data:
if elastic_entry in mongo_data:
mongo_data.remove(elastic_entry)
@@ -400,7 +424,7 @@ def publish_difference(mongo_data, elastic_data, output_destination, es_user, es
logger.info('number of parsed test results: {}'.format(len(mongo_data)))
for parsed_test_result in mongo_data:
- shared_utils.publish_json(parsed_test_result, es_user, es_passwd, output_destination)
+ shared_utils.publish_json(parsed_test_result, es_creds, output_destination)
if __name__ == '__main__':
@@ -418,42 +442,35 @@ if __name__ == '__main__':
parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
help='the url of elasticsearch, defaults to http://localhost:9200')
- parser.add_argument('-u', '--elasticsearch-username',
- help='the username for elasticsearch')
-
- parser.add_argument('-p', '--elasticsearch-password',
- help='the password for elasticsearch')
-
- parser.add_argument('-m', '--mongodb-url', default='http://localhost:8082',
- help='the url of mongodb, defaults to http://localhost:8082')
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username and password for elasticsearch, in the format username:password')
args = parser.parse_args()
- base_elastic_url = urlparse.urljoin(args.elasticsearch_url, '/results/mongo2elastic')
+ base_elastic_url = urlparse.urljoin(args.elasticsearch_url, '/test_results/mongo2elastic')
output_destination = args.output_destination
days = args.merge_latest
- es_user = args.elasticsearch_username
- es_passwd = args.elasticsearch_password
+ es_creds = args.elasticsearch_username
if output_destination == 'elasticsearch':
output_destination = base_elastic_url
# parsed_test_results will be printed/sent to elasticsearch
if days == 0:
- # TODO get everything from mongo
publish_mongo_data(output_destination)
elif days > 0:
body = '''{{
"query" : {{
"range" : {{
- "creation_date" : {{
+ "start_date" : {{
"gte" : "now-{}d"
}}
}}
}}
}}'''.format(days)
- elastic_data = shared_utils.get_elastic_data(base_elastic_url, es_user, es_passwd, body)
+ elastic_data = shared_utils.get_elastic_data(base_elastic_url, es_creds, body)
logger.info('number of hits in elasticsearch for now-{}d: {}'.format(days, len(elastic_data)))
mongo_data = get_mongo_data(days)
- publish_difference(mongo_data, elastic_data, output_destination, es_user, es_passwd)
+ publish_difference(mongo_data, elastic_data, output_destination, es_creds)
else:
raise Exception('Update must be non-negative')
+
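The new swap-or-add rule in verify_mongo_entry() exists because older mongo documents carry 'version' where newer ones carry 'scenario'. A condensed sketch of just that fallback (not the full verifier):

def backfill_scenario(testcase):
    # if 'scenario' is absent or None, fall back to the legacy 'version' field
    if testcase.get('scenario') is None:
        testcase['scenario'] = testcase['version']
    return testcase

print(backfill_scenario({'version': 'brahmaputra', 'scenario': None}))
# -> {'version': 'brahmaputra', 'scenario': 'brahmaputra'}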
diff --git a/utils/test/scripts/shared_utils.py b/utils/test/scripts/shared_utils.py
index 899f844ec..91ce38e45 100644
--- a/utils/test/scripts/shared_utils.py
+++ b/utils/test/scripts/shared_utils.py
@@ -3,17 +3,17 @@ import json
http = urllib3.PoolManager()
-def delete_request(url, username, password, body=None):
- headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+def delete_request(url, creds, body=None):
+ headers = urllib3.make_headers(basic_auth=creds)
http.request('DELETE', url, headers=headers, body=body)
-def publish_json(json_ojb, username, password, output_destination):
+def publish_json(json_ojb, creds, output_destination):
json_dump = json.dumps(json_ojb)
if output_destination == 'stdout':
print json_dump
else:
- headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+ headers = urllib3.make_headers(basic_auth=creds)
http.request('POST', output_destination, headers=headers, body=json_dump)
@@ -21,9 +21,9 @@ def _get_nr_of_hits(elastic_json):
return elastic_json['hits']['total']
-def get_elastic_data(elastic_url, username, password, body, field='_source'):
+def get_elastic_data(elastic_url, creds, body, field='_source'):
# 1. get the number of results
- headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+ headers = urllib3.make_headers(basic_auth=creds)
elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size=0', headers=headers, body=body).data)
nr_of_hits = _get_nr_of_hits(elastic_json)
@@ -34,3 +34,4 @@ def get_elastic_data(elastic_url, username, password, body, field='_source'):
for hit in elastic_json['hits']['hits']:
elastic_data.append(hit[field])
return elastic_data
+
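All three scripts now rely on urllib3.make_headers() accepting the combined 'username:password' string directly and degrading gracefully when no credentials are given:

import urllib3

print(urllib3.make_headers(basic_auth='admin:secret'))  # hypothetical creds
# {'authorization': 'Basic YWRtaW46c2VjcmV0'}
print(urllib3.make_headers())
# {} -- so es_creds=None simply means unauthenticated requests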