-rw-r--r--  prototypes/bifrost/README.md                                        |  10
-rw-r--r--  prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml           |  17
-rwxr-xr-x  prototypes/bifrost/scripts/destroy-env.sh                           |   2
-rwxr-xr-x  prototypes/bifrost/scripts/test-bifrost-deployment.sh               |   7
-rw-r--r--  prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp     |   8
-rw-r--r--  prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp  |   8
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py                   |   2
-rwxr-xr-x  utils/test/reporting/functest/reporting-tempest.py                  |   4
-rwxr-xr-x  utils/test/reporting/functest/reporting-vims.py                     |   4
-rw-r--r--  utils/test/reporting/functest/reportingConf.py                      |   2
-rw-r--r--  utils/test/reporting/functest/reportingUtils.py                     |   4
-rw-r--r--  utils/test/reporting/yardstick/reporting-status.py                  |   2
-rw-r--r--  utils/test/reporting/yardstick/reportingConf.py                     |   2
-rw-r--r--  utils/test/scripts/conf_utils.py                                    |  20
-rw-r--r--  utils/test/scripts/create_kibana_dashboards.py                      |   7
-rw-r--r--  utils/test/scripts/mongo2elastic_format.py                          | 179
-rw-r--r--  utils/test/scripts/mongo_to_elasticsearch.py                        | 284
-rw-r--r--  utils/test/scripts/shared_utils.py                                  |   5
-rw-r--r--  utils/test/scripts/testcases.yaml                                   |   9
19 files changed, 266 insertions(+), 310 deletions(-)
diff --git a/prototypes/bifrost/README.md b/prototypes/bifrost/README.md
index f50ffb217..4bba0dbc0 100644
--- a/prototypes/bifrost/README.md
+++ b/prototypes/bifrost/README.md
@@ -21,12 +21,14 @@ Please follow that steps:
cp -R /opt/releng/prototypes/bifrost/* /opt/bifrost/
-5. Run destroy script if you need to cleanup previous environment::
+5. If you are on a RHEL/CentOS box, ensure that SELinux is disabled
+
+6. Run destroy script if you need to cleanup previous environment::
cd /opt/bifrost
./scripts/destroy-env.sh
-6. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
+7. Run the deployment script to spin up 3 VMs with bifrost: jumphost, controller and compute::
cd /opt/bifrost
./scripts/test-bifrost-deployment.sh
@@ -39,10 +41,10 @@ It is likely that the script will show some errors due to timeout. Please ignore
And wait until all the vms are in **active** Provisioning State.
-7. Check the IPs assigned to each of the VMS. You can check it by looking at inventory:
+8. Check the IPs assigned to each of the VMs. You can check them by looking at the inventory:
cat /tmp/baremetal.csv
-8. You can enter into the vms with devuser login/pass:
+9. You can log into the VMs with the devuser login/pass:
ssh devuser@192.168.122.2
diff --git a/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml b/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml
index ba548b305..b4dffdccf 100644
--- a/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml
+++ b/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml
@@ -41,8 +41,21 @@
# NOTE(TheJulia): While the next step creates a ramdisk, some elements
# do not support ramdisk-image-create as they invoke steps to cleanup
# the ramdisk which causes ramdisk-image-create to believe it failed.
- - { role: bifrost-create-dib-image, dib_imagename: "{{ http_boot_folder }}/ipa", build_ramdisk: false, dib_os_element: "{{ ipa_dib_os_element|default('debian') }}", dib_os_release: "jessie", dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}", when: create_ipa_image | bool == true }
- - { role: bifrost-create-dib-image, dib_imagetype: "qcow2", dib_imagename: "{{deploy_image}}", dib_os_element: "ubuntu-minimal", dib_os_release: "trusty", dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements|default('') }}", dib_packages: "openssh-server,vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl", when: create_image_via_dib | bool == true and transform_boot_image | bool == false }
+ - role: bifrost-create-dib-image
+ dib_imagename: "{{ http_boot_folder }}/ipa"
+ build_ramdisk: false
+ dib_os_element: "{{ ipa_dib_os_element|default('debian') }}"
+ dib_os_release: "jessie"
+ dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
+ when: create_ipa_image | bool == true
+ - role: bifrost-create-dib-image
+ dib_imagetype: "qcow2"
+ dib_imagename: "{{deploy_image}}"
+ dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}"
+ dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}"
+ dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements|default('') }}"
+ dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
+ when: create_image_via_dib | bool == true and transform_boot_image | bool == false
environment:
http_proxy: "{{ lookup('env','http_proxy') }}"
https_proxy: "{{ lookup('env','https_proxy') }}"
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index 4dffee62a..f092a658a 100755
--- a/prototypes/bifrost/scripts/destroy-env.sh
+++ b/prototypes/bifrost/scripts/destroy-env.sh
@@ -27,6 +27,8 @@ echo "removing logs"
rm -rf /var/log/libvirt/baremetal_logs/*.log
# clean up dib images only if requested explicitly
+CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
+
if [ $CLEAN_DIB_IMAGES = "true" ]; then
rm -rf /httpboot/*
rm -rf /tftpboot/*
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
index d796f3509..fb49afc42 100755
--- a/prototypes/bifrost/scripts/test-bifrost-deployment.sh
+++ b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
@@ -57,6 +57,13 @@ export ELEMENTS_PATH=/usr/share/diskimage-builder/elements:/opt/puppet-infraclou
export DIB_DEV_USER_PWDLESS_SUDO=yes
export DIB_DEV_USER_PASSWORD=devuser
+# settings for distro: trusty/ubuntu-minimal, 7/centos-minimal
+export DIB_OS_RELEASE=${DIB_OS_RELEASE:-trusty}
+export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
+
+# for centos 7: "openssh-server,vim,less,bridge-utils,iputils,rsyslog,curl"
+export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"openssh-server,vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl"}
+
# Source Ansible
# NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep
# under the hood which -x will detect, so for this step, we need to suspend
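Note: the new DIB_OS_RELEASE/DIB_OS_ELEMENT/DIB_OS_PACKAGES exports use the shell's ${VAR:-default} pattern, so the distro can be switched from the environment without editing the script, and the playbook above consumes the same variables through lookup('env', ...). A minimal Python sketch of that default-with-override behaviour (hypothetical helper, not part of this change):

    import os

    # Mirror of bash's ${VAR:-default}: use the environment value if set and
    # non-empty, otherwise fall back to the trusty/ubuntu-minimal defaults above.
    DIB_DEFAULTS = {
        'DIB_OS_RELEASE': 'trusty',
        'DIB_OS_ELEMENT': 'ubuntu-minimal',
        'DIB_OS_PACKAGES': ('openssh-server,vlan,vim,less,bridge-utils,'
                            'language-pack-en,iputils-ping,rsyslog,curl'),
    }

    def dib_setting(name):
        return os.environ.get(name) or DIB_DEFAULTS[name]

    # e.g. export DIB_OS_RELEASE=7 DIB_OS_ELEMENT=centos-minimal before running
    # test-bifrost-deployment.sh to build a CentOS 7 deploy image instead.
    print(dib_setting('DIB_OS_ELEMENT'))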
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
index 77908c0b8..ca548a5d5 100644
--- a/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
+++ b/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
@@ -8,14 +8,6 @@ class opnfv::compute (
$controller_public_address,
$virt_type = 'kvm',
) {
- # disable selinux if needed
- if $::osfamily == 'RedHat' {
- class { 'selinux':
- mode => 'permissive',
- before => Class['::infracloud::compute'],
- }
- }
-
class { '::infracloud::compute':
nova_rabbit_password => $nova_rabbit_password,
neutron_rabbit_password => $neutron_rabbit_password,
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
index 4bae42cf7..7522692c1 100644
--- a/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
+++ b/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
@@ -30,14 +30,6 @@ class opnfv::controller (
$opnfv_password,
$opnfv_email = 'opnfvuser@gmail.com',
) {
- # disable selinux if needed
- if $::osfamily == 'RedHat' {
- class { 'selinux':
- mode => 'permissive',
- before => Class['::infracloud::controller'],
- }
- }
-
class { '::infracloud::controller':
keystone_rabbit_password => $keystone_rabbit_password,
neutron_rabbit_password => $neutron_rabbit_password,
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 7c943d8b3..e9e167d16 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -195,7 +195,7 @@ for version in conf.versions:
logger.info("--------------------------")
templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
- templateEnv = jinja2.Environment(loader=templateLoader)
+ templateEnv = jinja2.Environment(loader=templateLoader, autoescape=True)
TEMPLATE_FILE = "/template/index-status-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
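Note: with autoescape enabled, values interpolated into the HTML reports are escaped by Jinja2 instead of inserted verbatim. A minimal sketch of the effect (hypothetical template string, not the repo's index-status template):

    import jinja2

    env = jinja2.Environment(autoescape=True)
    template = env.from_string("<td>{{ scenario }}</td>")
    # Markup coming from test results is escaped rather than rendered:
    print(template.render(scenario="<script>alert(1)</script>"))
    # -> <td>&lt;script&gt;alert(1)&lt;/script&gt;</td>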
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index 0dc1dd343..363f123cf 100755
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -28,7 +28,7 @@ logger.info("success rate > %s " % criteria_success_rate)
for version in conf.versions:
for installer in conf.installers:
# we consider the Tempest results of the last PERIOD days
- url = conf.URL_BASE + "?case=tempest_smoke_serial"
+ url = 'http://' + conf.URL_BASE + "?case=tempest_smoke_serial"
request = Request(url + '&period=' + str(PERIOD) +
'&installer=' + installer +
'&version=' + version)
@@ -116,7 +116,7 @@ for version in conf.versions:
logger.error("Error field not present (Brahamputra runs?)")
templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
- templateEnv = jinja2.Environment(loader=templateLoader)
+ templateEnv = jinja2.Environment(loader=templateLoader, autoescape=True)
TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index a83d92f0a..430a5453c 100755
--- a/utils/test/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -39,7 +39,7 @@ for version in conf.versions:
for installer in installers:
logger.info("Search vIMS results for installer: %s, version: %s"
% (installer, version))
- request = Request(conf.URL_BASE + '?case=vims&installer=' +
+ request = Request("http://" + conf.URL_BASE + '?case=vims&installer=' +
installer + '&version=' + version)
try:
@@ -102,7 +102,7 @@ for version in conf.versions:
logger.debug("----------------------------------------")
templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
- templateEnv = jinja2.Environment(loader=templateLoader)
+ templateEnv = jinja2.Environment(loader=templateLoader, autoescape=True)
TEMPLATE_FILE = "/template/index-vims-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index 9230cb286..b0e4cf7a1 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -21,7 +21,7 @@ MAX_SCENARIO_CRITERIA = 50
NB_TESTS = 5
# REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
REPORTING_PATH = "."
-URL_BASE = 'http://testresults.opnfv.org/test/api/v1/results'
+URL_BASE = 'testresults.opnfv.org/test/api/v1/results'
TEST_CONF = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
LOG_LEVEL = "ERROR"
LOG_FILE = REPORTING_PATH + "/reporting.log"
diff --git a/utils/test/reporting/functest/reportingUtils.py b/utils/test/reporting/functest/reportingUtils.py
index 5051ffa95..f02620430 100644
--- a/utils/test/reporting/functest/reportingUtils.py
+++ b/utils/test/reporting/functest/reportingUtils.py
@@ -37,7 +37,7 @@ def getApiResults(case, installer, scenario, version):
# urllib2.install_opener(opener)
# url = "http://127.0.0.1:8000/results?case=" + case + \
# "&period=30&installer=" + installer
- url = (conf.URL_BASE + "?case=" + case +
+ url = ("http://" + conf.URL_BASE + "?case=" + case +
"&period=" + str(conf.PERIOD) + "&installer=" + installer +
"&scenario=" + scenario + "&version=" + version +
"&last=" + str(conf.NB_TESTS))
@@ -56,7 +56,7 @@ def getApiResults(case, installer, scenario, version):
def getScenarios(case, installer, version):
case = case.getName()
- url = (conf.URL_BASE + "?case=" + case +
+ url = ("http://" + conf.URL_BASE + "?case=" + case +
"&period=" + str(conf.PERIOD) + "&installer=" + installer +
"&version=" + version)
request = Request(url)
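Note: with the scheme stripped from URL_BASE in reportingConf.py, every caller now prepends "http://" when composing its query. A minimal sketch of the resulting request URL, using the same parameters as getApiResults above (the scenario value is a made-up example):

    URL_BASE = 'testresults.opnfv.org/test/api/v1/results'

    def build_results_url(case, installer, scenario, version, period, last):
        return ("http://" + URL_BASE + "?case=" + case +
                "&period=" + str(period) + "&installer=" + installer +
                "&scenario=" + scenario + "&version=" + version +
                "&last=" + str(last))

    print(build_results_url("tempest_smoke_serial", "apex",
                            "os-nosdn-nofeature-ha", "master", 10, 5))
    # -> http://testresults.opnfv.org/test/api/v1/results?case=tempest_smoke_serial
    #    &period=10&installer=apex&scenario=os-nosdn-nofeature-ha&version=master&last=5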
diff --git a/utils/test/reporting/yardstick/reporting-status.py b/utils/test/reporting/yardstick/reporting-status.py
index ed5dab044..546bf08c4 100644
--- a/utils/test/reporting/yardstick/reporting-status.py
+++ b/utils/test/reporting/yardstick/reporting-status.py
@@ -59,7 +59,7 @@ for version in conf.versions:
logger.info("--------------------------")
templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
- templateEnv = jinja2.Environment(loader=templateLoader)
+ templateEnv = jinja2.Environment(loader=templateLoader, autoescape=True)
TEMPLATE_FILE = "/template/index-status-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
diff --git a/utils/test/reporting/yardstick/reportingConf.py b/utils/test/reporting/yardstick/reportingConf.py
index 9e34034e2..447b428a8 100644
--- a/utils/test/reporting/yardstick/reportingConf.py
+++ b/utils/test/reporting/yardstick/reportingConf.py
@@ -11,7 +11,7 @@
# ****************************************************
installers = ["apex", "compass", "fuel", "joid"]
-versions = ["master", "stable/colorado"]
+versions = ["master", "colorado"]
# get data in the past 7 days
PERIOD = 7
diff --git a/utils/test/scripts/conf_utils.py b/utils/test/scripts/conf_utils.py
new file mode 100644
index 000000000..e35d5ed10
--- /dev/null
+++ b/utils/test/scripts/conf_utils.py
@@ -0,0 +1,20 @@
+import yaml
+
+
+with open('./testcases.yaml') as f:
+ testcases_yaml = yaml.safe_load(f)
+f.close()
+
+
+def get_format(project, case):
+ testcases = testcases_yaml.get(project)
+ if isinstance(testcases, list):
+ for case_dict in testcases:
+ if case_dict['name'] == case:
+ return 'format_' + case_dict['format'].strip()
+ return None
+
+
+if __name__ == '__main__':
+ fmt = get_format('functest', 'vping_ssh')
+ print fmt
\ No newline at end of file
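Note: conf_utils.get_format() maps a project/case pair to a formatter name derived from the new per-case "format" field in testcases.yaml (see the end of this change). A short usage sketch, run from utils/test/scripts so that ./testcases.yaml resolves:

    import conf_utils

    # Known case -> formatter name built from the yaml 'format' field
    print(conf_utils.get_format('functest', 'rally_sanity'))   # -> 'format_rally'
    # Unknown project or case -> None, so the caller can skip the record
    print(conf_utils.get_format('functest', 'no_such_case'))   # -> None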
diff --git a/utils/test/scripts/create_kibana_dashboards.py b/utils/test/scripts/create_kibana_dashboards.py
index abb9471ac..5897a7e79 100644
--- a/utils/test/scripts/create_kibana_dashboards.py
+++ b/utils/test/scripts/create_kibana_dashboards.py
@@ -4,8 +4,8 @@ import logging
import urlparse
import argparse
-import yaml
+import conf_utils
import shared_utils
logger = logging.getLogger('create_kibana_dashboards')
@@ -307,10 +307,7 @@ def construct_dashboards():
:return: list of KibanaDashboards
"""
kibana_dashboards = []
- with open('./testcases.yaml') as f:
- testcases_yaml = yaml.safe_load(f)
-
- for project, case_dicts in testcases_yaml.items():
+ for project, case_dicts in conf_utils.testcases_yaml.items():
for case in case_dicts:
case_name = case.get('name')
visualizations = case.get('visualizations')
diff --git a/utils/test/scripts/mongo2elastic_format.py b/utils/test/scripts/mongo2elastic_format.py
new file mode 100644
index 000000000..0b036e3ff
--- /dev/null
+++ b/utils/test/scripts/mongo2elastic_format.py
@@ -0,0 +1,179 @@
+#! /usr/bin/env python
+
+
+def _convert_value(value):
+ return value if value != '' else 0
+
+
+def _convert_duration(duration):
+ if (isinstance(duration, str) or isinstance(duration, unicode)) and ':' in duration:
+ hours, minutes, seconds = duration.split(":")
+ hours = _convert_value(hours)
+ minutes = _convert_value(minutes)
+ seconds = _convert_value(seconds)
+ int_duration = 3600 * int(hours) + 60 * int(minutes) + float(seconds)
+ else:
+ int_duration = duration
+ return int_duration
+
+
+def format_normal(testcase):
+ """
+ Look for these and leave any of those:
+ details.duration
+ details.tests
+ details.failures
+
+ If none are present, then return False
+ """
+ found = False
+ testcase_details = testcase['details']
+ fields = ['duration', 'tests', 'failures']
+ if isinstance(testcase_details, dict):
+ for key, value in testcase_details.items():
+ if key in fields:
+ found = True
+ if key == 'duration':
+ testcase_details[key] = _convert_duration(value)
+ else:
+ del testcase_details[key]
+
+ if 'tests' in testcase_details and 'failures' in testcase_details:
+ testcase_tests = float(testcase_details['tests'])
+ testcase_failures = float(testcase_details['failures'])
+ if testcase_tests != 0:
+ testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests
+ else:
+ testcase_details['success_percentage'] = 0
+
+
+ return found
+
+
+def format_rally(testcase):
+ """
+ Structure:
+ details.[{summary.duration}]
+ details.[{summary.nb success}]
+ details.[{summary.nb tests}]
+
+ Find data for these fields
+ -> details.duration
+ -> details.tests
+ -> details.success_percentage
+ """
+ summary = testcase['details']['summary']
+
+ testcase['details'] = {
+ 'duration': summary['duration'],
+ 'tests': summary['nb tests'],
+ 'success_percentage': summary['nb success']
+ }
+ return True
+
+
+def _get_statistics(orig_data, stat_fields, stat_values=None):
+ test_results = {}
+ for stat_data in orig_data:
+ for field in stat_fields:
+ stat_value = stat_data[field]
+ if stat_value in test_results:
+ test_results[stat_value] += 1
+ else:
+ test_results[stat_value] = 1
+
+ if stat_values is not None:
+ for stat_value in stat_values:
+ if stat_value not in test_results:
+ test_results[stat_value] = 0
+
+ return test_results
+
+
+def format_onos(testcase):
+ """
+ Structure:
+ details.FUNCvirNet.duration
+ details.FUNCvirNet.status.[{Case result}]
+ details.FUNCvirNetL3.duration
+ details.FUNCvirNetL3.status.[{Case result}]
+
+ Find data for these fields
+ -> details.FUNCvirNet.duration
+ -> details.FUNCvirNet.tests
+ -> details.FUNCvirNet.failures
+ -> details.FUNCvirNetL3.duration
+ -> details.FUNCvirNetL3.tests
+ -> details.FUNCvirNetL3.failures
+ """
+ testcase_details = testcase['details']
+
+ if 'FUNCvirNet' not in testcase_details or 'FUNCvirNetL3' not in testcase_details:
+ return False
+
+ funcvirnet_details = testcase_details['FUNCvirNet']['status']
+ funcvirnet_stats = _get_statistics(funcvirnet_details, ('Case result',), ('PASS', 'FAIL'))
+ funcvirnet_passed = funcvirnet_stats['PASS']
+ funcvirnet_failed = funcvirnet_stats['FAIL']
+ funcvirnet_all = funcvirnet_passed + funcvirnet_failed
+
+ funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status']
+ funcvirnetl3_stats = _get_statistics(funcvirnetl3_details, ('Case result',), ('PASS', 'FAIL'))
+ funcvirnetl3_passed = funcvirnetl3_stats['PASS']
+ funcvirnetl3_failed = funcvirnetl3_stats['FAIL']
+ funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed
+
+ testcase_details['FUNCvirNet'] = {
+ 'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']),
+ 'tests': funcvirnet_all,
+ 'failures': funcvirnet_failed
+ }
+ testcase_details['FUNCvirNetL3'] = {
+ 'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']),
+ 'tests': funcvirnetl3_all,
+ 'failures': funcvirnetl3_failed
+ }
+ return True
+
+
+def format_vims(testcase):
+ """
+ Structure:
+ details.sig_test.result.[{result}]
+ details.sig_test.duration
+ details.vIMS.duration
+ details.orchestrator.duration
+
+ Find data for these fields
+ -> details.sig_test.duration
+ -> details.sig_test.tests
+ -> details.sig_test.failures
+ -> details.sig_test.passed
+ -> details.sig_test.skipped
+ -> details.vIMS.duration
+ -> details.orchestrator.duration
+ """
+ testcase_details = testcase['details']
+ test_results = _get_statistics(testcase_details['sig_test']['result'],
+ ('result',),
+ ('Passed', 'Skipped', 'Failed'))
+ passed = test_results['Passed']
+ skipped = test_results['Skipped']
+ failures = test_results['Failed']
+ all_tests = passed + skipped + failures
+ testcase['details'] = {
+ 'sig_test': {
+ 'duration': testcase_details['sig_test']['duration'],
+ 'tests': all_tests,
+ 'failures': failures,
+ 'passed': passed,
+ 'skipped': skipped
+ },
+ 'vIMS': {
+ 'duration': testcase_details['vIMS']['duration']
+ },
+ 'orchestrator': {
+ 'duration': testcase_details['orchestrator']['duration']
+ }
+ }
+ return True
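Note: as a worked example of the default formatter, format_normal() keeps only duration/tests/failures, converts an "H:MM:SS" duration to seconds and derives a success percentage. A minimal sketch with a made-up record (these scripts are Python 2):

    import mongo2elastic_format

    testcase = {
        'details': {
            'duration': '0:05:30',        # converted to 330.0 seconds
            'tests': 150,
            'failures': 3,
            'build_tag': 'whatever',      # hypothetical extra key, dropped
        }
    }
    print(mongo2elastic_format.format_normal(testcase))   # -> True
    print(testcase['details'])
    # -> {'duration': 330.0, 'tests': 150, 'failures': 3,
    #     'success_percentage': 98.0}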
diff --git a/utils/test/scripts/mongo_to_elasticsearch.py b/utils/test/scripts/mongo_to_elasticsearch.py
index ded58ef4c..6799574f5 100644
--- a/utils/test/scripts/mongo_to_elasticsearch.py
+++ b/utils/test/scripts/mongo_to_elasticsearch.py
@@ -10,7 +10,9 @@ import uuid
import argparse
+import conf_utils
import shared_utils
+import mongo2elastic_format
logger = logging.getLogger('mongo_to_elasticsearch')
logger.setLevel(logging.DEBUG)
@@ -19,259 +21,6 @@ file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(messag
logger.addHandler(file_handler)
-def _get_dicts_from_list(testcase, dict_list, keys):
- dicts = []
- for dictionary in dict_list:
- # iterate over dictionaries in input list
- if not isinstance(dictionary, dict):
- logger.info("Skipping non-dict details testcase '{}'".format(testcase))
- continue
- if keys == set(dictionary.keys()):
- # check the dictionary structure
- dicts.append(dictionary)
- return dicts
-
-
-def _get_results_from_list_of_dicts(list_of_dict_statuses, dict_indexes, expected_results=None):
- test_results = {}
- for test_status in list_of_dict_statuses:
- status = test_status
- for index in dict_indexes:
- status = status[index]
- if status in test_results:
- test_results[status] += 1
- else:
- test_results[status] = 1
-
- if expected_results is not None:
- for expected_result in expected_results:
- if expected_result not in test_results:
- test_results[expected_result] = 0
-
- return test_results
-
-
-def _convert_value(value):
- return value if value != '' else 0
-
-
-def _convert_duration(duration):
- if (isinstance(duration, str) or isinstance(duration, unicode)) and ':' in duration:
- hours, minutes, seconds = duration.split(":")
- hours = _convert_value(hours)
- minutes = _convert_value(minutes)
- seconds = _convert_value(seconds)
- int_duration = 3600 * int(hours) + 60 * int(minutes) + float(seconds)
- else:
- int_duration = duration
- return int_duration
-
-
-def modify_functest_tempest(testcase):
- if modify_default_entry(testcase):
- testcase_details = testcase['details']
- testcase_tests = float(testcase_details['tests'])
- testcase_failures = float(testcase_details['failures'])
- if testcase_tests != 0:
- testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests
- else:
- testcase_details['success_percentage'] = 0
- return True
- else:
- return False
-
-
-def modify_functest_vims(testcase):
- """
- Structure:
- details.sig_test.result.[{result}]
- details.sig_test.duration
- details.vIMS.duration
- details.orchestrator.duration
-
- Find data for these fields
- -> details.sig_test.duration
- -> details.sig_test.tests
- -> details.sig_test.failures
- -> details.sig_test.passed
- -> details.sig_test.skipped
- -> details.vIMS.duration
- -> details.orchestrator.duration
- """
- testcase_details = testcase['details']
- sig_test_results = _get_dicts_from_list(testcase, testcase_details['sig_test']['result'],
- {'duration', 'result', 'name', 'error'})
- if len(sig_test_results) < 1:
- logger.info("No 'result' from 'sig_test' found in vIMS details, skipping")
- return False
- else:
- test_results = _get_results_from_list_of_dicts(sig_test_results, ('result',), ('Passed', 'Skipped', 'Failed'))
- passed = test_results['Passed']
- skipped = test_results['Skipped']
- failures = test_results['Failed']
- all_tests = passed + skipped + failures
- testcase['details'] = {
- 'sig_test': {
- 'duration': testcase_details['sig_test']['duration'],
- 'tests': all_tests,
- 'failures': failures,
- 'passed': passed,
- 'skipped': skipped
- },
- 'vIMS': {
- 'duration': testcase_details['vIMS']['duration']
- },
- 'orchestrator': {
- 'duration': testcase_details['orchestrator']['duration']
- }
- }
- return True
-
-
-def modify_functest_onos(testcase):
- """
- Structure:
- details.FUNCvirNet.duration
- details.FUNCvirNet.status.[{Case result}]
- details.FUNCvirNetL3.duration
- details.FUNCvirNetL3.status.[{Case result}]
-
- Find data for these fields
- -> details.FUNCvirNet.duration
- -> details.FUNCvirNet.tests
- -> details.FUNCvirNet.failures
- -> details.FUNCvirNetL3.duration
- -> details.FUNCvirNetL3.tests
- -> details.FUNCvirNetL3.failures
- """
- testcase_details = testcase['details']
-
- if 'FUNCvirNet' not in testcase_details:
- return modify_default_entry(testcase)
-
- funcvirnet_details = testcase_details['FUNCvirNet']['status']
- funcvirnet_statuses = _get_dicts_from_list(testcase, funcvirnet_details, {'Case result', 'Case name:'})
-
- funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status']
- funcvirnetl3_statuses = _get_dicts_from_list(testcase, funcvirnetl3_details, {'Case result', 'Case name:'})
-
- if len(funcvirnet_statuses) < 0:
- logger.info("No results found in 'FUNCvirNet' part of ONOS results")
- return False
- elif len(funcvirnetl3_statuses) < 0:
- logger.info("No results found in 'FUNCvirNetL3' part of ONOS results")
- return False
- else:
- funcvirnet_results = _get_results_from_list_of_dicts(funcvirnet_statuses,
- ('Case result',), ('PASS', 'FAIL'))
- funcvirnetl3_results = _get_results_from_list_of_dicts(funcvirnetl3_statuses,
- ('Case result',), ('PASS', 'FAIL'))
-
- funcvirnet_passed = funcvirnet_results['PASS']
- funcvirnet_failed = funcvirnet_results['FAIL']
- funcvirnet_all = funcvirnet_passed + funcvirnet_failed
-
- funcvirnetl3_passed = funcvirnetl3_results['PASS']
- funcvirnetl3_failed = funcvirnetl3_results['FAIL']
- funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed
-
- testcase_details['FUNCvirNet'] = {
- 'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']),
- 'tests': funcvirnet_all,
- 'failures': funcvirnet_failed
- }
-
- testcase_details['FUNCvirNetL3'] = {
- 'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']),
- 'tests': funcvirnetl3_all,
- 'failures': funcvirnetl3_failed
- }
-
- return True
-
-
-def modify_functest_rally(testcase):
- """
- Structure:
- details.[{summary.duration}]
- details.[{summary.nb success}]
- details.[{summary.nb tests}]
-
- Find data for these fields
- -> details.duration
- -> details.tests
- -> details.success_percentage
- """
- summaries = _get_dicts_from_list(testcase, testcase['details'], {'summary'})
-
- if len(summaries) != 1:
- logger.info("Found zero or more than one 'summaries' in Rally details, skipping")
- return False
- else:
- summary = summaries[0]['summary']
- testcase['details'] = {
- 'duration': summary['duration'],
- 'tests': summary['nb tests'],
- 'success_percentage': summary['nb success']
- }
- return True
-
-
-def modify_functest_odl(testcase):
- """
- Structure:
- details.details.[{test_status.@status}]
-
- Find data for these fields
- -> details.tests
- -> details.failures
- -> details.success_percentage?
- """
- test_statuses = _get_dicts_from_list(testcase, testcase['details']['details'],
- {'test_status', 'test_doc', 'test_name'})
- if len(test_statuses) < 1:
- logger.info("No 'test_status' found in ODL details, skipping")
- return False
- else:
- test_results = _get_results_from_list_of_dicts(test_statuses, ('test_status', '@status'), ('PASS', 'FAIL'))
-
- passed_tests = test_results['PASS']
- failed_tests = test_results['FAIL']
- all_tests = passed_tests + failed_tests
-
- testcase['details'] = {
- 'tests': all_tests,
- 'failures': failed_tests,
- 'success_percentage': 100 * passed_tests / float(all_tests)
- }
- logger.debug("Modified odl testcase: '{}'".format(json.dumps(testcase, indent=2)))
- return True
-
-
-def modify_default_entry(testcase):
- """
- Look for these and leave any of those:
- details.duration
- details.tests
- details.failures
-
- If none are present, then return False
- """
- found = False
- testcase_details = testcase['details']
- fields = ['duration', 'tests', 'failures']
- if isinstance(testcase_details, dict):
- for key, value in testcase_details.items():
- if key in fields:
- found = True
- if key == 'duration':
- testcase_details[key] = _convert_duration(value)
- else:
- del testcase_details[key]
-
- return found
-
-
def _fix_date(date_string):
if isinstance(date_string, dict):
return date_string['$date']
@@ -372,22 +121,13 @@ def modify_mongo_entry(testcase):
if verify_mongo_entry(testcase):
project = testcase['project_name']
case_name = testcase['case_name']
- logger.info("Processing mongo test case '{}'".format(case_name))
- try:
- if project == 'functest':
- if case_name == 'rally_sanity':
- return modify_functest_rally(testcase)
- elif case_name.lower() == 'odl':
- return modify_functest_odl(testcase)
- elif case_name.lower() == 'onos':
- return modify_functest_onos(testcase)
- elif case_name.lower() == 'vims':
- return modify_functest_vims(testcase)
- elif case_name == 'tempest_smoke_serial':
- return modify_functest_tempest(testcase)
- return modify_default_entry(testcase)
- except Exception:
- logger.error("Fail in modify testcase[%s]\nerror message: %s" % (testcase, traceback.format_exc()))
+ fmt = conf_utils.get_format(project, case_name)
+ if fmt:
+ try:
+ logger.info("Processing %s/%s using format %s" % (project, case_name, fmt))
+ return vars(mongo2elastic_format)[fmt](testcase)
+ except Exception:
+ logger.error("Fail in modify testcase[%s]\nerror message: %s" % (testcase, traceback.format_exc()))
else:
return False
@@ -395,8 +135,10 @@ def modify_mongo_entry(testcase):
def publish_mongo_data(output_destination):
tmp_filename = 'mongo-{}.log'.format(uuid.uuid4())
try:
- subprocess.check_call(['mongoexport', '--db', 'test_results_collection', '-c', 'results', '--out',
- tmp_filename])
+ subprocess.check_call(['mongoexport',
+ '--db', 'test_results_collection',
+ '-c', 'results',
+ '--out', tmp_filename])
with open(tmp_filename) as fobj:
for mongo_json_line in fobj:
test_result = json.loads(mongo_json_line)
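Note: modify_mongo_entry() now resolves the formatter dynamically: the name returned by conf_utils.get_format() is looked up in the mongo2elastic_format module via vars(mongo2elastic_format)[fmt]. A minimal sketch of that dispatch using the equivalent getattr form, with a made-up rally record:

    import conf_utils
    import mongo2elastic_format

    testcase = {'project_name': 'functest', 'case_name': 'rally_sanity',
                'details': {'summary': {'duration': 1200.5,
                                        'nb tests': 10,
                                        'nb success': 100.0}}}
    fmt = conf_utils.get_format(testcase['project_name'], testcase['case_name'])
    if fmt:                                              # 'format_rally'
        formatter = getattr(mongo2elastic_format, fmt)   # same as vars(...)[fmt]
        print(formatter(testcase))                       # -> True
        print(testcase['details'])
        # -> {'duration': 1200.5, 'tests': 10, 'success_percentage': 100.0}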
diff --git a/utils/test/scripts/shared_utils.py b/utils/test/scripts/shared_utils.py
index 8bbbdbe07..15c1af8a5 100644
--- a/utils/test/scripts/shared_utils.py
+++ b/utils/test/scripts/shared_utils.py
@@ -1,5 +1,7 @@
-import urllib3
import json
+
+import urllib3
+
http = urllib3.PoolManager()
@@ -36,4 +38,3 @@ def get_elastic_data(elastic_url, creds, body, field='_source'):
for hit in elastic_json['hits']['hits']:
elastic_data.append(hit[field])
return elastic_data
-
diff --git a/utils/test/scripts/testcases.yaml b/utils/test/scripts/testcases.yaml
index 12031ef5d..9c33d2e6b 100644
--- a/utils/test/scripts/testcases.yaml
+++ b/utils/test/scripts/testcases.yaml
@@ -1,6 +1,7 @@
functest:
-
name: tempest_smoke_serial
+ format: normal
test_family: VIM
visualizations:
-
@@ -19,6 +20,7 @@ functest:
-
name: rally_sanity
test_family: VIM
+ format: rally
visualizations:
-
name: duration
@@ -34,6 +36,7 @@ functest:
- field: details.success_percentage
-
name: vping_ssh
+ format: normal
test_family: VIM
visualizations:
-
@@ -42,6 +45,7 @@ functest:
- field: details.duration
-
name: vping_userdata
+ format: normal
test_family: VIM
visualizations:
-
@@ -51,6 +55,7 @@ functest:
-
name: odl
test_family: Controller
+ format: odl
visualizations:
-
name: tests_failures
@@ -63,6 +68,7 @@ functest:
- field: details.success_percentage
-
name: onos
+ format: onos
test_family: Controller
visualizations:
-
@@ -89,6 +95,7 @@ functest:
- field: details.FUNCvirNetL3.failures
-
name: vims
+ format: vims
test_family: Features
visualizations:
-
@@ -107,6 +114,7 @@ functest:
promise:
-
name: promise
+ format: normal
test_family: Features
visualizations:
-
@@ -122,6 +130,7 @@ doctor:
-
name: doctor-notification
test_family: Features
+ format: normal
visualizations:
-
name: duration