-rwxr-xr-x  functest/ci/prepare_env.py  71
-rwxr-xr-x  functest/ci/run_tests.py  78
-rwxr-xr-x  functest/ci/testcases.yaml  27
-rw-r--r--  functest/core/testcase_base.py  47
-rwxr-xr-x  functest/opnfv_tests/features/multisite.py  22
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py  9
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py  25
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py  67
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py  2
-rwxr-xr-x  functest/opnfv_tests/sdn/odl/odl.py  2
-rw-r--r-- [-rwxr-xr-x]  functest/tests/unit/core/test_testcase_base.py  8
-rw-r--r--  functest/tests/unit/utils/test_openstack_utils.py  3
-rw-r--r--  functest/utils/decorators.py  36
-rwxr-xr-x  functest/utils/functest_logger.py  58
-rw-r--r--  functest/utils/functest_utils.py  36
-rwxr-xr-x  functest/utils/openstack_utils.py  7
-rwxr-xr-x  run_unit_tests.sh  9
17 files changed, 235 insertions(+), 272 deletions(-)
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index cca9ac73..6b24fe08 100755
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -47,7 +47,8 @@ class PrepareEnvParser():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument("action", help="Possible actions are: "
- "'{d[0]}|{d[1]}' ".format(d=actions))
+ "'{d[0]}|{d[1]}' ".format(d=actions),
+ choices=actions)
self.parser.add_argument("-d", "--debug", help="Debug mode",
action="store_true")
@@ -140,14 +141,14 @@ def source_rc_file():
if CONST.INSTALLER_IP is None:
logger.error("The env variable CI_INSTALLER_IP must be provided in"
" order to fetch the credentials from the installer.")
- sys.exit("Missing CI_INSTALLER_IP.")
+ raise Exception("Missing CI_INSTALLER_IP.")
if CONST.INSTALLER_TYPE not in opnfv_constants.INSTALLERS:
logger.error("Cannot fetch credentials. INSTALLER_TYPE=%s is "
"not a valid OPNFV installer. Available "
"installers are : %s." %
(CONST.INSTALLER_TYPE,
opnfv_constants.INSTALLERS))
- sys.exit("Wrong INSTALLER_TYPE.")
+ raise Exception("Wrong INSTALLER_TYPE.")
cmd = ("/home/opnfv/repos/releng/utils/fetch_os_creds.sh "
"-d %s -i %s -a %s"
@@ -159,15 +160,12 @@ def source_rc_file():
output = p.communicate()[0]
logger.debug("\n%s" % output)
if p.returncode != 0:
- logger.error("Failed to fetch credentials from installer.")
- sys.exit(1)
+ raise Exception("Failed to fetch credentials from installer.")
else:
logger.info("RC file provided in %s."
% CONST.openstack_creds)
if os.path.getsize(CONST.openstack_creds) == 0:
- logger.error("The file %s is empty."
- % CONST.openstack_creds)
- sys.exit(1)
+ raise Exception("The file %s is empty." % CONST.openstack_creds)
logger.info("Sourcing the OpenStack RC file...")
os_utils.source_credentials(
@@ -211,7 +209,7 @@ def verify_deployment():
line = p.stdout.readline().rstrip()
if "ERROR" in line:
logger.error(line)
- sys.exit("Problem while running 'check_os.sh'.")
+ raise Exception("Problem while running 'check_os.sh'.")
logger.info(line)
@@ -270,46 +268,43 @@ def create_flavor():
def check_environment():
msg_not_active = "The Functest environment is not installed."
if not os.path.isfile(CONST.env_active):
- logger.error(msg_not_active)
- sys.exit(1)
+ raise Exception(msg_not_active)
with open(CONST.env_active, "r") as env_file:
s = env_file.read()
if not re.search("1", s):
- logger.error(msg_not_active)
- sys.exit(1)
+ raise Exception(msg_not_active)
logger.info("Functest environment is installed.")
def main(**kwargs):
- if not (kwargs['action'] in actions):
- logger.error('Argument not valid.')
- sys.exit()
-
- if kwargs['action'] == "start":
- logger.info("######### Preparing Functest environment #########\n")
- check_env_variables()
- create_directories()
- source_rc_file()
- patch_config_file()
- verify_deployment()
- install_rally()
- install_tempest()
- create_flavor()
-
- with open(CONST.env_active, "w") as env_file:
- env_file.write("1")
-
- check_environment()
-
- if kwargs['action'] == "check":
- check_environment()
-
- exit(0)
+ try:
+ if not (kwargs['action'] in actions):
+ logger.error('Argument not valid.')
+ return -1
+ elif kwargs['action'] == "start":
+ logger.info("######### Preparing Functest environment #########\n")
+ check_env_variables()
+ create_directories()
+ source_rc_file()
+ patch_config_file()
+ verify_deployment()
+ install_rally()
+ install_tempest()
+ create_flavor()
+ with open(CONST.env_active, "w") as env_file:
+ env_file.write("1")
+ check_environment()
+ elif kwargs['action'] == "check":
+ check_environment()
+ except Exception as e:
+ logger.error(e)
+ return -1
+ return 0
if __name__ == '__main__':
parser = PrepareEnvParser()
args = parser.parse_args(sys.argv[1:])
- main(**args)
+ sys.exit(main(**args))
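Because main() above now returns 0/-1 instead of calling sys.exit(), the module can be exercised directly, for example from a unit test. A minimal sketch under that assumption; the test module and mock target names are illustrative and not part of the patch:

import unittest

import mock

from functest.ci import prepare_env


class PrepareEnvReturnCodes(unittest.TestCase):

    def test_unknown_action_returns_error(self):
        # "foo" is not in prepare_env.actions, so main() logs an error
        # and returns -1 instead of exiting the interpreter.
        self.assertEqual(prepare_env.main(action="foo"), -1)

    @mock.patch('functest.ci.prepare_env.check_environment',
                side_effect=Exception("env not ready"))
    def test_check_failure_returns_error(self, mock_check):
        # Any exception raised by a helper is caught in main() and
        # converted into the -1 return code.
        self.assertEqual(prepare_env.main(action="check"), -1)


if __name__ == '__main__':
    unittest.main()
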
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 6a6516ab..93518de0 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -10,6 +10,7 @@
import argparse
import datetime
+import enum
import importlib
import os
import re
@@ -35,7 +36,16 @@ logger = ft_logger.Logger("run_tests").getLogger()
EXEC_SCRIPT = ("%s/functest/ci/exec_test.sh" % CONST.dir_repo_functest)
# This will be the return code of this script. If any of the tests fails,
-# this variable will change to -1
+# this variable will change to Result.EX_ERROR
+
+
+class Result(enum.Enum):
+ EX_OK = os.EX_OK
+ EX_ERROR = -1
+
+
+class BlockingTestFailed(Exception):
+ pass
class RunTestsParser():
@@ -60,7 +70,7 @@ class RunTestsParser():
class GlobalVariables:
EXECUTED_TEST_CASES = []
- OVERALL_RESULT = 0
+ OVERALL_RESULT = Result.EX_OK
CLEAN_FLAG = True
REPORT_FLAG = False
@@ -75,8 +85,7 @@ def print_separator(str, count=45):
def source_rc_file():
rc_file = CONST.openstack_creds
if not os.path.isfile(rc_file):
- logger.error("RC file %s does not exist..." % rc_file)
- sys.exit(1)
+ raise Exception("RC file %s does not exist..." % rc_file)
logger.debug("Sourcing the OpenStack RC file...")
os_utils.source_credentials(rc_file)
for key, value in os.environ.iteritems():
@@ -155,7 +164,7 @@ def run_test(test, tier_name, testcases=None):
result = test_case.run()
if result == testcase_base.TestcaseBase.EX_OK:
if GlobalVariables.REPORT_FLAG:
- test_case.publish_report()
+ test_case.push_to_db()
result = test_case.check_criteria()
except ImportError:
logger.exception("Cannot import module {}".format(
@@ -179,19 +188,16 @@ def run_test(test, tier_name, testcases=None):
if result != 0:
logger.error("The test case '%s' failed. " % test_name)
- GlobalVariables.OVERALL_RESULT = -1
+ GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
result_str = "FAIL"
if test.is_blocking():
if not testcases or testcases == "all":
- logger.info("This test case is blocking. Aborting overall "
- "execution.")
# if it is a single test we don't print the whole results table
update_test_info(test_name, result_str, duration_str)
generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
- logger.info("Execution exit value: %s" %
- GlobalVariables.OVERALL_RESULT)
- sys.exit(GlobalVariables.OVERALL_RESULT)
+ raise BlockingTestFailed("The test case {} failed and is blocking"
+ .format(test.get_name()))
update_test_info(test_name, result_str, duration_str)
@@ -246,33 +252,37 @@ def main(**kwargs):
if kwargs['report']:
GlobalVariables.REPORT_FLAG = True
- if kwargs['test']:
- source_rc_file()
- if _tiers.get_tier(kwargs['test']):
- run_tier(_tiers.get_tier(kwargs['test']))
-
- elif _tiers.get_test(kwargs['test']):
- run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier(kwargs['test']),
- kwargs['test'])
-
- elif kwargs['test'] == "all":
- run_all(_tiers)
-
+ try:
+ if kwargs['test']:
+ source_rc_file()
+ if _tiers.get_tier(kwargs['test']):
+ GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
+ [_tiers.get_tier(kwargs['test'])])
+ run_tier(_tiers.get_tier(kwargs['test']))
+ elif _tiers.get_test(kwargs['test']):
+ run_test(_tiers.get_test(kwargs['test']),
+ _tiers.get_tier(kwargs['test']),
+ kwargs['test'])
+ elif kwargs['test'] == "all":
+ run_all(_tiers)
+ else:
+ logger.error("Unknown test case or tier '%s', "
+ "or not supported by "
+ "the given scenario '%s'."
+ % (kwargs['test'], CI_SCENARIO))
+ logger.debug("Available tiers are:\n\n%s"
+ % _tiers)
+ return Result.EX_ERROR
else:
- logger.error("Unknown test case or tier '%s', or not supported by "
- "the given scenario '%s'."
- % (kwargs['test'], CI_SCENARIO))
- logger.debug("Available tiers are:\n\n%s"
- % _tiers)
- else:
- run_all(_tiers)
-
+ run_all(_tiers)
+ except Exception as e:
+ logger.error(e)
+ GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
- sys.exit(GlobalVariables.OVERALL_RESULT)
+ return GlobalVariables.OVERALL_RESULT
if __name__ == '__main__':
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
- main(**args)
+ sys.exit(main(**args).value)
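A standalone sketch of the Result pattern introduced above, using only the standard library (enum34 on Python 2): main() returns an enum member and the entry point exits with its .value, so os.EX_OK (0) still signals success to the shell while any other value marks a failure. Note that a POSIX shell reports -1 as 255.

import enum
import os
import sys


class Result(enum.Enum):
    EX_OK = os.EX_OK    # 0 on POSIX platforms
    EX_ERROR = -1


def main(fail=False):
    # Mirrors run_tests.main(): compute a Result instead of calling
    # sys.exit() from deep inside the control flow.
    return Result.EX_ERROR if fail else Result.EX_OK


if __name__ == '__main__':
    sys.exit(main(fail="--fail" in sys.argv).value)
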
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index e692c008..3ff503cb 100755
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -140,9 +140,9 @@ tiers:
criteria: 'success_rate == 100%'
blocking: true
description: >-
- Test Suite for the OpenDaylight SDN Controller. It integrates
- some test suites from upstream using Robot as the test
- framework.
+ Test Suite for the OpenDaylight SDN Controller. It
+ integrates some test suites from upstream using
+ Robot as the test framework.
dependencies:
installer: ''
scenario: 'odl'
@@ -155,6 +155,27 @@ tiers:
- /home/opnfv/repos/odl_test/csit/suites/openstack/neutron
-
+ name: odl_netvirt
+ criteria: 'success_rate == 100%'
+ blocking: true
+ description: >-
+ Test Suite for the OpenDaylight SDN Controller when
+ the NetVirt features are installed. It integrates
+ some test suites from upstream using Robot as the
+ test framework.
+ dependencies:
+ installer: ''
+ scenario: 'netvirt'
+ run:
+ module: 'functest.opnfv_tests.sdn.odl.odl'
+ class: 'ODLTests'
+ args:
+ suites:
+ - /home/opnfv/repos/odl_test/csit/suites/integration/basic
+ - /home/opnfv/repos/odl_test/csit/suites/openstack/neutron
+ - /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity
+
+ -
name: onos
criteria: 'status == "PASS"'
blocking: true
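The new odl_netvirt entry is consumed by run_tests.py, which imports the listed module and class dynamically (hence the ImportError handling shown above). A hedged sketch of that mapping with the yaml block reduced to a plain dict; the exact helper names inside run_tests.py are not shown in this patch:

import importlib

# A testcases.yaml "run" block, reduced to a dict for illustration.
run_block = {
    'module': 'functest.opnfv_tests.sdn.odl.odl',
    'class': 'ODLTests',
    'args': {'suites': [
        '/home/opnfv/repos/odl_test/csit/suites/integration/basic']},
}

module = importlib.import_module(run_block['module'])   # may raise ImportError
test_case = getattr(module, run_block['class'])()       # may raise AttributeError
result = test_case.run(**run_block.get('args', {}))
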
diff --git a/functest/core/testcase_base.py b/functest/core/testcase_base.py
index ec46bc64..838b6398 100644
--- a/functest/core/testcase_base.py
+++ b/functest/core/testcase_base.py
@@ -9,7 +9,6 @@
import os
-from functest.utils.constants import CONST
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
@@ -18,7 +17,7 @@ class TestcaseBase(object):
EX_OK = os.EX_OK
EX_RUN_ERROR = os.EX_SOFTWARE
- EX_PUBLISH_RESULT_FAILED = os.EX_SOFTWARE - 1
+ EX_PUSH_TO_DB_ERROR = os.EX_SOFTWARE - 1
EX_TESTCASE_FAILED = os.EX_SOFTWARE - 2
logger = ft_logger.Logger(__name__).getLogger()
@@ -44,45 +43,21 @@ class TestcaseBase(object):
self.logger.error("Run must be implemented")
return TestcaseBase.EX_RUN_ERROR
- def publish_report(self):
- if "RESULTS_STORE" in os.environ:
- CONST.results_test_db_url = os.environ['RESULTS_STORE']
-
+ def push_to_db(self):
try:
assert self.project_name
assert self.case_name
assert self.criteria
assert self.start_time
assert self.stop_time
- if CONST.results_test_db_url.lower().startswith(
- ("http://", "https://")):
- self.push_to_db()
- elif CONST.results_test_db_url.lower().startswith("file://"):
- self.write_to_file()
+ if ft_utils.push_results_to_db(
+ self.project_name, self.case_name, self.start_time,
+ self.stop_time, self.criteria, self.details):
+ self.logger.info("The results were successfully pushed to DB")
+ return TestcaseBase.EX_OK
else:
- self.logger.error("Please check parameter test_db_url and "
- "OS environ variable RESTULTS_STORE")
- return TestcaseBase.EX_PUBLISH_RESULT_FAILED
+ self.logger.error("The results cannot be pushed to DB")
+ return TestcaseBase.EX_PUSH_TO_DB_ERROR
except Exception:
- self.logger.exception("The results cannot be stored")
- return TestcaseBase.EX_PUBLISH_RESULT_FAILED
-
- def write_to_file(self):
- if ft_utils.write_results_to_file(
- self.project_name, self.case_name, self.start_time,
- self.stop_time, self.criteria, self.details):
- self.logger.info("The results were successfully written to a file")
- return TestcaseBase.EX_OK
- else:
- self.logger.error("write results to a file failed")
- return TestcaseBase.EX_PUBLISH_RESULT_FAILED
-
- def push_to_db(self):
- if ft_utils.push_results_to_db(
- self.project_name, self.case_name, self.start_time,
- self.stop_time, self.criteria, self.details):
- self.logger.info("The results were successfully pushed to DB")
- return TestcaseBase.EX_OK
- else:
- self.logger.error("The results cannot be pushed to DB")
- return TestcaseBase.EX_PUBLISH_RESULT_FAILED
+ self.logger.exception("The results cannot be pushed to DB")
+ return TestcaseBase.EX_PUSH_TO_DB_ERROR
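A hedged sketch of the contract push_to_db() now enforces: the asserted attributes must be filled in by run() before pushing, and the method returns EX_OK or EX_PUSH_TO_DB_ERROR rather than raising. FeatureFoo is illustrative, and a no-argument TestcaseBase constructor is assumed:

import time

from functest.core import testcase_base


class FeatureFoo(testcase_base.TestcaseBase):

    def __init__(self):
        super(FeatureFoo, self).__init__()
        self.project_name = "functest"
        self.case_name = "feature_foo"

    def run(self):
        self.start_time = time.time()
        self.details = {"status": "PASS"}
        self.criteria = "PASS"
        self.stop_time = time.time()
        return testcase_base.TestcaseBase.EX_OK


test = FeatureFoo()
if test.run() == testcase_base.TestcaseBase.EX_OK:
    # Returns EX_OK on success, EX_PUSH_TO_DB_ERROR otherwise;
    # exceptions are logged inside push_to_db(), not propagated.
    test.push_to_db()
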
diff --git a/functest/opnfv_tests/features/multisite.py b/functest/opnfv_tests/features/multisite.py
deleted file mode 100755
index 15cfe2a4..00000000
--- a/functest/opnfv_tests/features/multisite.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Execute Multisite Tempest test cases
-#
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("multisite").getLogger()
-
-
-def main():
- logger.info("multisite OK")
-
-
-if __name__ == '__main__':
- main()
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 16a872fc..46d6a570 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -526,14 +526,13 @@ class RallyBase(testcase_base.TestcaseBase):
self._run_tests()
self._generate_report()
self._clean_up()
+ res = testcase_base.TestcaseBase.EX_OK
except Exception as e:
logger.error('Error with run: %s' % e)
- return testcase_base.TestcaseBase.EX_RUN_ERROR
- self.stop_time = time.time()
+ res = testcase_base.TestcaseBase.EX_RUN_ERROR
- # If we are here, it means that the test case was successfully executed
- # criteria is managed by the criteria Field
- return testcase_base.TestcaseBase.EX_OK
+ self.stop_time = time.time()
+ return res
class RallySanity(RallyBase):
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index f013b442..028b085c 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -13,8 +13,6 @@ import re
import shutil
import subprocess
-import opnfv.utils.constants as releng_constants
-
from functest.utils.constants import CONST
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
@@ -113,13 +111,9 @@ def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
Calls rally verify and updates the generated tempest.conf with
given parameters
"""
- conf_verifier_result = configure_verifier(deployment_dir)
- if conf_verifier_result == releng_constants.EXIT_RUN_ERROR:
- return releng_constants.EXIT_RUN_ERROR
- else:
- configure_tempest_update_params(conf_verifier_result,
- IMAGE_ID, FLAVOR_ID)
- return releng_constants.EXIT_OK
+ conf_file = configure_verifier(deployment_dir)
+ configure_tempest_update_params(conf_file,
+ IMAGE_ID, FLAVOR_ID)
def configure_tempest_update_params(tempest_conf_file,
@@ -191,13 +185,13 @@ def configure_verifier(deployment_dir):
logger.info("Configuring the verifier...")
cmd = "rally verify configure-verifier"
ft_utils.execute_command(cmd)
- logger.debug("Looking for tempest.conf file...")
+ logger.debug("Looking for tempest.conf file...")
if not os.path.isfile(tempest_conf_file):
logger.error("Tempest configuration file %s NOT found."
% tempest_conf_file)
- return releng_constants.EXIT_RUN_ERROR
-
+ raise Exception("Tempest configuration file %s NOT found."
+ % tempest_conf_file)
else:
return tempest_conf_file
@@ -212,9 +206,8 @@ def configure_tempest_multisite(deployment_dir):
logger.debug("Finding tempest.conf file...")
tempest_conf_old = os.path.join(deployment_dir, 'tempest.conf')
if not os.path.isfile(tempest_conf_old):
- logger.error("Tempest configuration file %s NOT found."
- % tempest_conf_old)
- return releng_constants.EXIT_RUN_ERROR
+ raise Exception("Tempest configuration file %s NOT found."
+ % tempest_conf_old)
# Copy tempest.conf to /home/opnfv/functest/results/tempest/
cur_path = os.path.split(os.path.realpath(__file__))[0]
@@ -286,5 +279,3 @@ def configure_tempest_multisite(deployment_dir):
config.set('kingbird', 'api_version', kingbird_api_version)
with open(tempest_conf_file, 'wb') as config_file:
config.write(config_file)
-
- return releng_constants.EXIT_OK
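With the releng exit constants gone, configure_tempest() signals failure by raising and returns nothing on success. A short sketch of the calling convention; the deployment directory path is illustrative:

from functest.opnfv_tests.openstack.tempest import conf_utils

try:
    conf_utils.configure_tempest(
        "/root/.rally/verification/verifier-example",
        IMAGE_ID=None, FLAVOR_ID=None)
except Exception as e:
    # e.g. "Tempest configuration file ... NOT found."
    print("Tempest configuration failed: %s" % e)
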
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index e1a223a7..13d9e4e6 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -57,7 +57,7 @@ class TempestCommon(testcase_base.TestcaseBase):
CONST.tempest_identity_tenant_name,
CONST.tempest_identity_tenant_description)
if not tenant_id:
- logger.error("Error : Failed to create %s tenant"
+ logger.error("Failed to create %s tenant"
% CONST.tempest_identity_tenant_name)
user_id = os_utils.create_user(keystone_client,
@@ -65,7 +65,7 @@ class TempestCommon(testcase_base.TestcaseBase):
CONST.tempest_identity_user_password,
None, tenant_id)
if not user_id:
- logger.error("Error : Failed to create %s user" %
+ logger.error("Failed to create %s user" %
CONST.tempest_identity_user_name)
logger.debug("Creating private network for Tempest suite")
@@ -74,8 +74,8 @@ class TempestCommon(testcase_base.TestcaseBase):
CONST.tempest_private_subnet_name,
CONST.tempest_router_name,
CONST.tempest_private_subnet_cidr)
- if not network_dic:
- return testcase_base.TestcaseBase.EX_RUN_ERROR
+ if network_dic is None:
+ raise Exception('Failed to create private network')
if CONST.tempest_use_custom_images:
# adding alternative image should be trivial should we need it
@@ -83,8 +83,8 @@ class TempestCommon(testcase_base.TestcaseBase):
_, self.IMAGE_ID = os_utils.get_or_create_image(
CONST.openstack_image_name, conf_utils.GLANCE_IMAGE_PATH,
CONST.openstack_image_disk_format)
- if not self.IMAGE_ID:
- return testcase_base.TestcaseBase.EX_RUN_ERROR
+ if self.IMAGE_ID is None:
+ raise Exception('Failed to create image')
if CONST.tempest_use_custom_flavors:
# adding alternative flavor should be trivial should we need it
@@ -94,10 +94,8 @@ class TempestCommon(testcase_base.TestcaseBase):
CONST.openstack_flavor_ram,
CONST.openstack_flavor_disk,
CONST.openstack_flavor_vcpus)
- if not self.FLAVOR_ID:
- return testcase_base.TestcaseBase.EX_RUN_ERROR
-
- return testcase_base.TestcaseBase.EX_OK
+ if self.FLAVOR_ID is None:
+ raise Exception('Failed to create flavor')
def generate_test_list(self, verifier_repo_dir):
logger.debug("Generating test case list...")
@@ -109,9 +107,8 @@ class TempestCommon(testcase_base.TestcaseBase):
shutil.copyfile(
conf_utils.TEMPEST_CUSTOM, conf_utils.TEMPEST_RAW_LIST)
else:
- logger.error("Tempest test list file %s NOT found."
- % conf_utils.TEMPEST_CUSTOM)
- return testcase_base.TestcaseBase.EX_RUN_ERROR
+ raise Exception("Tempest test list file %s NOT found."
+ % conf_utils.TEMPEST_CUSTOM)
else:
if self.MODE == 'smoke':
testr_mode = "smoke"
@@ -128,8 +125,6 @@ class TempestCommon(testcase_base.TestcaseBase):
conf_utils.TEMPEST_RAW_LIST))
ft_utils.execute_command(cmd)
- return testcase_base.TestcaseBase.EX_OK
-
def apply_tempest_blacklist(self):
logger.debug("Applying tempest blacklist...")
cases_file = self.read_file(conf_utils.TEMPEST_RAW_LIST)
@@ -164,7 +159,6 @@ class TempestCommon(testcase_base.TestcaseBase):
else:
result_file.write(str(cases_line) + '\n')
result_file.close()
- return testcase_base.TestcaseBase.EX_OK
def _parse_verification_id(line):
first_pos = line.index("UUID=") + len("UUID=")
@@ -217,7 +211,7 @@ class TempestCommon(testcase_base.TestcaseBase):
f_env.close()
def parse_verifier_result(self):
- if not self.VERIFICATION_ID:
+ if self.VERIFICATION_ID is None:
raise Exception('Verification UUID not found')
cmd_line = "rally verify show --uuid {}".format(self.VERIFICATION_ID)
@@ -274,33 +268,22 @@ class TempestCommon(testcase_base.TestcaseBase):
if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
- # Pre-configuration
- res = self.create_tempest_resources()
- if res != testcase_base.TestcaseBase.EX_OK:
- return res
-
- res = conf_utils.configure_tempest(self.DEPLOYMENT_DIR,
- self.IMAGE_ID,
- self.FLAVOR_ID)
- if res != testcase_base.TestcaseBase.EX_OK:
- return res
-
- res = self.generate_test_list(self.VERIFIER_REPO_DIR)
- if res != testcase_base.TestcaseBase.EX_OK:
- return res
-
- res = self.apply_tempest_blacklist()
- if res != testcase_base.TestcaseBase.EX_OK:
- return res
-
- self.run_verifier_tests()
- self.parse_verifier_result()
+ try:
+ self.create_tempest_resources()
+ conf_utils.configure_tempest(self.DEPLOYMENT_DIR,
+ self.IMAGE_ID,
+ self.FLAVOR_ID)
+ self.generate_test_list(self.VERIFIER_REPO_DIR)
+ self.apply_tempest_blacklist()
+ self.run_verifier_tests()
+ self.parse_verifier_result()
+ res = testcase_base.TestcaseBase.EX_OK
+ except Exception as e:
+ logger.error('Error with run: %s' % e)
+ res = testcase_base.TestcaseBase.EX_RUN_ERROR
self.stop_time = time.time()
-
- # If we are here, it means that the test case was successfully executed
- # criteria is managed by the criteria Field
- return testcase_base.TestcaseBase.EX_OK
+ return res
class TempestSmokeSerial(TempestCommon):
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 8285d93f..a5309bd4 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -289,6 +289,6 @@ class VPingMain(object):
if result != VPingBase.EX_OK:
return result
if kwargs['report']:
- return self.vping.publish_report()
+ return self.vping.push_to_db()
except Exception:
return VPingBase.EX_RUN_ERROR
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index 25075957..9bff324f 100755
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -237,6 +237,6 @@ if __name__ == '__main__':
if result != testcase_base.TestcaseBase.EX_OK:
sys.exit(result)
if args['pushtodb']:
- sys.exit(odl.publish_report())
+ sys.exit(odl.push_to_db())
except Exception:
sys.exit(testcase_base.TestcaseBase.EX_RUN_ERROR)
diff --git a/functest/tests/unit/core/test_testcase_base.py b/functest/tests/unit/core/test_testcase_base.py
index 94d2e966..b7c81d87 100755..100644
--- a/functest/tests/unit/core/test_testcase_base.py
+++ b/functest/tests/unit/core/test_testcase_base.py
@@ -9,7 +9,6 @@
import logging
import mock
-import os
import unittest
from functest.core import testcase_base
@@ -32,12 +31,11 @@ class TestcaseBaseTesting(unittest.TestCase):
self.assertEqual(self.test.run(),
testcase_base.TestcaseBase.EX_RUN_ERROR)
- @mock.patch.dict(os.environ, {})
@mock.patch('functest.utils.functest_utils.push_results_to_db',
return_value=False)
def _test_missing_attribute(self, mock_function):
- self.assertEqual(self.test.publish_report(),
- testcase_base.TestcaseBase.EX_PUBLISH_RESULT_FAILED)
+ self.assertEqual(self.test.push_to_db(),
+ testcase_base.TestcaseBase.EX_PUSH_TO_DB_ERROR)
mock_function.assert_not_called()
def test_missing_case_name(self):
@@ -70,7 +68,7 @@ class TestcaseBaseTesting(unittest.TestCase):
return_value=False)
def test_push_to_db_failed(self, mock_function):
self.assertEqual(self.test.push_to_db(),
- testcase_base.TestcaseBase.EX_PUBLISH_RESULT_FAILED)
+ testcase_base.TestcaseBase.EX_PUSH_TO_DB_ERROR)
mock_function.assert_called_once_with(
self.test.project, self.test.case_name, self.test.start_time,
self.test.stop_time, self.test.criteria, self.test.details)
diff --git a/functest/tests/unit/utils/test_openstack_utils.py b/functest/tests/unit/utils/test_openstack_utils.py
index 0971b4e8..447271fc 100644
--- a/functest/tests/unit/utils/test_openstack_utils.py
+++ b/functest/tests/unit/utils/test_openstack_utils.py
@@ -379,6 +379,9 @@ class OSUtilsTesting(unittest.TestCase):
self._test_source_credentials('export OS_TENANT_NAME = "admin"')
self._test_source_credentials('OS_TENANT_NAME', value='')
self._test_source_credentials('export OS_TENANT_NAME', value='')
+ # This test will fail as soon as rc_file is fixed
+ self._test_source_credentials(
+ 'export "\'OS_TENANT_NAME\'" = "\'admin\'"')
@mock.patch('functest.utils.openstack_utils.os.getenv',
return_value=None)
diff --git a/functest/utils/decorators.py b/functest/utils/decorators.py
new file mode 100644
index 00000000..99bcef3e
--- /dev/null
+++ b/functest/utils/decorators.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+import mock
+import requests.sessions
+import urlparse
+
+
+def can_dump_request_to_file(method):
+
+ def dump_preparedrequest(request, **kwargs):
+ parseresult = urlparse.urlparse(request.url)
+ if parseresult.scheme == "file":
+ with open(parseresult.path.replace('/results', ''), 'a') as f:
+ headers = ""
+ for key in request.headers:
+ headers += key + " " + request.headers[key] + "\n"
+ message = "{} {}\n{}\n{}\n\n\n".format(
+ request.method, request.url, headers, request.body)
+ f.write(message)
+ return mock.Mock()
+
+ def patch_request(method, url, **kwargs):
+ with requests.sessions.Session() as session:
+ parseresult = urlparse.urlparse(url)
+ if parseresult.scheme == "file":
+ with mock.patch.object(
+ session, 'send', side_effect=dump_preparedrequest):
+ return session.request(method=method, url=url, **kwargs)
+ else:
+ return session.request(method=method, url=url, **kwargs)
+
+ def hook(*args, **kwargs):
+ with mock.patch('requests.api.request', side_effect=patch_request):
+ return method(*args, **kwargs)
+
+ return hook
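A hedged usage sketch of the new decorator (dump_results is a hypothetical helper, not part of the patch). The decorator patches requests.api.request so that when the URL scheme is file:// the prepared request is appended to the file instead of being sent; it strips a '/results' path segment, mirroring the "db_url + '/results'" convention used by push_results_to_db(), so the dump below lands in /home/opnfv/functest/dump.txt:

import json

import requests

from functest.utils import decorators


@decorators.can_dump_request_to_file
def dump_results(url, payload):
    # Behaves like a normal POST for http(s) URLs; dumps to disk for file://.
    return requests.post(url, data=json.dumps(payload),
                         headers={'Content-Type': 'application/json'})


dump_results("file:///home/opnfv/functest/dump.txt/results",
             {"project_name": "functest", "case_name": "vping_ssh"})
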
diff --git a/functest/utils/functest_logger.py b/functest/utils/functest_logger.py
index f09f56be..0cba8c52 100755
--- a/functest/utils/functest_logger.py
+++ b/functest/utils/functest_logger.py
@@ -28,42 +28,36 @@ import json
from functest.utils.constants import CONST
-logger = logging.getLogger(__name__)
-
-
-def is_debug():
- if CONST.CI_DEBUG and CONST.CI_DEBUG.lower() == "true":
- return True
- return False
-
-
-def setup_logging(default_path=CONST.dir_functest_logging_cfg,
- default_level=logging.INFO,
- env_key='LOG_CFG'):
- path = default_path
- value = os.getenv(env_key, None)
- if value:
- path = value
- if os.path.exists(path):
- with open(path, 'rt') as f:
- config = json.load(f)
- if (config['handlers'] and
- config['handlers']['console']):
- stream_level = logging.INFO
- if is_debug():
- stream_level = logging.DEBUG
- config['handlers']['console']['level'] = stream_level
- logging.config.dictConfig(config)
- else:
- logging.basicConfig(level=default_level)
-
-
-setup_logging()
-
class Logger:
def __init__(self, logger_name):
+ self.setup_logging()
self.logger = logging.getLogger(logger_name)
def getLogger(self):
return self.logger
+
+ def is_debug(self):
+ if CONST.CI_DEBUG and CONST.CI_DEBUG.lower() == "true":
+ return True
+ return False
+
+ def setup_logging(self, default_path=CONST.dir_functest_logging_cfg,
+ default_level=logging.INFO,
+ env_key='LOG_CFG'):
+ path = default_path
+ value = os.getenv(env_key, None)
+ if value:
+ path = value
+ if os.path.exists(path):
+ with open(path, 'rt') as f:
+ config = json.load(f)
+ if (config['handlers'] and
+ config['handlers']['console']):
+ stream_level = logging.INFO
+ if self.is_debug():
+ stream_level = logging.DEBUG
+ config['handlers']['console']['level'] = stream_level
+ logging.config.dictConfig(config)
+ else:
+ logging.basicConfig(level=default_level)
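Call sites are unchanged by moving setup_logging() into the class; each module still builds its logger through the wrapper, which now loads the dict config from CONST.dir_functest_logging_cfg when the Logger is instantiated and raises the console handler to DEBUG when CI_DEBUG is "true":

import functest.utils.functest_logger as ft_logger

logger = ft_logger.Logger("my_module").getLogger()
logger.info("visible at the default INFO console level")
logger.debug("only shown on the console when CI_DEBUG is set to true")
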
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 7c5c4fcc..b2c36cff 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -23,10 +23,9 @@ import requests
import yaml
from git import Repo
-from functest.utils.constants import CONST
+from functest.utils import decorators
import functest.utils.functest_logger as ft_logger
-
logger = ft_logger.Logger("functest_utils").getLogger()
@@ -184,43 +183,14 @@ def logger_test_results(project, case_name, status, details):
'd': details})
-def write_results_to_file(project, case_name, start_date,
- stop_date, criteria, details):
- file_path = re.split(r'://', CONST.results_test_db_url)[1]
-
- try:
- installer = os.environ['INSTALLER_TYPE']
- scenario = os.environ['DEPLOY_SCENARIO']
- pod_name = os.environ['NODE_NAME']
- except KeyError as e:
- logger.error("Please set env var: " + str(e))
- return False
-
- test_start = dt.fromtimestamp(start_date).strftime('%Y-%m-%d %H:%M:%S')
- test_stop = dt.fromtimestamp(stop_date).strftime('%Y-%m-%d %H:%M:%S')
-
- params = {"project_name": project, "case_name": case_name,
- "pod_name": pod_name, "installer": installer,
- "scenario": scenario, "criteria": criteria,
- "start_date": test_start, "stop_date": test_stop,
- "details": details}
- try:
- with open(file_path, "a+w") as outfile:
- json.dump(params, outfile)
- outfile.write("\n")
- return True
- except Exception as e:
- logger.error("write result data into a file failed: %s" % e)
- return False
-
-
+@decorators.can_dump_request_to_file
def push_results_to_db(project, case_name,
start_date, stop_date, criteria, details):
"""
POST results to the Result target DB
"""
# Retrieve params from CI and conf
- url = CONST.results_test_db_url + "/results"
+ url = get_db_url() + "/results"
try:
installer = os.environ['INSTALLER_TYPE']
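A hedged sketch of calling the decorated push_results_to_db() directly. INSTALLER_TYPE is read in the try block above, and DEPLOY_SCENARIO / NODE_NAME follow the pattern of the removed write_results_to_file(); the values below are placeholders:

import os
import time

import functest.utils.functest_utils as ft_utils

os.environ.setdefault('INSTALLER_TYPE', 'fuel')
os.environ.setdefault('DEPLOY_SCENARIO', 'os-nosdn-nofeature-ha')
os.environ.setdefault('NODE_NAME', 'example-pod')

start = time.time()
stop = time.time()
# Truthy on a successful POST to get_db_url() + "/results", falsy
# otherwise; with a file:// DB URL the decorator dumps the request
# to disk instead of sending it.
pushed = ft_utils.push_results_to_db("functest", "vping_ssh", start, stop,
                                     "PASS", {"duration": stop - start})
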
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
index a0d78ae9..3093cb55 100755
--- a/functest/utils/openstack_utils.py
+++ b/functest/utils/openstack_utils.py
@@ -115,8 +115,11 @@ def source_credentials(rc_file):
with open(rc_file, "r") as f:
for line in f:
var = line.rstrip('"\n').replace('export ', '').split("=")
- key = re.sub(r'^ *| *$', '', var[0])
- value = re.sub(r'^[" ]*|[ "]*$', '', "".join(var[1:]))
+ # The two next lines should be modified as soon as rc_file
+ # conforms with common rules. Be aware that it could induce
+ # issues if value starts with '
+ key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
+ value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
os.environ[key] = value
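A small worked example of the widened stripping expressions; the second line mirrors the new unit test above, and both collapse to the same key/value pair:

import os
import re

for line in ['export OS_TENANT_NAME="admin"\n',
             'export "\'OS_TENANT_NAME\'" = "\'admin\'"\n']:
    var = line.rstrip('"\n').replace('export ', '').split("=")
    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
    value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
    os.environ[key] = value
    print("%s=%s" % (key, value))   # OS_TENANT_NAME=admin in both cases
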
diff --git a/run_unit_tests.sh b/run_unit_tests.sh
index 606aedcd..d60a2d62 100755
--- a/run_unit_tests.sh
+++ b/run_unit_tests.sh
@@ -5,7 +5,7 @@ set -o pipefail
# Either Workspace is set (CI)
if [ -z $WORKSPACE ]
then
- WORKSPACE="."
+ WORKSPACE=`pwd`
fi
@@ -24,6 +24,13 @@ pip install --upgrade pip
pip install -r $WORKSPACE/test-requirements.txt
pip install $WORKSPACE
+#install releng
+cd $WORKSPACE/../
+git clone https://gerrit.opnfv.org/gerrit/releng
+pip install releng/modules/
+rm -fr releng
+cd $WORKSPACE
+
export CONFIG_FUNCTEST_YAML=$(pwd)/functest/ci/config_functest.yaml
# unit tests
# TODO: remove cover-erase