-rwxr-xr-x  functest/ci/run_tests.py                                   404
-rw-r--r--  functest/ci/testcases.yaml                                   1
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py               33
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py       138
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py           13
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py                   171
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py           14
-rw-r--r--  functest/tests/unit/openstack/tempest/test_conf_utils.py    36
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py        8
9 files changed, 429 insertions, 389 deletions
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 76760096..f973b616 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -64,210 +64,209 @@ class RunTestsParser(object):
return vars(self.parser.parse_args(argv))
-class GlobalVariables:
- EXECUTED_TEST_CASES = []
- OVERALL_RESULT = Result.EX_OK
- CLEAN_FLAG = True
- REPORT_FLAG = False
-
-
-def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
-
-
-def source_rc_file():
- rc_file = CONST.__getattribute__('openstack_creds')
- if not os.path.isfile(rc_file):
- raise Exception("RC file %s does not exist..." % rc_file)
- logger.debug("Sourcing the OpenStack RC file...")
- os_utils.source_credentials(rc_file)
- for key, value in os.environ.iteritems():
- if re.search("OS_", key):
- if key == 'OS_AUTH_URL':
- CONST.__setattr__('OS_AUTH_URL', value)
- elif key == 'OS_USERNAME':
- CONST.__setattr__('OS_USERNAME', value)
- elif key == 'OS_TENANT_NAME':
- CONST.__setattr__('OS_TENANT_NAME', value)
- elif key == 'OS_PASSWORD':
- CONST.__setattr__('OS_PASSWORD', value)
-
-
-def generate_os_snapshot():
- os_snapshot.main()
-
-
-def cleanup():
- os_clean.main()
-
-
-def get_run_dict(testname):
- try:
- dict = ft_utils.get_dict_by_test(testname)
- if not dict:
- logger.error("Cannot get {}'s config options".format(testname))
- elif 'run' in dict:
- return dict['run']
- return None
- except Exception:
- logger.exception("Cannot get {}'s config options".format(testname))
- return None
-
-
-def run_test(test, tier_name, testcases=None):
- if not test.is_enabled():
- raise TestNotEnabled("The test case {} is not enabled"
- .format(test.get_name()))
- test_name = test.get_name()
- logger.info("\n") # blank line
- print_separator("=")
- logger.info("Running test case '%s'..." % test_name)
- print_separator("=")
- logger.debug("\n%s" % test)
- source_rc_file()
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- generate_os_snapshot()
-
- flags = (" -t %s" % (test_name))
- if GlobalVariables.REPORT_FLAG:
- flags += " -r"
-
- result = testcase.TestCase.EX_RUN_ERROR
- run_dict = get_run_dict(test_name)
- if run_dict:
+class Runner(object):
+
+ def __init__(self):
+ self.executed_test_cases = []
+ self.overall_result = Result.EX_OK
+ self.clean_flag = True
+ self.report_flag = False
+
+ @staticmethod
+ def print_separator(str, count=45):
+ line = ""
+ for i in range(0, count - 1):
+ line += str
+ logger.info("%s" % line)
+
+ @staticmethod
+ def source_rc_file():
+ rc_file = CONST.__getattribute__('openstack_creds')
+ if not os.path.isfile(rc_file):
+ raise Exception("RC file %s does not exist..." % rc_file)
+ logger.debug("Sourcing the OpenStack RC file...")
+ os_utils.source_credentials(rc_file)
+ for key, value in os.environ.iteritems():
+ if re.search("OS_", key):
+ if key == 'OS_AUTH_URL':
+ CONST.__setattr__('OS_AUTH_URL', value)
+ elif key == 'OS_USERNAME':
+ CONST.__setattr__('OS_USERNAME', value)
+ elif key == 'OS_TENANT_NAME':
+ CONST.__setattr__('OS_TENANT_NAME', value)
+ elif key == 'OS_PASSWORD':
+ CONST.__setattr__('OS_PASSWORD', value)
+
+ @staticmethod
+ def generate_os_snapshot():
+ os_snapshot.main()
+
+ @staticmethod
+ def cleanup():
+ os_clean.main()
+
+ @staticmethod
+ def get_run_dict(testname):
try:
- module = importlib.import_module(run_dict['module'])
- cls = getattr(module, run_dict['class'])
- test_dict = ft_utils.get_dict_by_test(test_name)
- test_case = cls(**test_dict)
- GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
+ dict = ft_utils.get_dict_by_test(testname)
+ if not dict:
+ logger.error("Cannot get {}'s config options".format(testname))
+ elif 'run' in dict:
+ return dict['run']
+ return None
+ except Exception:
+ logger.exception("Cannot get {}'s config options".format(testname))
+ return None
+
+ def run_test(self, test, tier_name, testcases=None):
+ if not test.is_enabled():
+ raise TestNotEnabled(
+ "The test case {} is not enabled".format(test.get_name()))
+ test_name = test.get_name()
+ logger.info("\n") # blank line
+ self.print_separator("=")
+ logger.info("Running test case '%s'..." % test_name)
+ self.print_separator("=")
+ logger.debug("\n%s" % test)
+ self.source_rc_file()
+
+ if test.needs_clean() and self.clean_flag:
+ self.generate_os_snapshot()
+
+ flags = (" -t %s" % (test_name))
+ if self.report_flag:
+ flags += " -r"
+
+ result = testcase.TestCase.EX_RUN_ERROR
+ run_dict = self.get_run_dict(test_name)
+ if run_dict:
try:
- kwargs = run_dict['args']
- result = test_case.run(**kwargs)
- except KeyError:
- result = test_case.run()
- if result == testcase.TestCase.EX_OK:
- if GlobalVariables.REPORT_FLAG:
- test_case.push_to_db()
- result = test_case.is_successful()
- logger.info("Test result:\n\n%s\n", test_case)
- except ImportError:
- logger.exception("Cannot import module {}".format(
- run_dict['module']))
- except AttributeError:
- logger.exception("Cannot get class {}".format(
- run_dict['class']))
- else:
- raise Exception("Cannot import the class for the test case.")
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- cleanup()
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed. " % test_name)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed("The test case {} failed and is blocking"
- .format(test.get_name()))
-
-
-def run_tier(tier):
- tier_name = tier.get_name()
- tests = tier.get_tests()
- if tests is None or len(tests) == 0:
- logger.info("There are no supported test cases in this tier "
- "for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- run_test(test, tier_name)
-
-
-def run_all(tiers):
- summary = ""
- tiers_to_run = []
-
- for tier in tiers.get_tiers():
- if (len(tier.get_tests()) != 0 and
- re.search(CONST.__getattribute__('CI_LOOP'),
- tier.get_ci_loop()) is not None):
- tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
- for tier in tiers_to_run:
- run_tier(tier)
-
-
-def main(**kwargs):
-
- file = CONST.functest_testcases_yaml
- _tiers = tb.TierBuilder(CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- file)
-
- if kwargs['noclean']:
- GlobalVariables.CLEAN_FLAG = False
-
- if kwargs['report']:
- GlobalVariables.REPORT_FLAG = True
-
- try:
- if kwargs['test']:
- source_rc_file()
- if _tiers.get_tier(kwargs['test']):
- run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
- run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
- elif kwargs['test'] == "all":
- run_all(_tiers)
- else:
- logger.error("Unknown test case or tier '%s', "
- "or not supported by "
- "the given scenario '%s'."
- % (kwargs['test'],
- CONST.__getattribute__('DEPLOY_SCENARIO')))
- logger.debug("Available tiers are:\n\n%s"
- % _tiers)
- return Result.EX_ERROR
+ module = importlib.import_module(run_dict['module'])
+ cls = getattr(module, run_dict['class'])
+ test_dict = ft_utils.get_dict_by_test(test_name)
+ test_case = cls(**test_dict)
+ self.executed_test_cases.append(test_case)
+ try:
+ kwargs = run_dict['args']
+ result = test_case.run(**kwargs)
+ except KeyError:
+ result = test_case.run()
+ if result == testcase.TestCase.EX_OK:
+ if self.report_flag:
+ test_case.push_to_db()
+ result = test_case.is_successful()
+ logger.info("Test result:\n\n%s\n", test_case)
+ except ImportError:
+ logger.exception("Cannot import module {}".format(
+ run_dict['module']))
+ except AttributeError:
+ logger.exception("Cannot get class {}".format(
+ run_dict['class']))
else:
- run_all(_tiers)
- except Exception as e:
- logger.error(e)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['env var', 'value'])
- for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
- 'CI_LOOP']:
- msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier', 'duration', 'result'])
- for test_case in GlobalVariables.EXECUTED_TEST_CASES:
- result = 'PASS' if(test_case.is_successful(
- ) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
- test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
- return GlobalVariables.OVERALL_RESULT
+ raise Exception("Cannot import the class for the test case.")
+
+ if test.needs_clean() and self.clean_flag:
+ self.cleanup()
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed. " % test_name)
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+
+ def run_tier(self, tier):
+ tier_name = tier.get_name()
+ tests = tier.get_tests()
+ if tests is None or len(tests) == 0:
+ logger.info("There are no supported test cases in this tier "
+ "for the given scenario")
+ return 0
+ logger.info("\n\n") # blank line
+ self.print_separator("#")
+ logger.info("Running tier '%s'" % tier_name)
+ self.print_separator("#")
+ logger.debug("\n%s" % tier)
+ for test in tests:
+ self.run_test(test, tier_name)
+
+ def run_all(self, tiers):
+ summary = ""
+ tiers_to_run = []
+
+ for tier in tiers.get_tiers():
+ if (len(tier.get_tests()) != 0 and
+ re.search(CONST.__getattribute__('CI_LOOP'),
+ tier.get_ci_loop()) is not None):
+ tiers_to_run.append(tier)
+ summary += ("\n - %s:\n\t %s"
+ % (tier.get_name(),
+ tier.get_test_names()))
+
+ logger.info("Tests to be executed:%s" % summary)
+ for tier in tiers_to_run:
+ self.run_tier(tier)
+
+ def main(self, **kwargs):
+ _tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__("functest_testcases_yaml"))
+
+ if kwargs['noclean']:
+ self.clean_flag = False
+
+ if kwargs['report']:
+ self.report_flag = True
+
+ try:
+ if kwargs['test']:
+ self.source_rc_file()
+ logger.error(kwargs['test'])
+ if _tiers.get_tier(kwargs['test']):
+ self.run_tier(_tiers.get_tier(kwargs['test']))
+ elif _tiers.get_test(kwargs['test']):
+ self.run_test(_tiers.get_test(kwargs['test']),
+ _tiers.get_tier_name(kwargs['test']),
+ kwargs['test'])
+ elif kwargs['test'] == "all":
+ self.run_all(_tiers)
+ else:
+ logger.error("Unknown test case or tier '%s', "
+ "or not supported by "
+ "the given scenario '%s'."
+ % (kwargs['test'],
+ CONST.__getattribute__('DEPLOY_SCENARIO')))
+ logger.debug("Available tiers are:\n\n%s",
+ _tiers)
+ return Result.EX_ERROR
+ else:
+ self.run_all(_tiers)
+ except Exception:
+ logger.exception("Runner failed")
+ self.overall_result = Result.EX_ERROR
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+ 'CI_LOOP']:
+ msg.add_row([env_var, CONST.__getattribute__(env_var)])
+ logger.info("Deployment description: \n\n%s\n", msg)
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier', 'duration', 'result'])
+ for test_case in self.executed_test_cases:
+ result = 'PASS' if(test_case.is_successful(
+ ) == test_case.EX_OK) else 'FAIL'
+ msg.add_row([test_case.case_name, test_case.project_name,
+ _tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
+ logger.info("Execution exit value: %s" % self.overall_result)
+ return self.overall_result
if __name__ == '__main__':
@@ -275,4 +274,5 @@ if __name__ == '__main__':
CONST.__getattribute__('dir_functest_logging_cfg'))
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
- sys.exit(main(**args).value)
+ runner = Runner()
+ sys.exit(runner.main(**args).value)
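The run_tests.py hunks above fold the module-level helpers and the GlobalVariables state into a single Runner object. A minimal sketch of driving the new class programmatically, assuming the patched module is importable; the argument values are illustrative, but the keyword names match what RunTestsParser produces:

    import sys

    from functest.ci.run_tests import Runner

    runner = Runner()
    # 'test' may name a tier, a single test case, or "all";
    # 'noclean' skips the OpenStack snapshot/cleanup steps and
    # 'report' pushes results to the test database.
    result = runner.main(test='all', noclean=False, report=False)
    # main() returns Result.EX_OK or Result.EX_ERROR; .value is the exit code,
    # exactly as in the patched __main__ block above.
    sys.exit(result.value)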
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index d98a2de2..10587f26 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -302,7 +302,6 @@ tiers:
-
case_name: bgpvpn
- enabled: false
project_name: sdnvpn
criteria: 100
blocking: false
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 40f8386c..86ec3558 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -30,14 +30,17 @@ logger = logging.getLogger(__name__)
class RallyBase(testcase.TestCase):
TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
- GLANCE_IMAGE_NAME = CONST.openstack_image_name
- GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
- GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_images,
- GLANCE_IMAGE_FILENAME)
- GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
+ GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
+ GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
+ GLANCE_IMAGE_PATH = os.path.join(
+ CONST.__getattribute__('dir_functest_images'),
+ GLANCE_IMAGE_FILENAME)
+ GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
FLAVOR_NAME = "m1.tiny"
- RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
+ RALLY_DIR = os.path.join(
+ CONST.__getattribute__('dir_repo_functest'),
+ CONST.__getattribute__('dir_rally'))
RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
@@ -45,17 +48,17 @@ class RallyBase(testcase.TestCase):
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4
- RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
- TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
+ RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
+ TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
'tempest/tempest.conf')
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
TEMP_DIR = os.path.join(RALLY_DIR, "var")
CINDER_VOLUME_TYPE_NAME = "volume_test"
- RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
- RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
- RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
- RALLY_ROUTER_NAME = CONST.rally_router_name
+ RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
+ RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
+ RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
+ RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
def __init__(self, **kwargs):
super(RallyBase, self).__init__(**kwargs)
@@ -97,7 +100,7 @@ class RallyBase(testcase.TestCase):
task_args['netid'] = ''
# get keystone auth endpoint
- task_args['request_url'] = CONST.OS_AUTH_URL or ''
+ task_args['request_url'] = CONST.__getattribute__('OS_AUTH_URL') or ''
return task_args
@@ -183,8 +186,8 @@ class RallyBase(testcase.TestCase):
with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
- installer_type = CONST.INSTALLER_TYPE
- deploy_scenario = CONST.DEPLOY_SCENARIO
+ installer_type = CONST.__getattribute__('INSTALLER_TYPE')
+ deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
if (bool(installer_type) * bool(deploy_scenario)):
if 'scenario' in black_list_yaml.keys():
for item in black_list_yaml['scenario']:
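Throughout rally.py, and in the tempest modules that follow, the patch replaces direct attribute access on CONST with explicit __getattribute__/__setattr__ calls. Both spellings resolve the same configuration entry; a small illustration using keys taken from the hunks above (the URL is a placeholder):

    from functest.utils.constants import CONST

    # Reading a value: the two forms are equivalent at runtime.
    image_name = CONST.__getattribute__('openstack_image_name')
    image_name = CONST.openstack_image_name

    # Writing a value: likewise equivalent.
    CONST.__setattr__('OS_AUTH_URL', 'http://192.0.2.10:5000/v3')
    CONST.OS_AUTH_URL = 'http://192.0.2.10:5000/v3'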
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index cd6a2a8c..556a41d4 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -21,11 +21,12 @@ import functest.utils.openstack_utils as os_utils
IMAGE_ID_ALT = None
FLAVOR_ID_ALT = None
-REPO_PATH = CONST.dir_repo_functest
-GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_images,
- CONST.openstack_image_file_name)
-TEMPEST_TEST_LIST_DIR = CONST.dir_tempest_cases
-TEMPEST_RESULTS_DIR = os.path.join(CONST.dir_results,
+REPO_PATH = CONST.__getattribute__('dir_repo_functest')
+GLANCE_IMAGE_PATH = os.path.join(
+ CONST.__getattribute__('dir_functest_images'),
+ CONST.__getattribute__('openstack_image_file_name'))
+TEMPEST_TEST_LIST_DIR = CONST.__getattribute__('dir_tempest_cases')
+TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'tempest')
TEMPEST_CUSTOM = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
'test_list.txt')
@@ -35,11 +36,11 @@ TEMPEST_DEFCORE = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
'defcore_req.txt')
TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
-REFSTACK_RESULTS_DIR = os.path.join(CONST.dir_results,
+REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'refstack')
-CI_INSTALLER_TYPE = CONST.INSTALLER_TYPE
-CI_INSTALLER_IP = CONST.INSTALLER_IP
+CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
+CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
""" logging configuration """
logger = logging.getLogger(__name__)
@@ -52,26 +53,27 @@ def create_tempest_resources(use_custom_images=False,
logger.debug("Creating tenant and user for Tempest suite")
tenant_id = os_utils.create_tenant(
keystone_client,
- CONST.tempest_identity_tenant_name,
- CONST.tempest_identity_tenant_description)
+ CONST.__getattribute__('tempest_identity_tenant_name'),
+ CONST.__getattribute__('tempest_identity_tenant_description'))
if not tenant_id:
logger.error("Failed to create %s tenant"
- % CONST.tempest_identity_tenant_name)
+ % CONST.__getattribute__('tempest_identity_tenant_name'))
- user_id = os_utils.create_user(keystone_client,
- CONST.tempest_identity_user_name,
- CONST.tempest_identity_user_password,
- None, tenant_id)
+ user_id = os_utils.create_user(
+ keystone_client,
+ CONST.__getattribute__('tempest_identity_user_name'),
+ CONST.__getattribute__('tempest_identity_user_password'),
+ None, tenant_id)
if not user_id:
logger.error("Failed to create %s user" %
- CONST.tempest_identity_user_name)
+ CONST.__getattribute__('tempest_identity_user_name'))
logger.debug("Creating private network for Tempest suite")
network_dic = os_utils.create_shared_network_full(
- CONST.tempest_private_net_name,
- CONST.tempest_private_subnet_name,
- CONST.tempest_router_name,
- CONST.tempest_private_subnet_cidr)
+ CONST.__getattribute__('tempest_private_net_name'),
+ CONST.__getattribute__('tempest_private_subnet_name'),
+ CONST.__getattribute__('tempest_router_name'),
+ CONST.__getattribute__('tempest_private_subnet_cidr'))
if network_dic is None:
raise Exception('Failed to create private network')
@@ -80,41 +82,45 @@ def create_tempest_resources(use_custom_images=False,
flavor_id = ""
flavor_id_alt = ""
- if CONST.tempest_use_custom_images or use_custom_images:
+ if (CONST.__getattribute__('tempest_use_custom_images') or
+ use_custom_images):
# adding alternative image should be trivial should we need it
logger.debug("Creating image for Tempest suite")
_, image_id = os_utils.get_or_create_image(
- CONST.openstack_image_name, GLANCE_IMAGE_PATH,
- CONST.openstack_image_disk_format)
+ CONST.__getattribute__('openstack_image_name'),
+ GLANCE_IMAGE_PATH,
+ CONST.__getattribute__('openstack_image_disk_format'))
if image_id is None:
raise Exception('Failed to create image')
if use_custom_images:
logger.debug("Creating 2nd image for Tempest suite")
_, image_id_alt = os_utils.get_or_create_image(
- CONST.openstack_image_name_alt, GLANCE_IMAGE_PATH,
- CONST.openstack_image_disk_format)
+ CONST.__getattribute__('openstack_image_name_alt'),
+ GLANCE_IMAGE_PATH,
+ CONST.__getattribute__('openstack_image_disk_format'))
if image_id_alt is None:
raise Exception('Failed to create image')
- if CONST.tempest_use_custom_flavors or use_custom_flavors:
+ if (CONST.__getattribute__('tempest_use_custom_flavors') or
+ use_custom_flavors):
# adding alternative flavor should be trivial should we need it
logger.debug("Creating flavor for Tempest suite")
_, flavor_id = os_utils.get_or_create_flavor(
- CONST.openstack_flavor_name,
- CONST.openstack_flavor_ram,
- CONST.openstack_flavor_disk,
- CONST.openstack_flavor_vcpus)
+ CONST.__getattribute__('openstack_flavor_name'),
+ CONST.__getattribute__('openstack_flavor_ram'),
+ CONST.__getattribute__('openstack_flavor_disk'),
+ CONST.__getattribute__('openstack_flavor_vcpus'))
if flavor_id is None:
raise Exception('Failed to create flavor')
if use_custom_flavors:
logger.debug("Creating 2nd flavor for tempest_defcore")
_, flavor_id_alt = os_utils.get_or_create_flavor(
- CONST.openstack_flavor_name_alt,
- CONST.openstack_flavor_ram,
- CONST.openstack_flavor_disk,
- CONST.openstack_flavor_vcpus)
+ CONST.__getattribute__('openstack_flavor_name_alt'),
+ CONST.__getattribute__('openstack_flavor_ram'),
+ CONST.__getattribute__('openstack_flavor_disk'),
+ CONST.__getattribute__('openstack_flavor_vcpus'))
if flavor_id_alt is None:
raise Exception('Failed to create flavor')
@@ -132,7 +138,7 @@ def get_verifier_id():
Returns verifer id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
- CONST.tempest_deployment_name +
+ CONST.__getattribute__('tempest_deployment_name') +
"/ {print $2}'")
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
@@ -149,7 +155,7 @@ def get_verifier_deployment_id():
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
- CONST.rally_deployment_name +
+ CONST.__getattribute__('rally_deployment_name') +
"/ {print $2}'")
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
@@ -168,7 +174,7 @@ def get_verifier_repo_dir(verifier_id):
if not verifier_id:
verifier_id = get_verifier_id()
- return os.path.join(CONST.dir_rally_inst,
+ return os.path.join(CONST.__getattribute__('dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
@@ -184,7 +190,7 @@ def get_verifier_deployment_dir(verifier_id, deployment_id):
if not deployment_id:
deployment_id = get_verifier_deployment_id()
- return os.path.join(CONST.dir_rally_inst,
+ return os.path.join(CONST.__getattribute__('dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
@@ -247,8 +253,9 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
with open(conf_file, 'wb') as config_file:
config.write(config_file)
- confpath = os.path.join(CONST.dir_functest_test,
- CONST.refstack_tempest_conf_path)
+ confpath = os.path.join(
+ CONST.__getattribute__('dir_functest_test'),
+ CONST.__getattribute__('refstack_tempest_conf_path'))
shutil.copyfile(conf_file, confpath)
@@ -263,32 +270,37 @@ def configure_tempest_update_params(tempest_conf_file,
config.set(
'compute',
'fixed_network_name',
- CONST.tempest_private_net_name)
+ CONST.__getattribute__('tempest_private_net_name'))
config.set('compute', 'volume_device_name',
- CONST.tempest_volume_device_name)
- if CONST.tempest_use_custom_images:
+ CONST.__getattribute__('tempest_volume_device_name'))
+ if CONST.__getattribute__('tempest_use_custom_images'):
if IMAGE_ID is not None:
config.set('compute', 'image_ref', IMAGE_ID)
if IMAGE_ID_ALT is not None:
config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
- if CONST.tempest_use_custom_flavors:
+ if CONST.__getattribute__('tempest_use_custom_flavors'):
if FLAVOR_ID is not None:
config.set('compute', 'flavor_ref', FLAVOR_ID)
if FLAVOR_ID_ALT is not None:
config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- config.set('identity', 'tenant_name', CONST.tempest_identity_tenant_name)
- config.set('identity', 'username', CONST.tempest_identity_user_name)
- config.set('identity', 'password', CONST.tempest_identity_user_password)
+ config.set('identity', 'tenant_name',
+ CONST.__getattribute__('tempest_identity_tenant_name'))
+ config.set('identity', 'username',
+ CONST.__getattribute__('tempest_identity_user_name'))
+ config.set('identity', 'password',
+ CONST.__getattribute__('tempest_identity_user_password'))
config.set('identity', 'region', 'RegionOne')
config.set(
- 'validation', 'ssh_timeout', CONST.tempest_validation_ssh_timeout)
+ 'validation', 'ssh_timeout',
+ CONST.__getattribute__('tempest_validation_ssh_timeout'))
config.set('object-storage', 'operator_role',
- CONST.tempest_object_storage_operator_role)
+ CONST.__getattribute__('tempest_object_storage_operator_role'))
- if CONST.OS_ENDPOINT_TYPE is not None:
+ if CONST.__getattribute__('OS_ENDPOINT_TYPE') is not None:
sections = config.sections()
if os_utils.is_keystone_v3():
- config.set('identity', 'v3_endpoint_type', CONST.OS_ENDPOINT_TYPE)
+ config.set('identity', 'v3_endpoint_type',
+ CONST.__getattribute__('OS_ENDPOINT_TYPE'))
if 'identity-feature-enabled' not in sections:
config.add_section('identity-feature-enabled')
config.set('identity-feature-enabled', 'api_v2', False)
@@ -304,7 +316,7 @@ def configure_tempest_update_params(tempest_conf_file,
if service not in sections:
config.add_section(service)
config.set(service, 'endpoint_type',
- CONST.OS_ENDPOINT_TYPE)
+ CONST.__getattribute__('OS_ENDPOINT_TYPE'))
with open(tempest_conf_file, 'wb') as config_file:
config.write(config_file)
@@ -365,22 +377,22 @@ def configure_tempest_multisite_params(tempest_conf_file):
"StrictHostKeyChecking=no")
# Get the controller IP from the fuel node
- cmd = 'sshpass -p %s ssh 2>/dev/null %s %s@%s \
- \'fuel node --env 1| grep controller | grep "True\| 1" \
- | awk -F\| "{print \$5}"\'' % (installer_password,
+ cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s '
+ '\'fuel node --env 1| grep controller | grep "True\| 1" '
+ '| awk -F\| "{print \$5}"\'' % (installer_password,
ssh_options,
installer_username,
- installer_ip)
+ installer_ip))
multisite_controller_ip = "".join(os.popen(cmd).read().split())
# Login to controller and get bind host details
- cmd = 'sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" \
- grep -e "^bind_" %s \\""' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip,
- multisite_controller_ip,
- kingbird_conf_path)
+ cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" '
+ 'grep -e "^bind_" %s \\""' % (installer_password,
+ ssh_options,
+ installer_username,
+ installer_ip,
+ multisite_controller_ip,
+ kingbird_conf_path))
bind_details = os.popen(cmd).read()
bind_details = "".join(bind_details.split())
# Extract port number from the bind details
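The multisite hunks above rebuild the long sshpass commands with parenthesized, implicitly concatenated string literals instead of backslash line continuations. A tiny self-contained illustration of the technique; host, credentials, and options are placeholders:

    ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"

    # Adjacent string literals inside the parentheses are joined at compile
    # time, so cmd is a single one-line string with no embedded newlines.
    cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s '
           '"hostname"' % ('secret', ssh_options, 'root', '192.0.2.10'))
    print(cmd)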
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index a41d07c7..233ceb48 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -81,8 +81,8 @@ class TempestCommon(testcase.TestCase):
result_file = open(conf_utils.TEMPEST_LIST, 'w')
black_tests = []
try:
- installer_type = CONST.INSTALLER_TYPE
- deploy_scenario = CONST.DEPLOY_SCENARIO
+ installer_type = CONST.__getattribute__('INSTALLER_TYPE')
+ deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
if (bool(installer_type) * bool(deploy_scenario)):
# if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the
# file
@@ -119,9 +119,9 @@ class TempestCommon(testcase.TestCase):
header = ("Tempest environment:\n"
" SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (CONST.INSTALLER_TYPE,
- CONST.DEPLOY_SCENARIO,
- CONST.NODE_NAME,
+ (CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('NODE_NAME'),
time.strftime("%a %b %d %H:%M:%S %Z %Y")))
f_stdout = open(
@@ -273,7 +273,8 @@ class TempestMultisite(TempestCommon):
TempestCommon.__init__(self, **kwargs)
self.MODE = "feature_multisite"
self.OPTION = "--concurrency 1"
- conf_utils.install_verifier_ext(CONST.dir_repo_kingbird)
+ conf_utils.install_verifier_ext(
+ CONST.__getattribute__('dir_repo_kingbird'))
class TempestCustom(TempestCommon):
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index d48c79cc..88e5d2b8 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -5,19 +5,32 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
import logging
+import unittest
import mock
from functest.ci import run_tests
from functest.utils.constants import CONST
+from functest.core.testcase import TestCase
+
+
+class FakeModule(TestCase):
+
+ def run(self):
+ return TestCase.EX_OK
+
+ def push_to_db(self):
+ return TestCase.EX_OK
+
+ def is_successful(self):
+ return TestCase.EX_OK
class RunTestsTesting(unittest.TestCase):
def setUp(self):
+ self.runner = run_tests.Runner()
self.sep = 'test_sep'
self.creds = {'OS_AUTH_URL': 'http://test_ip:test_port/v2.0',
'OS_USERNAME': 'test_os_username',
@@ -36,11 +49,10 @@ class RunTestsTesting(unittest.TestCase):
self.tiers.configure_mock(**attrs)
self.run_tests_parser = run_tests.RunTestsParser()
- self.global_variables = run_tests.GlobalVariables()
@mock.patch('functest.ci.run_tests.logger.info')
def test_print_separator(self, mock_logger_info):
- run_tests.print_separator(self.sep)
+ self.runner.print_separator(self.sep)
mock_logger_info.assert_called_once_with(self.sep * 44)
@mock.patch('functest.ci.run_tests.logger.error')
@@ -48,24 +60,24 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.os.path.isfile',
return_value=False), \
self.assertRaises(Exception):
- run_tests.source_rc_file()
+ self.runner.source_rc_file()
@mock.patch('functest.ci.run_tests.logger.debug')
- def test_source_rc_file_default(self, mock_logger_debug):
- with mock.patch('functest.ci.run_tests.os.path.isfile',
- return_value=True), \
- mock.patch('functest.ci.run_tests.os_utils.source_credentials',
- return_value=self.creds):
- run_tests.source_rc_file()
+ @mock.patch('functest.ci.run_tests.os.path.isfile',
+ return_value=True)
+ def test_source_rc_file_default(self, *args):
+ with mock.patch('functest.ci.run_tests.os_utils.source_credentials',
+ return_value=self.creds):
+ self.runner.source_rc_file()
@mock.patch('functest.ci.run_tests.os_snapshot.main')
def test_generate_os_snapshot(self, mock_os_snap):
- run_tests.generate_os_snapshot()
+ self.runner.generate_os_snapshot()
self.assertTrue(mock_os_snap.called)
@mock.patch('functest.ci.run_tests.os_clean.main')
def test_cleanup(self, mock_os_clean):
- run_tests.cleanup()
+ self.runner.cleanup()
self.assertTrue(mock_os_clean.called)
def test_get_run_dict_if_defined_default(self):
@@ -73,7 +85,7 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.'
'ft_utils.get_dict_by_test',
return_value={'run': mock_obj}):
- self.assertEqual(run_tests.get_run_dict('test_name'),
+ self.assertEqual(self.runner.get_run_dict('test_name'),
mock_obj)
@mock.patch('functest.ci.run_tests.logger.error')
@@ -83,7 +95,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
return_value=None):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
mock_logger_error.assert_called_once_with("Cannot get {}'s config "
"options"
@@ -93,7 +105,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
return_value={}):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
@mock.patch('functest.ci.run_tests.logger.exception')
@@ -103,7 +115,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
side_effect=Exception):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
mock_logger_except.assert_called_once_with("Cannot get {}'s config"
" options"
@@ -114,63 +126,67 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_name.return_value': 'test_name',
'needs_clean.return_value': False}
mock_test.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.print_separator'),\
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.get_run_dict',
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'),\
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=None), \
self.assertRaises(Exception) as context:
- run_tests.run_test(mock_test, 'tier_name')
+            self.runner.run_test(mock_test, 'tier_name')
msg = "Cannot import the class for the test case."
self.assertTrue(msg in context)
- def test_run_tests_default(self):
+ @mock.patch('functest.ci.run_tests.Runner.print_separator')
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.generate_os_snapshot')
+ @mock.patch('functest.ci.run_tests.Runner.cleanup')
+ @mock.patch('importlib.import_module', name="module",
+ return_value=mock.Mock(test_class=mock.Mock(
+ side_effect=FakeModule)))
+ @mock.patch('functest.utils.functest_utils.get_dict_by_test')
+ def test_run_tests_default(self, *args):
mock_test = mock.Mock()
- args = {'get_name.return_value': 'test_name',
- 'needs_clean.return_value': True}
- mock_test.configure_mock(**args)
+ kwargs = {'get_name.return_value': 'test_name',
+ 'needs_clean.return_value': True}
+ mock_test.configure_mock(**kwargs)
test_run_dict = {'module': 'test_module',
- 'class': mock.Mock,
- 'args': 'test_args'}
- with mock.patch('functest.ci.run_tests.print_separator'),\
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_os_snapshot'), \
- mock.patch('functest.ci.run_tests.cleanup'), \
- mock.patch('functest.ci.run_tests.get_run_dict',
- return_value=test_run_dict), \
- self.assertRaises(run_tests.BlockingTestFailed) as context:
- run_tests.GlobalVariables.CLEAN_FLAG = True
- run_tests.run_test(mock_test, 'tier_name')
- msg = 'The test case test_name failed and is blocking'
- self.assertTrue(msg in context)
+ 'class': 'test_class'}
+ with mock.patch('functest.ci.run_tests.Runner.get_run_dict',
+ return_value=test_run_dict):
+ self.runner.clean_flag = True
+ self.runner.run_test(mock_test, 'tier_name')
+ self.assertEqual(self.runner.overall_result,
+ run_tests.Result.EX_OK)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.print_separator'), \
- mock.patch('functest.ci.run_tests.run_test') as mock_method:
- run_tests.run_tier(self.tier)
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'), \
+ mock.patch(
+ 'functest.ci.run_tests.Runner.run_test') as mock_method:
+ self.runner.run_tier(self.tier)
mock_method.assert_any_call('test1', 'test_tier')
mock_method.assert_any_call('test2', 'test_tier')
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_missing_test(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.print_separator'):
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'):
self.tier.get_tests.return_value = None
- self.assertEqual(run_tests.run_tier(self.tier), 0)
+ self.assertEqual(self.runner.run_tier(self.tier), 0)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.run_tier') as mock_method:
+ with mock.patch(
+ 'functest.ci.run_tests.Runner.run_tier') as mock_method:
CONST.__setattr__('CI_LOOP', 'test_ci_loop')
- run_tests.run_all(self.tiers)
+ self.runner.run_all(self.tiers)
mock_method.assert_any_call(self.tier)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_missing_tier(self, mock_logger_info):
CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
- run_tests.run_all(self.tiers)
+ self.runner.run_all(self.tiers)
self.assertTrue(mock_logger_info.called)
def test_main_failed(self):
@@ -179,69 +195,78 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_tier.return_value': False,
'get_test.return_value': False}
mock_obj.configure_mock(**args)
-
with mock.patch('functest.ci.run_tests.tb.TierBuilder'), \
- mock.patch('functest.ci.run_tests.source_rc_file',
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file',
side_effect=Exception):
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
-
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file',
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file',
side_effect=Exception):
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
- def test_main_default(self):
- kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
+ def test_main_tier(self, *args):
+ mock_tier = mock.Mock()
+ args = {'get_name.return_value': 'tier_name'}
+ mock_tier.configure_mock(**args)
+ kwargs = {'test': 'tier_name', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': True,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': mock_tier,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_tier') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_tier') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_test(self, *args):
+ kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
+ mock_test = mock.Mock()
+ args = {'get_name.return_value': 'test_name',
+ 'needs_clean.return_value': True}
+ mock_test.configure_mock(**args)
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': True}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': mock_test}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_test') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_test') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_all_tier(self, *args):
kwargs = {'test': 'all', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_all') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_all') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_any_tier_test_ko(self, *args):
kwargs = {'test': 'any', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
mock.patch('functest.ci.run_tests.logger.debug') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
self.assertTrue(m.called)
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index c367d2cc..b9e78616 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -37,7 +37,7 @@ class OSRallyTesting(unittest.TestCase):
self.polling_iter = 2
def test_build_task_args_missing_floating_network(self):
- CONST.OS_AUTH_URL = None
+ CONST.__setattr__('OS_AUTH_URL', None)
with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
'os_utils.get_external_net',
return_value=None):
@@ -45,7 +45,7 @@ class OSRallyTesting(unittest.TestCase):
self.assertEqual(task_args['floating_network'], '')
def test_build_task_args_missing_net_id(self):
- CONST.OS_AUTH_URL = None
+ CONST.__setattr__('OS_AUTH_URL', None)
self.rally_base.network_dict['net_id'] = ''
with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
'os_utils.get_external_net',
@@ -54,7 +54,7 @@ class OSRallyTesting(unittest.TestCase):
self.assertEqual(task_args['netid'], '')
def test_build_task_args_missing_auth_url(self):
- CONST.OS_AUTH_URL = None
+ CONST.__setattr__('OS_AUTH_URL', None)
with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
'os_utils.get_external_net',
return_value='test_floating_network'):
@@ -134,8 +134,8 @@ class OSRallyTesting(unittest.TestCase):
'lineline')
def test_excl_scenario_default(self):
- CONST.INSTALLER_TYPE = 'test_installer'
- CONST.DEPLOY_SCENARIO = 'test_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
dic = {'scenario': [{'scenarios': ['test_scenario'],
'installers': ['test_installer'],
'tests': ['test']}]}
@@ -152,8 +152,8 @@ class OSRallyTesting(unittest.TestCase):
[])
def test_excl_func_default(self):
- CONST.INSTALLER_TYPE = 'test_installer'
- CONST.DEPLOY_SCENARIO = 'test_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
dic = {'functionality': [{'functions': ['no_live_migration'],
'tests': ['test']}]}
with mock.patch('__builtin__.open', mock.mock_open()), \
diff --git a/functest/tests/unit/openstack/tempest/test_conf_utils.py b/functest/tests/unit/openstack/tempest/test_conf_utils.py
index bdd1c7a6..23f6e45c 100644
--- a/functest/tests/unit/openstack/tempest/test_conf_utils.py
+++ b/functest/tests/unit/openstack/tempest/test_conf_utils.py
@@ -52,12 +52,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
return_value=(mock.Mock(), None)), \
self.assertRaises(Exception) as context:
- CONST.tempest_use_custom_images = True
+ CONST.__setattr__('tempest_use_custom_images', True)
conf_utils.create_tempest_resources()
msg = 'Failed to create image'
self.assertTrue(msg in context)
- CONST.tempest_use_custom_images = False
+ CONST.__setattr__('tempest_use_custom_images', False)
conf_utils.create_tempest_resources(use_custom_images=True)
msg = 'Failed to create image'
self.assertTrue(msg in context)
@@ -82,20 +82,20 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'os_utils.get_or_create_flavor',
return_value=(mock.Mock(), None)), \
self.assertRaises(Exception) as context:
- CONST.tempest_use_custom_images = True
- CONST.tempest_use_custom_flavors = True
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', True)
conf_utils.create_tempest_resources()
msg = 'Failed to create flavor'
self.assertTrue(msg in context)
- CONST.tempest_use_custom_images = True
- CONST.tempest_use_custom_flavors = False
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', False)
conf_utils.create_tempest_resources(use_custom_flavors=False)
msg = 'Failed to create flavor'
self.assertTrue(msg in context)
def test_get_verifier_id_missing_verifier(self):
- CONST.tempest_deployment_name = 'test_deploy_name'
+ CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen, \
self.assertRaises(Exception):
@@ -106,7 +106,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
conf_utils.get_verifier_id(),
def test_get_verifier_id_default(self):
- CONST.tempest_deployment_name = 'test_deploy_name'
+ CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen:
mock_stdout = mock.Mock()
@@ -118,7 +118,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'test_deploy_id')
def test_get_verifier_deployment_id_missing_rally(self):
- CONST.rally_deployment_name = 'test_rally_deploy_name'
+        CONST.__setattr__('rally_deployment_name', 'test_rally_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen, \
self.assertRaises(Exception):
@@ -129,7 +129,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
conf_utils.get_verifier_deployment_id(),
def test_get_verifier_deployment_id_default(self):
- CONST.rally_deployment_name = 'test_rally_deploy_name'
+        CONST.__setattr__('rally_deployment_name', 'test_rally_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen:
mock_stdout = mock.Mock()
@@ -238,8 +238,8 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.shutil.copyfile'):
- CONST.dir_functest_test = 'test_dir'
- CONST.refstack_tempest_conf_path = 'test_path'
+ CONST.__setattr__('dir_functest_test', 'test_dir')
+ CONST.__setattr__('refstack_tempest_conf_path', 'test_path')
conf_utils.configure_tempest_defcore('test_dep_dir',
img_flavor_dict)
mset.assert_any_call('compute', 'image_ref', 'test_image_id')
@@ -264,8 +264,8 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.backup_tempest_config'):
- CONST.dir_functest_test = 'test_dir'
- CONST.OS_ENDPOINT_TYPE = None
+ CONST.__setattr__('dir_functest_test', 'test_dir')
+ CONST.__setattr__('OS_ENDPOINT_TYPE', None)
conf_utils.\
configure_tempest_update_params('test_conf_file',
IMAGE_ID=image_id,
@@ -275,25 +275,25 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
self.assertTrue(mwrite.called)
def test_configure_tempest_update_params_missing_image_id(self):
- CONST.tempest_use_custom_images = True
+ CONST.__setattr__('tempest_use_custom_images', True)
self._test_missing_param(('compute', 'image_ref',
'test_image_id'), 'test_image_id',
None)
def test_configure_tempest_update_params_missing_image_id_alt(self):
- CONST.tempest_use_custom_images = True
+ CONST.__setattr__('tempest_use_custom_images', True)
conf_utils.IMAGE_ID_ALT = 'test_image_id_alt'
self._test_missing_param(('compute', 'image_ref_alt',
'test_image_id_alt'), None, None)
def test_configure_tempest_update_params_missing_flavor_id(self):
- CONST.tempest_use_custom_flavors = True
+ CONST.__setattr__('tempest_use_custom_flavors', True)
self._test_missing_param(('compute', 'flavor_ref',
'test_flavor_id'), None,
'test_flavor_id')
def test_configure_tempest_update_params_missing_flavor_id_alt(self):
- CONST.tempest_use_custom_flavors = True
+ CONST.__setattr__('tempest_use_custom_flavors', True)
conf_utils.FLAVOR_ID_ALT = 'test_flavor_id_alt'
self._test_missing_param(('compute', 'flavor_ref_alt',
'test_flavor_id_alt'), None,
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index 3df2b321..b8b258b3 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -112,8 +112,8 @@ class OSTempestTesting(unittest.TestCase):
mock.patch.object(self.tempestcommon, 'read_file',
return_value=['test1', 'test2']):
conf_utils.TEMPEST_BLACKLIST = Exception
- CONST.INSTALLER_TYPE = 'installer_type'
- CONST.DEPLOY_SCENARIO = 'deploy_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'installer_type')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'deploy_scenario')
self.tempestcommon.apply_tempest_blacklist()
obj = m()
obj.write.assert_any_call('test1\n')
@@ -128,8 +128,8 @@ class OSTempestTesting(unittest.TestCase):
return_value=['test1', 'test2']), \
mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'yaml.safe_load', return_value=item_dict):
- CONST.INSTALLER_TYPE = 'installer_type'
- CONST.DEPLOY_SCENARIO = 'deploy_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'installer_type')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'deploy_scenario')
self.tempestcommon.apply_tempest_blacklist()
obj = m()
obj.write.assert_any_call('test1\n')