diff options
author | Cédric Ollivier <cedric.ollivier@orange.com> | 2018-02-28 09:35:49 +0100 |
---|---|---|
committer | Cédric Ollivier <cedric.ollivier@orange.com> | 2018-02-28 09:36:32 +0100 |
commit | 2aab5c48df64b044ab9bae6e883e6e0acaabbf52 (patch) | |
tree | c82294952795b3953130bf624929d6ecae3e4fcf /xtesting/ci | |
parent | baa8f2d5f67d45e5761f92cb93fe22050f08d0fe (diff) |
Rename all Functest refs to Xtesting
It mainly renames python modules and then the related documentation and
config files.
Change-Id: I186010bb88d3d39afe7b8fd1ebcef9c690cc1282
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
Diffstat (limited to 'xtesting/ci')
-rw-r--r-- | xtesting/ci/__init__.py | 0 | ||||
-rw-r--r-- | xtesting/ci/logging.ini | 65 | ||||
-rw-r--r-- | xtesting/ci/run_tests.py | 302 | ||||
-rw-r--r-- | xtesting/ci/testcases.yaml | 424 | ||||
-rw-r--r-- | xtesting/ci/tier_builder.py | 106 | ||||
-rw-r--r-- | xtesting/ci/tier_handler.py | 174 |
6 files changed, 1071 insertions, 0 deletions
diff --git a/xtesting/ci/__init__.py b/xtesting/ci/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/xtesting/ci/__init__.py diff --git a/xtesting/ci/logging.ini b/xtesting/ci/logging.ini new file mode 100644 index 00000000..ab82073f --- /dev/null +++ b/xtesting/ci/logging.ini @@ -0,0 +1,65 @@ +[loggers] +keys=root,xtesting,api,ci,cli,core,energy,opnfv_tests,utils + +[handlers] +keys=console,wconsole,file,null + +[formatters] +keys=standard + +[logger_root] +level=NOTSET +handlers=null + +[logger_xtesting] +level=NOTSET +handlers=file +qualname=xtesting + +[logger_ci] +level=NOTSET +handlers=console +qualname=xtesting.ci + +[logger_core] +level=NOTSET +handlers=console +qualname=xtesting.core + +[logger_energy] +level=NOTSET +handlers=wconsole +qualname=xtesting.energy + +[logger_utils] +level=NOTSET +handlers=wconsole +qualname=xtesting.utils + +[handler_null] +class=NullHandler +level=NOTSET +formatter=standard +args=() + +[handler_console] +class=StreamHandler +level=INFO +formatter=standard +args=(sys.stdout,) + +[handler_wconsole] +class=StreamHandler +level=WARN +formatter=standard +args=(sys.stdout,) + +[handler_file] +class=FileHandler +level=DEBUG +formatter=standard +args=("/home/opnfv/xtesting/results/xtesting.log",) + +[formatter_standard] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s +datefmt= diff --git a/xtesting/ci/run_tests.py b/xtesting/ci/run_tests.py new file mode 100644 index 00000000..5c9143a3 --- /dev/null +++ b/xtesting/ci/run_tests.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python + +# Copyright (c) 2016 Ericsson AB and others. +# +# All rights reserved. 
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

""" The entry of running tests:
1) Parses xtesting/ci/testcases.yaml to check which testcase(s) to be run
2) Execute the common operations on every testcase (run, push results to db...)
3) Return the right status code
"""

import argparse
import importlib
import logging
import logging.config
import os
import re
import sys
import textwrap
import pkg_resources

import enum
import prettytable
import yaml

from xtesting.ci import tier_builder
from xtesting.core import testcase
from xtesting.utils import constants
from xtesting.utils import env

LOGGER = logging.getLogger('xtesting.ci.run_tests')


class Result(enum.Enum):
    """The overall result in enumerated type"""
    # pylint: disable=too-few-public-methods
    EX_OK = os.EX_OK
    EX_ERROR = -1


class BlockingTestFailed(Exception):
    """Exception when the blocking test fails"""


class TestNotEnabled(Exception):
    """Exception when the test is not enabled"""


class RunTestsParser(object):
    """Parser to run tests"""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-t", "--test", dest="test", action='store',
                                 help="Test case or tier (group of tests) "
                                 "to be executed. It will run all the test "
                                 "if not specified.")
        self.parser.add_argument("-n", "--noclean", help="Do not clean "
                                 "OpenStack resources after running each "
                                 "test (default=false).",
                                 action="store_true")
        self.parser.add_argument("-r", "--report", help="Push results to "
                                 "database (default=false).",
                                 action="store_true")

    def parse_args(self, argv=None):
        """Parse arguments.

        It can call sys.exit if arguments are incorrect.

        Returns:
            the arguments from cmdline
        """
        return vars(self.parser.parse_args(argv))


class Runner(object):
    """Runner class"""

    def __init__(self):
        self.executed_test_cases = {}
        self.overall_result = Result.EX_OK
        self.clean_flag = True
        self.report_flag = False
        self.tiers = tier_builder.TierBuilder(
            env.get('INSTALLER_TYPE'),
            env.get('DEPLOY_SCENARIO'),
            pkg_resources.resource_filename('xtesting', 'ci/testcases.yaml'))

    @staticmethod
    def source_envfile(rc_file=constants.ENV_FILE):
        """Source the env file passed as arg into os.environ."""
        if not os.path.isfile(rc_file):
            LOGGER.debug("No env file %s found", rc_file)
            return
        with open(rc_file, "r") as rcfd:
            for line in rcfd:
                var = (line.rstrip('"\n').replace('export ', '').split(
                    "=") if re.search(r'(.*)=(.*)', line) else None)
                # The two next lines should be modified as soon as rc_file
                # conforms with common rules. Be aware that it could induce
                # issues if value starts with '
                if var:
                    key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
                    # Rejoin with '=' so values containing '=' are kept
                    # intact (a plain "".join() silently dropped them).
                    value = re.sub(
                        r'^["\' ]*|[ \'"]*$', '', "=".join(var[1:]))
                    os.environ[key] = value
            rcfd.seek(0, 0)
            LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())

    @staticmethod
    def get_dict_by_test(testname):
        """Return the testcases.yaml dict of the given case, else None."""
        # pylint: disable=bad-continuation
        with open(pkg_resources.resource_filename(
                'xtesting', 'ci/testcases.yaml')) as tyaml:
            testcases_yaml = yaml.safe_load(tyaml)
        for dic_tier in testcases_yaml.get("tiers"):
            for dic_testcase in dic_tier['testcases']:
                if dic_testcase['case_name'] == testname:
                    return dic_testcase
        LOGGER.error('Project %s is not defined in testcases.yaml', testname)
        return None

    @staticmethod
    def get_run_dict(testname):
        """Obtain the 'run' block of the testcase from testcases.yaml"""
        try:
            dic_testcase = Runner.get_dict_by_test(testname)
            if not dic_testcase:
                LOGGER.error("Cannot get %s's config options", testname)
            elif 'run' in dic_testcase:
                return dic_testcase['run']
            return None
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Cannot get %s's config options", testname)
            return None

    def run_test(self, test):
        """Run one test case.

        Raises:
            TestNotEnabled: if the case is disabled in testcases.yaml.
            Exception: if no 'run' block could be obtained for the case.
        """
        if not test.is_enabled():
            raise TestNotEnabled(
                "The test case {} is not enabled".format(test.get_name()))
        LOGGER.info("Running test case '%s'...", test.get_name())
        result = testcase.TestCase.EX_RUN_ERROR
        run_dict = self.get_run_dict(test.get_name())
        if run_dict:
            try:
                module = importlib.import_module(run_dict['module'])
                cls = getattr(module, run_dict['class'])
                test_dict = Runner.get_dict_by_test(test.get_name())
                test_case = cls(**test_dict)
                self.executed_test_cases[test.get_name()] = test_case
                # 'args' is optional in testcases.yaml. Test membership
                # explicitly: a try/except KeyError here would silently
                # re-run the case if run() itself raised KeyError.
                if 'args' in run_dict:
                    test_case.run(**run_dict['args'])
                else:
                    test_case.run()
                if self.report_flag:
                    test_case.push_to_db()
                if test.get_project() == "xtesting":
                    result = test_case.is_successful()
                else:
                    # external projects only have to complete their run
                    result = testcase.TestCase.EX_OK
                LOGGER.info("Test result:\n\n%s\n", test_case)
                if self.clean_flag:
                    test_case.clean()
            except ImportError:
                LOGGER.exception("Cannot import module %s", run_dict['module'])
            except AttributeError:
                LOGGER.exception("Cannot get class %s", run_dict['class'])
        else:
            raise Exception("Cannot import the class for the test case.")
        return result

    def run_tier(self, tier):
        """Run all enabled test cases of one tier.

        Raises:
            BlockingTestFailed: if a blocking case of the tier failed.
        """
        tier_name = tier.get_name()
        tests = tier.get_tests()
        if not tests:
            LOGGER.info("There are no supported test cases in this tier "
                        "for the given scenario")
            self.overall_result = Result.EX_ERROR
        else:
            LOGGER.info("Running tier '%s'", tier_name)
            for test in tests:
                self.run_test(test)
                test_case = self.executed_test_cases[test.get_name()]
                if test_case.is_successful() != testcase.TestCase.EX_OK:
                    LOGGER.error("The test case '%s' failed.",
                                 test.get_name())
                    if test.get_project() == "xtesting":
                        self.overall_result = Result.EX_ERROR
                    if test.is_blocking():
                        raise BlockingTestFailed(
                            "The test case {} failed and is blocking".format(
                                test.get_name()))
        return self.overall_result

    def run_all(self):
        """Run all tiers whose ci_loop regex matches the current CI loop."""
        tiers_to_run = []
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        for tier in self.tiers.get_tiers():
            # NOTE(review): env.get('CI_LOOP') is presumably always set by
            # the deployment; a None value would raise TypeError here and
            # be caught by main()'s broad except — TODO confirm.
            ci_loop = env.get('CI_LOOP')
            if (tier.get_tests() and
                    re.search(ci_loop, tier.get_ci_loop()) is not None):
                tiers_to_run.append(tier)
                msg.add_row([tier.get_name(), tier.get_order(),
                             tier.get_ci_loop(),
                             textwrap.fill(tier.description, width=40),
                             textwrap.fill(' '.join([str(x.get_name(
                                 )) for x in tier.get_tests()]), width=40)])
        LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
        for tier in tiers_to_run:
            self.run_tier(tier)

    def main(self, **kwargs):
        """Entry point of class Runner.

        Returns:
            Result.EX_OK or Result.EX_ERROR.
        """
        if 'noclean' in kwargs:
            self.clean_flag = not kwargs['noclean']
        if 'report' in kwargs:
            self.report_flag = kwargs['report']
        # kwargs.get() keeps API callers without a 'test' key from raising
        # KeyError; argparse always supplies the key (possibly None).
        test_name = kwargs.get('test')
        try:
            LOGGER.info("Deployment description:\n\n%s\n", env.string())
            self.source_envfile()
            # Truthiness (not membership) check: argparse sets test=None
            # when -t is omitted, and the documented behavior is then to
            # run everything.
            if test_name:
                LOGGER.debug("Test args: %s", test_name)
                if self.tiers.get_tier(test_name):
                    self.run_tier(self.tiers.get_tier(test_name))
                elif self.tiers.get_test(test_name):
                    result = self.run_test(
                        self.tiers.get_test(test_name))
                    if result != testcase.TestCase.EX_OK:
                        LOGGER.error("The test case '%s' failed.",
                                     test_name)
                        self.overall_result = Result.EX_ERROR
                elif test_name == "all":
                    self.run_all()
                else:
                    LOGGER.error("Unknown test case or tier '%s', or not "
                                 "supported by the given scenario '%s'.",
                                 test_name,
                                 env.get('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
                                 self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
        except BlockingTestFailed:
            # overall_result was already set by run_tier(); just stop.
            pass
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
        if not self.tiers.get_test(test_name):
            self.summary(self.tiers.get_tier(test_name))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result

    def summary(self, tier=None):
        """To generate xtesting report showing the overall results"""
        msg = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
                    test_case = self.executed_test_cases[test.get_name()]
                except KeyError:
                    # never instantiated: the run stopped before this case
                    msg.add_row([test.get_name(), test.get_project(),
                                 each_tier.get_name(), "00:00", "SKIP"])
                else:
                    result = 'PASS' if (test_case.is_successful(
                        ) == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
                             each_tier.get_name(), "00:00", "SKIP"])
        LOGGER.info("Xtesting report:\n\n%s\n", msg)


def main():
    """Entry point"""
    logging.config.fileConfig(pkg_resources.resource_filename(
        'xtesting', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    return runner.main(**args).value
+ testcases: + - + case_name: connection_check + project_name: xtesting + criteria: 100 + blocking: true + description: >- + This test case verifies the retrieval of OpenStack clients: + Keystone, Glance, Neutron and Nova and may perform some + simple queries. When the config value of + snaps.use_keystone is True, xtesting must have access to + the cloud's private network. + dependencies: + installer: '^((?!netvirt).)*$' + scenario: '' + run: + module: + 'xtesting.opnfv_tests.openstack.snaps.connection_check' + class: 'ConnectionCheck' + + - + case_name: api_check + project_name: xtesting + criteria: 100 + blocking: true + description: >- + This test case verifies the retrieval of OpenStack clients: + Keystone, Glance, Neutron and Nova and may perform some + simple queries. When the config value of + snaps.use_keystone is True, xtesting must have access to + the cloud's private network. + dependencies: + installer: '^((?!netvirt).)*$' + scenario: '^((?!lxd).)*$' + run: + module: 'xtesting.opnfv_tests.openstack.snaps.api_check' + class: 'ApiCheck' + + - + case_name: snaps_health_check + project_name: xtesting + criteria: 100 + blocking: true + description: >- + This test case creates executes the SimpleHealthCheck + Python test class which creates an, image, flavor, network, + and Cirros VM instance and observes the console output to + validate the single port obtains the correct IP address. + dependencies: + installer: '' + scenario: '^((?!lxd).)*$' + run: + module: 'xtesting.opnfv_tests.openstack.snaps.health_check' + class: 'HealthCheck' + + - + name: smoke + order: 1 + ci_loop: '(daily)|(weekly)' + description: >- + Set of basic Functional tests to validate the OPNFV scenarios. + testcases: + - + case_name: vping_ssh + project_name: xtesting + criteria: 100 + blocking: true + description: >- + This test case verifies: 1) SSH to an instance using + floating IPs over the public network. 2) Connectivity + between 2 instances over a private network. 
+ dependencies: + installer: '' + scenario: '^((?!odl_l3|odl-bgpvpn|gluon).)*$' + run: + module: 'xtesting.opnfv_tests.openstack.vping.vping_ssh' + class: 'VPingSSH' + + - + case_name: vping_userdata + project_name: xtesting + criteria: 100 + blocking: true + description: >- + This test case verifies: 1) Boot a VM with given userdata. + 2) Connectivity between 2 instances over a private network. + dependencies: + installer: '' + scenario: '^((?!lxd).)*$' + run: + module: + 'xtesting.opnfv_tests.openstack.vping.vping_userdata' + class: 'VPingUserdata' + + - + case_name: tempest_smoke_serial + project_name: xtesting + criteria: 100 + blocking: false + description: >- + This test case runs the smoke subset of the OpenStack + Tempest suite. The list of test cases is generated by + Tempest automatically and depends on the parameters of + the OpenStack deplopyment. + dependencies: + installer: '^((?!netvirt).)*$' + scenario: '' + run: + module: 'xtesting.opnfv_tests.openstack.tempest.tempest' + class: 'TempestSmokeSerial' + + - + case_name: rally_sanity + project_name: xtesting + criteria: 100 + blocking: false + description: >- + This test case runs a sub group of tests of the OpenStack + Rally suite in smoke mode. + dependencies: + installer: '' + scenario: '' + run: + module: 'xtesting.opnfv_tests.openstack.rally.rally' + class: 'RallySanity' + + - + case_name: refstack_defcore + project_name: xtesting + criteria: 100 + blocking: false + description: >- + This test case runs a sub group of tests of the OpenStack + Defcore testcases by using refstack client. + dependencies: + installer: '' + scenario: '' + run: + module: + 'xtesting.opnfv_tests.openstack.refstack_client.refstack_client' + class: 'RefstackClient' + + - + case_name: odl + project_name: xtesting + criteria: 100 + blocking: false + description: >- + Test Suite for the OpenDaylight SDN Controller. It + integrates some test suites from upstream using + Robot as the test framework. 
+ dependencies: + installer: '' + scenario: 'odl' + run: + module: 'xtesting.opnfv_tests.sdn.odl.odl' + class: 'ODLTests' + args: + suites: + - /src/odl_test/csit/suites/integration/basic + - /src/odl_test/csit/suites/openstack/neutron + + - + case_name: odl_netvirt + project_name: xtesting + criteria: 100 + blocking: false + description: >- + Test Suite for the OpenDaylight SDN Controller when + the NetVirt features are installed. It integrates + some test suites from upstream using Robot as the + test framework. + dependencies: + installer: 'apex' + scenario: 'os-odl_l3-nofeature' + run: + module: 'xtesting.opnfv_tests.sdn.odl.odl' + class: 'ODLTests' + args: + suites: + - /src/odl_test/csit/suites/integration/basic + - /src/odl_test/csit/suites/openstack/neutron + - /src/odl_test/csit/suites/openstack/connectivity + + - + case_name: snaps_smoke + project_name: xtesting + criteria: 100 + blocking: false + description: >- + This test case contains tests that setup and destroy + environments with VMs with and without Floating IPs + with a newly created user and project. Set the config + value snaps.use_floating_ips (True|False) to toggle + this functionality. When the config value of + snaps.use_keystone is True, xtesting must have access to + the cloud's private network. + + dependencies: + installer: '^((?!netvirt).)*$' + scenario: '^((?!lxd).)*$' + run: + module: 'xtesting.opnfv_tests.openstack.snaps.smoke' + class: 'SnapsSmoke' + + - + name: features + order: 2 + ci_loop: '(daily)|(weekly)' + description: >- + Test suites from feature projects + integrated in xtesting + testcases: + - + case_name: doctor-notification + project_name: doctor + criteria: 100 + blocking: false + description: >- + Test suite from Doctor project. 
+ dependencies: + installer: 'apex' + scenario: '^((?!fdio).)*$' + run: + module: 'xtesting.core.feature' + class: 'BashFeature' + args: + cmd: 'doctor-test' + + - + case_name: bgpvpn + project_name: sdnvpn + criteria: 100 + blocking: false + description: >- + Test suite from SDNVPN project. + dependencies: + installer: '(fuel)|(apex)|(netvirt)' + scenario: 'bgpvpn' + run: + module: 'sdnvpn.test.xtesting.run_sdnvpn_tests' + class: 'SdnvpnFunctest' + + - + case_name: xtesting-odl-sfc + project_name: sfc + criteria: 100 + blocking: false + description: >- + Test suite for odl-sfc to test two chains with one SF and + one chain with two SFs + dependencies: + installer: '' + scenario: 'odl.*sfc' + run: + module: 'xtesting.core.feature' + class: 'BashFeature' + args: + cmd: 'run_sfc_tests.py' + + - + case_name: barometercollectd + project_name: barometer + criteria: 100 + blocking: false + description: >- + Test suite for the Barometer project. Separate tests verify + the proper configuration and basic functionality of all the + collectd plugins as described in the Project Release Plan + dependencies: + installer: 'apex' + scenario: 'bar' + run: + module: 'baro_tests.barometer' + class: 'BarometerCollectd' + + - + case_name: fds + project_name: fastdatastacks + criteria: 100 + blocking: false + description: >- + Test Suite for the OpenDaylight SDN Controller when GBP + features are installed. It integrates some test suites from + upstream using Robot as the test framework. + dependencies: + installer: 'apex' + scenario: 'odl.*-fdio' + run: + module: 'xtesting.opnfv_tests.sdn.odl.odl' + class: 'ODLTests' + args: + suites: + - /src/fds/testing/robot + + - + name: components + order: 3 + ci_loop: 'weekly' + description: >- + Extensive testing of OpenStack API. 
+ testcases: + - + case_name: tempest_full_parallel + project_name: xtesting + criteria: 80 + blocking: false + description: >- + The list of test cases is generated by + Tempest automatically and depends on the parameters of + the OpenStack deplopyment. + dependencies: + installer: '^((?!netvirt).)*$' + scenario: '' + run: + module: 'xtesting.opnfv_tests.openstack.tempest.tempest' + class: 'TempestFullParallel' + + - + case_name: rally_full + project_name: xtesting + criteria: 100 + blocking: false + description: >- + This test case runs the full suite of scenarios of the + OpenStack Rally suite using several threads and iterations. + dependencies: + installer: '^((?!netvirt).)*$' + scenario: '' + run: + module: 'xtesting.opnfv_tests.openstack.rally.rally' + class: 'RallyFull' + + - + name: vnf + order: 4 + ci_loop: '(daily)|(weekly)' + description: >- + Collection of VNF test cases. + testcases: + - + case_name: cloudify_ims + project_name: xtesting + criteria: 80 + blocking: false + description: >- + This test case deploys an OpenSource vIMS solution from + Clearwater using the Cloudify orchestrator. It also runs + some signaling traffic. + dependencies: + installer: '' + scenario: 'os-nosdn-nofeature-.*ha' + run: + module: 'xtesting.opnfv_tests.vnf.ims.cloudify_ims' + class: 'CloudifyIms' + + - + case_name: vyos_vrouter + project_name: xtesting + criteria: 100 + blocking: false + description: >- + This test case is vRouter testing. 
+ dependencies: + installer: '' + scenario: 'os-nosdn-nofeature-.*ha' + run: + module: 'xtesting.opnfv_tests.vnf.router.cloudify_vrouter' + class: 'CloudifyVrouter' + + - + case_name: orchestra_openims + project_name: orchestra + enabled: false + criteria: 100 + blocking: false + description: >- + OpenIMS VNF deployment with Open Baton (Orchestra) + dependencies: + installer: '' + scenario: 'os-nosdn-nofeature-.*ha' + run: + module: 'xtesting.opnfv_tests.vnf.ims.orchestra_openims' + class: 'OpenImsVnf' + + - + case_name: orchestra_clearwaterims + project_name: orchestra + enabled: false + criteria: 100 + blocking: false + description: >- + ClearwaterIMS VNF deployment with Open Baton (Orchestra) + dependencies: + installer: '' + scenario: 'os-nosdn-nofeature-.*ha' + run: + module: + 'xtesting.opnfv_tests.vnf.ims.orchestra_clearwaterims' + class: 'ClearwaterImsVnf' + + - + case_name: juju_epc + project_name: xtesting + criteria: 100 + blocking: false + description: >- + vEPC validation with Juju as VNF manager and ABoT as test + executor. + dependencies: + installer: '' + scenario: 'os-nosdn-nofeature-.*ha' + run: + module: 'xtesting.opnfv_tests.vnf.epc.juju_epc' + class: 'JujuEpc' diff --git a/xtesting/ci/tier_builder.py b/xtesting/ci/tier_builder.py new file mode 100644 index 00000000..2c7b0cab --- /dev/null +++ b/xtesting/ci/tier_builder.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python + +# Copyright (c) 2016 Ericsson AB and others. +# +# All rights reserved. 
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""TierBuilder class to parse testcases config file"""

import yaml

import xtesting.ci.tier_handler as th


class TierBuilder(object):
    """Build Tier/TestCase objects from the testcases config file."""

    def __init__(self, ci_installer, ci_scenario, testcases_file):
        self.ci_installer = ci_installer
        self.ci_scenario = ci_scenario
        self.testcases_file = testcases_file
        self.dic_tier_array = None
        self.tier_objects = []
        self.testcases_yaml = None
        self.generate_tiers()

    def read_test_yaml(self):
        """Load the testcases file and cache its 'tiers' section."""
        with open(self.testcases_file) as tc_file:
            self.testcases_yaml = yaml.safe_load(tc_file)
        self.dic_tier_array = list(self.testcases_yaml.get("tiers"))

    def generate_tiers(self):
        """(Re)build Tier objects, sorting cases into enabled/skipped."""
        if self.dic_tier_array is None:
            self.read_test_yaml()

        del self.tier_objects[:]
        for dic_tier in self.dic_tier_array:
            tier = th.Tier(
                name=dic_tier['name'], order=dic_tier['order'],
                ci_loop=dic_tier['ci_loop'],
                description=dic_tier['description'])

            for dic_testcase in dic_tier['testcases']:
                installer = dic_testcase['dependencies']['installer']
                scenario = dic_testcase['dependencies']['scenario']
                dep = th.Dependency(installer, scenario)

                testcase = th.TestCase(
                    name=dic_testcase['case_name'],
                    enabled=dic_testcase.get('enabled', True),
                    dependency=dep, criteria=dic_testcase['criteria'],
                    blocking=dic_testcase['blocking'],
                    description=dic_testcase['description'],
                    project=dic_testcase['project_name'])
                # A case is run only if it both matches the installer and
                # scenario regexes and is enabled; otherwise it is kept
                # aside so it can be reported as skipped.
                if (testcase.is_compatible(self.ci_installer,
                                           self.ci_scenario) and
                        testcase.is_enabled()):
                    tier.add_test(testcase)
                else:
                    tier.skip_test(testcase)

            self.tier_objects.append(tier)

    def get_tiers(self):
        """Return all Tier objects."""
        return self.tier_objects

    def get_tier_names(self):
        """Return the names of all tiers."""
        return [tier.get_name() for tier in self.tier_objects]

    def get_tier(self, tier_name):
        """Return the Tier of the given name, else None."""
        for tier in self.tier_objects:
            if tier.get_name() == tier_name:
                return tier
        return None

    def get_tier_name(self, test_name):
        """Return the name of the tier containing the given case, else None."""
        for tier in self.tier_objects:
            if tier.is_test(test_name):
                return tier.name
        return None

    def get_test(self, test_name):
        """Return the TestCase of the given name, else None."""
        for tier in self.tier_objects:
            if tier.is_test(test_name):
                return tier.get_test(test_name)
        return None

    def get_tests(self, tier_name):
        """Return the test cases of the given tier, else None."""
        for tier in self.tier_objects:
            if tier.get_name() == tier_name:
                return tier.get_tests()
        return None

    def __str__(self):
        return "".join(str(tier) + "\n" for tier in self.tier_objects)
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""Tier and TestCase classes to wrap the testcases config file"""
# pylint: disable=missing-docstring

import re
import textwrap

import prettytable


LINE_LENGTH = 72


def split_text(text, max_len):
    """Greedily wrap the words of *text* into lines shorter than *max_len*."""
    wrapped = []
    current = ""
    for token in text.split():
        if len(current) + len(token) < max_len - 1:
            current += token + " "
        else:
            wrapped.append(current)
            current = token + " "
    if current != "":
        wrapped.append(current)
    return wrapped


class Tier(object):
    """A group of test cases sharing the same order and CI loop."""

    def __init__(self, name, order, ci_loop, description=""):
        self.tests_array = []
        self.skipped_tests_array = []
        self.name = name
        self.order = order
        self.ci_loop = ci_loop
        self.description = description

    def add_test(self, testcase):
        """Register a runnable test case in this tier."""
        self.tests_array.append(testcase)

    def skip_test(self, testcase):
        """Register a test case that will not be run."""
        self.skipped_tests_array.append(testcase)

    def get_tests(self):
        """Return a copy of the runnable test cases."""
        return list(self.tests_array)

    def get_skipped_test(self):
        """Return the skipped test cases."""
        return self.skipped_tests_array

    def get_test_names(self):
        """Return the names of the runnable test cases."""
        return [case.get_name() for case in self.tests_array]

    def get_test(self, test_name):
        """Return the runnable test case of the given name, else None."""
        return next(
            (case for case in self.tests_array
             if case.get_name() == test_name), None)

    def is_test(self, test_name):
        """Tell whether a runnable test case has the given name."""
        return any(
            case.get_name() == test_name for case in self.tests_array)

    def get_name(self):
        return self.name

    def get_order(self):
        return self.order

    def get_ci_loop(self):
        return self.ci_loop

    def __str__(self):
        table = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['tiers', 'order', 'CI Loop', 'description',
                         'testcases'])
        names = ' '.join([str(case.get_name()) for case in self.get_tests()])
        table.add_row(
            [self.name, self.order, self.ci_loop,
             textwrap.fill(self.description, width=40),
             textwrap.fill(names, width=40)])
        return table.get_string()
class TestCase(object):
    """One testcase entry of testcases.yaml with its run constraints."""

    def __init__(self, name, enabled, dependency, criteria, blocking,
                 description="", project=""):
        # pylint: disable=too-many-arguments
        self.name = name
        self.enabled = enabled
        self.dependency = dependency
        self.criteria = criteria
        self.blocking = blocking
        self.description = description
        self.project = project

    @staticmethod
    def is_none(item):
        """Tell whether *item* is None or the empty string."""
        return item is None or item == ""

    def is_compatible(self, ci_installer, ci_scenario):
        """Tell whether the case matches the installer/scenario regexes.

        A non-string dependency regex leads to TypeError, which is
        treated as an incompatibility.
        """
        try:
            checks = ((self.dependency.get_installer(), ci_installer),
                      (self.dependency.get_scenario(), ci_scenario))
            for regex, value in checks:
                if not self.is_none(value):
                    if re.search(regex, value) is None:
                        return False
            return True
        except TypeError:
            return False

    def get_name(self):
        return self.name

    def is_enabled(self):
        return self.enabled

    def get_criteria(self):
        return self.criteria

    def is_blocking(self):
        return self.blocking

    def get_project(self):
        return self.project

    def __str__(self):
        table = prettytable.PrettyTable(
            header_style='upper', padding_width=5,
            field_names=['test case', 'description', 'criteria', 'dependency'])
        table.add_row([self.name, textwrap.fill(self.description, width=40),
                       self.criteria, self.dependency])
        return table.get_string()


class Dependency(object):
    """The installer and scenario regexes constraining one test case."""

    def __init__(self, installer, scenario):
        self.installer = installer
        self.scenario = scenario

    def get_installer(self):
        return self.installer

    def get_scenario(self):
        return self.scenario

    def __str__(self):
        separator = "\n" if self.get_installer(
            ) and self.get_scenario() else ""
        return "{}{}{}".format(self.get_installer(), separator,
                               self.get_scenario())