-rw-r--r--  docker/Dockerfile | 5
-rw-r--r--  docker/Dockerfile.aarch64 | 3
-rwxr-xr-x  docker/add_images.sh | 11
-rw-r--r--  docs/testing/developer/internship/unit_tests/index.rst | 76
-rw-r--r--  functest/ci/config_aarch64_patch.yaml | 8
-rw-r--r--  functest/ci/config_functest.yaml | 7
-rw-r--r--  functest/ci/download_images.sh | 31
-rwxr-xr-x  functest/ci/prepare_env.py | 11
-rwxr-xr-x  functest/ci/run_tests.py | 405
-rw-r--r--  functest/ci/testcases.yaml | 1
-rw-r--r--  functest/cli/commands/cli_env.py | 20
-rw-r--r--  functest/cli/commands/cli_os.py | 15
-rw-r--r--  functest/cli/commands/cli_testcase.py | 13
-rw-r--r--  functest/cli/commands/cli_tier.py | 13
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 53
-rwxr-xr-x  functest/opnfv_tests/openstack/refstack_client/refstack_client.py | 42
-rwxr-xr-x  functest/opnfv_tests/openstack/refstack_client/tempest_conf.py | 8
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py | 138
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 31
-rwxr-xr-x  functest/opnfv_tests/sdn/odl/odl.py | 18
-rw-r--r--  functest/opnfv_tests/sdn/onos/onos.py | 2
-rw-r--r--  functest/opnfv_tests/vnf/ims/opera_ims.py | 8
-rw-r--r--  functest/tests/unit/ci/test_prepare_env.py | 8
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py | 171
-rw-r--r--  functest/tests/unit/cli/commands/test_cli_env.py | 21
-rw-r--r--  functest/tests/unit/cli/commands/test_cli_os.py | 33
-rw-r--r--  functest/tests/unit/cli/commands/test_cli_testcase.py | 16
-rw-r--r--  functest/tests/unit/cli/commands/test_cli_tier.py | 20
-rw-r--r--  functest/tests/unit/odl/test_odl.py | 40
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py | 27
-rw-r--r--  functest/tests/unit/openstack/refstack_client/test_refstack_client.py | 24
-rw-r--r--  functest/tests/unit/openstack/tempest/test_conf_utils.py | 36
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py | 26
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py | 16
-rw-r--r--  functest/utils/functest_utils.py | 10
-rw-r--r--  requirements.txt | 2
-rwxr-xr-x  run_unit_tests.sh | 5
-rw-r--r--  test-requirements.txt | 21
38 files changed, 736 insertions(+), 659 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6137cc94..202eb5cb 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -25,6 +25,7 @@ ARG REPOS_DIR=/home/opnfv/repos
ARG FUNCTEST_BASE_DIR=/home/opnfv/functest
ARG FUNCTEST_CONF_DIR=${FUNCTEST_BASE_DIR}/conf
ARG FUNCTEST_DATA_DIR=${FUNCTEST_BASE_DIR}/data
+ARG FUNCTEST_IMAGES_DIR=${FUNCTEST_BASE_DIR}/images
ARG FUNCTEST_RESULTS_DIR=${FUNCTEST_BASE_DIR}/results
ARG FUNCTEST_REPO_DIR=${REPOS_DIR}/functest
ARG FUNCTEST_TEST_DIR=${FUNCTEST_REPO_DIR}/functest/opnfv_tests
@@ -71,7 +72,9 @@ RUN pip install --upgrade pip
RUN mkdir -p ${REPOS_DIR} \
&& mkdir -p ${REPOS_VNFS_DIR} \
&& mkdir -p ${FUNCTEST_BASE_DIR}/results \
- && mkdir -p ${FUNCTEST_BASE_DIR}/conf \
+ && mkdir -p ${FUNCTEST_CONF_DIR} \
+ && mkdir -p ${FUNCTEST_DATA_DIR} \
+ && mkdir -p ${FUNCTEST_IMAGES_DIR} \
&& mkdir -p /root/.ssh \
&& chmod 700 /root/.ssh
diff --git a/docker/Dockerfile.aarch64 b/docker/Dockerfile.aarch64
index 83df2d0c..b03c6d97 100644
--- a/docker/Dockerfile.aarch64
+++ b/docker/Dockerfile.aarch64
@@ -24,6 +24,7 @@ ARG REPOS_DIR=/home/opnfv/repos
ARG FUNCTEST_BASE_DIR=/home/opnfv/functest
ARG FUNCTEST_CONF_DIR=${FUNCTEST_BASE_DIR}/conf
ARG FUNCTEST_DATA_DIR=${FUNCTEST_BASE_DIR}/data
+ARG FUNCTEST_IMAGES_DIR=${FUNCTEST_BASE_DIR}/images
ARG FUNCTEST_RESULTS_DIR=${FUNCTEST_BASE_DIR}/results
ARG FUNCTEST_REPO_DIR=${REPOS_DIR}/functest
ARG FUNCTEST_TEST_DIR=${FUNCTEST_REPO_DIR}/functest/opnfv_tests
@@ -71,6 +72,8 @@ RUN mkdir -p ${REPOS_DIR} \
&& mkdir -p ${REPOS_VNFS_DIR} \
&& mkdir -p ${FUNCTEST_BASE_DIR}/results \
&& mkdir -p ${FUNCTEST_BASE_DIR}/conf \
+ && mkdir -p ${FUNCTEST_DATA_DIR} \
+ && mkdir -p ${FUNCTEST_IMAGES_DIR} \
&& mkdir -p /root/.ssh \
&& chmod 700 /root/.ssh
diff --git a/docker/add_images.sh b/docker/add_images.sh
index af2956c2..919cecd8 100755
--- a/docker/add_images.sh
+++ b/docker/add_images.sh
@@ -7,11 +7,10 @@ CIRROS_REPO_URL=http://download.cirros-cloud.net
CIRROS_AARCH64_TAG=161201
CIRROS_X86_64_TAG=0.3.5
-wget ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${FUNCTEST_BASE_DIR}/data/
-wget ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${FUNCTEST_BASE_DIR}/data/
-wget http://205.177.226.237:9999/onosfw/firewall_block_image.img -P ${FUNCTEST_BASE_DIR}/data/
+wget ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${FUNCTEST_IMAGES_DIR}
+wget ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${FUNCTEST_IMAGES_DIR}
# Add the 3-part image for aarch64, since functest can be run from an x86 machine to test an aarch64 POD
-wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${FUNCTEST_BASE_DIR}/data/
-wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${FUNCTEST_BASE_DIR}/data/
-wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${FUNCTEST_BASE_DIR}/data/
+wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${FUNCTEST_IMAGES_DIR}
+wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${FUNCTEST_IMAGES_DIR}
+wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${FUNCTEST_IMAGES_DIR}
diff --git a/docs/testing/developer/internship/unit_tests/index.rst b/docs/testing/developer/internship/unit_tests/index.rst
index f969aa72..a117c860 100644
--- a/docs/testing/developer/internship/unit_tests/index.rst
+++ b/docs/testing/developer/internship/unit_tests/index.rst
@@ -25,41 +25,98 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+------------+----------+------------------+------------------------+
-| 2016-??-?? | 0.0.1 | Morgan Richomme | Beginning of the |
+| 2016-11-14 | 0.0.1 | Morgan Richomme | Beginning of the |
| | | (Orange) | Internship |
+------------+----------+------------------+------------------------+
+| 2017-03-31 | 0.0.2 | Ashish Kumar | During the |
+| | | (IIIT Hyderabad) | Internship |
++------------+----------+------------------+------------------------+
Overview:
=========
-
+The Functest project develops and integrates functional test cases for OPNFV
+and has been part of OPNFV since the beginning. Functest maintains its own test
+cases and framework, which includes several utility libraries. The project is
+growing rapidly, with new features and tests added as required. It is therefore
+the responsibility of every developer to preserve the integrity of the code: a
+new patch should not break existing functionality. To automate this part of the
+development process, unit tests should be written and wired into CI so that a
+patch which breaks existing unit tests or decreases the coverage cannot be
+merged.
Problem Statement:
------------------
-
+The goal of the internship is to create unit test suites for the Functest code
+with good code coverage (>80%) and to integrate them into continuous integration
+in order to consolidate the existing code.
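+
+As an illustration of that gate, the following minimal sketch (an assumption made
+for this document, not the actual Functest CI job) runs the unit tests under
+coverage.py and fails when the agreed 80% threshold is not reached::
+
+    import sys
+    import unittest
+
+    import coverage
+
+    # measure coverage of the functest package while the unit tests run
+    cov = coverage.Coverage(source=['functest'])
+    cov.start()
+    suite = unittest.defaultTestLoader.discover('functest/tests/unit')
+    unittest.TextTestRunner(verbosity=2).run(suite)
+    cov.stop()
+
+    # make the job fail when total coverage is below 80%
+    if cov.report() < 80.0:
+        sys.exit(1)
+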
Curation Phase
--------------
+The curation phase covered the first 3 to 4 weeks of the internship. It was used
+to get familiar with the Functest code and functionality, to explore the unit
+testing solutions used by other projects, and to define the strategy for writing
+unit tests in Functest.
-
+In this phase the following decisions were made:
+
+- Coverage should be at least 80%. Some functions, such as __init__, getters,
+  setters and other private methods, are tedious to unit test, so they are left
+  aside for now.
+- Test every module method by method.
+- Use mock for external or third-party services, system calls and other external
+  library calls which could affect the behaviour of the system while the unit
+  tests run (see the sketch after this list).
+- Add the unit tests to Jenkins as a passing criterion for patches.
+- Write tests in a modular way so that they can also serve as a form of
+  documentation.
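+
+To illustrate the mocking decision above (a hypothetical example, not one of the
+actual Functest suites), an external system call is replaced by a mock so that
+only the local logic is exercised, assuming the mock library used by the other
+Functest unit tests::
+
+    import subprocess
+    import unittest
+
+    import mock
+
+
+    def count_enabled_services():
+        # hypothetical helper which shells out to the OpenStack CLI
+        output = subprocess.check_output(['nova', 'service-list'])
+        return output.splitlines().count('enabled')
+
+
+    class CountEnabledServicesTesting(unittest.TestCase):
+
+        @mock.patch('subprocess.check_output',
+                    return_value='enabled\nenabled\ndisabled')
+        def test_counts_only_enabled(self, mock_exec):
+            # the system call is mocked: no OpenStack deployment is required
+            self.assertEqual(count_enabled_services(), 2)
+            mock_exec.assert_called_once_with(['nova', 'service-list'])
+
+
+    if __name__ == '__main__':
+        unittest.main()
+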
Schedule:
=========
-
-
-
+--------------------------+------------------------------------------+
| **Date** | **Comment** |
| | |
+--------------------------+------------------------------------------+
-| December - January | ........ |
+| Nov 14th - Nov 28th | 1. Learn Functest Project Business |
+| | 2. Set up the development environment |
+| | 3. Run Functest code |
++--------------------------+------------------------------------------+
+| Nov 28th - Dec.9th | 1. Explore Unit Testing Strategy, |
+| | 2. Learn about Mock in python |
++--------------------------+------------------------------------------+
+| Dec 12th - Dec 23rd | Implement Unit Tests for CLI |
+| | |
++--------------------------+------------------------------------------+
+| Dec 26th - Jan 6th | Implement Unit Tests for Utils |
+| | |
++--------------------------+------------------------------------------+
+| Jan 9th - Jan 20th | Implement Unit Tests for CI |
+| | |
++--------------------------+------------------------------------------+
+| Jan 23rd - Feb 3rd | Implement Unit Tests for Core |
+| | |
++--------------------------+------------------------------------------+
+| Feb 6th - Feb 17th | Implement Unit Tests for |
+| | opnfv_tests/openstack/tempest |
+--------------------------+------------------------------------------+
-| January - february | ........ |
+| Feb 20th - Mar 3rd | Implement Unit Tests for |
+| | opnfv_tests/openstack/rally |
++--------------------------+------------------------------------------+
+| Mar 6th - Mar 17th | Implement Unit Tests for |
+| | opnfv_tests/vnf/ims |
++--------------------------+------------------------------------------+
+| Mar 20th - Mar 31st | Recheck and Increase Coverage for all |
+| | modules > 80% |
++--------------------------+------------------------------------------+
+| Apr 3rd - Apr 14th | Add CI Gating for unit tests |
+| | |
++--------------------------+------------------------------------------+
+| Apr 17th - Apr 28th | Use Tox Utility, Documentation |
+| | |
++--------------------------+------------------------------------------+
+| Apr 28th - End | Bug Fixing |
+| | |
+--------------------------+------------------------------------------+
@@ -67,4 +124,3 @@ References:
===========
.. _`[1]` : https://wiki.opnfv.org/display/DEV/Intern+Project%3A+Functest+unit+tests
-
diff --git a/functest/ci/config_aarch64_patch.yaml b/functest/ci/config_aarch64_patch.yaml
index b43b5a76..cd395ab8 100644
--- a/functest/ci/config_aarch64_patch.yaml
+++ b/functest/ci/config_aarch64_patch.yaml
@@ -16,5 +16,13 @@ os:
vping:
image_name: TestVM
+ odl_sfc:
+ image_base_url: "http://artifacts.opnfv.org/sfc/demo"
+ image_name: sfc_nsh_danube
+ image_file_name: sf_nsh_danube_arm64.img
+ image_initrd: sf_nsh_danube_arm64-initrd
+ image_kernel: sf_nsh_danube_arm64-kernel
+ image_format: ami
+ os_cmd_line: 'root=LABEL=cloudimg-rootfs ro'
doctor:
image_name: TestVM
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index 6d44f398..9b796071 100644
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -38,6 +38,7 @@ general:
functest_conf: /home/opnfv/functest/conf
functest_data: /home/opnfv/functest/data
ims_data: /home/opnfv/functest/data/ims/
+ functest_images: /home/opnfv/functest/images
rally_inst: /home/opnfv/.rally
repo_kingbird: /home/opnfv/repos/kingbird
refstack_client: /home/opnfv/repos/refstack-client
@@ -117,6 +118,12 @@ onos_sfc:
image_name: TestSfcVm
image_file_name: firewall_block_image.img
+odl_sfc:
+ image_base_url: "http://artifacts.opnfv.org/sfc/images"
+ image_name: sfc_nsh_danube
+ image_file_name: sfc_nsh_danube.qcow2
+ image_format: qcow2
+
tempest:
deployment_name: opnfv-tempest
identity:
diff --git a/functest/ci/download_images.sh b/functest/ci/download_images.sh
new file mode 100644
index 00000000..f3fdef2e
--- /dev/null
+++ b/functest/ci/download_images.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+CIRROS_REPO_URL=http://download.cirros-cloud.net
+CIRROS_AARCH64_TAG=161201
+CIRROS_X86_64_TAG=0.3.5
+
+RED='\033[1;31m'
+NC='\033[0m' # No Color
+
+function usage(){
+ echo -e "${RED}USAGE: $script <destination_folder>${NC}"
+ exit 0
+}
+
+script=`basename "$0"`
+IMAGES_FOLDER_DIR=$1
+
+if [[ -z $IMAGES_FOLDER_DIR ]]; then usage; fi;
+
+set -ex
+mkdir -p ${IMAGES_FOLDER_DIR}
+
+wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${IMAGES_FOLDER_DIR}
+wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${IMAGES_FOLDER_DIR}
+wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
+
+# Add 3rd-party images for aarch64, since Functest can be run on an x86 machine to test an aarch64 POD
+wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${IMAGES_FOLDER_DIR}
+wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${IMAGES_FOLDER_DIR}
+wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${IMAGES_FOLDER_DIR}
+set +ex
\ No newline at end of file
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index 9fd07958..8e17a4fc 100755
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -156,8 +156,8 @@ def create_directories():
logger.info(" %s created." %
CONST.__getattribute__('dir_functest_conf'))
else:
- logger.debug(" %s already exists."
- % CONST.__getattribute__('dir_functest_conf'))
+ logger.debug(" %s already exists." %
+ CONST.__getattribute__('dir_functest_conf'))
if not os.path.exists(CONST.__getattribute__('dir_functest_data')):
os.makedirs(CONST.__getattribute__('dir_functest_data'))
@@ -166,6 +166,13 @@ def create_directories():
else:
logger.debug(" %s already exists." %
CONST.__getattribute__('dir_functest_data'))
+ if not os.path.exists(CONST.__getattribute__('dir_functest_images')):
+ os.makedirs(CONST.__getattribute__('dir_functest_images'))
+ logger.info(" %s created." %
+ CONST.__getattribute__('dir_functest_images'))
+ else:
+ logger.debug(" %s already exists." %
+ CONST.__getattribute__('dir_functest_images'))
def source_rc_file():
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 76760096..95353c87 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -64,210 +64,210 @@ class RunTestsParser(object):
return vars(self.parser.parse_args(argv))
-class GlobalVariables:
- EXECUTED_TEST_CASES = []
- OVERALL_RESULT = Result.EX_OK
- CLEAN_FLAG = True
- REPORT_FLAG = False
-
-
-def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
-
-
-def source_rc_file():
- rc_file = CONST.__getattribute__('openstack_creds')
- if not os.path.isfile(rc_file):
- raise Exception("RC file %s does not exist..." % rc_file)
- logger.debug("Sourcing the OpenStack RC file...")
- os_utils.source_credentials(rc_file)
- for key, value in os.environ.iteritems():
- if re.search("OS_", key):
- if key == 'OS_AUTH_URL':
- CONST.__setattr__('OS_AUTH_URL', value)
- elif key == 'OS_USERNAME':
- CONST.__setattr__('OS_USERNAME', value)
- elif key == 'OS_TENANT_NAME':
- CONST.__setattr__('OS_TENANT_NAME', value)
- elif key == 'OS_PASSWORD':
- CONST.__setattr__('OS_PASSWORD', value)
-
-
-def generate_os_snapshot():
- os_snapshot.main()
-
-
-def cleanup():
- os_clean.main()
-
-
-def get_run_dict(testname):
- try:
- dict = ft_utils.get_dict_by_test(testname)
- if not dict:
- logger.error("Cannot get {}'s config options".format(testname))
- elif 'run' in dict:
- return dict['run']
- return None
- except Exception:
- logger.exception("Cannot get {}'s config options".format(testname))
- return None
-
-
-def run_test(test, tier_name, testcases=None):
- if not test.is_enabled():
- raise TestNotEnabled("The test case {} is not enabled"
- .format(test.get_name()))
- test_name = test.get_name()
- logger.info("\n") # blank line
- print_separator("=")
- logger.info("Running test case '%s'..." % test_name)
- print_separator("=")
- logger.debug("\n%s" % test)
- source_rc_file()
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- generate_os_snapshot()
-
- flags = (" -t %s" % (test_name))
- if GlobalVariables.REPORT_FLAG:
- flags += " -r"
-
- result = testcase.TestCase.EX_RUN_ERROR
- run_dict = get_run_dict(test_name)
- if run_dict:
+class Runner(object):
+
+ def __init__(self):
+ self.executed_test_cases = []
+ self.overall_result = Result.EX_OK
+ self.clean_flag = True
+ self.report_flag = False
+
+ @staticmethod
+ def print_separator(str, count=45):
+ line = ""
+ for i in range(0, count - 1):
+ line += str
+ logger.info("%s" % line)
+
+ @staticmethod
+ def source_rc_file():
+ rc_file = CONST.__getattribute__('openstack_creds')
+ if not os.path.isfile(rc_file):
+ raise Exception("RC file %s does not exist..." % rc_file)
+ logger.debug("Sourcing the OpenStack RC file...")
+ os_utils.source_credentials(rc_file)
+ for key, value in os.environ.iteritems():
+ if re.search("OS_", key):
+ if key == 'OS_AUTH_URL':
+ CONST.__setattr__('OS_AUTH_URL', value)
+ elif key == 'OS_USERNAME':
+ CONST.__setattr__('OS_USERNAME', value)
+ elif key == 'OS_TENANT_NAME':
+ CONST.__setattr__('OS_TENANT_NAME', value)
+ elif key == 'OS_PASSWORD':
+ CONST.__setattr__('OS_PASSWORD', value)
+
+ @staticmethod
+ def generate_os_snapshot():
+ os_snapshot.main()
+
+ @staticmethod
+ def cleanup():
+ os_clean.main()
+
+ @staticmethod
+ def get_run_dict(testname):
try:
- module = importlib.import_module(run_dict['module'])
- cls = getattr(module, run_dict['class'])
- test_dict = ft_utils.get_dict_by_test(test_name)
- test_case = cls(**test_dict)
- GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
+ dict = ft_utils.get_dict_by_test(testname)
+ if not dict:
+ logger.error("Cannot get {}'s config options".format(testname))
+ elif 'run' in dict:
+ return dict['run']
+ return None
+ except Exception:
+ logger.exception("Cannot get {}'s config options".format(testname))
+ return None
+
+ def run_test(self, test, tier_name, testcases=None):
+ if not test.is_enabled():
+ raise TestNotEnabled(
+ "The test case {} is not enabled".format(test.get_name()))
+ logger.info("\n") # blank line
+ self.print_separator("=")
+ logger.info("Running test case '%s'..." % test.get_name())
+ self.print_separator("=")
+ logger.debug("\n%s" % test)
+ self.source_rc_file()
+
+ if test.needs_clean() and self.clean_flag:
+ self.generate_os_snapshot()
+
+ flags = " -t %s" % test.get_name()
+ if self.report_flag:
+ flags += " -r"
+
+ result = testcase.TestCase.EX_RUN_ERROR
+ run_dict = self.get_run_dict(test.get_name())
+ if run_dict:
try:
- kwargs = run_dict['args']
- result = test_case.run(**kwargs)
- except KeyError:
- result = test_case.run()
- if result == testcase.TestCase.EX_OK:
- if GlobalVariables.REPORT_FLAG:
- test_case.push_to_db()
- result = test_case.is_successful()
- logger.info("Test result:\n\n%s\n", test_case)
- except ImportError:
- logger.exception("Cannot import module {}".format(
- run_dict['module']))
- except AttributeError:
- logger.exception("Cannot get class {}".format(
- run_dict['class']))
- else:
- raise Exception("Cannot import the class for the test case.")
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- cleanup()
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed. " % test_name)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed("The test case {} failed and is blocking"
- .format(test.get_name()))
-
-
-def run_tier(tier):
- tier_name = tier.get_name()
- tests = tier.get_tests()
- if tests is None or len(tests) == 0:
- logger.info("There are no supported test cases in this tier "
- "for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- run_test(test, tier_name)
-
-
-def run_all(tiers):
- summary = ""
- tiers_to_run = []
-
- for tier in tiers.get_tiers():
- if (len(tier.get_tests()) != 0 and
- re.search(CONST.__getattribute__('CI_LOOP'),
- tier.get_ci_loop()) is not None):
- tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
- for tier in tiers_to_run:
- run_tier(tier)
-
-
-def main(**kwargs):
-
- file = CONST.functest_testcases_yaml
- _tiers = tb.TierBuilder(CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- file)
-
- if kwargs['noclean']:
- GlobalVariables.CLEAN_FLAG = False
-
- if kwargs['report']:
- GlobalVariables.REPORT_FLAG = True
-
- try:
- if kwargs['test']:
- source_rc_file()
- if _tiers.get_tier(kwargs['test']):
- run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
- run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
- elif kwargs['test'] == "all":
- run_all(_tiers)
- else:
- logger.error("Unknown test case or tier '%s', "
- "or not supported by "
- "the given scenario '%s'."
- % (kwargs['test'],
- CONST.__getattribute__('DEPLOY_SCENARIO')))
- logger.debug("Available tiers are:\n\n%s"
- % _tiers)
- return Result.EX_ERROR
+ module = importlib.import_module(run_dict['module'])
+ cls = getattr(module, run_dict['class'])
+ test_dict = ft_utils.get_dict_by_test(test.get_name())
+ test_case = cls(**test_dict)
+ self.executed_test_cases.append(test_case)
+ try:
+ kwargs = run_dict['args']
+ result = test_case.run(**kwargs)
+ except KeyError:
+ result = test_case.run()
+ if result == testcase.TestCase.EX_OK:
+ if self.report_flag:
+ test_case.push_to_db()
+ result = test_case.is_successful()
+ logger.info("Test result:\n\n%s\n", test_case)
+ except ImportError:
+ logger.exception("Cannot import module {}".format(
+ run_dict['module']))
+ except AttributeError:
+ logger.exception("Cannot get class {}".format(
+ run_dict['class']))
else:
- run_all(_tiers)
- except Exception as e:
- logger.error(e)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['env var', 'value'])
- for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
- 'CI_LOOP']:
- msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier', 'duration', 'result'])
- for test_case in GlobalVariables.EXECUTED_TEST_CASES:
- result = 'PASS' if(test_case.is_successful(
- ) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
- test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
- return GlobalVariables.OVERALL_RESULT
+ raise Exception("Cannot import the class for the test case.")
+
+ if test.needs_clean() and self.clean_flag:
+ self.cleanup()
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed. " % test.get_name())
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+
+ def run_tier(self, tier):
+ tier_name = tier.get_name()
+ tests = tier.get_tests()
+ if tests is None or len(tests) == 0:
+ logger.info("There are no supported test cases in this tier "
+ "for the given scenario")
+ return 0
+ logger.info("\n\n") # blank line
+ self.print_separator("#")
+ logger.info("Running tier '%s'" % tier_name)
+ self.print_separator("#")
+ logger.debug("\n%s" % tier)
+ for test in tests:
+ self.run_test(test, tier_name)
+
+ def run_all(self, tiers):
+ summary = ""
+ tiers_to_run = []
+
+ for tier in tiers.get_tiers():
+ if (len(tier.get_tests()) != 0 and
+ re.search(CONST.__getattribute__('CI_LOOP'),
+ tier.get_ci_loop()) is not None):
+ tiers_to_run.append(tier)
+ summary += ("\n - %s:\n\t %s"
+ % (tier.get_name(),
+ tier.get_test_names()))
+
+ logger.info("Tests to be executed:%s" % summary)
+ for tier in tiers_to_run:
+ self.run_tier(tier)
+
+ def main(self, **kwargs):
+ _tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__("functest_testcases_yaml"))
+
+ if kwargs['noclean']:
+ self.clean_flag = False
+
+ if kwargs['report']:
+ self.report_flag = True
+
+ try:
+ if kwargs['test']:
+ self.source_rc_file()
+ logger.error(kwargs['test'])
+ if _tiers.get_tier(kwargs['test']):
+ self.run_tier(_tiers.get_tier(kwargs['test']))
+ elif _tiers.get_test(kwargs['test']):
+ self.run_test(_tiers.get_test(kwargs['test']),
+ _tiers.get_tier_name(kwargs['test']),
+ kwargs['test'])
+ elif kwargs['test'] == "all":
+ self.run_all(_tiers)
+ else:
+ logger.error("Unknown test case or tier '%s', "
+ "or not supported by "
+ "the given scenario '%s'."
+ % (kwargs['test'],
+ CONST.__getattribute__('DEPLOY_SCENARIO')))
+ logger.debug("Available tiers are:\n\n%s",
+ _tiers)
+ return Result.EX_ERROR
+ else:
+ self.run_all(_tiers)
+ except BlockingTestFailed:
+ pass
+ except Exception:
+ logger.exception("Failures when running testcase(s)")
+ self.overall_result = Result.EX_ERROR
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+ 'CI_LOOP']:
+ msg.add_row([env_var, CONST.__getattribute__(env_var)])
+ logger.info("Deployment description: \n\n%s\n", msg)
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier', 'duration', 'result'])
+ for test_case in self.executed_test_cases:
+ result = 'PASS' if(test_case.is_successful(
+ ) == test_case.EX_OK) else 'FAIL'
+ msg.add_row([test_case.case_name, test_case.project_name,
+ _tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
+ logger.info("Execution exit value: %s" % self.overall_result)
+ return self.overall_result
if __name__ == '__main__':
@@ -275,4 +275,5 @@ if __name__ == '__main__':
CONST.__getattribute__('dir_functest_logging_cfg'))
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
- sys.exit(main(**args).value)
+ runner = Runner()
+ sys.exit(runner.main(**args).value)
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index d98a2de2..10587f26 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -302,7 +302,6 @@ tiers:
-
case_name: bgpvpn
- enabled: false
project_name: sdnvpn
criteria: 100
blocking: false
diff --git a/functest/cli/commands/cli_env.py b/functest/cli/commands/cli_env.py
index 14ad01bf..f5ba4b34 100644
--- a/functest/cli/commands/cli_env.py
+++ b/functest/cli/commands/cli_env.py
@@ -28,7 +28,7 @@ class CliEnv(object):
"it again? [y|n]\n")
while True:
if answer.lower() in ["y", "yes"]:
- os.remove(CONST.env_active)
+ os.remove(CONST.__getattribute__('env_active'))
break
elif answer.lower() in ["n", "no"]:
return
@@ -36,19 +36,19 @@ class CliEnv(object):
answer = raw_input("Invalid answer. Please type [y|n]\n")
cmd = ("python %s/functest/ci/prepare_env.py start" %
- CONST.dir_repo_functest)
+ CONST.__getattribute__('dir_repo_functest'))
ft_utils.execute_command(cmd)
def show(self):
def _get_value(attr, default='Unknown'):
return attr if attr else default
- install_type = _get_value(CONST.INSTALLER_TYPE)
- installer_ip = _get_value(CONST.INSTALLER_IP)
+ install_type = _get_value(CONST.__getattribute__('INSTALLER_TYPE'))
+ installer_ip = _get_value(CONST.__getattribute__('INSTALLER_IP'))
installer_info = ("%s, %s" % (install_type, installer_ip))
- scenario = _get_value(CONST.DEPLOY_SCENARIO)
- node = _get_value(CONST.NODE_NAME)
- repo_h = git.Repo(CONST.dir_repo_functest).head
+ scenario = _get_value(CONST.__getattribute__('DEPLOY_SCENARIO'))
+ node = _get_value(CONST.__getattribute__('NODE_NAME'))
+ repo_h = git.Repo(CONST.__getattribute__('dir_repo_functest')).head
if repo_h.is_detached:
git_branch = 'detached from FETCH_HEAD'
git_hash = repo_h.commit.hexsha
@@ -56,8 +56,8 @@ class CliEnv(object):
branch = repo_h.reference
git_branch = branch.name
git_hash = branch.commit.hexsha
- is_debug = _get_value(CONST.CI_DEBUG, 'false')
- build_tag = CONST.BUILD_TAG
+ is_debug = _get_value(CONST.__getattribute__('CI_DEBUG'), 'false')
+ build_tag = CONST.__getattribute__('BUILD_TAG')
if build_tag is not None:
build_tag = build_tag.lstrip(
"jenkins-").lstrip("functest").lstrip("-")
@@ -84,7 +84,7 @@ class CliEnv(object):
def status(self, verbose=True):
ret_val = 0
- if not os.path.isfile(CONST.env_active):
+ if not os.path.isfile(CONST.__getattribute__('env_active')):
if verbose:
click.echo("Functest environment is not installed.\n")
ret_val = 1
diff --git a/functest/cli/commands/cli_os.py b/functest/cli/commands/cli_os.py
index f85f4041..e54eb423 100644
--- a/functest/cli/commands/cli_os.py
+++ b/functest/cli/commands/cli_os.py
@@ -21,11 +21,11 @@ import functest.utils.openstack_snapshot as os_snapshot
class CliOpenStack(object):
def __init__(self):
- self.os_auth_url = CONST.OS_AUTH_URL
+ self.os_auth_url = CONST.__getattribute__('OS_AUTH_URL')
self.endpoint_ip = None
self.endpoint_port = None
- self.openstack_creds = CONST.openstack_creds
- self.snapshot_file = CONST.openstack_snapshot_file
+ self.openstack_creds = CONST.__getattribute__('openstack_creds')
+ self.snapshot_file = CONST.__getattribute__('openstack_snapshot_file')
if self.os_auth_url:
self.endpoint_ip = self.os_auth_url.rsplit("/")[2].rsplit(":")[0]
self.endpoint_port = self.os_auth_url.rsplit("/")[2].rsplit(":")[1]
@@ -59,16 +59,16 @@ class CliOpenStack(object):
else:
answer = raw_input("Invalid answer. Please type [y|n]\n")
- installer_type = CONST.INSTALLER_TYPE
+ installer_type = CONST.__getattribute__('INSTALLER_TYPE')
if installer_type is None:
click.echo("The environment variable 'INSTALLER_TYPE' is not"
"defined. Please export it")
- installer_ip = CONST.INSTALLER_IP
+ installer_ip = CONST.__getattribute__('INSTALLER_IP')
if installer_ip is None:
click.echo("The environment variable 'INSTALLER_IP' is not"
"defined. Please export it")
cmd = ("%s/releng/utils/fetch_os_creds.sh -d %s -i %s -a %s"
- % (CONST.dir_repos,
+ % (CONST.__getattribute__('dir_repos'),
self.openstack_creds,
installer_type,
installer_ip))
@@ -78,7 +78,8 @@ class CliOpenStack(object):
def check(self):
self.ping_endpoint()
- cmd = CONST.dir_repo_functest + "/functest/ci/check_os.sh"
+ cmd = os.path.join(CONST.__getattribute__('dir_repo_functest'),
+ "functest/ci/check_os.sh")
ft_utils.execute_command(cmd, verbose=False)
def snapshot_create(self):
diff --git a/functest/cli/commands/cli_testcase.py b/functest/cli/commands/cli_testcase.py
index 6644a0c2..3d3f1cb3 100644
--- a/functest/cli/commands/cli_testcase.py
+++ b/functest/cli/commands/cli_testcase.py
@@ -22,9 +22,10 @@ import functest.utils.functest_vacation as vacation
class CliTestcase(object):
def __init__(self):
- self.tiers = tb.TierBuilder(CONST.INSTALLER_TYPE,
- CONST.DEPLOY_SCENARIO,
- CONST.functest_testcases_yaml)
+ self.tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('functest_testcases_yaml'))
def list(self):
summary = ""
@@ -52,12 +53,14 @@ class CliTestcase(object):
if testname == 'vacation':
vacation.main()
- elif not os.path.isfile(CONST.env_active):
+ elif not os.path.isfile(CONST.__getattribute__('env_active')):
click.echo("Functest environment is not ready. "
"Run first 'functest env prepare'")
else:
tests = testname.split(",")
for test in tests:
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, flags, test))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ flags, test))
ft_utils.execute_command(cmd)
diff --git a/functest/cli/commands/cli_tier.py b/functest/cli/commands/cli_tier.py
index 012b11d0..531f0ff9 100644
--- a/functest/cli/commands/cli_tier.py
+++ b/functest/cli/commands/cli_tier.py
@@ -21,9 +21,10 @@ import functest.utils.functest_utils as ft_utils
class CliTier(object):
def __init__(self):
- self.tiers = tb.TierBuilder(CONST.INSTALLER_TYPE,
- CONST.DEPLOY_SCENARIO,
- CONST.functest_testcases_yaml)
+ self.tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('functest_testcases_yaml'))
def list(self):
summary = ""
@@ -62,10 +63,12 @@ class CliTier(object):
if report:
flags += "-r "
- if not os.path.isfile(CONST.env_active):
+ if not os.path.isfile(CONST.__getattribute__('env_active')):
click.echo("Functest environment is not ready. "
"Run first 'functest env prepare'")
else:
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, flags, tiername))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ flags, tiername))
ft_utils.execute_command(cmd)
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index f762383a..86ec3558 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -8,6 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+from __future__ import division
+
import json
import logging
import os
@@ -20,7 +22,6 @@ import yaml
from functest.core import testcase
from functest.utils.constants import CONST
-import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
logger = logging.getLogger(__name__)
@@ -29,14 +30,17 @@ logger = logging.getLogger(__name__)
class RallyBase(testcase.TestCase):
TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
- GLANCE_IMAGE_NAME = CONST.openstack_image_name
- GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
- GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
- GLANCE_IMAGE_FILENAME)
- GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
+ GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
+ GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
+ GLANCE_IMAGE_PATH = os.path.join(
+ CONST.__getattribute__('dir_functest_images'),
+ GLANCE_IMAGE_FILENAME)
+ GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
FLAVOR_NAME = "m1.tiny"
- RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
+ RALLY_DIR = os.path.join(
+ CONST.__getattribute__('dir_repo_functest'),
+ CONST.__getattribute__('dir_rally'))
RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
@@ -44,17 +48,17 @@ class RallyBase(testcase.TestCase):
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4
- RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
- TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
+ RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
+ TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
'tempest/tempest.conf')
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
TEMP_DIR = os.path.join(RALLY_DIR, "var")
CINDER_VOLUME_TYPE_NAME = "volume_test"
- RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
- RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
- RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
- RALLY_ROUTER_NAME = CONST.rally_router_name
+ RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
+ RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
+ RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
+ RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
def __init__(self, **kwargs):
super(RallyBase, self).__init__(**kwargs)
@@ -96,7 +100,7 @@ class RallyBase(testcase.TestCase):
task_args['netid'] = ''
# get keystone auth endpoint
- task_args['request_url'] = CONST.OS_AUTH_URL or ''
+ task_args['request_url'] = CONST.__getattribute__('OS_AUTH_URL') or ''
return task_args
@@ -182,8 +186,8 @@ class RallyBase(testcase.TestCase):
with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
- installer_type = CONST.INSTALLER_TYPE
- deploy_scenario = CONST.DEPLOY_SCENARIO
+ installer_type = CONST.__getattribute__('INSTALLER_TYPE')
+ deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
if (bool(installer_type) * bool(deploy_scenario)):
if 'scenario' in black_list_yaml.keys():
for item in black_list_yaml['scenario']:
@@ -480,11 +484,12 @@ class RallyBase(testcase.TestCase):
total_duration_str2 = "{0:<10}".format(total_duration_str)
total_nb_tests_str = "{0:<13}".format(total_nb_tests)
- if len(self.summary):
- success_rate = total_success / len(self.summary)
- else:
- success_rate = 100
- success_rate = "{:0.2f}".format(success_rate)
+ try:
+ self.result = total_success / len(self.summary)
+ except ZeroDivisionError:
+ self.result = 100
+
+ success_rate = "{:0.2f}".format(self.result)
success_rate_str = "{0:<10}".format(str(success_rate) + '%')
report += ("+===================+============"
"+===============+===========+")
@@ -500,12 +505,10 @@ class RallyBase(testcase.TestCase):
'nb tests': total_nb_tests,
'nb success': success_rate}})
- self.result = ft_utils.check_success_rate(
- self.case_name, success_rate)
self.details = payload
- logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.result))
+ logger.info("Rally '%s' success_rate is %s%%"
+ % (self.case_name, success_rate))
def _clean_up(self):
if self.volume_type:
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index ebae4b86..5f1f3a1d 100755
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -5,6 +5,10 @@
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
+
+from __future__ import division
+
+
import argparse
import logging
import os
@@ -29,9 +33,9 @@ class RefstackClient(testcase.TestCase):
if "case_name" not in kwargs:
kwargs["case_name"] = "refstack_defcore"
super(RefstackClient, self).__init__(**kwargs)
- self.FUNCTEST_TEST = CONST.dir_functest_test
- self.CONF_PATH = CONST.refstack_tempest_conf_path
- self.DEFCORE_LIST = CONST.refstack_defcore_list
+ self.FUNCTEST_TEST = CONST.__getattribute__('dir_functest_test')
+ self.CONF_PATH = CONST.__getattribute__('refstack_tempest_conf_path')
+ self.DEFCORE_LIST = CONST.__getattribute__('refstack_defcore_list')
self.confpath = os.path.join(self.FUNCTEST_TEST,
self.CONF_PATH)
self.defcorelist = os.path.join(self.FUNCTEST_TEST,
@@ -41,7 +45,7 @@ class RefstackClient(testcase.TestCase):
cmd = ("cd {0};"
". .venv/bin/activate;"
- "cd -;".format(CONST.dir_refstack_client))
+ "cd -;".format(CONST.__getattribute__('dir_refstack_client')))
ft_utils.execute_command(cmd)
def run_defcore(self, conf, testlist):
@@ -49,7 +53,7 @@ class RefstackClient(testcase.TestCase):
cmd = ("cd {0};"
"./refstack-client test -c {1} -v --test-list {2};"
- "cd -;".format(CONST.dir_refstack_client,
+ "cd -;".format(CONST.__getattribute__('dir_refstack_client'),
conf,
testlist))
ft_utils.execute_command(cmd)
@@ -59,16 +63,16 @@ class RefstackClient(testcase.TestCase):
cmd = ("cd {0};"
"./refstack-client test -c {1} -v --test-list {2};"
- "cd -;".format(CONST.dir_refstack_client,
+ "cd -;".format(CONST.__getattribute__('dir_refstack_client'),
self.confpath,
self.defcorelist))
logger.info("Starting Refstack_defcore test case: '%s'." % cmd)
header = ("Refstack environment:\n"
" SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (CONST.INSTALLER_TYPE,
- CONST.DEPLOY_SCENARIO,
- CONST.NODE_NAME,
+ (CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('NODE_NAME'),
time.strftime("%a %b %d %H:%M:%S %Z %Y")))
f_stdout = open(
@@ -123,7 +127,11 @@ class RefstackClient(testcase.TestCase):
skipped_testcases += match + ", "
num_executed = int(num_tests) - int(num_skipped)
- success_rate = 100 * int(num_success) / int(num_executed)
+
+ try:
+ self.result = 100 * int(num_success) / int(num_executed)
+ except ZeroDivisionError:
+ logger.error("No test has been executed")
self.details = {"tests": int(num_tests),
"failures": int(num_failures),
@@ -131,12 +139,10 @@ class RefstackClient(testcase.TestCase):
"errors": failed_testcases,
"skipped": skipped_testcases}
except Exception:
- success_rate = 0
+ self.result = 0
- self.result = ft_utils.check_success_rate(
- self.case_name, success_rate)
- logger.info("Testcase %s success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.result))
+ logger.info("Testcase %s success_rate is %s%%"
+ % (self.case_name, self.result))
def run(self):
'''used for functest command line,
@@ -196,9 +202,9 @@ class RefstackClient(testcase.TestCase):
class RefstackClientParser(object):
def __init__(self):
- self.FUNCTEST_TEST = CONST.dir_functest_test
- self.CONF_PATH = CONST.refstack_tempest_conf_path
- self.DEFCORE_LIST = CONST.refstack_defcore_list
+ self.FUNCTEST_TEST = CONST.__getattribute__('dir_functest_test')
+ self.CONF_PATH = CONST.__getattribute__('refstack_tempest_conf_path')
+ self.DEFCORE_LIST = CONST.__getattribute__('refstack_defcore_list')
self.confpath = os.path.join(self.FUNCTEST_TEST,
self.CONF_PATH)
self.defcorelist = os.path.join(self.FUNCTEST_TEST,
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
index 5c04253c..fbaad589 100755
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
@@ -24,12 +24,14 @@ class TempestConf(object):
self.DEPLOYMENT_ID = conf_utils.get_verifier_deployment_id()
self.DEPLOYMENT_DIR = conf_utils.get_verifier_deployment_dir(
self.VERIFIER_ID, self.DEPLOYMENT_ID)
- self.confpath = os.path.join(CONST.dir_functest_test,
- CONST.refstack_tempest_conf_path)
+ self.confpath = os.path.join(
+ CONST.__getattribute__('dir_functest_test'),
+ CONST.__getattribute__('refstack_tempest_conf_path'))
def generate_tempestconf(self):
try:
- openstack_utils.source_credentials(CONST.openstack_creds)
+ openstack_utils.source_credentials(
+ CONST.__getattribute__('openstack_creds'))
img_flavor_dict = conf_utils.create_tempest_resources(
use_custom_images=True, use_custom_flavors=True)
conf_utils.configure_tempest_defcore(
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index 54f7428c..556a41d4 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -21,11 +21,12 @@ import functest.utils.openstack_utils as os_utils
IMAGE_ID_ALT = None
FLAVOR_ID_ALT = None
-REPO_PATH = CONST.dir_repo_functest
-GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
- CONST.openstack_image_file_name)
-TEMPEST_TEST_LIST_DIR = CONST.dir_tempest_cases
-TEMPEST_RESULTS_DIR = os.path.join(CONST.dir_results,
+REPO_PATH = CONST.__getattribute__('dir_repo_functest')
+GLANCE_IMAGE_PATH = os.path.join(
+ CONST.__getattribute__('dir_functest_images'),
+ CONST.__getattribute__('openstack_image_file_name'))
+TEMPEST_TEST_LIST_DIR = CONST.__getattribute__('dir_tempest_cases')
+TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'tempest')
TEMPEST_CUSTOM = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
'test_list.txt')
@@ -35,11 +36,11 @@ TEMPEST_DEFCORE = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
'defcore_req.txt')
TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
-REFSTACK_RESULTS_DIR = os.path.join(CONST.dir_results,
+REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'refstack')
-CI_INSTALLER_TYPE = CONST.INSTALLER_TYPE
-CI_INSTALLER_IP = CONST.INSTALLER_IP
+CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
+CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
""" logging configuration """
logger = logging.getLogger(__name__)
@@ -52,26 +53,27 @@ def create_tempest_resources(use_custom_images=False,
logger.debug("Creating tenant and user for Tempest suite")
tenant_id = os_utils.create_tenant(
keystone_client,
- CONST.tempest_identity_tenant_name,
- CONST.tempest_identity_tenant_description)
+ CONST.__getattribute__('tempest_identity_tenant_name'),
+ CONST.__getattribute__('tempest_identity_tenant_description'))
if not tenant_id:
logger.error("Failed to create %s tenant"
- % CONST.tempest_identity_tenant_name)
+ % CONST.__getattribute__('tempest_identity_tenant_name'))
- user_id = os_utils.create_user(keystone_client,
- CONST.tempest_identity_user_name,
- CONST.tempest_identity_user_password,
- None, tenant_id)
+ user_id = os_utils.create_user(
+ keystone_client,
+ CONST.__getattribute__('tempest_identity_user_name'),
+ CONST.__getattribute__('tempest_identity_user_password'),
+ None, tenant_id)
if not user_id:
logger.error("Failed to create %s user" %
- CONST.tempest_identity_user_name)
+ CONST.__getattribute__('tempest_identity_user_name'))
logger.debug("Creating private network for Tempest suite")
network_dic = os_utils.create_shared_network_full(
- CONST.tempest_private_net_name,
- CONST.tempest_private_subnet_name,
- CONST.tempest_router_name,
- CONST.tempest_private_subnet_cidr)
+ CONST.__getattribute__('tempest_private_net_name'),
+ CONST.__getattribute__('tempest_private_subnet_name'),
+ CONST.__getattribute__('tempest_router_name'),
+ CONST.__getattribute__('tempest_private_subnet_cidr'))
if network_dic is None:
raise Exception('Failed to create private network')
@@ -80,41 +82,45 @@ def create_tempest_resources(use_custom_images=False,
flavor_id = ""
flavor_id_alt = ""
- if CONST.tempest_use_custom_images or use_custom_images:
+ if (CONST.__getattribute__('tempest_use_custom_images') or
+ use_custom_images):
# adding alternative image should be trivial should we need it
logger.debug("Creating image for Tempest suite")
_, image_id = os_utils.get_or_create_image(
- CONST.openstack_image_name, GLANCE_IMAGE_PATH,
- CONST.openstack_image_disk_format)
+ CONST.__getattribute__('openstack_image_name'),
+ GLANCE_IMAGE_PATH,
+ CONST.__getattribute__('openstack_image_disk_format'))
if image_id is None:
raise Exception('Failed to create image')
if use_custom_images:
logger.debug("Creating 2nd image for Tempest suite")
_, image_id_alt = os_utils.get_or_create_image(
- CONST.openstack_image_name_alt, GLANCE_IMAGE_PATH,
- CONST.openstack_image_disk_format)
+ CONST.__getattribute__('openstack_image_name_alt'),
+ GLANCE_IMAGE_PATH,
+ CONST.__getattribute__('openstack_image_disk_format'))
if image_id_alt is None:
raise Exception('Failed to create image')
- if CONST.tempest_use_custom_flavors or use_custom_flavors:
+ if (CONST.__getattribute__('tempest_use_custom_flavors') or
+ use_custom_flavors):
# adding alternative flavor should be trivial should we need it
logger.debug("Creating flavor for Tempest suite")
_, flavor_id = os_utils.get_or_create_flavor(
- CONST.openstack_flavor_name,
- CONST.openstack_flavor_ram,
- CONST.openstack_flavor_disk,
- CONST.openstack_flavor_vcpus)
+ CONST.__getattribute__('openstack_flavor_name'),
+ CONST.__getattribute__('openstack_flavor_ram'),
+ CONST.__getattribute__('openstack_flavor_disk'),
+ CONST.__getattribute__('openstack_flavor_vcpus'))
if flavor_id is None:
raise Exception('Failed to create flavor')
if use_custom_flavors:
logger.debug("Creating 2nd flavor for tempest_defcore")
_, flavor_id_alt = os_utils.get_or_create_flavor(
- CONST.openstack_flavor_name_alt,
- CONST.openstack_flavor_ram,
- CONST.openstack_flavor_disk,
- CONST.openstack_flavor_vcpus)
+ CONST.__getattribute__('openstack_flavor_name_alt'),
+ CONST.__getattribute__('openstack_flavor_ram'),
+ CONST.__getattribute__('openstack_flavor_disk'),
+ CONST.__getattribute__('openstack_flavor_vcpus'))
if flavor_id_alt is None:
raise Exception('Failed to create flavor')
@@ -132,7 +138,7 @@ def get_verifier_id():
Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
- CONST.tempest_deployment_name +
+ CONST.__getattribute__('tempest_deployment_name') +
"/ {print $2}'")
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
@@ -149,7 +155,7 @@ def get_verifier_deployment_id():
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
- CONST.rally_deployment_name +
+ CONST.__getattribute__('rally_deployment_name') +
"/ {print $2}'")
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
@@ -168,7 +174,7 @@ def get_verifier_repo_dir(verifier_id):
if not verifier_id:
verifier_id = get_verifier_id()
- return os.path.join(CONST.dir_rally_inst,
+ return os.path.join(CONST.__getattribute__('dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
@@ -184,7 +190,7 @@ def get_verifier_deployment_dir(verifier_id, deployment_id):
if not deployment_id:
deployment_id = get_verifier_deployment_id()
- return os.path.join(CONST.dir_rally_inst,
+ return os.path.join(CONST.__getattribute__('dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
@@ -247,8 +253,9 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
with open(conf_file, 'wb') as config_file:
config.write(config_file)
- confpath = os.path.join(CONST.dir_functest_test,
- CONST.refstack_tempest_conf_path)
+ confpath = os.path.join(
+ CONST.__getattribute__('dir_functest_test'),
+ CONST.__getattribute__('refstack_tempest_conf_path'))
shutil.copyfile(conf_file, confpath)
@@ -263,32 +270,37 @@ def configure_tempest_update_params(tempest_conf_file,
config.set(
'compute',
'fixed_network_name',
- CONST.tempest_private_net_name)
+ CONST.__getattribute__('tempest_private_net_name'))
config.set('compute', 'volume_device_name',
- CONST.tempest_volume_device_name)
- if CONST.tempest_use_custom_images:
+ CONST.__getattribute__('tempest_volume_device_name'))
+ if CONST.__getattribute__('tempest_use_custom_images'):
if IMAGE_ID is not None:
config.set('compute', 'image_ref', IMAGE_ID)
if IMAGE_ID_ALT is not None:
config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
- if CONST.tempest_use_custom_flavors:
+ if CONST.__getattribute__('tempest_use_custom_flavors'):
if FLAVOR_ID is not None:
config.set('compute', 'flavor_ref', FLAVOR_ID)
if FLAVOR_ID_ALT is not None:
config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- config.set('identity', 'tenant_name', CONST.tempest_identity_tenant_name)
- config.set('identity', 'username', CONST.tempest_identity_user_name)
- config.set('identity', 'password', CONST.tempest_identity_user_password)
+ config.set('identity', 'tenant_name',
+ CONST.__getattribute__('tempest_identity_tenant_name'))
+ config.set('identity', 'username',
+ CONST.__getattribute__('tempest_identity_user_name'))
+ config.set('identity', 'password',
+ CONST.__getattribute__('tempest_identity_user_password'))
config.set('identity', 'region', 'RegionOne')
config.set(
- 'validation', 'ssh_timeout', CONST.tempest_validation_ssh_timeout)
+ 'validation', 'ssh_timeout',
+ CONST.__getattribute__('tempest_validation_ssh_timeout'))
config.set('object-storage', 'operator_role',
- CONST.tempest_object_storage_operator_role)
+ CONST.__getattribute__('tempest_object_storage_operator_role'))
- if CONST.OS_ENDPOINT_TYPE is not None:
+ if CONST.__getattribute__('OS_ENDPOINT_TYPE') is not None:
sections = config.sections()
if os_utils.is_keystone_v3():
- config.set('identity', 'v3_endpoint_type', CONST.OS_ENDPOINT_TYPE)
+ config.set('identity', 'v3_endpoint_type',
+ CONST.__getattribute__('OS_ENDPOINT_TYPE'))
if 'identity-feature-enabled' not in sections:
config.add_section('identity-feature-enabled')
config.set('identity-feature-enabled', 'api_v2', False)
@@ -304,7 +316,7 @@ def configure_tempest_update_params(tempest_conf_file,
if service not in sections:
config.add_section(service)
config.set(service, 'endpoint_type',
- CONST.OS_ENDPOINT_TYPE)
+ CONST.__getattribute__('OS_ENDPOINT_TYPE'))
with open(tempest_conf_file, 'wb') as config_file:
config.write(config_file)
@@ -365,22 +377,22 @@ def configure_tempest_multisite_params(tempest_conf_file):
"StrictHostKeyChecking=no")
# Get the controller IP from the fuel node
- cmd = 'sshpass -p %s ssh 2>/dev/null %s %s@%s \
- \'fuel node --env 1| grep controller | grep "True\| 1" \
- | awk -F\| "{print \$5}"\'' % (installer_password,
+ cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s '
+ '\'fuel node --env 1| grep controller | grep "True\| 1" '
+ '| awk -F\| "{print \$5}"\'' % (installer_password,
ssh_options,
installer_username,
- installer_ip)
+ installer_ip))
multisite_controller_ip = "".join(os.popen(cmd).read().split())
# Login to controller and get bind host details
- cmd = 'sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" \
- grep -e "^bind_" %s \\""' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip,
- multisite_controller_ip,
- kingbird_conf_path)
+ cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" '
+ 'grep -e "^bind_" %s \\""' % (installer_password,
+ ssh_options,
+ installer_username,
+ installer_ip,
+ multisite_controller_ip,
+ kingbird_conf_path))
bind_details = os.popen(cmd).read()
bind_details = "".join(bind_details.split())
# Extract port number from the bind details
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 984e2a1b..233ceb48 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -8,6 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+from __future__ import division
+
import logging
import os
import re
@@ -79,8 +81,8 @@ class TempestCommon(testcase.TestCase):
result_file = open(conf_utils.TEMPEST_LIST, 'w')
black_tests = []
try:
- installer_type = CONST.INSTALLER_TYPE
- deploy_scenario = CONST.DEPLOY_SCENARIO
+ installer_type = CONST.__getattribute__('INSTALLER_TYPE')
+ deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
if (bool(installer_type) * bool(deploy_scenario)):
# if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the
# file
@@ -117,9 +119,9 @@ class TempestCommon(testcase.TestCase):
header = ("Tempest environment:\n"
" SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (CONST.INSTALLER_TYPE,
- CONST.DEPLOY_SCENARIO,
- CONST.NODE_NAME,
+ (CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('NODE_NAME'),
time.strftime("%a %b %d %H:%M:%S %Z %Y")))
f_stdout = open(
@@ -181,7 +183,13 @@ class TempestCommon(testcase.TestCase):
try:
num_executed = int(num_tests) - int(num_skipped)
- success_rate = 100 * int(num_success) / int(num_executed)
+ try:
+ self.result = 100 * int(num_success) / int(num_executed)
+ except ZeroDivisionError:
+ logger.error("No test has been executed")
+ self.result = 0
+ return
+
with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
"tempest.log"), 'r') as logfile:
output = logfile.read()
@@ -198,12 +206,10 @@ class TempestCommon(testcase.TestCase):
"errors": error_logs,
"skipped": skipped_testcase}
except Exception:
- success_rate = 0
+ self.result = 0
- self.result = ft_utils.check_success_rate(
- self.case_name, success_rate)
- logger.info("Tempest %s success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.result))
+ logger.info("Tempest %s success_rate is %s%%"
+ % (self.case_name, self.result))
def run(self):
@@ -267,7 +273,8 @@ class TempestMultisite(TempestCommon):
TempestCommon.__init__(self, **kwargs)
self.MODE = "feature_multisite"
self.OPTION = "--concurrency 1"
- conf_utils.install_verifier_ext(CONST.dir_repo_kingbird)
+ conf_utils.install_verifier_ext(
+ CONST.__getattribute__('dir_repo_kingbird'))
class TempestCustom(TempestCommon):
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index 45ed7cc8..b2b0b77c 100755
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -30,6 +30,7 @@ import robot.api
from robot.errors import RobotError
import robot.run
from robot.utils.robottime import timestamp_to_secs
+from six import StringIO
from six.moves import urllib
from functest.core import testcase
@@ -172,16 +173,11 @@ class ODLTests(testcase.TestCase):
self.__logger.exception(
"Cannot create %s", self.res_dir)
return self.EX_RUN_ERROR
- stdout_file = os.path.join(self.res_dir, 'stdout.txt')
output_dir = os.path.join(self.res_dir, 'output.xml')
- with open(stdout_file, 'w+') as stdout:
- robot.run(*suites, variable=variables,
- output=output_dir,
- log='NONE',
- report='NONE',
- stdout=stdout)
- stdout.seek(0, 0)
- self.__logger.info("\n" + stdout.read())
+ stream = StringIO()
+ robot.run(*suites, variable=variables, output=output_dir,
+ log='NONE', report='NONE', stdout=stream)
+ self.__logger.info("\n" + stream.getvalue())
self.__logger.info("ODL results were successfully generated")
try:
self.parse_results()
@@ -190,10 +186,6 @@ class ODLTests(testcase.TestCase):
self.__logger.error("Run tests before publishing: %s",
ex.message)
return self.EX_RUN_ERROR
- try:
- os.remove(stdout_file)
- except OSError:
- self.__logger.warning("Cannot remove %s", stdout_file)
return self.EX_OK
else:
return self.EX_RUN_ERROR
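The odl.py change replaces the stdout.txt round-trip with an in-memory stream. A minimal sketch of redirecting robot.run console output to a StringIO and logging it; the suite paths and variables are placeholders.

import logging

import robot
from six import StringIO

logger = logging.getLogger(__name__)


def run_suites(suites, variables, output_dir):
    stream = StringIO()
    # robot.run accepts stdout= to redirect its console output.
    robot.run(*suites, variable=variables, output=output_dir,
              log='NONE', report='NONE', stdout=stream)
    logger.info("\n" + stream.getvalue())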
diff --git a/functest/opnfv_tests/sdn/onos/onos.py b/functest/opnfv_tests/sdn/onos/onos.py
index d7a2d38e..5dfff036 100644
--- a/functest/opnfv_tests/sdn/onos/onos.py
+++ b/functest/opnfv_tests/sdn/onos/onos.py
@@ -25,7 +25,7 @@ class OnosBase(testcase.TestCase):
onos_repo_path = CONST.__getattribute__('dir_repo_onos')
onos_sfc_image_name = CONST.__getattribute__('onos_sfc_image_name')
onos_sfc_image_path = os.path.join(
- CONST.__getattribute__('dir_functest_data'),
+ CONST.__getattribute__('dir_functest_images'),
CONST.__getattribute__('onos_sfc_image_file_name'))
onos_sfc_path = os.path.join(CONST.__getattribute__('dir_repo_functest'),
CONST.__getattribute__('dir_onos_sfc'))
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
index 8defdee6..8c33d16e 100644
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ b/functest/opnfv_tests/vnf/ims/opera_ims.py
@@ -16,14 +16,14 @@ from opera import openo_connect
import requests
import functest.opnfv_tests.vnf.ims.clearwater_ims_base as clearwater_ims_base
-from functest.utils.constants import CONST
class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase):
- def __init__(self, project='functest', case_name='opera_ims',
- repo=CONST.dir_repo_opera, cmd=''):
- super(OperaIms, self).__init__(project, case_name, repo, cmd)
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "opera_ims"
+ super(OperaIms, self).__init__(**kwargs)
self.logger = logging.getLogger(__name__)
self.ellis_file = os.path.join(self.result_dir, 'ellis.info')
self.live_test_file = os.path.join(self.result_dir,
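The opera_ims.py constructor now accepts **kwargs and injects a default case_name. A minimal sketch of that pattern; BaseCase below is a stand-in, not the real ClearwaterOnBoardingBase.

class BaseCase(object):
    def __init__(self, **kwargs):
        self.case_name = kwargs.get("case_name", "unknown")


class OperaIms(BaseCase):
    def __init__(self, **kwargs):
        # Inject a default case_name only when the caller did not supply one.
        if "case_name" not in kwargs:
            kwargs["case_name"] = "opera_ims"
        super(OperaIms, self).__init__(**kwargs)


print(OperaIms().case_name)                      # opera_ims
print(OperaIms(case_name="custom").case_name)    # custom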
diff --git a/functest/tests/unit/ci/test_prepare_env.py b/functest/tests/unit/ci/test_prepare_env.py
index 6bb1c13e..513e7230 100644
--- a/functest/tests/unit/ci/test_prepare_env.py
+++ b/functest/tests/unit/ci/test_prepare_env.py
@@ -190,12 +190,17 @@ class PrepareEnvTesting(unittest.TestCase):
CONST.__getattribute__('dir_functest_conf'))
mock_method.assert_any_call(
CONST.__getattribute__('dir_functest_data'))
+ mock_method.assert_any_call(
+ CONST.__getattribute__('dir_functest_images'))
mock_logger_info.assert_any_call(" %s created." %
CONST.__getattribute__(
'dir_functest_conf'))
mock_logger_info.assert_any_call(" %s created." %
CONST.__getattribute__(
'dir_functest_data'))
+ mock_logger_info.assert_any_call(" %s created." %
+ CONST.__getattribute__(
+ 'dir_functest_images'))
@mock.patch('functest.ci.prepare_env.logger.info')
@mock.patch('functest.ci.prepare_env.logger.debug')
@@ -211,6 +216,9 @@ class PrepareEnvTesting(unittest.TestCase):
mock_logger_debug.assert_any_call(" %s already exists." %
CONST.__getattribute__(
'dir_functest_data'))
+ mock_logger_debug.assert_any_call(" %s already exists." %
+ CONST.__getattribute__(
+ 'dir_functest_images'))
def _get_env_cred_dict(self, os_prefix=''):
return {'OS_USERNAME': os_prefix + 'username',
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index d48c79cc..88e5d2b8 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -5,19 +5,32 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
import logging
+import unittest
import mock
from functest.ci import run_tests
from functest.utils.constants import CONST
+from functest.core.testcase import TestCase
+
+
+class FakeModule(TestCase):
+
+ def run(self):
+ return TestCase.EX_OK
+
+ def push_to_db(self):
+ return TestCase.EX_OK
+
+ def is_successful(self):
+ return TestCase.EX_OK
class RunTestsTesting(unittest.TestCase):
def setUp(self):
+ self.runner = run_tests.Runner()
self.sep = 'test_sep'
self.creds = {'OS_AUTH_URL': 'http://test_ip:test_port/v2.0',
'OS_USERNAME': 'test_os_username',
@@ -36,11 +49,10 @@ class RunTestsTesting(unittest.TestCase):
self.tiers.configure_mock(**attrs)
self.run_tests_parser = run_tests.RunTestsParser()
- self.global_variables = run_tests.GlobalVariables()
@mock.patch('functest.ci.run_tests.logger.info')
def test_print_separator(self, mock_logger_info):
- run_tests.print_separator(self.sep)
+ self.runner.print_separator(self.sep)
mock_logger_info.assert_called_once_with(self.sep * 44)
@mock.patch('functest.ci.run_tests.logger.error')
@@ -48,24 +60,24 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.os.path.isfile',
return_value=False), \
self.assertRaises(Exception):
- run_tests.source_rc_file()
+ self.runner.source_rc_file()
@mock.patch('functest.ci.run_tests.logger.debug')
- def test_source_rc_file_default(self, mock_logger_debug):
- with mock.patch('functest.ci.run_tests.os.path.isfile',
- return_value=True), \
- mock.patch('functest.ci.run_tests.os_utils.source_credentials',
- return_value=self.creds):
- run_tests.source_rc_file()
+ @mock.patch('functest.ci.run_tests.os.path.isfile',
+ return_value=True)
+ def test_source_rc_file_default(self, *args):
+ with mock.patch('functest.ci.run_tests.os_utils.source_credentials',
+ return_value=self.creds):
+ self.runner.source_rc_file()
@mock.patch('functest.ci.run_tests.os_snapshot.main')
def test_generate_os_snapshot(self, mock_os_snap):
- run_tests.generate_os_snapshot()
+ self.runner.generate_os_snapshot()
self.assertTrue(mock_os_snap.called)
@mock.patch('functest.ci.run_tests.os_clean.main')
def test_cleanup(self, mock_os_clean):
- run_tests.cleanup()
+ self.runner.cleanup()
self.assertTrue(mock_os_clean.called)
def test_get_run_dict_if_defined_default(self):
@@ -73,7 +85,7 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.'
'ft_utils.get_dict_by_test',
return_value={'run': mock_obj}):
- self.assertEqual(run_tests.get_run_dict('test_name'),
+ self.assertEqual(self.runner.get_run_dict('test_name'),
mock_obj)
@mock.patch('functest.ci.run_tests.logger.error')
@@ -83,7 +95,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
return_value=None):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
mock_logger_error.assert_called_once_with("Cannot get {}'s config "
"options"
@@ -93,7 +105,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
return_value={}):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
@mock.patch('functest.ci.run_tests.logger.exception')
@@ -103,7 +115,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
side_effect=Exception):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
mock_logger_except.assert_called_once_with("Cannot get {}'s config"
" options"
@@ -114,63 +126,67 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_name.return_value': 'test_name',
'needs_clean.return_value': False}
mock_test.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.print_separator'),\
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.get_run_dict',
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'),\
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=None), \
self.assertRaises(Exception) as context:
- run_tests.run_test(mock_test, 'tier_name')
+            self.runner.run_test(mock_test, 'tier_name')
msg = "Cannot import the class for the test case."
self.assertTrue(msg in context)
- def test_run_tests_default(self):
+ @mock.patch('functest.ci.run_tests.Runner.print_separator')
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.generate_os_snapshot')
+ @mock.patch('functest.ci.run_tests.Runner.cleanup')
+ @mock.patch('importlib.import_module', name="module",
+ return_value=mock.Mock(test_class=mock.Mock(
+ side_effect=FakeModule)))
+ @mock.patch('functest.utils.functest_utils.get_dict_by_test')
+ def test_run_tests_default(self, *args):
mock_test = mock.Mock()
- args = {'get_name.return_value': 'test_name',
- 'needs_clean.return_value': True}
- mock_test.configure_mock(**args)
+ kwargs = {'get_name.return_value': 'test_name',
+ 'needs_clean.return_value': True}
+ mock_test.configure_mock(**kwargs)
test_run_dict = {'module': 'test_module',
- 'class': mock.Mock,
- 'args': 'test_args'}
- with mock.patch('functest.ci.run_tests.print_separator'),\
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_os_snapshot'), \
- mock.patch('functest.ci.run_tests.cleanup'), \
- mock.patch('functest.ci.run_tests.get_run_dict',
- return_value=test_run_dict), \
- self.assertRaises(run_tests.BlockingTestFailed) as context:
- run_tests.GlobalVariables.CLEAN_FLAG = True
- run_tests.run_test(mock_test, 'tier_name')
- msg = 'The test case test_name failed and is blocking'
- self.assertTrue(msg in context)
+ 'class': 'test_class'}
+ with mock.patch('functest.ci.run_tests.Runner.get_run_dict',
+ return_value=test_run_dict):
+ self.runner.clean_flag = True
+ self.runner.run_test(mock_test, 'tier_name')
+ self.assertEqual(self.runner.overall_result,
+ run_tests.Result.EX_OK)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.print_separator'), \
- mock.patch('functest.ci.run_tests.run_test') as mock_method:
- run_tests.run_tier(self.tier)
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'), \
+ mock.patch(
+ 'functest.ci.run_tests.Runner.run_test') as mock_method:
+ self.runner.run_tier(self.tier)
mock_method.assert_any_call('test1', 'test_tier')
mock_method.assert_any_call('test2', 'test_tier')
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_missing_test(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.print_separator'):
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'):
self.tier.get_tests.return_value = None
- self.assertEqual(run_tests.run_tier(self.tier), 0)
+ self.assertEqual(self.runner.run_tier(self.tier), 0)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.run_tier') as mock_method:
+ with mock.patch(
+ 'functest.ci.run_tests.Runner.run_tier') as mock_method:
CONST.__setattr__('CI_LOOP', 'test_ci_loop')
- run_tests.run_all(self.tiers)
+ self.runner.run_all(self.tiers)
mock_method.assert_any_call(self.tier)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_missing_tier(self, mock_logger_info):
CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
- run_tests.run_all(self.tiers)
+ self.runner.run_all(self.tiers)
self.assertTrue(mock_logger_info.called)
def test_main_failed(self):
@@ -179,69 +195,78 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_tier.return_value': False,
'get_test.return_value': False}
mock_obj.configure_mock(**args)
-
with mock.patch('functest.ci.run_tests.tb.TierBuilder'), \
- mock.patch('functest.ci.run_tests.source_rc_file',
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file',
side_effect=Exception):
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
-
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file',
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file',
side_effect=Exception):
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
- def test_main_default(self):
- kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
+ def test_main_tier(self, *args):
+ mock_tier = mock.Mock()
+ args = {'get_name.return_value': 'tier_name'}
+ mock_tier.configure_mock(**args)
+ kwargs = {'test': 'tier_name', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': True,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': mock_tier,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_tier') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_tier') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_test(self, *args):
+ kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
+ mock_test = mock.Mock()
+ args = {'get_name.return_value': 'test_name',
+ 'needs_clean.return_value': True}
+ mock_test.configure_mock(**args)
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': True}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': mock_test}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_test') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_test') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_all_tier(self, *args):
kwargs = {'test': 'all', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_all') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_all') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_any_tier_test_ko(self, *args):
kwargs = {'test': 'any', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
mock.patch('functest.ci.run_tests.logger.debug') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
self.assertTrue(m.called)
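Throughout test_run_tests.py the module-level helpers are replaced by Runner methods, so the tests now patch those methods via their class path and call them on an instance created in setUp. A minimal self-contained sketch of that pattern; Runner here is a toy stand-in for functest.ci.run_tests.Runner.

import unittest

import mock


class Runner(object):
    def source_rc_file(self):
        raise RuntimeError("would touch the real environment")

    def main(self):
        self.source_rc_file()
        return 0


class RunnerTesting(unittest.TestCase):
    def setUp(self):
        self.runner = Runner()

    # Patching the method on the class means the pre-built instance in
    # setUp picks up the mock as well.
    @mock.patch(__name__ + '.Runner.source_rc_file')
    def test_main_default(self, mock_source):
        self.assertEqual(self.runner.main(), 0)
        self.assertTrue(mock_source.called)


if __name__ == '__main__':
    unittest.main()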
diff --git a/functest/tests/unit/cli/commands/test_cli_env.py b/functest/tests/unit/cli/commands/test_cli_env.py
index def30aa1..14e926eb 100644
--- a/functest/tests/unit/cli/commands/test_cli_env.py
+++ b/functest/tests/unit/cli/commands/test_cli_env.py
@@ -26,7 +26,7 @@ class CliEnvTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_testcase.ft_utils.execute_command')
def test_prepare_default(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/prepare_env.py start" %
- CONST.dir_repo_functest)
+ CONST.__getattribute__('dir_repo_functest'))
self.cli_environ.prepare()
mock_ft_utils.assert_called_with(cmd)
@@ -38,29 +38,30 @@ class CliEnvTesting(unittest.TestCase):
mock.patch('functest.cli.commands.cli_testcase.os.remove') \
as mock_os_remove:
cmd = ("python %s/functest/ci/prepare_env.py start" %
- CONST.dir_repo_functest)
+ CONST.__getattribute__('dir_repo_functest'))
self.cli_environ.prepare()
- mock_os_remove.assert_called_once_with(CONST.env_active)
+ mock_os_remove.assert_called_once_with(
+ CONST.__getattribute__('env_active'))
mock_ft_utils.assert_called_with(cmd)
def _test_show_missing_env_var(self, var, *args):
if var == 'INSTALLER_TYPE':
- CONST.INSTALLER_TYPE = None
+ CONST.__setattr__('INSTALLER_TYPE', None)
reg_string = "| INSTALLER: Unknown, \S+\s*|"
elif var == 'INSTALLER_IP':
- CONST.INSTALLER_IP = None
+ CONST.__setattr__('INSTALLER_IP', None)
reg_string = "| INSTALLER: \S+, Unknown\s*|"
elif var == 'SCENARIO':
- CONST.DEPLOY_SCENARIO = None
+ CONST.__setattr__('DEPLOY_SCENARIO', None)
reg_string = "| SCENARIO: Unknown\s*|"
elif var == 'NODE':
- CONST.NODE_NAME = None
+ CONST.__setattr__('NODE_NAME', None)
reg_string = "| POD: Unknown\s*|"
elif var == 'BUILD_TAG':
- CONST.BUILD_TAG = None
+ CONST.__setattr__('BUILD_TAG', None)
reg_string = "| BUILD TAG: None|"
elif var == 'DEBUG':
- CONST.CI_DEBUG = None
+ CONST.__setattr__('CI_DEBUG', None)
reg_string = "| DEBUG FLAG: false\s*|"
elif var == 'STATUS':
reg_string = "| STATUS: not ready\s*|"
@@ -104,7 +105,7 @@ class CliEnvTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_env.os.path.exists',
return_value=False)
def test_show_missing_git_repo_dir(self, *args):
- CONST.dir_repo_functest = None
+ CONST.__setattr__('dir_repo_functest', None)
self.assertRaises(NoSuchPathError, lambda: self.cli_environ.show())
@mock.patch('functest.cli.commands.cli_env.click.echo')
diff --git a/functest/tests/unit/cli/commands/test_cli_os.py b/functest/tests/unit/cli/commands/test_cli_os.py
index 9c576fe4..7ab4ddc3 100644
--- a/functest/tests/unit/cli/commands/test_cli_os.py
+++ b/functest/tests/unit/cli/commands/test_cli_os.py
@@ -68,10 +68,10 @@ class CliOpenStackTesting(unittest.TestCase):
def test_fetch_credentials_default(self, mock_click_echo,
mock_os_path,
mock_ftutils_execute):
- CONST.INSTALLER_TYPE = self.installer_type
- CONST.INSTALLER_IP = self.installer_ip
+ CONST.__setattr__('INSTALLER_TYPE', self.installer_type)
+ CONST.__setattr__('INSTALLER_IP', self.installer_ip)
cmd = ("%s/releng/utils/fetch_os_creds.sh -d %s -i %s -a %s"
- % (CONST.dir_repos,
+ % (CONST.__getattribute__('dir_repos'),
self.openstack_creds,
self.installer_type,
self.installer_ip))
@@ -91,15 +91,13 @@ class CliOpenStackTesting(unittest.TestCase):
def test_fetch_credentials_missing_installer_type(self, mock_click_echo,
mock_os_path,
mock_ftutils_execute):
- installer_type = None
- installer_ip = self.installer_ip
- CONST.INSTALLER_TYPE = installer_type
- CONST.INSTALLER_IP = installer_ip
+ CONST.__setattr__('INSTALLER_TYPE', None)
+ CONST.__setattr__('INSTALLER_IP', self.installer_ip)
cmd = ("%s/releng/utils/fetch_os_creds.sh -d %s -i %s -a %s"
- % (CONST.dir_repos,
+ % (CONST.__getattribute__('dir_repos'),
self.openstack_creds,
- installer_type,
- installer_ip))
+ None,
+ self.installer_ip))
self.cli_os.openstack_creds = self.openstack_creds
self.cli_os.fetch_credentials()
mock_click_echo.assert_any_call("The environment variable "
@@ -108,8 +106,8 @@ class CliOpenStackTesting(unittest.TestCase):
mock_click_echo.assert_any_call("Fetching credentials from "
"installer node '%s' with "
"IP=%s.." %
- (installer_type,
- installer_ip))
+ (None,
+ self.installer_ip))
mock_ftutils_execute.assert_called_once_with(cmd, verbose=False)
@mock.patch('functest.cli.commands.cli_os.ft_utils.execute_command')
@@ -121,10 +119,10 @@ class CliOpenStackTesting(unittest.TestCase):
mock_ftutils_execute):
installer_type = self.installer_type
installer_ip = None
- CONST.INSTALLER_TYPE = installer_type
- CONST.INSTALLER_IP = installer_ip
+ CONST.__setattr__('INSTALLER_TYPE', installer_type)
+ CONST.__setattr__('INSTALLER_IP', installer_ip)
cmd = ("%s/releng/utils/fetch_os_creds.sh -d %s -i %s -a %s"
- % (CONST.dir_repos,
+ % (CONST.__getattribute__('dir_repos'),
self.openstack_creds,
installer_type,
installer_ip))
@@ -143,8 +141,9 @@ class CliOpenStackTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_os.ft_utils.execute_command')
def test_check(self, mock_ftutils_execute):
with mock.patch.object(self.cli_os, 'ping_endpoint'):
- CONST.dir_repo_functest = self.dir_repo_functest
- cmd = CONST.dir_repo_functest + "/functest/ci/check_os.sh"
+ CONST.__setattr__('dir_repo_functest', self.dir_repo_functest)
+ cmd = os.path.join(CONST.__getattribute__('dir_repo_functest'),
+ "functest/ci/check_os.sh")
self.cli_os.check()
mock_ftutils_execute.assert_called_once_with(cmd, verbose=False)
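The CLI test changes apply one mechanical pattern: plain attribute access on CONST is replaced by explicit __setattr__/__getattribute__ calls, presumably so static checkers do not complain about attributes that only exist at runtime. A minimal sketch; Constants below is a toy stand-in, not functest.utils.constants.

class Constants(object):
    pass


CONST = Constants()

# Explicit setter/getter calls instead of CONST.INSTALLER_TYPE = 'fuel'
# and CONST.INSTALLER_TYPE.
CONST.__setattr__('INSTALLER_TYPE', 'fuel')
print(CONST.__getattribute__('INSTALLER_TYPE'))   # fuel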
diff --git a/functest/tests/unit/cli/commands/test_cli_testcase.py b/functest/tests/unit/cli/commands/test_cli_testcase.py
index 4bf808bf..fddfc317 100644
--- a/functest/tests/unit/cli/commands/test_cli_testcase.py
+++ b/functest/tests/unit/cli/commands/test_cli_testcase.py
@@ -40,7 +40,9 @@ class CliTestCasesTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_testcase.ft_utils.execute_command')
def test_run_default(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "-n -r ", self.testname))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "-n -r ", self.testname))
self.cli_tests.run(self.testname, noclean=True, report=True)
mock_ft_utils.assert_called_with(cmd)
@@ -49,7 +51,9 @@ class CliTestCasesTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_testcase.ft_utils.execute_command')
def test_run_noclean_missing_report(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "-n ", self.testname))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "-n ", self.testname))
self.cli_tests.run(self.testname, noclean=True, report=False)
mock_ft_utils.assert_called_with(cmd)
@@ -58,7 +62,9 @@ class CliTestCasesTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_testcase.ft_utils.execute_command')
def test_run_report_missing_noclean(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "-r ", self.testname))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "-r ", self.testname))
self.cli_tests.run(self.testname, noclean=False, report=True)
mock_ft_utils.assert_called_with(cmd)
@@ -67,7 +73,9 @@ class CliTestCasesTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_testcase.ft_utils.execute_command')
def test_run_missing_noclean_report(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "", self.testname))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "", self.testname))
self.cli_tests.run(self.testname, noclean=False, report=False)
mock_ft_utils.assert_called_with(cmd)
diff --git a/functest/tests/unit/cli/commands/test_cli_tier.py b/functest/tests/unit/cli/commands/test_cli_tier.py
index abcdc597..550eec93 100644
--- a/functest/tests/unit/cli/commands/test_cli_tier.py
+++ b/functest/tests/unit/cli/commands/test_cli_tier.py
@@ -88,8 +88,9 @@ class CliTierTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_tier.ft_utils.execute_command')
def test_run_default(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "-n -r ",
- self.tiername))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "-n -r ", self.tiername))
self.cli_tier.run(self.tiername, noclean=True, report=True)
mock_ft_utils.assert_called_with(cmd)
@@ -98,8 +99,9 @@ class CliTierTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_tier.ft_utils.execute_command')
def test_run_report_missing_noclean(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "-r ",
- self.tiername))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "-r ", self.tiername))
self.cli_tier.run(self.tiername, noclean=False, report=True)
mock_ft_utils.assert_called_with(cmd)
@@ -108,8 +110,9 @@ class CliTierTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_tier.ft_utils.execute_command')
def test_run_noclean_missing_report(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "-n ",
- self.tiername))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "-n ", self.tiername))
self.cli_tier.run(self.tiername, noclean=True, report=False)
mock_ft_utils.assert_called_with(cmd)
@@ -118,8 +121,9 @@ class CliTierTesting(unittest.TestCase):
@mock.patch('functest.cli.commands.cli_tier.ft_utils.execute_command')
def test_run_missing_noclean_report(self, mock_ft_utils, mock_os):
cmd = ("python %s/functest/ci/run_tests.py "
- "%s -t %s" % (CONST.dir_repo_functest, "",
- self.tiername))
+ "%s -t %s" %
+ (CONST.__getattribute__('dir_repo_functest'),
+ "", self.tiername))
self.cli_tier.run(self.tiername, noclean=False, report=False)
mock_ft_utils.assert_called_with(cmd)
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index d62f689e..60adf211 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -310,8 +310,6 @@ class ODLMainTesting(ODLTesting):
def test_run_ko(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(odl, 'open', mock.mock_open(),
- create=True), \
self.assertRaises(RobotError):
self._test_main(testcase.TestCase.EX_RUN_ERROR, *args)
@@ -320,71 +318,33 @@ class ODLMainTesting(ODLTesting):
def test_parse_results_ko(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(odl, 'open', mock.mock_open(),
- create=True), \
mock.patch.object(self.test, 'parse_results',
side_effect=RobotError):
self._test_main(testcase.TestCase.EX_RUN_ERROR, *args)
- @mock.patch('os.remove', side_effect=Exception)
- @mock.patch('robot.run')
- @mock.patch('os.makedirs')
- def test_remove_exc(self, *args):
- with mock.patch.object(self.test, 'set_robotframework_vars',
- return_value=True), \
- mock.patch.object(self.test, 'parse_results'), \
- self.assertRaises(Exception):
- self._test_main(testcase.TestCase.EX_OK, *args)
-
- @mock.patch('os.remove')
@mock.patch('robot.run')
@mock.patch('os.makedirs')
def test_ok(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(odl, 'open', mock.mock_open(),
- create=True), \
mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase.TestCase.EX_OK, *args)
- @mock.patch('os.remove')
@mock.patch('robot.run')
@mock.patch('os.makedirs', side_effect=OSError(errno.EEXIST, ''))
def test_makedirs_oserror17(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(odl, 'open', mock.mock_open(),
- create=True) as mock_open, \
mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase.TestCase.EX_OK, *args)
- mock_open.assert_called_once_with(
- os.path.join(odl.ODLTests.res_dir, 'stdout.txt'), 'w+')
- @mock.patch('os.remove')
@mock.patch('robot.run', return_value=1)
@mock.patch('os.makedirs')
def test_testcases_in_failure(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(odl, 'open', mock.mock_open(),
- create=True) as mock_open, \
- mock.patch.object(self.test, 'parse_results'):
- self._test_main(testcase.TestCase.EX_OK, *args)
- mock_open.assert_called_once_with(
- os.path.join(odl.ODLTests.res_dir, 'stdout.txt'), 'w+')
-
- @mock.patch('os.remove', side_effect=OSError)
- @mock.patch('robot.run')
- @mock.patch('os.makedirs')
- def test_remove_oserror(self, *args):
- with mock.patch.object(self.test, 'set_robotframework_vars',
- return_value=True), \
- mock.patch.object(odl, 'open', mock.mock_open(),
- create=True) as mock_open, \
mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase.TestCase.EX_OK, *args)
- mock_open.assert_called_once_with(
- os.path.join(odl.ODLTests.res_dir, 'stdout.txt'), 'w+')
class ODLRunTesting(ODLTesting):
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 3c939bb5..b9e78616 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -37,7 +37,7 @@ class OSRallyTesting(unittest.TestCase):
self.polling_iter = 2
def test_build_task_args_missing_floating_network(self):
- CONST.OS_AUTH_URL = None
+ CONST.__setattr__('OS_AUTH_URL', None)
with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
'os_utils.get_external_net',
return_value=None):
@@ -45,7 +45,7 @@ class OSRallyTesting(unittest.TestCase):
self.assertEqual(task_args['floating_network'], '')
def test_build_task_args_missing_net_id(self):
- CONST.OS_AUTH_URL = None
+ CONST.__setattr__('OS_AUTH_URL', None)
self.rally_base.network_dict['net_id'] = ''
with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
'os_utils.get_external_net',
@@ -54,7 +54,7 @@ class OSRallyTesting(unittest.TestCase):
self.assertEqual(task_args['netid'], '')
def test_build_task_args_missing_auth_url(self):
- CONST.OS_AUTH_URL = None
+ CONST.__setattr__('OS_AUTH_URL', None)
with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
'os_utils.get_external_net',
return_value='test_floating_network'):
@@ -134,8 +134,8 @@ class OSRallyTesting(unittest.TestCase):
'lineline')
def test_excl_scenario_default(self):
- CONST.INSTALLER_TYPE = 'test_installer'
- CONST.DEPLOY_SCENARIO = 'test_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
dic = {'scenario': [{'scenarios': ['test_scenario'],
'installers': ['test_installer'],
'tests': ['test']}]}
@@ -152,8 +152,8 @@ class OSRallyTesting(unittest.TestCase):
[])
def test_excl_func_default(self):
- CONST.INSTALLER_TYPE = 'test_installer'
- CONST.DEPLOY_SCENARIO = 'test_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
dic = {'functionality': [{'functions': ['no_live_migration'],
'tests': ['test']}]}
with mock.patch('__builtin__.open', mock.mock_open()), \
@@ -341,19 +341,6 @@ class OSRallyTesting(unittest.TestCase):
self.rally_base._run_tests()
self.rally_base._run_task.assert_any_call('test1')
- @mock.patch('functest.opnfv_tests.openstack.rally.rally.logger.info')
- def test_generate_report(self, mock_logger_info):
- summary = [{'test_name': 'test_name',
- 'overall_duration': 5,
- 'nb_tests': 3,
- 'success': 5}]
- self.rally_base.summary = summary
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'ft_utils.check_success_rate',
- return_value='criteria'):
- self.rally_base._generate_report()
- self.assertTrue(mock_logger_info.called)
-
def test_clean_up_default(self):
self.rally_base.volume_type = mock.Mock()
self.rally_base.cinder_client = mock.Mock()
diff --git a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
index 58ec5a07..8c149baa 100644
--- a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
+++ b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
@@ -17,10 +17,12 @@ from functest.utils.constants import CONST
class OSRefstackClientTesting(unittest.TestCase):
- _config = os.path.join(CONST.dir_functest_test,
- CONST.refstack_tempest_conf_path)
- _testlist = os.path.join(CONST.dir_functest_test,
- CONST.refstack_defcore_list)
+ _config = os.path.join(
+ CONST.__getattribute__('dir_functest_test'),
+ CONST.__getattribute__('refstack_tempest_conf_path'))
+ _testlist = os.path.join(
+ CONST.__getattribute__('dir_functest_test'),
+ CONST.__getattribute__('refstack_defcore_list'))
def setUp(self):
self.defaultargs = {'config': self._config,
@@ -28,12 +30,13 @@ class OSRefstackClientTesting(unittest.TestCase):
self.refstackclient = refstack_client.RefstackClient()
def test_source_venv(self):
- CONST.dir_refstack_client = 'test_repo_dir'
+ CONST.__setattr__('dir_refstack_client', 'test_repo_dir')
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("cd {0};"
". .venv/bin/activate;"
- "cd -;".format(CONST.dir_refstack_client))
+ "cd -;"
+ .format(CONST.__getattribute__('dir_refstack_client')))
self.refstackclient.source_venv()
m.assert_any_call(cmd)
@@ -44,9 +47,10 @@ class OSRefstackClientTesting(unittest.TestCase):
'refstack_client.ft_utils.execute_command') as m:
cmd = ("cd {0};"
"./refstack-client test -c {1} -v --test-list {2};"
- "cd -;".format(CONST.dir_refstack_client,
- config,
- testlist))
+ "cd -;"
+ .format(CONST.__getattribute__('dir_refstack_client'),
+ config,
+ testlist))
self.refstackclient.run_defcore(config, testlist)
m.assert_any_call(cmd)
@@ -62,7 +66,7 @@ class OSRefstackClientTesting(unittest.TestCase):
self.assertEqual(self.refstackclient.main(**kwargs), status)
if len(args) > 0:
args[0].assert_called_once_with(
- refstack_client.RefstackClient.result_dir)
+ refstack_client.RefstackClient.result_dir)
if len(args) > 1:
args
diff --git a/functest/tests/unit/openstack/tempest/test_conf_utils.py b/functest/tests/unit/openstack/tempest/test_conf_utils.py
index bdd1c7a6..23f6e45c 100644
--- a/functest/tests/unit/openstack/tempest/test_conf_utils.py
+++ b/functest/tests/unit/openstack/tempest/test_conf_utils.py
@@ -52,12 +52,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
return_value=(mock.Mock(), None)), \
self.assertRaises(Exception) as context:
- CONST.tempest_use_custom_images = True
+ CONST.__setattr__('tempest_use_custom_images', True)
conf_utils.create_tempest_resources()
msg = 'Failed to create image'
self.assertTrue(msg in context)
- CONST.tempest_use_custom_images = False
+ CONST.__setattr__('tempest_use_custom_images', False)
conf_utils.create_tempest_resources(use_custom_images=True)
msg = 'Failed to create image'
self.assertTrue(msg in context)
@@ -82,20 +82,20 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'os_utils.get_or_create_flavor',
return_value=(mock.Mock(), None)), \
self.assertRaises(Exception) as context:
- CONST.tempest_use_custom_images = True
- CONST.tempest_use_custom_flavors = True
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', True)
conf_utils.create_tempest_resources()
msg = 'Failed to create flavor'
self.assertTrue(msg in context)
- CONST.tempest_use_custom_images = True
- CONST.tempest_use_custom_flavors = False
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', False)
conf_utils.create_tempest_resources(use_custom_flavors=False)
msg = 'Failed to create flavor'
self.assertTrue(msg in context)
def test_get_verifier_id_missing_verifier(self):
- CONST.tempest_deployment_name = 'test_deploy_name'
+ CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen, \
self.assertRaises(Exception):
@@ -106,7 +106,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
conf_utils.get_verifier_id(),
def test_get_verifier_id_default(self):
- CONST.tempest_deployment_name = 'test_deploy_name'
+ CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen:
mock_stdout = mock.Mock()
@@ -118,7 +118,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'test_deploy_id')
def test_get_verifier_deployment_id_missing_rally(self):
- CONST.rally_deployment_name = 'test_rally_deploy_name'
+        CONST.__setattr__('rally_deployment_name', 'test_rally_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen, \
self.assertRaises(Exception):
@@ -129,7 +129,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
conf_utils.get_verifier_deployment_id(),
def test_get_verifier_deployment_id_default(self):
- CONST.rally_deployment_name = 'test_rally_deploy_name'
+        CONST.__setattr__('rally_deployment_name', 'test_rally_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen:
mock_stdout = mock.Mock()
@@ -238,8 +238,8 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.shutil.copyfile'):
- CONST.dir_functest_test = 'test_dir'
- CONST.refstack_tempest_conf_path = 'test_path'
+ CONST.__setattr__('dir_functest_test', 'test_dir')
+ CONST.__setattr__('refstack_tempest_conf_path', 'test_path')
conf_utils.configure_tempest_defcore('test_dep_dir',
img_flavor_dict)
mset.assert_any_call('compute', 'image_ref', 'test_image_id')
@@ -264,8 +264,8 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.backup_tempest_config'):
- CONST.dir_functest_test = 'test_dir'
- CONST.OS_ENDPOINT_TYPE = None
+ CONST.__setattr__('dir_functest_test', 'test_dir')
+ CONST.__setattr__('OS_ENDPOINT_TYPE', None)
conf_utils.\
configure_tempest_update_params('test_conf_file',
IMAGE_ID=image_id,
@@ -275,25 +275,25 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
self.assertTrue(mwrite.called)
def test_configure_tempest_update_params_missing_image_id(self):
- CONST.tempest_use_custom_images = True
+ CONST.__setattr__('tempest_use_custom_images', True)
self._test_missing_param(('compute', 'image_ref',
'test_image_id'), 'test_image_id',
None)
def test_configure_tempest_update_params_missing_image_id_alt(self):
- CONST.tempest_use_custom_images = True
+ CONST.__setattr__('tempest_use_custom_images', True)
conf_utils.IMAGE_ID_ALT = 'test_image_id_alt'
self._test_missing_param(('compute', 'image_ref_alt',
'test_image_id_alt'), None, None)
def test_configure_tempest_update_params_missing_flavor_id(self):
- CONST.tempest_use_custom_flavors = True
+ CONST.__setattr__('tempest_use_custom_flavors', True)
self._test_missing_param(('compute', 'flavor_ref',
'test_flavor_id'), None,
'test_flavor_id')
def test_configure_tempest_update_params_missing_flavor_id_alt(self):
- CONST.tempest_use_custom_flavors = True
+ CONST.__setattr__('tempest_use_custom_flavors', True)
conf_utils.FLAVOR_ID_ALT = 'test_flavor_id_alt'
self._test_missing_param(('compute', 'flavor_ref_alt',
'test_flavor_id_alt'), None,
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index 8476f9f7..b8b258b3 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -112,8 +112,8 @@ class OSTempestTesting(unittest.TestCase):
mock.patch.object(self.tempestcommon, 'read_file',
return_value=['test1', 'test2']):
conf_utils.TEMPEST_BLACKLIST = Exception
- CONST.INSTALLER_TYPE = 'installer_type'
- CONST.DEPLOY_SCENARIO = 'deploy_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'installer_type')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'deploy_scenario')
self.tempestcommon.apply_tempest_blacklist()
obj = m()
obj.write.assert_any_call('test1\n')
@@ -128,8 +128,8 @@ class OSTempestTesting(unittest.TestCase):
return_value=['test1', 'test2']), \
mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'yaml.safe_load', return_value=item_dict):
- CONST.INSTALLER_TYPE = 'installer_type'
- CONST.DEPLOY_SCENARIO = 'deploy_scenario'
+ CONST.__setattr__('INSTALLER_TYPE', 'installer_type')
+ CONST.__setattr__('DEPLOY_SCENARIO', 'deploy_scenario')
self.tempestcommon.apply_tempest_blacklist()
obj = m()
obj.write.assert_any_call('test1\n')
@@ -149,24 +149,6 @@ class OSTempestTesting(unittest.TestCase):
assert_any_call("Starting Tempest test suite: '%s'."
% cmd_line)
- @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.logger.info')
- def test_parse_verifier_result_default(self, mock_logger_info):
- self.tempestcommon.VERIFICATION_ID = 'test_uuid'
- self.tempestcommon.case_name = 'test_case_name'
- stdout = ['Testscount||2', 'Success||2', 'Skipped||0', 'Failures||0']
- with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'subprocess.Popen') as mock_popen, \
- mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'ft_utils.check_success_rate') as mock_method, \
- mock.patch('__builtin__.open', mock.mock_open()):
- mock_stdout = mock.Mock()
- attrs = {'stdout': stdout}
- mock_stdout.configure_mock(**attrs)
- mock_popen.return_value = mock_stdout
-
- self.tempestcommon.parse_verifier_result()
- mock_method.assert_any_call('test_case_name', 100)
-
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs',
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index 70ebe258..0fe7e91d 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -561,22 +561,6 @@ class FunctestUtilsTesting(unittest.TestCase):
assert_called_once_with(self.parameter,
self.config_yaml)
- def test_check_success_rate_default(self):
- with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
- as mock_criteria:
- mock_criteria.return_value = self.criteria
- resp = functest_utils.check_success_rate(self.case_name,
- self.result)
- self.assertEqual(resp, 100)
-
- def test_check_success_rate_failed(self):
- with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
- as mock_criteria:
- mock_criteria.return_value = self.criteria
- resp = functest_utils.check_success_rate(self.case_name,
- 0)
- self.assertEqual(resp, 0)
-
# TODO: merge_dicts
def test_get_testcases_file_dir(self):
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 744258b3..bf30f56e 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -379,16 +379,6 @@ def get_functest_config(parameter):
return get_parameter_from_yaml(parameter, yaml_)
-def check_success_rate(case_name, result):
- # It should be removed as TestCase tests criteria
- # and result.
- logger.warning('check_success_rate will be removed soon')
- criteria = get_criteria_by_test(case_name)
- if type(criteria) == int and result >= criteria:
- return 100
- return 0
-
-
def merge_dicts(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
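The merge_dicts context above is cut off by the hunk. Assuming the usual recursive, generator-based merge where dict2 wins on conflicts, a self-contained sketch looks like this (the continuation is an assumption, not taken from the diff):

def merge_dicts(dict1, dict2):
    for k in set(dict1.keys()).union(dict2.keys()):
        if k in dict1 and k in dict2:
            if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
                # Merge nested dictionaries recursively.
                yield (k, dict(merge_dicts(dict1[k], dict2[k])))
            else:
                # dict2 wins on conflicting scalar values.
                yield (k, dict2[k])
        elif k in dict1:
            yield (k, dict1[k])
        else:
            yield (k, dict2[k])


print(dict(merge_dicts({'a': 1, 'b': {'x': 1}},
                       {'b': {'y': 2}, 'c': 3})))
# {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}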
diff --git a/requirements.txt b/requirements.txt
index 4170157c..65b36979 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,7 +14,7 @@ python-keystoneclient==3.5.0
python-neutronclient==6.0.0
python-novaclient==6.0.0
python-congressclient==1.5.0
-virtualenv==15.1.0
+python-tackerclient==0.7.0
pexpect==4.0
requests>=2.8.0
robotframework==3.0.2
diff --git a/run_unit_tests.sh b/run_unit_tests.sh
index 9780de7a..86096fab 100755
--- a/run_unit_tests.sh
+++ b/run_unit_tests.sh
@@ -14,13 +14,16 @@ fi
# ***************
echo "Running unit tests..."
+sudo apt-get install -y build-essential python-dev python-pip
+sudo pip install virtualenv==15.1.0
+
# start vitual env
virtualenv $WORKSPACE/functest_venv
source $WORKSPACE/functest_venv/bin/activate
# install python packages
-sudo apt-get install -y build-essential python-dev python-pip
pip install --upgrade pip
+pip install -r $WORKSPACE/requirements.txt
pip install -r $WORKSPACE/test-requirements.txt
pip install $WORKSPACE
diff --git a/test-requirements.txt b/test-requirements.txt
index 4ba763a5..b0d4ff8d 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -5,27 +5,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
-click==6.6
coverage==4.1
-dnspython==1.15.0
-gitpython==1.0.1
-iniparse==0.4
mock==1.3.0
nose==1.3.7
-python-ceilometerclient==2.6.2
-python-congressclient==1.5.0
-python-heatclient==1.7.0
-python-keystoneclient==3.5.0
-python-neutronclient==6.0.0
-python-openstackclient==2.3.0
-python-tackerclient==0.7.0
-pyyaml==3.10
-requests==2.8.0
-robotframework==3.0.2
-robotframework-httplibrary==0.4.2
-robotframework-requests==0.4.7
-robotframework-sshlibrary==2.1.3
-subprocess32==3.2.7
-virtualenv==15.1.0
-PrettyTable>=0.7.1,<0.8 # BSD
-six>=1.9.0 # MIT