-rw-r--r--  docs/com/pres/framework/framework.md                  |  26
-rw-r--r--  functest/ci/generate_report.py                        | 149
-rwxr-xr-x  functest/ci/run_tests.py                              |  59
-rw-r--r--  functest/ci/testcases.yaml                            |  59
-rw-r--r--  functest/ci/tier_builder.py                           |  11
-rw-r--r--  functest/ci/tier_handler.py                           |   5
-rw-r--r--  functest/core/feature.py                              |   1
-rw-r--r--  functest/core/pytest_suite_runner.py                  |  18
-rw-r--r--  functest/core/testcase.py                             |  16
-rw-r--r--  functest/tests/unit/ci/test_generate_report.py        | 129
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py              |  31
-rw-r--r--  functest/tests/unit/ci/test_tier_builder.py           |   8
-rw-r--r--  functest/tests/unit/ci/test_tier_handler.py           |   5
-rw-r--r--  functest/tests/unit/core/test_pytest_suite_runner.py  |  51
-rw-r--r--  functest/tests/unit/core/test_testcase.py             |   7
-rw-r--r--  functest/tests/unit/core/test_vnf.py                  | 142
-rw-r--r--  functest/tests/unit/utils/test_decorators.py          | 135
-rw-r--r--  functest/utils/decorators.py                          |   2
-rw-r--r--  requirements.txt                                      |   3
-rw-r--r--  test-requirements.txt                                 |   3
20 files changed, 469 insertions, 391 deletions
diff --git a/docs/com/pres/framework/framework.md b/docs/com/pres/framework/framework.md
index b80ad3dd7..3c1aae1b8 100644
--- a/docs/com/pres/framework/framework.md
+++ b/docs/com/pres/framework/framework.md
@@ -2,11 +2,11 @@
created by [Cédric Ollivier](mailto:cedric.ollivier@orange.com)
-2017/04/24
+2017/05/06
Note:
-- Functest integrates lots of heteregeounous testcases:
+- Functest integrates lots of heterogeneous testcases:
- python vs bash
- internal vs external
- it aims to benefit from object programming
@@ -33,11 +33,11 @@ Note:
### Our target
- limit run_tests.py instructions by defining:
- - the basic testcase attritutes
+ - the basic testcase attributes
- all common operations
- the status codes expected
- avoid duplicating codes between testcases
-- ease the developpement of third-party testcases (aka features)
+- ease the development of third-party testcases (aka features)
@@ -51,6 +51,7 @@ base model for single test case
- project_name (default: 'functest')
- case_name
- criteria
+- result
- start_time
- stop_time
- details
@@ -61,7 +62,8 @@ base model for single test case
| Method | Purpose |
|-------------------|--------------------------------------------|
| run(**kwargs) | run the test case |
-| check_criteria() | interpret the results of the test case |
+| is_successful() | interpret the results of the test case |
+| get_duration() | return the duration of the test case |
| push_to_db() | push the results of the test case to the DB|
@@ -69,7 +71,7 @@ base model for single test case
- the subclasses must override the default implementation which is false on purpose
- the new implementation must set the following attributes to push the results to DB:
- - criteria
+ - result
- start_time
- stop_time
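
[Aside, not part of the patch: a minimal sketch of the contract these slides describe. is_successful() compares the new integer result against criteria, and get_duration() derives from the start/stop timestamps. Illustrative only, not the verbatim functest implementation.]

```python
# Sketch only: the result/criteria contract described in the slides.
class TestCase(object):
    EX_OK = 0
    EX_TESTCASE_FAILED = 1

    def __init__(self, **kwargs):
        self.case_name = kwargs.get('case_name', '')
        self.project_name = kwargs.get('project_name', 'functest')
        self.criteria = kwargs.get('criteria', 100)  # integer threshold
        self.result = 0                              # set by run()
        self.start_time = 0
        self.stop_time = 0

    def is_successful(self):
        # False (EX_TESTCASE_FAILED) on purpose until run() sets result.
        if isinstance(self.result, (int, float)) \
                and self.result >= self.criteria:
            return TestCase.EX_OK
        return TestCase.EX_TESTCASE_FAILED

    def get_duration(self):
        # 'XX:XX' when the timestamps were never set.
        try:
            assert self.start_time and self.stop_time
            return "{0:02.0f}:{1:02.0f}".format(
                *divmod(self.stop_time - self.start_time, 60))
        except AssertionError:
            return "XX:XX"
```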
@@ -99,7 +101,7 @@ except KeyError:
if result == testcase.TestCase.EX_OK:
if GlobalVariables.REPORT_FLAG:
test_case.push_to_db()
- result = test_case.check_criteria()
+ result = test_case.is_successful()
```
@@ -121,7 +123,7 @@ class Test(testcase.TestCase):
def run(self, **kwargs):
self.start_time = time.time()
print "Hello World"
- self.criteria = 'PASS'
+ self.result = 100
self.stop_time = time.time()
return testcase.TestCase.EX_OK
```
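
[Aside: a hypothetical driver for the Test class above, mirroring the run_tests.py excerpt from the earlier slide.]

```python
# Hypothetical driver for the Test class defined above.
test = Test(case_name='first', project_name='functest', criteria=100)
if test.run() == testcase.TestCase.EX_OK:  # prints "Hello World"
    # push_to_db() would run here when reporting is enabled
    assert test.is_successful() == testcase.TestCase.EX_OK  # 100 >= 100
    print(test.get_duration())  # e.g. '00:00'
```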
@@ -132,7 +134,7 @@ class Test(testcase.TestCase):
```yaml
case_name: first
project_name: functest
-criteria: 'status == "PASS"'
+criteria: 100
blocking: true
clean_flag: false
description: ''
@@ -164,7 +166,7 @@ base model for single feature
- allows executing any Python method by calling execute()
- sets the following attributes required to push the results to DB:
- - criteria
+ - result
- start_time
- stop_time
- doesn't fulfill details when pushing the results to the DB.
@@ -200,7 +202,7 @@ class Test(feature.Feature):
```yaml
case_name: second
project_name: functest
-criteria: 'status == "PASS"'
+criteria: 100
blocking: true
clean_flag: false
description: ''
@@ -234,7 +236,7 @@ execute the cmd passed as arg.
```
case_name: third
project_name: functest
-criteria: 'status == "PASS"'
+criteria: 100
blocking: true
clean_flag: false
description: ''
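
[Aside: a sketch of the BashFeature pattern this third slide relies on (execute the cmd passed as arg, with the exit status driving the result); the real class lives in functest.core.feature and may differ in detail.]

```python
# Sketch of the BashFeature pattern: run the yaml-provided command
# and report its exit status (0 maps to result=100 upstream).
import subprocess

class BashFeature(Feature):  # Feature as presented in the slides above
    def execute(self, **kwargs):
        return subprocess.call(kwargs["cmd"], shell=True)
```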
diff --git a/functest/ci/generate_report.py b/functest/ci/generate_report.py
deleted file mode 100644
index e400b1b64..000000000
--- a/functest/ci/generate_report.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import json
-import logging
-import re
-import urllib2
-
-import functest.utils.functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-COL_1_LEN = 25
-COL_2_LEN = 15
-COL_3_LEN = 12
-COL_4_LEN = 15
-COL_5_LEN = 75
-
-# If we run from CI (Jenkins) we will push the results to the DB
-# and then we can print the url to the specific test result
-
-
-logger = logging.getLogger(__name__)
-
-
-def init(tiers_to_run=[]):
- test_cases_arr = []
- for tier in tiers_to_run:
- for test in tier.get_tests():
- test_cases_arr.append({'test_name': test.get_name(),
- 'tier_name': tier.get_name(),
- 'result': 'Not executed',
- 'duration': '0',
- 'url': ''})
- return test_cases_arr
-
-
-def get_results_from_db():
- url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
- CONST.BUILD_TAG)
- logger.debug("Query to rest api: %s" % url)
- try:
- data = json.load(urllib2.urlopen(url))
- return data['results']
- except:
- logger.error("Cannot read content from the url: %s" % url)
- return None
-
-
-def get_data(test, results):
- test_result = test['result']
- url = ''
- for test_db in results:
- if test['test_name'] in test_db['case_name']:
- id = test_db['_id']
- url = ft_utils.get_db_url() + '/' + id
- test_result = test_db['criteria']
-
- return {"url": url, "result": test_result}
-
-
-def print_line(w1, w2='', w3='', w4='', w5=''):
- str = ('| ' + w1.ljust(COL_1_LEN - 1) +
- '| ' + w2.ljust(COL_2_LEN - 1) +
- '| ' + w3.ljust(COL_3_LEN - 1) +
- '| ' + w4.ljust(COL_4_LEN - 1))
- if CONST.__getattribute__('IS_CI_RUN'):
- str += ('| ' + w5.ljust(COL_5_LEN - 1))
- str += '|\n'
- return str
-
-
-def print_line_no_columns(str):
- TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN + 2
- if CONST.__getattribute__('IS_CI_RUN'):
- TOTAL_LEN += COL_5_LEN + 1
- return ('| ' + str.ljust(TOTAL_LEN) + "|\n")
-
-
-def print_separator(char="=", delimiter="+"):
- str = ("+" + char * COL_1_LEN +
- delimiter + char * COL_2_LEN +
- delimiter + char * COL_3_LEN +
- delimiter + char * COL_4_LEN)
- if CONST.__getattribute__('IS_CI_RUN'):
- str += (delimiter + char * COL_5_LEN)
- str += '+\n'
- return str
-
-
-def main(args=[]):
- executed_test_cases = args
-
- if CONST.__getattribute__('IS_CI_RUN'):
- results = get_results_from_db()
- if results is not None:
- for test in executed_test_cases:
- data = get_data(test, results)
- test.update({"url": data['url'],
- "result": data['result']})
-
- TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN
- if CONST.__getattribute__('IS_CI_RUN'):
- TOTAL_LEN += COL_5_LEN
- MID = TOTAL_LEN / 2
-
- if CONST.__getattribute__('BUILD_TAG') is not None:
- if re.search("daily", CONST.__getattribute__('BUILD_TAG')) is not None:
- CONST.__setattr__('CI_LOOP', 'daily')
- else:
- CONST.__setattr__('CI_LOOP', 'weekly')
-
- str = ''
- str += print_separator('=', delimiter="=")
- str += print_line_no_columns(' ' * (MID - 8) + 'FUNCTEST REPORT')
- str += print_separator('=', delimiter="=")
- str += print_line_no_columns(' ')
- str += print_line_no_columns(" Deployment description:")
- str += print_line_no_columns(" INSTALLER: %s"
- % CONST.__getattribute__('INSTALLER_TYPE'))
- if CONST.__getattribute__('DEPLOY_SCENARIO') is not None:
- str += print_line_no_columns(" SCENARIO: %s"
- % CONST.__getattribute__(
- 'DEPLOY_SCENARIO'))
- if CONST.__getattribute__('BUILD_TAG') is not None:
- str += print_line_no_columns(" BUILD TAG: %s"
- % CONST.__getattribute__('BUILD_TAG'))
- if CONST.__getattribute__('CI_LOOP') is not None:
- str += print_line_no_columns(" CI LOOP: %s"
- % CONST.__getattribute__('CI_LOOP'))
- str += print_line_no_columns(' ')
- str += print_separator('=')
- if CONST.__getattribute__('IS_CI_RUN'):
- str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT', 'URL')
- else:
- str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT')
- str += print_separator('=')
- for test in executed_test_cases:
- str += print_line(test['test_name'],
- test['tier_name'],
- test['duration'],
- test['result'],
- test['url'])
- str += print_separator('-')
-
- logger.info("\n\n\n%s" % str)
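
[Aside: the hand-rolled ljust()/separator arithmetic deleted above is superseded by prettytable in run_tests.py and testcase.py below; a sketch of the same report table with made-up row data.]

```python
# Sketch: the deleted report table rebuilt with prettytable.
import prettytable

msg = prettytable.PrettyTable(
    header_style='upper', padding_width=5,
    field_names=['test case', 'tier', 'duration', 'result'])
msg.add_row(['connection_check', 'healthcheck', '00:02', 'PASS'])
print(msg)
```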
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 1396644f7..76760096b 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -17,7 +17,8 @@ import os
import re
import sys
-import functest.ci.generate_report as generate_report
+import prettytable
+
import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
import functest.utils.functest_utils as ft_utils
@@ -39,6 +40,10 @@ class BlockingTestFailed(Exception):
pass
+class TestNotEnabled(Exception):
+ pass
+
+
class RunTestsParser(object):
def __init__(self):
@@ -99,13 +104,6 @@ def cleanup():
os_clean.main()
-def update_test_info(test_name, result, duration):
- for test in GlobalVariables.EXECUTED_TEST_CASES:
- if test['test_name'] == test_name:
- test.update({"result": result,
- "duration": duration})
-
-
def get_run_dict(testname):
try:
dict = ft_utils.get_dict_by_test(testname)
@@ -120,8 +118,9 @@ def get_run_dict(testname):
def run_test(test, tier_name, testcases=None):
- duration = "XX:XX"
- result_str = "PASS"
+ if not test.is_enabled():
+ raise TestNotEnabled("The test case {} is not enabled"
+ .format(test.get_name()))
test_name = test.get_name()
logger.info("\n") # blank line
print_separator("=")
@@ -145,6 +144,7 @@ def run_test(test, tier_name, testcases=None):
cls = getattr(module, run_dict['class'])
test_dict = ft_utils.get_dict_by_test(test_name)
test_case = cls(**test_dict)
+ GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
try:
kwargs = run_dict['args']
result = test_case.run(**kwargs)
@@ -154,8 +154,7 @@ def run_test(test, tier_name, testcases=None):
if GlobalVariables.REPORT_FLAG:
test_case.push_to_db()
result = test_case.is_successful()
- duration = test_case.get_duration()
- logger.info("\n%s\n", test_case)
+ logger.info("Test result:\n\n%s\n", test_case)
except ImportError:
logger.exception("Cannot import module {}".format(
run_dict['module']))
@@ -167,22 +166,13 @@ def run_test(test, tier_name, testcases=None):
if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
cleanup()
-
if result != testcase.TestCase.EX_OK:
logger.error("The test case '%s' failed. " % test_name)
GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
- result_str = "FAIL"
-
if test.is_blocking():
- if not testcases or testcases == "all":
- # if it is a single test we don't print the whole results table
- update_test_info(test_name, result_str, duration)
- generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
raise BlockingTestFailed("The test case {} failed and is blocking"
.format(test.get_name()))
- update_test_info(test_name, result_str, duration)
-
def run_tier(tier):
tier_name = tier.get_name()
@@ -214,12 +204,9 @@ def run_all(tiers):
tier.get_test_names()))
logger.info("Tests to be executed:%s" % summary)
- GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
for tier in tiers_to_run:
run_tier(tier)
- generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
-
def main(**kwargs):
@@ -238,12 +225,10 @@ def main(**kwargs):
if kwargs['test']:
source_rc_file()
if _tiers.get_tier(kwargs['test']):
- GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
- [_tiers.get_tier(kwargs['test'])])
run_tier(_tiers.get_tier(kwargs['test']))
elif _tiers.get_test(kwargs['test']):
run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier(kwargs['test']),
+ _tiers.get_tier_name(kwargs['test']),
kwargs['test'])
elif kwargs['test'] == "all":
run_all(_tiers)
@@ -261,6 +246,26 @@ def main(**kwargs):
except Exception as e:
logger.error(e)
GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+ 'CI_LOOP']:
+ msg.add_row([env_var, CONST.__getattribute__(env_var)])
+ logger.info("Deployment description: \n\n%s\n", msg)
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier', 'duration', 'result'])
+ for test_case in GlobalVariables.EXECUTED_TEST_CASES:
+ result = 'PASS' if(test_case.is_successful(
+ ) == test_case.EX_OK) else 'FAIL'
+ msg.add_row([test_case.case_name, test_case.project_name,
+ _tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
return GlobalVariables.OVERALL_RESULT
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index 7009e910c..d98a2de23 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -19,7 +19,6 @@ tiers:
simple queries. When the config value of
snaps.use_keystone is True, functest must have access to
the cloud's private network.
-
dependencies:
installer: '^((?!netvirt).)*$'
scenario: ''
@@ -39,13 +38,13 @@ tiers:
simple queries. When the config value of
snaps.use_keystone is True, functest must have access to
the cloud's private network.
-
dependencies:
installer: '^((?!netvirt).)*$'
scenario: ''
run:
module: 'functest.opnfv_tests.openstack.snaps.api_check'
class: 'ApiCheck'
+
-
case_name: snaps_health_check
project_name: functest
@@ -63,6 +62,7 @@ tiers:
run:
module: 'functest.opnfv_tests.openstack.snaps.health_check'
class: 'HealthCheck'
+
-
name: smoke
order: 1
@@ -266,6 +266,7 @@ tiers:
testcases:
-
case_name: promise
+ enabled: false
project_name: promise
criteria: 100
blocking: false
@@ -283,6 +284,7 @@ tiers:
-
case_name: doctor-notification
+ enabled: false
project_name: doctor
criteria: 100
blocking: false
@@ -300,6 +302,7 @@ tiers:
-
case_name: bgpvpn
+ enabled: false
project_name: sdnvpn
criteria: 100
blocking: false
@@ -317,6 +320,7 @@ tiers:
-
case_name: security_scan
+ enabled: false
project_name: securityscanning
criteria: 100
blocking: false
@@ -334,6 +338,7 @@ tiers:
-
case_name: copper
+ enabled: false
project_name: copper
criteria: 100
blocking: false
@@ -351,6 +356,7 @@ tiers:
-
case_name: multisite
+ enabled: false
project_name: multisite
criteria: 100
blocking: false
@@ -363,8 +369,10 @@ tiers:
run:
module: 'functest.opnfv_tests.openstack.tempest.tempest'
class: 'TempestMultisite'
+
-
case_name: functest-odl-sfc
+ enabled: false
project_name: sfc
criteria: 100
blocking: false
@@ -379,8 +387,10 @@ tiers:
class: 'BashFeature'
args:
cmd: 'cd /home/opnfv/repos/sfc/sfc/tests/functest && python ./run_tests.py'
+
-
case_name: onos_sfc
+ enabled: false
project_name: functest
criteria: 100
blocking: true
@@ -393,8 +403,10 @@ tiers:
run:
module: 'functest.opnfv_tests.sdn.onos.onos'
class: 'OnosSfc'
+
-
case_name: parser-basics
+ enabled: false
project_name: parser
criteria: 100
blocking: false
@@ -409,8 +421,10 @@ tiers:
class: 'BashFeature'
args:
cmd: 'cd /home/opnfv/repos/parser/tests && ./functest_run.sh'
+
-
case_name: domino-multinode
+ enabled: false
project_name: domino
criteria: 100
blocking: false
@@ -425,8 +439,10 @@ tiers:
class: 'BashFeature'
args:
cmd: 'cd /home/opnfv/repos/domino && ./tests/run_multinode.sh'
+
-
case_name: gluon_vping
+ enabled: false
project_name: netready
criteria: 100
blocking: false
@@ -441,8 +457,10 @@ tiers:
class: 'BashFeature'
args:
cmd: 'cd /home/opnfv/repos/netready/test/functest && python ./gluon-test-suite.py'
+
-
case_name: barometercollectd
+ enabled: false
project_name: barometer
criteria: 100
blocking: false
@@ -458,6 +476,7 @@ tiers:
run:
module: 'functest.opnfv_tests.features.barometer'
class: 'BarometerCollectd'
+
-
name: components
order: 3
@@ -481,6 +500,7 @@ tiers:
run:
module: 'functest.opnfv_tests.openstack.tempest.tempest'
class: 'TempestFullParallel'
+
-
case_name: tempest_custom
project_name: functest
@@ -499,6 +519,7 @@ tiers:
run:
module: 'functest.opnfv_tests.openstack.tempest.tempest'
class: 'TempestCustom'
+
-
case_name: rally_full
project_name: functest
@@ -537,22 +558,26 @@ tiers:
run:
module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
class: 'CloudifyIms'
-# -
-# case_name: aaa
-# project_name: functest
-# criteria: 100
-# blocking: false
-# clean_flag: true
-# description: >-
-# Test suite from Parser project.
-# dependencies:
-# installer: ''
-# scenario: ''
-# run:
-# module: 'functest.opnfv_tests.vnf.aaa.aaa'
-# class: 'AaaVnf'
+
+ -
+ case_name: aaa
+ enabled: false
+ project_name: functest
+ criteria: 100
+ blocking: false
+ clean_flag: true
+ description: >-
+ Test suite from Parser project.
+ dependencies:
+ installer: ''
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.vnf.aaa.aaa'
+ class: 'AaaVnf'
+
-
case_name: orchestra_ims
+ enabled: false
project_name: functest
criteria: 100
blocking: false
@@ -568,6 +593,7 @@ tiers:
-
case_name: opera_vims
+ enabled: false
project_name: opera
criteria: 100
blocking: false
@@ -583,6 +609,7 @@ tiers:
-
case_name: vyos_vrouter
+ enabled: false
project_name: functest
criteria: 100
blocking: false
diff --git a/functest/ci/tier_builder.py b/functest/ci/tier_builder.py
index 44b272584..12562f09c 100644
--- a/functest/ci/tier_builder.py
+++ b/functest/ci/tier_builder.py
@@ -47,12 +47,15 @@ class TierBuilder(object):
dep = th.Dependency(installer, scenario)
testcase = th.TestCase(name=dic_testcase['case_name'],
+ enabled=dic_testcase.get(
+ 'enabled', True),
dependency=dep,
criteria=dic_testcase['criteria'],
blocking=dic_testcase['blocking'],
clean_flag=dic_testcase['clean_flag'],
description=dic_testcase['description'])
- if testcase.is_compatible(self.ci_installer, self.ci_scenario):
+ if (testcase.is_compatible(self.ci_installer, self.ci_scenario)
+ and testcase.is_enabled()):
tier.add_test(testcase)
self.tier_objects.append(tier)
@@ -72,6 +75,12 @@ class TierBuilder(object):
return self.tier_objects[i]
return None
+ def get_tier_name(self, test_name):
+ for i in range(0, len(self.tier_objects)):
+ if self.tier_objects[i].is_test(test_name):
+ return self.tier_objects[i].name
+ return None
+
def get_test(self, test_name):
for i in range(0, len(self.tier_objects)):
if self.tier_objects[i].is_test(test_name):
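
[Aside: a usage sketch of the new get_tier_name() helper, assuming a TierBuilder(installer, scenario, testcases_file) constructor; installer, scenario and test names are made up. Note that dic_testcase.get('enabled', True) keeps cases enabled unless the yaml opts out.]

```python
# Hypothetical names; constructor arguments are an assumption.
builder = tb.TierBuilder('fuel', 'os-nosdn-nofeature-ha',
                         '/home/opnfv/functest/ci/testcases.yaml')
print(builder.get_tier_name('connection_check'))  # e.g. 'healthcheck'
print(builder.get_tier_name('unknown_case'))      # None when not found
```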
diff --git a/functest/ci/tier_handler.py b/functest/ci/tier_handler.py
index fe7372a38..36ce245e7 100644
--- a/functest/ci/tier_handler.py
+++ b/functest/ci/tier_handler.py
@@ -105,12 +105,14 @@ class Tier(object):
class TestCase(object):
def __init__(self, name,
+ enabled,
dependency,
criteria,
blocking,
clean_flag,
description=""):
self.name = name
+ self.enabled = enabled
self.dependency = dependency
self.criteria = criteria
self.blocking = blocking
@@ -138,6 +140,9 @@ class TestCase(object):
def get_name(self):
return self.name
+ def is_enabled(self):
+ return self.enabled
+
def get_criteria(self):
return self.criteria
diff --git a/functest/core/feature.py b/functest/core/feature.py
index 8563c9257..140c9bb2e 100644
--- a/functest/core/feature.py
+++ b/functest/core/feature.py
@@ -83,7 +83,6 @@ class Feature(base.TestCase):
ft_utils.logger_test_results(
self.project_name, self.case_name,
self.result, self.details)
- self.__logger.info("%s %s", self.project_name, self.result)
except Exception: # pylint: disable=broad-except
self.__logger.exception("%s FAILED", self.project_name)
self.__logger.info("Test result is stored in '%s'", self.result_file)
diff --git a/functest/core/pytest_suite_runner.py b/functest/core/pytest_suite_runner.py
index 21edc1874..2b201ee08 100644
--- a/functest/core/pytest_suite_runner.py
+++ b/functest/core/pytest_suite_runner.py
@@ -5,12 +5,18 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
-import testcase as base
+# pylint: disable=missing-docstring
+
+import logging
import unittest
import time
+from functest.core import testcase
+
+logging.basicConfig()
+
-class PyTestSuiteRunner(base.TestCase):
+class PyTestSuiteRunner(testcase.TestCase):
"""
This superclass is designed to execute pre-configured unittest.TestSuite()
objects
@@ -18,7 +24,7 @@ class PyTestSuiteRunner(base.TestCase):
def __init__(self, **kwargs):
super(PyTestSuiteRunner, self).__init__(**kwargs)
self.suite = None
- self.logger = None
+ self.logger = logging.getLogger(__name__)
def run(self, **kwargs):
"""
@@ -45,13 +51,13 @@ class PyTestSuiteRunner(base.TestCase):
# we shall distinguish Execution Error from FAIL results
# TestCase.EX_RUN_ERROR means that the test case was not run
# not that it was run but the result was FAIL
- exit_code = base.TestCase.EX_OK
+ exit_code = testcase.TestCase.EX_OK
if ((result.errors and len(result.errors) > 0)
or (result.failures and len(result.failures) > 0)):
- self.logger.info("%s FAILED" % self.case_name)
+ self.logger.info("%s FAILED", self.case_name)
self.result = 'FAIL'
else:
- self.logger.info("%s OK" % self.case_name)
+ self.logger.info("%s OK", self.case_name)
self.result = 'PASS'
self.details = {}
diff --git a/functest/core/testcase.py b/functest/core/testcase.py
index 49fae6097..d8b63ef29 100644
--- a/functest/core/testcase.py
+++ b/functest/core/testcase.py
@@ -12,6 +12,8 @@
import logging
import os
+import prettytable
+
import functest.utils.functest_utils as ft_utils
__author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
@@ -49,14 +51,16 @@ class TestCase(object):
assert self.case_name
result = 'PASS' if(self.is_successful(
) == TestCase.EX_OK) else 'FAIL'
- return ('| {0:<23} | {1:<13} | {2:<10} | {3:<13} |'
- '\n{4:-<26}{4:-<16}{4:-<13}{4:-<16}{4}'.format(
- self.case_name, self.project_name,
- self.get_duration(), result, '+'))
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'duration',
+ 'result'])
+ msg.add_row([self.case_name, self.project_name,
+ self.get_duration(), result])
+ return msg.get_string()
except AssertionError:
self.__logger.error("We cannot print invalid objects")
- return '| {0:^68} |\n{1:-<26}{1:-<16}{1:-<13}{1:-<16}{1}'.format(
- 'INVALID OBJECT', '+')
+ return super(TestCase, self).__str__()
def get_duration(self):
"""Return the duration of the test case.
diff --git a/functest/tests/unit/ci/test_generate_report.py b/functest/tests/unit/ci/test_generate_report.py
deleted file mode 100644
index 2c5ce2ea8..000000000
--- a/functest/tests/unit/ci/test_generate_report.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-import unittest
-import urllib2
-
-import mock
-
-from functest.ci import generate_report as gen_report
-from functest.tests.unit import test_utils
-from functest.utils import functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-
-class GenerateReportTesting(unittest.TestCase):
-
- logging.disable(logging.CRITICAL)
-
- def test_init(self):
- test_array = gen_report.init()
- self.assertEqual(test_array, [])
-
- @mock.patch('functest.ci.generate_report.urllib2.urlopen',
- side_effect=urllib2.URLError('no host given'))
- def test_get_results_from_db_fail(self, mock_method):
- url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
- CONST.__getattribute__('BUILD_TAG'))
- self.assertIsNone(gen_report.get_results_from_db())
- mock_method.assert_called_once_with(url)
-
- @mock.patch('functest.ci.generate_report.urllib2.urlopen',
- return_value={'results': []})
- def test_get_results_from_db_success(self, mock_method):
- url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
- CONST.__getattribute__('BUILD_TAG'))
- self.assertEqual(gen_report.get_results_from_db(), None)
- mock_method.assert_called_once_with(url)
-
- def test_get_data(self):
- self.assertIsInstance(gen_report.get_data({'result': ''}, ''), dict)
-
- def test_print_line_with_ci_run(self):
- CONST.__setattr__('IS_CI_RUN', True)
- w1 = 'test_print_line'
- test_str = ("| %s| %s| %s| %s| %s|\n"
- % (w1.ljust(gen_report.COL_1_LEN - 1),
- ''.ljust(gen_report.COL_2_LEN - 1),
- ''.ljust(gen_report.COL_3_LEN - 1),
- ''.ljust(gen_report.COL_4_LEN - 1),
- ''.ljust(gen_report.COL_5_LEN - 1)))
- self.assertEqual(gen_report.print_line(w1), test_str)
-
- def test_print_line_without_ci_run(self):
- CONST.__setattr__('IS_CI_RUN', False)
- w1 = 'test_print_line'
- test_str = ("| %s| %s| %s| %s|\n"
- % (w1.ljust(gen_report.COL_1_LEN - 1),
- ''.ljust(gen_report.COL_2_LEN - 1),
- ''.ljust(gen_report.COL_3_LEN - 1),
- ''.ljust(gen_report.COL_4_LEN - 1)))
- self.assertEqual(gen_report.print_line(w1), test_str)
-
- def test_print_line_no_column_with_ci_run(self):
- CONST.__setattr__('IS_CI_RUN', True)
- TOTAL_LEN = gen_report.COL_1_LEN + gen_report.COL_2_LEN
- TOTAL_LEN += gen_report.COL_3_LEN + gen_report.COL_4_LEN + 2
- TOTAL_LEN += gen_report.COL_5_LEN + 1
- test_str = ("| %s|\n" % 'test'.ljust(TOTAL_LEN))
- self.assertEqual(gen_report.print_line_no_columns('test'), test_str)
-
- def test_print_line_no_column_without_ci_run(self):
- CONST.__setattr__('IS_CI_RUN', False)
- TOTAL_LEN = gen_report.COL_1_LEN + gen_report.COL_2_LEN
- TOTAL_LEN += gen_report.COL_3_LEN + gen_report.COL_4_LEN + 2
- test_str = ("| %s|\n" % 'test'.ljust(TOTAL_LEN))
- self.assertEqual(gen_report.print_line_no_columns('test'), test_str)
-
- def test_print_separator_with_ci_run(self):
- CONST.__setattr__('IS_CI_RUN', True)
- test_str = ("+" + "=" * gen_report.COL_1_LEN +
- "+" + "=" * gen_report.COL_2_LEN +
- "+" + "=" * gen_report.COL_3_LEN +
- "+" + "=" * gen_report.COL_4_LEN +
- "+" + "=" * gen_report.COL_5_LEN)
- test_str += '+\n'
- self.assertEqual(gen_report.print_separator(), test_str)
-
- def test_print_separator_without_ci_run(self):
- CONST.__setattr__('IS_CI_RUN', False)
- test_str = ("+" + "=" * gen_report.COL_1_LEN +
- "+" + "=" * gen_report.COL_2_LEN +
- "+" + "=" * gen_report.COL_3_LEN +
- "+" + "=" * gen_report.COL_4_LEN)
- test_str += "+\n"
- self.assertEqual(gen_report.print_separator(), test_str)
-
- @mock.patch('functest.ci.generate_report.logger.info')
- def test_main_with_ci_run(self, mock_method):
- CONST.__setattr__('IS_CI_RUN', True)
- gen_report.main()
- mock_method.assert_called_once_with(test_utils.SubstrMatch('URL'))
-
- @mock.patch('functest.ci.generate_report.logger.info')
- def test_main_with_ci_loop(self, mock_method):
- CONST.__setattr__('CI_LOOP', 'daily')
- gen_report.main()
- mock_method.assert_called_once_with(test_utils.SubstrMatch('CI LOOP'))
-
- @mock.patch('functest.ci.generate_report.logger.info')
- def test_main_with_scenario(self, mock_method):
- CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
- gen_report.main()
- mock_method.assert_called_once_with(test_utils.SubstrMatch('SCENARIO'))
-
- @mock.patch('functest.ci.generate_report.logger.info')
- def test_main_with_build_tag(self, mock_method):
- CONST.__setattr__('BUILD_TAG', 'test_build_tag')
- gen_report.main()
- mock_method.assert_called_once_with(test_utils.
- SubstrMatch('BUILD TAG'))
-
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index ef08282aa..d0052392f 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -70,17 +70,6 @@ class RunTestsTesting(unittest.TestCase):
run_tests.cleanup()
self.assertTrue(mock_os_clean.called)
- def test_update_test_info(self):
- run_tests.GlobalVariables.EXECUTED_TEST_CASES = [self.test]
- run_tests.update_test_info('test_name',
- 'test_result',
- 'test_duration')
- exp = self.test
- exp.update({"result": 'test_result',
- "duration": 'test_duration'})
- self.assertEqual(run_tests.GlobalVariables.EXECUTED_TEST_CASES,
- [exp])
-
def test_get_run_dict_if_defined_default(self):
mock_obj = mock.Mock()
with mock.patch('functest.ci.run_tests.'
@@ -148,10 +137,8 @@ class RunTestsTesting(unittest.TestCase):
mock.patch('functest.ci.run_tests.source_rc_file'), \
mock.patch('functest.ci.run_tests.generate_os_snapshot'), \
mock.patch('functest.ci.run_tests.cleanup'), \
- mock.patch('functest.ci.run_tests.update_test_info'), \
mock.patch('functest.ci.run_tests.get_run_dict',
return_value=test_run_dict), \
- mock.patch('functest.ci.run_tests.generate_report.main'), \
self.assertRaises(run_tests.BlockingTestFailed) as context:
run_tests.GlobalVariables.CLEAN_FLAG = True
run_tests.run_test(mock_test, 'tier_name')
@@ -176,21 +163,17 @@ class RunTestsTesting(unittest.TestCase):
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.run_tier') as mock_method, \
- mock.patch('functest.ci.run_tests.generate_report.init'), \
- mock.patch('functest.ci.run_tests.generate_report.main'):
+ with mock.patch('functest.ci.run_tests.run_tier') as mock_method:
CONST.__setattr__('CI_LOOP', 'test_ci_loop')
run_tests.run_all(self.tiers)
mock_method.assert_any_call(self.tier)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
- def test_run_all__missing_tier(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.generate_report.init'), \
- mock.patch('functest.ci.run_tests.generate_report.main'):
- CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
- run_tests.run_all(self.tiers)
- self.assertTrue(mock_logger_info.called)
+ def test_run_all_missing_tier(self, mock_logger_info):
+ CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
+ run_tests.run_all(self.tiers)
+ self.assertTrue(mock_logger_info.called)
def test_main_failed(self):
kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
@@ -221,7 +204,6 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_report.init'), \
mock.patch('functest.ci.run_tests.run_tier') as m:
self.assertEqual(run_tests.main(**kwargs),
run_tests.Result.EX_OK)
@@ -234,7 +216,6 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_report.init'), \
mock.patch('functest.ci.run_tests.run_test') as m:
self.assertEqual(run_tests.main(**kwargs),
run_tests.Result.EX_OK)
@@ -248,7 +229,6 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_report.init'), \
mock.patch('functest.ci.run_tests.run_all') as m:
self.assertEqual(run_tests.main(**kwargs),
run_tests.Result.EX_OK)
@@ -262,7 +242,6 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_report.init'), \
mock.patch('functest.ci.run_tests.logger.debug') as m:
self.assertEqual(run_tests.main(**kwargs),
run_tests.Result.EX_ERROR)
diff --git a/functest/tests/unit/ci/test_tier_builder.py b/functest/tests/unit/ci/test_tier_builder.py
index 438fa7c25..feaf33a81 100644
--- a/functest/tests/unit/ci/test_tier_builder.py
+++ b/functest/tests/unit/ci/test_tier_builder.py
@@ -22,6 +22,7 @@ class TierBuilderTesting(unittest.TestCase):
'scenario': 'test_scenario'}
self.testcase = {'dependencies': self.dependency,
+ 'enabled': 'true',
'case_name': 'test_name',
'criteria': 'test_criteria',
'blocking': 'test_blocking',
@@ -78,6 +79,13 @@ class TierBuilderTesting(unittest.TestCase):
self.assertEqual(self.tierbuilder.get_tests('test_tier2'),
None)
+ def test_get_tier_name_ok(self):
+ self.assertEqual(self.tierbuilder.get_tier_name('test_name'),
+ 'test_tier')
+
+ def test_get_tier_name_ko(self):
+ self.assertEqual(self.tierbuilder.get_tier_name('test_name2'), None)
+
if __name__ == "__main__":
unittest.main(verbosity=2)
diff --git a/functest/tests/unit/ci/test_tier_handler.py b/functest/tests/unit/ci/test_tier_handler.py
index 21df4098b..280062743 100644
--- a/functest/tests/unit/ci/test_tier_handler.py
+++ b/functest/tests/unit/ci/test_tier_handler.py
@@ -32,6 +32,7 @@ class TierHandlerTesting(unittest.TestCase):
'test_ci_loop',
description='test_desc')
self.testcase = tier_handler.TestCase('test_name',
+ 'true',
self.mock_depend,
'test_criteria',
'test_blocking',
@@ -116,6 +117,10 @@ class TierHandlerTesting(unittest.TestCase):
self.assertEqual(self.tier.get_name(),
'test_tier')
+ def test_testcase_is_enabled(self):
+ self.assertEqual(self.testcase.is_enabled(),
+ 'true')
+
def test_testcase_get_criteria(self):
self.assertEqual(self.tier.get_order(),
'test_order')
diff --git a/functest/tests/unit/core/test_pytest_suite_runner.py b/functest/tests/unit/core/test_pytest_suite_runner.py
new file mode 100644
index 000000000..15e5bd73b
--- /dev/null
+++ b/functest/tests/unit/core/test_pytest_suite_runner.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# pylint: disable=missing-docstring
+
+import logging
+import unittest
+
+import mock
+
+from functest.core import pytest_suite_runner
+from functest.core import testcase
+
+
+class PyTestSuiteRunnerTesting(unittest.TestCase):
+
+ logging.disable(logging.CRITICAL)
+
+ def setUp(self):
+ self.psrunner = pytest_suite_runner.PyTestSuiteRunner()
+ self.result = mock.Mock()
+ attrs = {'errors': [('test1', 'error_msg1')],
+ 'failures': [('test2', 'failure_msg1')]}
+ self.result.configure_mock(**attrs)
+
+ self.pass_results = mock.Mock()
+ attrs = {'errors': None,
+ 'failures': None}
+ self.pass_results.configure_mock(**attrs)
+
+ def test_run(self):
+ self.psrunner.case_name = 'test_case_name'
+ with mock.patch('functest.core.pytest_suite_runner.'
+ 'unittest.TextTestRunner.run',
+ return_value=self.result):
+ self.assertEqual(self.psrunner.run(),
+ testcase.TestCase.EX_OK)
+
+ with mock.patch('functest.core.pytest_suite_runner.'
+ 'unittest.TextTestRunner.run',
+ return_value=self.pass_results):
+ self.assertEqual(self.psrunner.run(),
+ testcase.TestCase.EX_OK)
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/functest/tests/unit/core/test_testcase.py b/functest/tests/unit/core/test_testcase.py
index d017e4124..2adf4a6dc 100644
--- a/functest/tests/unit/core/test_testcase.py
+++ b/functest/tests/unit/core/test_testcase.py
@@ -20,7 +20,6 @@ __author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
class TestCaseTesting(unittest.TestCase):
-
"""The class testing TestCase."""
# pylint: disable=missing-docstring,too-many-public-methods
@@ -191,11 +190,13 @@ class TestCaseTesting(unittest.TestCase):
def test_str_project_name_ko(self):
self.test.project_name = None
- self.assertIn("INVALID OBJECT", str(self.test))
+ self.assertIn("<functest.core.testcase.TestCase object at",
+ str(self.test))
def test_str_case_name_ko(self):
self.test.case_name = None
- self.assertIn("INVALID OBJECT", str(self.test))
+ self.assertIn("<functest.core.testcase.TestCase object at",
+ str(self.test))
def test_str_pass(self):
duration = '01:01'
diff --git a/functest/tests/unit/core/test_vnf.py b/functest/tests/unit/core/test_vnf.py
index f348c0dbf..793e95768 100644
--- a/functest/tests/unit/core/test_vnf.py
+++ b/functest/tests/unit/core/test_vnf.py
@@ -7,10 +7,16 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
+# pylint: disable=missing-docstring
+
import logging
+import os
import unittest
+import mock
+
from functest.core import vnf
+from functest.core import testcase
class VnfBaseTesting(unittest.TestCase):
@@ -18,21 +24,126 @@ class VnfBaseTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
- self.test = vnf.VnfOnBoarding(project='functest',
- case_name='aaa')
+ self.test = vnf.VnfOnBoarding(
+ project='functest', case_name='aaa')
self.test.project = "functest"
self.test.start_time = "1"
self.test.stop_time = "5"
self.test.result = ""
- self.test.details = {"orchestrator": {"status": "PASS",
- "result": "",
- "duration": 20},
- "vnf": {"status": "PASS",
- "result": "",
- "duration": 15},
- "test_vnf": {"status": "FAIL",
- "result": "",
- "duration": 5}}
+ self.test.details = {
+ "orchestrator": {"status": "PASS", "result": "", "duration": 20},
+ "vnf": {"status": "PASS", "result": "", "duration": 15},
+ "test_vnf": {"status": "FAIL", "result": "", "duration": 5}}
+ self.test.keystone_client = 'test_client'
+ self.test.tenant_name = 'test_tenant_name'
+
+ def test_execute_deploy_vnf_fail(self):
+ with mock.patch.object(self.test, 'prepare'),\
+ mock.patch.object(self.test, 'deploy_orchestrator',
+ return_value=None), \
+ mock.patch.object(self.test, 'deploy_vnf',
+ side_effect=Exception):
+ self.assertEqual(self.test.execute(),
+ testcase.TestCase.EX_TESTCASE_FAILED)
+
+ def test_execute_test_vnf_fail(self):
+ with mock.patch.object(self.test, 'prepare'),\
+ mock.patch.object(self.test, 'deploy_orchestrator',
+ return_value=None), \
+ mock.patch.object(self.test, 'deploy_vnf'), \
+ mock.patch.object(self.test, 'test_vnf',
+ side_effect=Exception):
+ self.assertEqual(self.test.execute(),
+ testcase.TestCase.EX_TESTCASE_FAILED)
+
+ @mock.patch('functest.core.vnf.os_utils.get_tenant_id',
+ return_value='test_tenant_id')
+ @mock.patch('functest.core.vnf.os_utils.delete_tenant',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_user_id',
+ return_value='test_user_id')
+ @mock.patch('functest.core.vnf.os_utils.delete_user',
+ return_value=True)
+ def test_execute_default(self, *args):
+ with mock.patch.object(self.test, 'prepare'),\
+ mock.patch.object(self.test, 'deploy_orchestrator',
+ return_value=None), \
+ mock.patch.object(self.test, 'deploy_vnf'), \
+ mock.patch.object(self.test, 'test_vnf'), \
+ mock.patch.object(self.test, 'parse_results',
+ return_value='ret_exit_code'), \
+ mock.patch.object(self.test, 'log_results'):
+ self.assertEqual(self.test.execute(),
+ 'ret_exit_code')
+
+ @mock.patch('functest.core.vnf.os_utils.get_credentials')
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client')
+ @mock.patch('functest.core.vnf.os_utils.get_user_id', return_value='')
+ def test_prepare_missing_userid(self, *args):
+ with self.assertRaises(Exception):
+ self.test.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_credentials')
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client')
+ @mock.patch('functest.core.vnf.os_utils.get_user_id',
+ return_value='test_roleid')
+ @mock.patch('functest.core.vnf.os_utils.create_tenant',
+ return_value='')
+ def test_prepare_missing_tenantid(self, *args):
+ with self.assertRaises(Exception):
+ self.test.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_credentials')
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client')
+ @mock.patch('functest.core.vnf.os_utils.get_user_id',
+ return_value='test_roleid')
+ @mock.patch('functest.core.vnf.os_utils.create_tenant',
+ return_value='test_tenantid')
+ @mock.patch('functest.core.vnf.os_utils.get_role_id',
+ return_value='')
+ def test_prepare_missing_roleid(self, *args):
+ with self.assertRaises(Exception):
+ self.test.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_credentials')
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client')
+ @mock.patch('functest.core.vnf.os_utils.get_user_id',
+ return_value='test_roleid')
+ @mock.patch('functest.core.vnf.os_utils.create_tenant',
+ return_value='test_tenantid')
+ @mock.patch('functest.core.vnf.os_utils.get_role_id',
+ return_value='test_roleid')
+ @mock.patch('functest.core.vnf.os_utils.add_role_user',
+ return_value='')
+ def test_prepare_role_add_failure(self, *args):
+ with self.assertRaises(Exception):
+ self.test.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_credentials')
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client')
+ @mock.patch('functest.core.vnf.os_utils.get_user_id',
+ return_value='test_roleid')
+ @mock.patch('functest.core.vnf.os_utils.create_tenant',
+ return_value='test_tenantid')
+ @mock.patch('functest.core.vnf.os_utils.get_role_id',
+ return_value='test_roleid')
+ @mock.patch('functest.core.vnf.os_utils.add_role_user')
+ @mock.patch('functest.core.vnf.os_utils.create_user',
+ return_value='')
+ def test_create_user_failure(self, *args):
+ with self.assertRaises(Exception):
+ self.test.prepare()
+
+ def test_log_results_default(self):
+ with mock.patch('functest.core.vnf.'
+ 'ft_utils.logger_test_results') \
+ as mock_method:
+ self.test.log_results()
+ self.assertTrue(mock_method.called)
+
+ def test_step_failures_default(self):
+ with self.assertRaises(Exception):
+ self.test.step_failure("error_msg")
def test_deploy_vnf_unimplemented(self):
with self.assertRaises(Exception) as context:
@@ -44,8 +155,13 @@ class VnfBaseTesting(unittest.TestCase):
self.test.test_vnf()()
self.assertTrue('VNF not tested' in context.exception)
- def test_parse_results(self):
- self.assertNotEqual(self.test.parse_results(), 0)
+ def test_parse_results_ex_ok(self):
+ self.test.details['test_vnf']['status'] = 'PASS'
+ self.assertEqual(self.test.parse_results(), os.EX_OK)
+
+ def test_parse_results_ex_run_error(self):
+ self.test.details['vnf']['status'] = 'FAIL'
+ self.assertEqual(self.test.parse_results(), os.EX_SOFTWARE)
if __name__ == "__main__":
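
[Aside: the two new tests pin down the parse_results() contract: os.EX_OK only when every step's status is PASS, os.EX_SOFTWARE once any step reports FAIL. A sketch of that contract, not the verbatim implementation.]

```python
# Sketch of the contract checked by the two tests above.
import os

def parse_results(details):
    statuses = (step['status'] for step in details.values())
    return os.EX_OK if all(s == 'PASS' for s in statuses) else os.EX_SOFTWARE
```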
diff --git a/functest/tests/unit/utils/test_decorators.py b/functest/tests/unit/utils/test_decorators.py
new file mode 100644
index 000000000..f8bd9a54f
--- /dev/null
+++ b/functest/tests/unit/utils/test_decorators.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Define the class required to fully cover decorators."""
+
+from datetime import datetime
+import errno
+import json
+import logging
+import os
+import unittest
+
+import mock
+
+from functest.utils import decorators
+from functest.utils import functest_utils
+
+__author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
+
+VERSION = 'master'
+DIR = '/dev'
+FILE = '{}/null'.format(DIR)
+URL = 'file://{}'.format(FILE)
+
+
+class DecoratorsTesting(unittest.TestCase):
+ # pylint: disable=missing-docstring
+
+ logging.disable(logging.CRITICAL)
+
+ _case_name = 'base'
+ _project_name = 'functest'
+ _start_time = 1.0
+ _stop_time = 2.0
+ _result = 'PASS'
+ _build_tag = VERSION
+ _node_name = 'bar'
+ _deploy_scenario = 'foo'
+ _installer_type = 'debian'
+
+ def setUp(self):
+ os.environ['INSTALLER_TYPE'] = self._installer_type
+ os.environ['DEPLOY_SCENARIO'] = self._deploy_scenario
+ os.environ['NODE_NAME'] = self._node_name
+ os.environ['BUILD_TAG'] = self._build_tag
+
+ def test_wraps(self):
+ self.assertEqual(functest_utils.push_results_to_db.__name__,
+ "push_results_to_db")
+
+ def _get_json(self):
+ stop_time = datetime.fromtimestamp(self._stop_time).strftime(
+ '%Y-%m-%d %H:%M:%S')
+ start_time = datetime.fromtimestamp(self._start_time).strftime(
+ '%Y-%m-%d %H:%M:%S')
+ data = {'project_name': self._project_name,
+ 'stop_date': stop_time, 'start_date': start_time,
+ 'case_name': self._case_name, 'build_tag': self._build_tag,
+ 'pod_name': self._node_name, 'installer': self._installer_type,
+ 'scenario': self._deploy_scenario, 'version': VERSION,
+ 'details': {}, 'criteria': self._result}
+ return json.dumps(data)
+
+ @mock.patch('{}.get_db_url'.format(functest_utils.__name__),
+ return_value='http://127.0.0.1')
+ @mock.patch('{}.get_version'.format(functest_utils.__name__),
+ return_value=VERSION)
+ @mock.patch('requests.post')
+ def test_http_shema(self, *args):
+ self.assertTrue(functest_utils.push_results_to_db(
+ self._project_name, self._case_name, self._start_time,
+ self._stop_time, self._result, {}))
+ args[1].assert_called_once_with()
+ args[2].assert_called_once_with()
+ args[0].assert_called_once_with(
+ 'http://127.0.0.1', data=self._get_json(),
+ headers={'Content-Type': 'application/json'})
+
+ @mock.patch('{}.get_db_url'.format(functest_utils.__name__),
+ return_value="/dev/null")
+ def test_wrong_shema(self, mock_method=None):
+ self.assertFalse(functest_utils.push_results_to_db(
+ self._project_name, self._case_name, self._start_time,
+ self._stop_time, self._result, {}))
+ mock_method.assert_called_once_with()
+
+ @mock.patch('{}.get_version'.format(functest_utils.__name__),
+ return_value=VERSION)
+ @mock.patch('{}.get_db_url'.format(functest_utils.__name__),
+ return_value=URL)
+ def _test_dump(self, *args):
+ with mock.patch.object(decorators, 'open', mock.mock_open(),
+ create=True) as mock_open:
+ self.assertTrue(functest_utils.push_results_to_db(
+ self._project_name, self._case_name, self._start_time,
+ self._stop_time, self._result, {}))
+ mock_open.assert_called_once_with(FILE, 'a')
+ handle = mock_open()
+ call_args, _ = handle.write.call_args
+ self.assertIn('POST', call_args[0])
+ self.assertIn(self._get_json(), call_args[0])
+ args[0].assert_called_once_with()
+ args[1].assert_called_once_with()
+
+ @mock.patch('os.makedirs')
+ def test_default_dump(self, mock_method=None):
+ self._test_dump()
+ mock_method.assert_called_once_with(DIR)
+
+ @mock.patch('os.makedirs', side_effect=OSError(errno.EEXIST, ''))
+ def test_makedirs_dir_exists(self, mock_method=None):
+ self._test_dump()
+ mock_method.assert_called_once_with(DIR)
+
+ @mock.patch('{}.get_db_url'.format(functest_utils.__name__),
+ return_value=URL)
+ @mock.patch('os.makedirs', side_effect=OSError)
+ def test_makedirs_exc(self, *args):
+ self.assertFalse(
+ functest_utils.push_results_to_db(
+ self._project_name, self._case_name, self._start_time,
+ self._stop_time, self._result, {}))
+ args[0].assert_called_once_with(DIR)
+ args[1].assert_called_once_with()
+
+
+if __name__ == "__main__":
+ logging.basicConfig()
+ unittest.main(verbosity=2)
diff --git a/functest/utils/decorators.py b/functest/utils/decorators.py
index 46ffe35d1..bfbdf048d 100644
--- a/functest/utils/decorators.py
+++ b/functest/utils/decorators.py
@@ -3,6 +3,7 @@
# pylint: disable=missing-docstring
import errno
+import functools
import os
import urlparse
@@ -41,6 +42,7 @@ def can_dump_request_to_file(method):
else:
return session.request(method=method, url=url, **kwargs)
+ @functools.wraps(method)
def hook(*args, **kwargs):
with mock.patch('requests.api.request', side_effect=patch_request):
return method(*args, **kwargs)
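
[Aside: the added @functools.wraps(method) is what the new test_wraps case exercises: without it, the decorated push_results_to_db would report its __name__ as 'hook'. A standalone illustration:]

```python
# Why @functools.wraps matters for the decorated function's metadata.
import functools

def can_dump_request_to_file(method):
    @functools.wraps(method)
    def hook(*args, **kwargs):
        return method(*args, **kwargs)
    return hook

@can_dump_request_to_file
def push_results_to_db():
    pass

print(push_results_to_db.__name__)  # 'push_results_to_db', not 'hook'
```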
diff --git a/requirements.txt b/requirements.txt
index e709220a5..976deefa2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,7 +19,7 @@ pexpect==4.0
requests>=2.8.0
robotframework==2.9.1
robotframework-requests==0.3.8
-robotframework-sshlibrary==2.1.1
+robotframework-sshlibrary==2.1.3
configObj==5.0.6
Flask==0.10.1
xmltodict==0.9.2
@@ -33,3 +33,4 @@ click==6.6
openbaton-cli==2.2.1-beta7
mock==1.3.0
iniparse==0.4
+PrettyTable>=0.7.1,<0.8 # BSD
diff --git a/test-requirements.txt b/test-requirements.txt
index 471e9c30a..76e475dcc 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -23,6 +23,7 @@ pyyaml==3.10
requests==2.8.0
robotframework==2.9.1
robotframework-requests==0.3.8
-robotframework-sshlibrary==2.1.1
+robotframework-sshlibrary==2.1.3
subprocess32==3.2.7
virtualenv==15.1.0
+PrettyTable>=0.7.1,<0.8 # BSD