-rw-r--r--  docker/Dockerfile | 8
-rw-r--r--  docs/userguide/index.rst | 92
-rw-r--r--  docs/userguide/introduction.rst | 58
-rw-r--r--  docs/userguide/runfunctest.rst | 94
-rw-r--r--  docs/userguide/troubleshooting.rst | 7
-rwxr-xr-x  functest/ci/config_functest.yaml | 5
-rwxr-xr-x  functest/ci/exec_test.sh | 7
-rw-r--r--  functest/ci/logging.json | 29
-rwxr-xr-x  functest/ci/prepare_env.py | 5
-rwxr-xr-x  functest/ci/run_tests.py | 4
-rwxr-xr-x  functest/ci/testcases.yaml | 11
-rw-r--r-- [-rwxr-xr-x]  functest/core/pytest_suite_runner.py | 8
-rw-r--r--  functest/core/vnf_base.py | 12
-rwxr-xr-x  functest/opnfv_tests/features/copper.py | 2
-rwxr-xr-x  functest/opnfv_tests/features/doctor.py | 81
-rw-r--r--  functest/opnfv_tests/openstack/rally/__init__.py | 0
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 554
-rwxr-xr-x  functest/opnfv_tests/openstack/rally/run_rally-cert.py | 613
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py | 76
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt | 20
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 81
-rw-r--r-- [-rwxr-xr-x]  functest/opnfv_tests/openstack/vping/vping_base.py | 0
-rw-r--r--  functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py | 8
-rwxr-xr-x  functest/opnfv_tests/sdn/onos/teston/onos.py | 6
-rw-r--r--  functest/opnfv_tests/vnf/aaa/aaa.py | 3
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.py | 3
-rw-r--r--  functest/opnfv_tests/vnf/ims/opera_ims.py | 3
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_ims.py | 3
-rw-r--r--  functest/tests/unit/cli/commands/test_cli_env.py | 1
-rwxr-xr-x [-rw-r--r--]  functest/tests/unit/core/test_testcase_base.py | 2
-rw-r--r--  functest/tests/unit/odl/test_odl.py | 1
-rw-r--r--  functest/tests/unit/test_logging.ini | 27
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py | 1
-rw-r--r--  functest/utils/functest_constants.py | 4
-rwxr-xr-x [-rw-r--r--]  functest/utils/functest_logger.py | 60
-rw-r--r-- [-rwxr-xr-x]  functest/utils/openstack_tacker.py | 0
-rw-r--r-- [-rwxr-xr-x]  requirements.txt | 1
-rwxr-xr-x  run_unit_tests.sh | 1
-rw-r--r-- [-rwxr-xr-x]  test-requirements.txt | 0
39 files changed, 933 insertions, 958 deletions
diff --git a/docker/Dockerfile b/docker/Dockerfile
index dce657e8e..dda4ea6e6 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -30,7 +30,7 @@ LABEL version="0.1" description="OPNFV Functest Docker container"
# Environment variables
ARG BRANCH=master
-ARG TEMPEST_TAG=12.2.0
+ARG TEMPEST_TAG=14.0.0
ARG ODL_TAG=release/beryllium-sr4
ARG OPENSTACK_TAG=stable/mitaka
ARG KINGBIRD_TAG=0.2.2
@@ -115,6 +115,9 @@ RUN git clone --depth 1 -b $ODL_TAG https://git.opendaylight.org/gerrit/p/integr
RUN git clone --depth 1 -b $VIMS_TAG https://github.com/boucherv-orange/clearwater-live-test ${REPOS_VNFS_DIR}/vims-test
RUN git clone --depth 1 https://github.com/wuwenbin2/OnosSystemTest.git ${REPOS_DIR}/onos
+RUN pip install -r ${REPOS_DIR}/rally/requirements.txt
+RUN pip install -r ${REPOS_DIR}/tempest/requirements.txt
+
RUN cd ${FUNCTEST_REPO_DIR} \
&& pip install -r requirements.txt \
&& pip install .
@@ -122,9 +125,6 @@ RUN cd ${FUNCTEST_REPO_DIR} \
RUN cd ${RELENG_MODULE_DIR} \
&& pip install .
-RUN pip install -r ${REPOS_DIR}/rally/requirements.txt
-RUN pip install -r ${REPOS_DIR}/tempest/requirements.txt
-
RUN find ${FUNCTEST_REPO_DIR} -name "*.py" \
-not -path "*tests/unit*" |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 \
&& find ${FUNCTEST_REPO_DIR} -name "*.sh" |xargs grep \#\! |cut -d\: -f 1 |xargs chmod -c 755
diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst
index d2467df71..bc02776aa 100644
--- a/docs/userguide/index.rst
+++ b/docs/userguide/index.rst
@@ -8,18 +8,28 @@ OPNFV FUNCTEST user guide
.. toctree::
:maxdepth: 2
+Version history
+===============
++------------+----------+------------------+----------------------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++------------+----------+------------------+----------------------------------+
+| 2016-08-17 | 1.0.0 | Juha Haapavirta | Colorado release |
+| | | Column Gaynor | |
++------------+----------+------------------+----------------------------------+
+| 2017-01-23 | 1.0.1 | Morgan Richomme | Adaptations for Danube |
+| | | | |
+| | | | |
++------------+----------+------------------+----------------------------------+
+
Introduction
============
The goal of this document is to describe the OPNFV Functest test cases and to
-provide a procedure to execute them. In the OPNFV Colorado system release,
+provide a procedure to execute them. In the OPNFV Danube system release,
a Functest CLI utility is introduced for easier execution of test procedures.
-An overview presentation has been created for the first OPNFV Summit `[4]`_.
-
-This document is a continuation of the OPNFV Functest Configuration Guide `[1]`_.
-
**IMPORTANT**: It is assumed here that the Functest Docker container is already
properly deployed and that all instructions described in this guide are to be
performed from *inside* the deployed Functest Docker container.
@@ -34,7 +44,7 @@ VIM (Virtualized Infrastructure Manager)
Healthcheck
^^^^^^^^^^^
In Colorado release a new Tier 'healthcheck' with one testcase 'healthcheck'
-is introduced. The healthcheck testcase verifies that some basic IP connectivity
+was introduced. The healthcheck testcase verifies that some basic IP connectivity
and essential operations of OpenStack functionality over the command line are
working correctly.
@@ -174,7 +184,7 @@ The Tempest testcases are distributed accross two
Tiers:
* Smoke Tier - Test Case 'tempest_smoke_serial'
- * Openstack Tier - Test case 'tempest_full_parallel'
+ * Components Tier - Test case 'tempest_full_parallel'
NOTE: Test case 'tempest_smoke_serial' executes a defined set of tempest smoke
tests with a single thread (i.e. serial mode). Test case 'tempest_full_parallel'
@@ -214,11 +224,44 @@ A basic SLA (stop test on errors) has been implemented.
The Rally testcases are distributed accross two Tiers:
* Smoke Tier - Test Case 'rally_sanity'
- * Openstack Tier - Test case 'rally_full'
+ * Components Tier - Test case 'rally_full'
NOTE: Test case 'rally_sanity' executes a limited number of Rally smoke test
cases. Test case 'rally_full' executes the full defined set of Rally tests.
+SNAPS
+-----
+
+SNAPS stands for "SDN/NFV Application development Platform and Stack".
+This project seeks to develop baseline OpenStack NFV installations. It has been
+developed by Steven Pisarski and provides an object-oriented library to perform
+functional and performance tests. Several Functest test suites are derived from
+it.
+
+connection check
+^^^^^^^^^^^^^^^^
+Connection_check consists of 9 test cases (test duration < 5s) checking the
+connectivity with Glance, Keystone, Neutron, Nova and the external network.
+
+api_check
+^^^^^^^^^
+This test case verifies the retrieval of OpenStack clients: Keystone, Glance,
+Neutron and Nova and may perform some simple queries. When the config value of
+snaps.use_keystone is True, functest must have access to the cloud's private
+network.
+This suite consists of 49 tests (test duration < 2 minutes).
+
+Snaps_smoke
+^^^^^^^^^^^
+This test case contains tests that set up and tear down environments with VMs,
+with and without floating IPs, using a newly created user and project. Set the
+config value snaps.use_floating_ips (True|False) to toggle this functionality.
+When the config value of snaps.use_keystone is True, functest must have access
+to the cloud's private network.
+This suite consists of 38 tests (test duration < 10 minutes).
+
+More information on SNAPS can be found in `[13]`_
+
SDN Controllers
---------------
@@ -323,21 +366,19 @@ The test cases are described as follows:
Features
--------
-Most of the features have been developped by feature projects.
-Security_scan has been initiated in Functest repository but should soon
-be declared in its own repository as well.
-
Please refer to the dedicated feature user guides for details:
- * bgpvpn: http://artifacts.opnfv.org/sdnvpn/colorado/docs/userguide/index.html
- * copper: http://artifacts.opnfv.org/copper/colorado/docs/userguide/index.html
- * doctor: http://artifacts.opnfv.org/doctor/colorado/userguide/index.html
+ * bgpvpn: http://artifacts.opnfv.org/sdnvpn/danube/docs/userguide/index.html
+ * copper: http://artifacts.opnfv.org/copper/danube/docs/userguide/index.html
+ * doctor: http://artifacts.opnfv.org/doctor/danube/userguide/index.html
* domino: http://artifacts.opnfv.org/domino/docs/userguide-single/index.html
* moon: http://artifacts.opnfv.org/moon/docs/userguide/index.html
* multisites: http://artifacts.opnfv.org/multisite/docs/userguide/index.html
- * onos-sfc: http://artifacts.opnfv.org/onosfw/colorado/userguide/index.html
- * odl-sfc: http://artifacts.opnfv.org/sfc/colorado/userguide/index.html
- * promise: http://artifacts.opnfv.org/promise/colorado/docs/userguide/index.html
+ * onos-sfc: http://artifacts.opnfv.org/onosfw/danube/userguide/index.html
+ * odl-sfc: http://artifacts.opnfv.org/sfc/danube/userguide/index.html
+ * promise: http://artifacts.opnfv.org/promise/danube/docs/userguide/index.html
+ * security_scan: http://artifacts.opnfv.org/security_scan/colorado/docs/userguide/index.html
+ * TODO
security_scan
^^^^^^^^^^^^^
@@ -345,11 +386,12 @@ security_scan
Security Scanning, is a project to insure security compliance and vulnerability
checks, as part of an automated CI / CD platform delivery process.
-The project makes use of the existing SCAP format[6] to perform deep scanning of
-NFVi nodes, to insure they are hardened and free of known CVE reported vulnerabilities.
+The project makes use of the existing SCAP format `[6]`_ to perform deep
+scanning of NFVI nodes, to ensure they are hardened and free of known
+CVE-reported vulnerabilities.
The SCAP content itself, is then consumed and run using an upstream opensource tool
-known as OpenSCAP[7].
+known as OpenSCAP `[7]`_.
The OPNFV Security Group have developed the code that will called by the OPNFV Jenkins
build platform, to perform a complete scan. Resulting reports are then copied to the
@@ -369,8 +411,7 @@ The current work flow is as follows:
* If the config file value 'clean' is set to 'True' then the application installed in
step 5 is removed, and all reports created at step 6 are deleted.
-At present, only the Apex installer is supported, with support for other installers due
-within D-release.
+Security scan is supported by Apex, TODO....
@@ -385,8 +426,8 @@ architectural framework for delivering IP multimedia services.
vIMS has been integrated in Functest to demonstrate the capability to deploy a
relatively complex NFV scenario on the OPNFV platform. The deployment of a complete
-functional VNF allows the test of most of the
-essential functions needed for a NFV platform.
+functional VNF allows the test of most of the essential functions needed for a
+NFV platform.
The goal of this test suite consists of:
@@ -491,6 +532,7 @@ References
.. _`[9]`: https://git.opnfv.org/cgit/functest/tree/testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml
.. _`[11]`: http://robotframework.org/
.. _`[12]`: http://artifacts.opnfv.org/parser/colorado/docs/userguide/index.html
+.. _`[13]`: TODO URL doc SNAPS
OPNFV main site: opnfvmain_.
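
The connection_check suite described above is implemented with the SNAPS
library; the sketch below is only a rough, hypothetical illustration of the
kind of connectivity checks it performs, written with the
functest.utils.openstack_utils helpers that appear elsewhere in this change
(the function name and exact client calls are assumptions, not the SNAPS
code)::

    import functest.utils.openstack_utils as os_utils

    def basic_connection_check():
        """Return True if the core OpenStack clients can be created and queried."""
        nova = os_utils.get_nova_client()
        neutron = os_utils.get_neutron_client()
        cinder = os_utils.get_cinder_client()
        checks = [
            ("nova", lambda: nova.servers.list()),
            ("neutron", lambda: neutron.list_networks()),
            ("cinder", lambda: cinder.volumes.list()),
        ]
        for name, call in checks:
            try:
                call()
            except Exception as err:
                print("%s check failed: %s" % (name, err))
                return False
        return True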
diff --git a/docs/userguide/introduction.rst b/docs/userguide/introduction.rst
index 76aadc842..e5a090ed5 100644
--- a/docs/userguide/introduction.rst
+++ b/docs/userguide/introduction.rst
@@ -9,14 +9,26 @@ In the Continuous Integration pipeline, it is launched after an OPNFV fresh
installation to validate and verify the basic functions of the
infrastructure.
-The current list of test suites can be distributed over 4 main domains: VIM
+The current list of test suites can be distributed over 5 main domains: VIM
(Virtualised Infrastructure Manager), Controllers (i.e. SDN Controllers),
-Features and VNF (Virtual Network Functions).
+Features, VNF (Virtual Network Functions) and MANO stacks.
+
+Functest test suites are also distributed in the OPNFV testing categories:
+healthcheck, smoke, features, components, performance, VNF, Stress tests.
+
+All the healthcheck and smoke tests of a given scenario must be successful to
+validate the scenario for the release.
+-------------+---------------+----------------+----------------------------------+
| Domain | Tier | Test case | Comments |
+=============+===============+================+==================================+
| VIM | healthcheck | healthcheck | Verify basic operation in VIM |
+| | +----------------+----------------------------------+
+| | | connection | Check OpenStack connectivity |
+| | | _check | through SNAPS framework |
+| | +----------------+----------------------------------+
+| | | api_check | Check OpenStack API through |
+| | | | SNAPS framework |
| +---------------+----------------+----------------------------------+
| | smoke | vPing_SSH | NFV "Hello World" using an SSH |
| | | | connection to a destination VM |
@@ -45,8 +57,11 @@ Features and VNF (Virtual Network Functions).
| | +----------------+----------------------------------+
| | | rally_sanity | Run a subset of the OpenStack |
| | | | Rally Test Suite in smoke mode |
+| | +----------------+----------------------------------+
+| | | snaps_smoke | Run the SNAPS smoke test suite |
+| | | | through the SNAPS framework |
| +---------------+----------------+----------------------------------+
-| | openstack | tempest_full | Generate and run a full set of |
+| | components | tempest_full | Generate and run a full set of |
| | | \_parallel | the OpenStack Tempest Test Suite.|
| | | | See the OpenStack reference test |
| | | | suite `[2]`_. The generated |
@@ -57,7 +72,7 @@ Features and VNF (Virtual Network Functions).
| | | | benchmarking OpenStack modules |
| | | | See the Rally documents `[3]`_. |
+-------------+---------------+----------------+----------------------------------+
-| Controllers | sdn_suites | odl | Opendaylight Test suite |
+| Controllers | smoke | odl | Opendaylight Test suite |
| | | | Limited test suite to check the |
| | | | basic neutron (Layer 2) |
| | | | operations mainly based on |
@@ -69,7 +84,7 @@ Features and VNF (Virtual Network Functions).
| | | | See `ONOSFW User Guide`_ for |
| | | | details. |
+-------------+---------------+----------------+----------------------------------+
-| Features | features | Promise | Resource reservation and |
+| Features | features | promise | Resource reservation and |
| | | | management project to identify |
| | | | NFV related requirements and |
| | | | realize resource reservation for |
@@ -80,7 +95,7 @@ Features and VNF (Virtual Network Functions).
| | | | See `Promise User Guide`_ for |
| | | | details. |
| | +----------------+----------------------------------+
-| | | Doctor | Doctor platform, as of Colorado |
+| | | doctor | Doctor platform, as of Colorado |
| | | | release, provides the three |
| | | | features: |
| | | | * Immediate Notification |
@@ -119,7 +134,7 @@ Features and VNF (Virtual Network Functions).
| | | | See `Domino User Guide`_ for |
| | | | details |
| | +----------------+----------------------------------+
-| | | Copper | Copper develops OPNFV platform |
+| | | copper | Copper develops OPNFV platform |
| | | | support for policy management, |
| | | | using open source projects such |
| | | | as OpenStack Congress, focused |
@@ -139,13 +154,17 @@ Features and VNF (Virtual Network Functions).
| | | | See `Moon User Guide`_ for |
| | | | details |
+-------------+---------------+----------------+----------------------------------+
-| VNF | vnf | vims | Example of a real VNF deployment |
+| VNF | vnf | cloudify_ims | Example of a real VNF deployment |
| | | | to show the NFV capabilities of |
| | | | the platform. The IP Multimedia |
| | | | Subsytem is a typical Telco test |
| | | | case, referenced by ETSI. |
| | | | It provides a fully functional |
| | | | VoIP System |
+| | +----------------+----------------------------------+
+| | | opera_ims | vIMS deployment using openBaton |
+| | +----------------+----------------------------------+
+| | | orchestra_ims | vIMS deployment using open-O |
+ +---------------+----------------+----------------------------------+
| | | parser | Parser is an integration project |
| | | | which aims to provide |
@@ -166,6 +185,8 @@ Test cases also have an implicit execution order. For example, if the early
'healthcheck' Tier testcase fails, or if there are any failures in the 'smoke'
Tier testcases, there is little point to launch a full testcase execution round.
+In Danube, the smoke and SDN controller tiers were merged into the smoke tier.
+
An overview of the Functest Structural Concept is depicted graphically below:
.. figure:: ../images/concepts_mapping_final.png
@@ -186,19 +207,24 @@ NoSQL database. The goal is to populate the database with results from different
sources and scenarios and to show them on a `Functest Dashboard`_. A screenshot
of a live Functest Dashboard is shown below:
-.. figure:: ../images/FunctestDashboardColorado.png
+** TODO **
+.. figure:: ../images/FunctestDashboardDanube.png
:align: center
:alt: Functest Dashboard
-There is no real notion of Test domain or Test coverage. Basic components
-(VIM, SDN controllers) are tested through their own suites. Feature projects
-also provide their own test suites with different ways of running their tests.
+Basic components (VIM, SDN controllers) are tested through their own suites.
+Feature projects also provide their own test suites with different ways of
+running their tests.
+
+The notion of domain has been introduced in the description of the test cases
+stored in the Database.
+This parameter, as well as possible tags, can be used for the test case catalog.
vIMS test case was integrated to demonstrate the capability to deploy a
relatively complex NFV scenario on top of the OPNFV infrastructure.
-Functest considers OPNFV as a black box. As of Colorado release the OPNFV
+Functest considers OPNFV as a black box. As of Danube release the OPNFV
offers a lot of potential combinations:
* 3 controllers (OpenDaylight, ONOS, OpenContrail)
@@ -210,9 +236,9 @@ deployed features. The system uses the environment variables (INSTALLER_IP and
DEPLOY_SCENARIO) to automatically determine the valid test cases, for each given
environment.
-In the Colorado OPNFV System release a convenience Functest CLI utility is also
-introduced to simplify setting up the Functest evironment, management of the
-OpenStack environment (e.g. resource clean-up) and for executing tests.
+A convenience Functest CLI utility is also available to simplify setting up the
+Functest environment, managing the OpenStack environment (e.g. resource
+clean-up) and executing tests.
The Functest CLI organised the testcase into logical Tiers, which contain in
turn one or more testcases. The CLI allows execution of a single specified
testcase, all test cases in a specified Tier, or the special case of execution
diff --git a/docs/userguide/runfunctest.rst b/docs/userguide/runfunctest.rst
index a6a2c1756..ef1017104 100644
--- a/docs/userguide/runfunctest.rst
+++ b/docs/userguide/runfunctest.rst
@@ -17,11 +17,6 @@ If any of the above steps are missing please refer to the Functest Config Guide
as they are a prerequisite and all the commands explained in this section **must** be
performed **inside the container**.
-Note: In Colorado release, the scripts **run_tests.sh** is now replaced with a
-new Functest CLI. One difference, is that tests run through the Functest CLI
-will always clean-up OpenStack resources. See the `Troubleshooting`_ section of this
-document, where this difference is discussed.
-
The Functest CLI offers two commands (functest tier ...) and (functest testcase ... )
for the execution of Test Tiers or Test Cases::
@@ -53,17 +48,15 @@ command::
root@22e436918db0:~/repos/functest/ci# functest tier list
- 0. healthcheck:
- ['healthcheck']
+ ['healthcheck', 'connection_check', 'api_check']
- 1. smoke:
- ['vping_ssh', 'vping_userdata', 'tempest_smoke_serial', 'rally_sanity']
- - 2. sdn_suites:
- ['odl']
- - 3. features:
+ ['vping_ssh', 'vping_userdata', 'tempest_smoke_serial', 'rally_sanity', 'snaps_smoke', 'odl']
+ - 2. features:
['doctor', 'security_scan']
- - 4. openstack:
+ - 3. components:
['tempest_full_parallel', 'rally_full']
- - 5. vnf:
- ['vims']
+ - 4. vnf:
+ ['cloudify_ims']
and
@@ -71,6 +64,9 @@ command::
healthcheck
vping_ssh
vping_userdata
+ connection_check
+ api_check
+ snaps_smoke
tempest_smoke_serial
rally_sanity
odl
@@ -78,7 +74,7 @@ command::
security_scan
tempest_full_parallel
rally_full
- vims
+ cloudify_ims
More specific details on specific Tiers or Test Cases can be seen wih the
'show' command::
@@ -208,9 +204,9 @@ To execute a Test Tier or Test Case, the 'run' command is used::
To list the test cases which are part of a specific Test Tier, the 'get-tests'
command is used with 'functest tier'::
- root@22e436918db0:~/repos/functest/ci# functest tier get-tests sdn_suites
- Test cases in tier 'sdn_suites':
- ['odl']
+ root@22e436918db0:~/repos/functest/ci# functest tier get-tests healthcheck
+ Test cases in tier 'healthcheck':
+ ['healthcheck']
Please note that for some scenarios some test cases might not be launched.
@@ -231,37 +227,6 @@ two possibilities::
* Run a single Test Case, specified by a valid choice of <testcase_name>
* Run ALL test Test Cases (for all Tiers) by specifying <testcase_name> = 'all'
-Example::
-
- root@22e436918db0:~/repos/functest/ci# functest testcase run all
- Executing command: 'python /home/opnfv/repos/functest/ci/run_tests.py -t all'
- 2016-06-30 12:03:28,628 - run_tests - INFO - Sourcing the OpenStack RC file...
- 2016-06-30 12:03:28,634 - run_tests - INFO - Tiers to be executed:
- - 0. healthcheck:
- ['healthcheck']
- - 1. smoke:
- ['vping_ssh', 'vping_userdata', 'tempest_smoke_serial', 'rally_sanity']
- - 2. sdn_suites:
- ['odl']
- - 3. features:
- ['doctor', 'security_scan']
- - 4. openstack:
- ['tempest_full_parallel', 'rally_full']
- - 5. vnf:
- ['vims']
- 2016-06-30 12:03:28,634 - run_tests - INFO - ############################################
- 2016-06-30 12:03:28,635 - run_tests - INFO - Running tier 'healthcheck'
- 2016-06-30 12:03:28,635 - run_tests - INFO - ############################################
- 2016-06-30 12:03:28,635 - run_tests - INFO - ============================================
- 2016-06-30 12:03:28,635 - run_tests - INFO - Running test case 'healthcheck'...
- 2016-06-30 12:03:28,635 - run_tests - INFO - ============================================
- 2016-06-30 12:03:28,651 - healtcheck - INFO - Testing Keystone API...
- 2016-06-30 12:03:36,676 - healtcheck - INFO - ...Keystone OK!
- 2016-06-30 12:03:36,679 - healtcheck - INFO - Testing Glance API...
- :
- :
- etc.
-
Functest includes a cleaning mechanism in order to remove all the OpenStack
resources except those present before running any test. The script
*$REPOS_DIR/functest/functest/utils/generate_defaults.py* is called once when setting up
@@ -304,19 +269,20 @@ once a week maximum) and the third job allows testing test suite by test suite s
the test suite name. The user may also use either of these Jenkins jobs to execute
the desired test suites.
-One of the most challenging task in the Colorado release consists
+One of the most challenging tasks in the Danube release consists
in dealing with lots of scenarios and installers. Thus, when the tests are
automatically started from CI, a basic algorithm has been created in order to
detect whether a given test is runnable or not on the given scenario.
Some Functest test suites cannot be systematically run (e.g. ODL suite can not
-be run on an ONOS scenario). Moreover since Colorado, we also introduce the
-notion of daily/weekly in order to save CI time and avoid running systematically
-long duration tests.
+be run on an ONOS scenario). The daily/weekly notion was introduced in
+Colorado in order to save CI time and avoid systematically running
+long-duration tests. It was not used in Colorado due to a CI resource shortage.
+The mechanism however remains as part of the CI evolution.
CI provides some useful information passed to the container as environment
variables:
- * Installer (apex|compass|fuel|joid), stored in INSTALLER_TYPE
+ * Installer (apex|compass|daisy|fuel|joid), stored in INSTALLER_TYPE
* Installer IP of the engine or VM running the actual deployment, stored in INSTALLER_IP
* The scenario [controller]-[feature]-[mode], stored in DEPLOY_SCENARIO with
@@ -366,6 +332,9 @@ The constraints per test case are defined in the Functest configuration file
dependencies:
installer: ''
scenario: '^((?!bgpvpn|odl_l3).)*$'
+ run:
+ module: 'functest.opnfv_tests.openstack.vping.vping_ssh'
+ class: 'VPingSSH'
....
We may distinguish 2 levels in the test case description:
@@ -383,6 +352,18 @@ For a given test case we defined:
* blocking: if set to true, if the test is failed, the execution of the following tests is canceled
* the description of the test case
* the dependencies: a combination of 2 regex on the scenario and the installer name
+ * run: in Danube we introduced the notion of abstract classes to harmonize the way internal, feature and VNF tests are run
+
+For further details on abstract classes, see the developer guide.
+
+Additional parameters have been added to the description stored in the Database.
+The target is to use the configuration stored in the Database and consider the
+local file as backup if the Database is not reachable.
+The additional fields related to a test case are:
+ * trust: we introduced this notion to put in place a mechanism of scenario promotion.
+ * version: it indicates from which version this test can be run
+ * domains: the main domain covered by the test suite
+ * tags: a list of tags related to the test suite
The order of execution is the one defined in the file if all test cases are selected.
@@ -390,13 +371,12 @@ In CI daily job the tests are executed in the following order:
1) healthcheck (blocking)
2) smoke: both vPings are blocking
- 3) SDN controller suites (blocking)
- 4) Feature project tests cases
+ 3) Feature project test cases
In CI weekly job we add 2 tiers:
- 5) vIMS suite
- 6) Rally suite
+ 4) VNFs (vIMS)
+ 5) Components (Rally and Tempest long duration suites)
As explained before, at the end of an automated execution, the OpenStack resources
might be eventually removed.
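
For a given test case, the dependencies field holds two regular expressions
matched against the installer name and the deployment scenario taken from the
CI environment variables. A minimal sketch of how such a check could be
evaluated (the helper name is illustrative; the real logic lives in the
Functest CI scripts)::

    import os
    import re

    def is_runnable(dependencies):
        """dependencies example from testcases.yaml:
           {'installer': '(apex)|(fuel)|(joid)', 'scenario': '^((?!fdio).)*$'}"""
        installer = os.environ.get('INSTALLER_TYPE', '')
        scenario = os.environ.get('DEPLOY_SCENARIO', '')
        # an empty pattern matches any installer/scenario
        return (re.search(dependencies.get('installer', ''), installer) is not None
                and re.search(dependencies.get('scenario', ''), scenario) is not None)

    print(is_runnable({'installer': '(apex)|(fuel)|(joid)',
                       'scenario': '^((?!fdio).)*$'}))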
diff --git a/docs/userguide/troubleshooting.rst b/docs/userguide/troubleshooting.rst
index bf94935e6..1b7bf9b38 100644
--- a/docs/userguide/troubleshooting.rst
+++ b/docs/userguide/troubleshooting.rst
@@ -48,9 +48,6 @@ python scripts, located in paths:
*$REPOS_DIR/functest/functest/opnfv_tests/vPing/CI/libraries/vPing_userdata.py*
Notes:
- #. In this Colorado Funtest Userguide, the use of the Functest CLI is
- emphasized. The Functest CLI replaces the earlier Bash shell script
- *run_tests.sh*.
#. There is one difference, between the Functest CLI based test case
execution compared to the earlier used Bash shell script, which is
@@ -348,8 +345,8 @@ See OpenSCAP web site: https://www.open-scap.org/
NFV
---
-vIMS
-^^^^
+cloudify_ims
+^^^^^^^^^^^^
vIMS deployment may fail for several reasons, the most frequent ones are
described in the following table:
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index 25be17240..2feab771b 100755
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -21,7 +21,7 @@ general:
repo_sfc: /home/opnfv/repos/sfc
dir_repo_onos: /home/opnfv/repos/onos
dir_repo_promise: /home/opnfv/repos/promise
- dir_repo_doctor: /home/opnfv/repos/doctor
+ repo_doctor: /home/opnfv/repos/doctor
repo_copper: /home/opnfv/repos/copper
dir_repo_ovno: /home/opnfv/repos/ovno
repo_parser: /home/opnfv/repos/parser
@@ -30,6 +30,7 @@ general:
functest: /home/opnfv/functest
functest_test: /home/opnfv/repos/functest/functest/opnfv_tests
results: /home/opnfv/functest/results
+ functest_logging_cfg: /home/opnfv/repos/functest/functest/ci/logging.json
functest_conf: /home/opnfv/functest/conf
functest_data: /home/opnfv/functest/data
dir_vIMS_data: /home/opnfv/functest/data/vIMS/
@@ -96,6 +97,8 @@ tempest:
user_password: tempest
validation:
ssh_timeout: 130
+ object_storage:
+ operator_role: SwiftOperator
private_net_name: tempest-net
private_subnet_name: tempest-subnet
private_subnet_cidr: 192.168.150.0/24
diff --git a/functest/ci/exec_test.sh b/functest/ci/exec_test.sh
index 7c96d69c3..54a7c624e 100755
--- a/functest/ci/exec_test.sh
+++ b/functest/ci/exec_test.sh
@@ -93,13 +93,6 @@ function run_test(){
"vims")
python ${FUNCTEST_TEST_DIR}/vnf/ims/vims.py $clean_flag $report
;;
- "rally_full")
- python ${FUNCTEST_TEST_DIR}/openstack/rally/run_rally-cert.py $clean_flag all $report
- ;;
- "rally_sanity")
- python ${FUNCTEST_TEST_DIR}/openstack/rally/run_rally-cert.py \
- $clean_flag --sanity all $report
- ;;
"onos")
python ${FUNCTEST_TEST_DIR}/sdn/onos/teston/onos.py
;;
diff --git a/functest/ci/logging.json b/functest/ci/logging.json
new file mode 100644
index 000000000..3f454e8fa
--- /dev/null
+++ b/functest/ci/logging.json
@@ -0,0 +1,29 @@
+{
+ "version": 1,
+ "disable_existing_loggers": false,
+ "formatters": {
+ "standard": {
+ "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ }
+ },
+ "handlers": {
+ "console": {
+ "level": "INFO",
+ "class": "logging.StreamHandler",
+ "formatter": "standard"
+ },
+ "file": {
+ "level": "DEBUG",
+ "class": "logging.FileHandler",
+ "formatter": "standard",
+ "filename": "/home/opnfv/functest/results/functest.log"
+ }
+ },
+ "loggers": {
+ "": {
+ "handlers": ["console", "file"],
+ "level": "DEBUG",
+ "propagate": "yes"
+ }
+ }
+}
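
The new logging.json above follows the standard Python dictConfig schema: the
console handler prints INFO and above while the file handler writes DEBUG logs
to functest.log. The exact loader in functest/utils/functest_logger.py is not
shown in this excerpt; a minimal sketch of how such a file could be consumed,
using the functest_logging_cfg path added to config_functest.yaml, might look
like::

    import json
    import logging
    import logging.config
    import os

    # path from the functest_logging_cfg entry in config_functest.yaml
    LOGGING_JSON = "/home/opnfv/repos/functest/functest/ci/logging.json"

    def setup_logging(config_path=LOGGING_JSON):
        """Apply the JSON dictConfig if present, else fall back to basicConfig."""
        if os.path.exists(config_path):
            with open(config_path) as cfg_file:
                logging.config.dictConfig(json.load(cfg_file))
        else:
            logging.basicConfig(level=logging.INFO)
        return logging.getLogger("functest")

    logger = setup_logging()
    logger.info("console shows INFO and above; functest.log captures DEBUG")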
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index 74c751af9..8bbdf18bc 100755
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -184,11 +184,6 @@ def source_rc_file():
CONST.OS_TENANT_NAME = value
elif key == 'OS_PASSWORD':
CONST.OS_PASSWORD = value
- logger.debug("Used credentials: %s" % str)
- logger.debug("OS_AUTH_URL:%s" % CONST.OS_AUTH_URL)
- logger.debug("OS_USERNAME:%s" % CONST.OS_USERNAME)
- logger.debug("OS_TENANT_NAME:%s" % CONST.OS_TENANT_NAME)
- logger.debug("OS_PASSWORD:%s" % CONST.OS_PASSWORD)
def patch_config_file():
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index a5f1ab9e8..ef0800163 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -93,10 +93,6 @@ def source_rc_file():
elif key == 'OS_PASSWORD':
ft_constants.OS_PASSWORD = value
CONST.OS_PASSWORD = value
- logger.debug("OS_AUTH_URL:%s" % CONST.OS_AUTH_URL)
- logger.debug("OS_USERNAME:%s" % CONST.OS_USERNAME)
- logger.debug("OS_TENANT_NAME:%s" % CONST.OS_TENANT_NAME)
- logger.debug("OS_PASSWORD:%s" % CONST.OS_PASSWORD)
def generate_os_snapshot():
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index ede082856..27d358bf7 100755
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -81,6 +81,9 @@ tiers:
dependencies:
installer: ''
scenario: '^((?!bgpvpn).)*$'
+ run:
+ module: 'functest.opnfv_tests.openstack.rally.rally'
+ class: 'RallySanity'
-
name: odl
@@ -190,8 +193,11 @@ tiers:
description: >-
Test suite from Doctor project.
dependencies:
- installer: 'apex'
+ installer: '(apex)|(fuel)|(joid)'
scenario: '^((?!fdio).)*$'
+ run:
+ module: 'functest.opnfv_tests.features.doctor'
+ class: 'Doctor'
-
name: bgpvpn
@@ -325,6 +331,9 @@ tiers:
dependencies:
installer: '^((?!netvirt).)*$'
scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.rally.rally'
+ class: 'RallyFull'
-
name: vnf
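
The run: block added to several test cases above names the module and class
implementing the test, which lets run_tests.py instantiate it without a
per-case shell wrapper. A minimal, assumed sketch of that resolution step (the
helper name is illustrative, not the actual run_tests.py code)::

    import importlib

    def create_test_case(run_block):
        """run_block comes from testcases.yaml, e.g.
           {'module': 'functest.opnfv_tests.openstack.rally.rally',
            'class': 'RallySanity'}"""
        module = importlib.import_module(run_block['module'])
        cls = getattr(module, run_block['class'])
        return cls()

    test = create_test_case({'module': 'functest.opnfv_tests.openstack.rally.rally',
                             'class': 'RallySanity'})
    test.run()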
diff --git a/functest/core/pytest_suite_runner.py b/functest/core/pytest_suite_runner.py
index 1eed92b57..c168d7d93 100755..100644
--- a/functest/core/pytest_suite_runner.py
+++ b/functest/core/pytest_suite_runner.py
@@ -41,14 +41,18 @@ class PyTestSuiteRunner(base.TestcaseBase):
for test, message in result.failures:
self.logger.error(str(test) + " FAILED with " + message)
+ # A result can be PASS or FAIL, but in both cases the execution itself was OK:
+ # we must distinguish an execution error from a FAIL result.
+ # TestcaseBase.EX_RUN_ERROR means that the test case could not be run,
+ # not that it was run and its result was FAIL.
+ # Hence the exit code stays EX_OK even when the criteria is set to FAIL.
+ exit_code = base.TestcaseBase.EX_OK
if ((result.errors and len(result.errors) > 0)
or (result.failures and len(result.failures) > 0)):
self.logger.info("%s FAILED" % self.case_name)
self.criteria = 'FAIL'
- exit_code = base.TestcaseBase.EX_RUN_ERROR
else:
self.logger.info("%s OK" % self.case_name)
- exit_code = base.TestcaseBase.EX_OK
self.criteria = 'PASS'
self.details = {}
diff --git a/functest/core/vnf_base.py b/functest/core/vnf_base.py
index 995204940..4d019858a 100644
--- a/functest/core/vnf_base.py
+++ b/functest/core/vnf_base.py
@@ -35,6 +35,7 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.details['orchestrator'] = {}
self.details['vnf'] = {}
self.details['test_vnf'] = {}
+ self.images = {}
try:
self.tenant_name = CONST.__getattribute__(
'vnf_{}_tenant_name'.format(self.case_name))
@@ -44,7 +45,7 @@ class VnfOnBoardingBase(base.TestcaseBase):
raise Exception("Unknown VNF case=" + self.case_name)
try:
- self.tenant_images = CONST.__getattribute__(
+ self.images = CONST.__getattribute__(
'vnf_{}_tenant_images'.format(self.case_name))
except:
self.logger.warn("No tenant image defined for this VNF")
@@ -152,14 +153,15 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.logger.info("Update OpenStack creds informations")
self.creds.update({
- "username": self.tenant_name,
- "password": self.tenant_name,
"tenant": self.tenant_name,
})
- self.glance_client = os_utils.get_glance_client(self.creds)
self.neutron_client = os_utils.get_neutron_client(self.creds)
self.nova_client = os_utils.get_nova_client(self.creds)
-
+ self.creds.update({
+ "username": self.tenant_name,
+ "password": self.tenant_name,
+ })
+ self.glance_client = os_utils.get_glance_client(self.creds)
self.logger.info("Upload some OS images if it doesn't exist")
temp_dir = os.path.join(self.data_dir, "tmp/")
diff --git a/functest/opnfv_tests/features/copper.py b/functest/opnfv_tests/features/copper.py
index a10364e26..735b315d2 100755
--- a/functest/opnfv_tests/features/copper.py
+++ b/functest/opnfv_tests/features/copper.py
@@ -22,4 +22,4 @@ class Copper(base.FeatureBase):
super(Copper, self).__init__(project='copper',
case='copper-notification',
repo='dir_repo_copper')
- self.cmd = 'bash %s/tests/run.sh' % self.repo
+ self.cmd = 'cd %s/tests && bash run.sh && cd -' % self.repo
diff --git a/functest/opnfv_tests/features/doctor.py b/functest/opnfv_tests/features/doctor.py
index dbd803a65..4d295a674 100755
--- a/functest/opnfv_tests/features/doctor.py
+++ b/functest/opnfv_tests/features/doctor.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
#
-# Copyright (c) 2015 All rights reserved
+# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -13,77 +13,12 @@
# 0.2: measure test duration and publish results under json format
#
#
-import argparse
-import os
-import time
+import functest.core.feature_base as base
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-import functest.utils.functest_constants as ft_constants
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-functest_yaml = functest_utils.get_functest_yaml()
-
-DOCTOR_REPO_DIR = ft_constants.DOCTOR_REPO_DIR
-RESULTS_DIR = ft_constants.FUNCTEST_RESULTS_DIR
-
-logger = ft_logger.Logger("doctor").getLogger()
-
-
-def main():
- exit_code = -1
-
- # if the image name is explicitly set for the doctor suite, set it as
- # enviroment variable
- if 'doctor' in functest_yaml and 'image_name' in functest_yaml['doctor']:
- os.environ["IMAGE_NAME"] = functest_yaml['doctor']['image_name']
-
- cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO_DIR
- log_file = RESULTS_DIR + "/doctor.log"
-
- start_time = time.time()
-
- ret = functest_utils.execute_command(cmd,
- info=True,
- output_file=log_file)
-
- stop_time = time.time()
- duration = round(stop_time - start_time, 1)
- if ret == 0:
- logger.info("Doctor test case OK")
- test_status = 'OK'
- exit_code = 0
- else:
- logger.info("Doctor test case FAILED")
- test_status = 'NOK'
-
- details = {
- 'timestart': start_time,
- 'duration': duration,
- 'status': test_status,
- }
- status = "FAIL"
- if details['status'] == "OK":
- status = "PASS"
- functest_utils.logger_test_results("Doctor",
- "doctor-notification",
- status, details)
- if args.report:
- functest_utils.push_results_to_db("doctor",
- "doctor-notification",
- start_time,
- stop_time,
- status,
- details)
- logger.info("Doctor results pushed to DB")
-
- exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
+class Doctor(base.FeatureBase):
+ def __init__(self):
+ super(Doctor, self).__init__(project='doctor',
+ case='doctor-notification',
+ repo='dir_repo_doctor')
+ self.cmd = 'cd %s/tests && ./run.sh' % self.repo
diff --git a/functest/opnfv_tests/openstack/rally/__init__.py b/functest/opnfv_tests/openstack/rally/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/__init__.py
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
new file mode 100644
index 000000000..e7cac7afa
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -0,0 +1,554 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import json
+import os
+import re
+import subprocess
+import time
+
+import iniparse
+import yaml
+
+from functest.core import testcase_base
+from functest.utils.constants import CONST
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+logger = ft_logger.Logger('Rally').getLogger()
+
+
+class RallyBase(testcase_base.TestcaseBase):
+ TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
+ 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
+ GLANCE_IMAGE_NAME = CONST.openstack_image_name
+ GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
+ GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
+ GLANCE_IMAGE_FILENAME)
+ GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
+ FLAVOR_NAME = "m1.tiny"
+
+ RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
+ RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
+ TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
+ SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
+ USERS_AMOUNT = 2
+ TENANTS_AMOUNT = 3
+ ITERATIONS_AMOUNT = 10
+ CONCURRENCY = 4
+ RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
+ TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
+ 'tempest/tempest.conf')
+ BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
+ TEMP_DIR = os.path.join(RALLY_DIR, "var")
+
+ CINDER_VOLUME_TYPE_NAME = "volume_test"
+ RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
+ RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
+ RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
+ RALLY_ROUTER_NAME = CONST.rally_router_name
+
+ def __init__(self):
+ super(RallyBase, self).__init__()
+ self.mode = ''
+ self.summary = []
+ self.scenario_dir = ''
+ self.nova_client = os_utils.get_nova_client()
+ self.neutron_client = os_utils.get_neutron_client()
+ self.cinder_client = os_utils.get_cinder_client()
+ self.network_dict = {}
+ self.volume_type = None
+
+ def _build_task_args(self, test_file_name):
+ task_args = {'service_list': [test_file_name]}
+ task_args['image_name'] = self.GLANCE_IMAGE_NAME
+ task_args['flavor_name'] = self.FLAVOR_NAME
+ task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
+ task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
+ task_args['tmpl_dir'] = self.TEMPLATE_DIR
+ task_args['sup_dir'] = self.SUPPORT_DIR
+ task_args['users_amount'] = self.USERS_AMOUNT
+ task_args['tenants_amount'] = self.TENANTS_AMOUNT
+ task_args['use_existing_users'] = False
+ task_args['iterations'] = self.ITERATIONS_AMOUNT
+ task_args['concurrency'] = self.CONCURRENCY
+ task_args['smoke'] = self.smoke
+
+ ext_net = os_utils.get_external_net(self.neutron_client)
+ if ext_net:
+ task_args['floating_network'] = str(ext_net)
+ else:
+ task_args['floating_network'] = ''
+
+ net_id = self.network_dict['net_id']
+ if net_id:
+ task_args['netid'] = str(net_id)
+ else:
+ task_args['netid'] = ''
+
+ auth_url = CONST.OS_AUTH_URL
+ if auth_url is not None:
+ task_args['request_url'] = auth_url.rsplit(":", 1)[0]
+ else:
+ task_args['request_url'] = ''
+
+ return task_args
+
+ def _prepare_test_list(self, test_name):
+ test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
+ scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
+ test_yaml_file_name)
+
+ if not os.path.exists(scenario_file_name):
+ scenario_file_name = os.path.join(self.scenario_dir,
+ test_yaml_file_name)
+
+ if not os.path.exists(scenario_file_name):
+ raise Exception("The scenario '%s' does not exist."
+ % scenario_file_name)
+
+ logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
+ test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
+
+ if not os.path.exists(self.TEMP_DIR):
+ os.makedirs(self.TEMP_DIR)
+
+ self.apply_blacklist(scenario_file_name, test_file_name)
+ return test_file_name
+
+ @staticmethod
+ def get_task_id(cmd_raw):
+ """
+ get task id from command rally result
+ :param cmd_raw:
+ :return: task_id as string
+ """
+ taskid_re = re.compile('^Task +(.*): started$')
+ for line in cmd_raw.splitlines(True):
+ line = line.strip()
+ match = taskid_re.match(line)
+ if match:
+ return match.group(1)
+ return None
+
+ @staticmethod
+ def task_succeed(json_raw):
+ """
+ Parse JSON from rally JSON results
+ :param json_raw:
+ :return: Bool
+ """
+ rally_report = json.loads(json_raw)
+ for report in rally_report:
+ if report is None or report.get('result') is None:
+ return False
+
+ for result in report.get('result'):
+ if result is None or len(result.get('error')) > 0:
+ return False
+
+ return True
+
+ @staticmethod
+ def live_migration_supported():
+ config = iniparse.ConfigParser()
+ if (config.read(RallyBase.TEMPEST_CONF_FILE) and
+ config.has_section('compute-feature-enabled') and
+ config.has_option('compute-feature-enabled',
+ 'live_migration')):
+ return config.getboolean('compute-feature-enabled',
+ 'live_migration')
+
+ return False
+
+ @staticmethod
+ def get_cmd_output(proc):
+ result = ""
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ result += line
+ return result
+
+ @staticmethod
+ def excl_scenario():
+ black_tests = []
+ try:
+ with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+ black_list_yaml = yaml.safe_load(black_list_file)
+
+ installer_type = CONST.INSTALLER_TYPE
+ deploy_scenario = CONST.DEPLOY_SCENARIO
+ if (bool(installer_type) * bool(deploy_scenario)):
+ if 'scenario' in black_list_yaml.keys():
+ for item in black_list_yaml['scenario']:
+ scenarios = item['scenarios']
+ installers = item['installers']
+ if (deploy_scenario in scenarios and
+ installer_type in installers):
+ tests = item['tests']
+ black_tests.extend(tests)
+ except Exception:
+ logger.debug("Scenario exclusion not applied.")
+
+ return black_tests
+
+ @staticmethod
+ def excl_func():
+ black_tests = []
+ func_list = []
+
+ try:
+ with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+ black_list_yaml = yaml.safe_load(black_list_file)
+
+ if not RallyBase.live_migration_supported():
+ func_list.append("no_live_migration")
+
+ if 'functionality' in black_list_yaml.keys():
+ for item in black_list_yaml['functionality']:
+ functions = item['functions']
+ for func in func_list:
+ if func in functions:
+ tests = item['tests']
+ black_tests.extend(tests)
+ except Exception:
+ logger.debug("Functionality exclusion not applied.")
+
+ return black_tests
+
+ @staticmethod
+ def apply_blacklist(case_file_name, result_file_name):
+ logger.debug("Applying blacklist...")
+ cases_file = open(case_file_name, 'r')
+ result_file = open(result_file_name, 'w')
+
+ black_tests = list(set(RallyBase.excl_func() +
+ RallyBase.excl_scenario()))
+
+ include = True
+ for cases_line in cases_file:
+ if include:
+ for black_tests_line in black_tests:
+ if re.search(black_tests_line,
+ cases_line.strip().rstrip(':')):
+ include = False
+ break
+ else:
+ result_file.write(str(cases_line))
+ else:
+ if cases_line.isspace():
+ include = True
+
+ cases_file.close()
+ result_file.close()
+
+ @staticmethod
+ def file_is_empty(file_name):
+ try:
+ if os.stat(file_name).st_size > 0:
+ return False
+ except:
+ pass
+
+ return True
+
+ def _run_task(self, test_name):
+ logger.info('Starting test scenario "{}" ...'.format(test_name))
+
+ task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
+ if not os.path.exists(task_file):
+ logger.error("Task file '%s' does not exist." % task_file)
+ raise Exception("Task file '%s' does not exist." % task_file)
+
+ file_name = self._prepare_test_list(test_name)
+ if self.file_is_empty(file_name):
+ logger.info('No tests for scenario "{}"'.format(test_name))
+ return
+
+ cmd_line = ("rally task start --abort-on-sla-failure "
+ "--task {0} "
+ "--task-args \"{1}\""
+ .format(task_file, self._build_task_args(test_name)))
+ logger.debug('running command line: {}'.format(cmd_line))
+
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, shell=True)
+ output = self._get_output(p, test_name)
+ task_id = self.get_task_id(output)
+ logger.debug('task_id : {}'.format(task_id))
+
+ if task_id is None:
+ logger.error('Failed to retrieve task_id, validating task...')
+ cmd_line = ("rally task validate "
+ "--task {0} "
+ "--task-args \"{1}\""
+ .format(task_file, self._build_task_args(test_name)))
+ logger.debug('running command line: {}'.format(cmd_line))
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, shell=True)
+ output = self.get_cmd_output(p)
+ logger.error("Task validation result:" + "\n" + output)
+ return
+
+ # check for result directory and create it otherwise
+ if not os.path.exists(self.RESULTS_DIR):
+ logger.debug('{} does not exist, we create it.'
+ .format(self.RESULTS_DIR))
+ os.makedirs(self.RESULTS_DIR)
+
+ # write html report file
+ report_html_name = 'opnfv-{}.html'.format(test_name)
+ report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+ cmd_line = "rally task report {} --out {}".format(task_id,
+ report_html_dir)
+
+ logger.debug('running command line: {}'.format(cmd_line))
+ os.popen(cmd_line)
+
+ # get and save rally operation JSON result
+ cmd_line = "rally task results %s" % task_id
+ logger.debug('running command line: {}'.format(cmd_line))
+ cmd = os.popen(cmd_line)
+ json_results = cmd.read()
+ report_json_name = 'opnfv-{}.json'.format(test_name)
+ report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
+ with open(report_json_dir, 'w') as f:
+ logger.debug('saving json file')
+ f.write(json_results)
+
+ """ parse JSON operation result """
+ if self.task_succeed(json_results):
+ logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
+ else:
+ logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+
+ def _get_output(self, proc, test_name):
+ result = ""
+ nb_tests = 0
+ overall_duration = 0.0
+ success = 0.0
+ nb_totals = 0
+
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if ("Load duration" in line or
+ "started" in line or
+ "finished" in line or
+ " Preparing" in line or
+ "+-" in line or
+ "|" in line):
+ result += line
+ elif "test scenario" in line:
+ result += "\n" + line
+ elif "Full duration" in line:
+ result += line + "\n\n"
+
+ # parse output for summary report
+ if ("| " in line and
+ "| action" not in line and
+ "| Starting" not in line and
+ "| Completed" not in line and
+ "| ITER" not in line and
+ "| " not in line and
+ "| total" not in line):
+ nb_tests += 1
+ elif "| total" in line:
+ percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+ try:
+ success += float(percentage)
+ except ValueError:
+ logger.info('Percentage error: %s, %s' %
+ (percentage, line))
+ nb_totals += 1
+ elif "Full duration" in line:
+ duration = line.split(': ')[1]
+ try:
+ overall_duration += float(duration)
+ except ValueError:
+ logger.info('Duration error: %s, %s' % (duration, line))
+
+ overall_duration = "{:10.2f}".format(overall_duration)
+ if nb_totals == 0:
+ success_avg = 0
+ else:
+ success_avg = "{:0.2f}".format(success / nb_totals)
+
+ scenario_summary = {'test_name': test_name,
+ 'overall_duration': overall_duration,
+ 'nb_tests': nb_tests,
+ 'success': success_avg}
+ self.summary.append(scenario_summary)
+
+ logger.debug("\n" + result)
+
+ return result
+
+ def _prepare_env(self):
+ logger.debug('Validating the test name...')
+ if not (self.test_name in self.TESTS):
+ raise Exception("Test name '%s' is invalid" % self.test_name)
+
+ volume_types = os_utils.list_volume_types(self.cinder_client,
+ private=False)
+ if volume_types:
+ logger.debug("Using existing volume type(s)...")
+ else:
+ logger.debug('Creating volume type...')
+ self.volume_type = os_utils.create_volume_type(
+ self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
+ if self.volume_type is None:
+ raise Exception("Failed to create volume type '%s'" %
+ self.CINDER_VOLUME_TYPE_NAME)
+ logger.debug("Volume type '%s' is created succesfully." %
+ self.CINDER_VOLUME_TYPE_NAME)
+
+ logger.debug('Getting or creating image...')
+ self.image_exists, self.image_id = os_utils.get_or_create_image(
+ self.GLANCE_IMAGE_NAME,
+ self.GLANCE_IMAGE_PATH,
+ self.GLANCE_IMAGE_FORMAT)
+ if self.image_id is None:
+ raise Exception("Failed to get or create image '%s'" %
+ self.GLANCE_IMAGE_NAME)
+
+ logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
+ self.network_dict = os_utils.create_shared_network_full(
+ self.RALLY_PRIVATE_NET_NAME,
+ self.RALLY_PRIVATE_SUBNET_NAME,
+ self.RALLY_ROUTER_NAME,
+ self.RALLY_PRIVATE_SUBNET_CIDR)
+ if self.network_dict is None:
+ raise Exception("Failed to create shared network '%s'" %
+ self.RALLY_PRIVATE_NET_NAME)
+
+ def _run_tests(self):
+ if self.test_name == 'all':
+ for test in self.TESTS:
+ if (test == 'all' or test == 'vm'):
+ continue
+ self._run_task(test)
+ else:
+ self._run_task(self.test_name)
+
+ def _generate_report(self):
+ report = (
+ "\n"
+ " "
+ "\n"
+ " Rally Summary Report\n"
+ "\n"
+ "+===================+============+===============+===========+"
+ "\n"
+ "| Module | Duration | nb. Test Run | Success |"
+ "\n"
+ "+===================+============+===============+===========+"
+ "\n")
+ payload = []
+
+ # for each scenario we draw a row for the table
+ total_duration = 0.0
+ total_nb_tests = 0
+ total_success = 0.0
+ for s in self.summary:
+ name = "{0:<17}".format(s['test_name'])
+ duration = float(s['overall_duration'])
+ total_duration += duration
+ duration = time.strftime("%M:%S", time.gmtime(duration))
+ duration = "{0:<10}".format(duration)
+ nb_tests = "{0:<13}".format(s['nb_tests'])
+ total_nb_tests += int(s['nb_tests'])
+ success = "{0:<10}".format(str(s['success']) + '%')
+ total_success += float(s['success'])
+ report += ("" +
+ "| " + name + " | " + duration + " | " +
+ nb_tests + " | " + success + "|\n" +
+ "+-------------------+------------"
+ "+---------------+-----------+\n")
+ payload.append({'module': name,
+ 'details': {'duration': s['overall_duration'],
+ 'nb tests': s['nb_tests'],
+ 'success': s['success']}})
+
+ total_duration_str = time.strftime("%H:%M:%S",
+ time.gmtime(total_duration))
+ total_duration_str2 = "{0:<10}".format(total_duration_str)
+ total_nb_tests_str = "{0:<13}".format(total_nb_tests)
+
+ if len(self.summary):
+ success_rate = total_success / len(self.summary)
+ else:
+ success_rate = 100
+ success_rate = "{:0.2f}".format(success_rate)
+ success_rate_str = "{0:<10}".format(str(success_rate) + '%')
+ report += ("+===================+============"
+ "+===============+===========+")
+ report += "\n"
+ report += ("| TOTAL: | " + total_duration_str2 + " | " +
+ total_nb_tests_str + " | " + success_rate_str + "|\n")
+ report += ("+===================+============"
+ "+===============+===========+")
+ report += "\n"
+
+ logger.info("\n" + report)
+ payload.append({'summary': {'duration': total_duration,
+ 'nb tests': total_nb_tests,
+ 'nb success': success_rate}})
+
+ self.criteria = ft_utils.check_success_rate(
+ self.case_name, success_rate)
+ self.details = payload
+
+ logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
+ % (self.case_name, success_rate, self.criteria))
+
+ def _clean_up(self):
+ if self.volume_type:
+ logger.debug("Deleting volume type '%s'..." % self.volume_type)
+ os_utils.delete_volume_type(self.cinder_client, self.volume_type)
+
+ if not self.image_exists:
+ logger.debug("Deleting image '%s' with ID '%s'..."
+ % (self.GLANCE_IMAGE_NAME, self.image_id))
+ if not os_utils.delete_glance_image(self.nova_client,
+ self.image_id):
+ logger.error("Error deleting the glance image")
+
+ def run(self):
+ self.start_time = time.time()
+ try:
+ self._prepare_env()
+ self._run_tests()
+ self._generate_report()
+ self._clean_up()
+ except Exception as e:
+ logger.error('Error with run: %s' % e)
+ return testcase_base.TestcaseBase.EX_RUN_ERROR
+ self.stop_time = time.time()
+
+
+class RallySanity(RallyBase):
+ def __init__(self):
+ super(RallySanity, self).__init__()
+ self.case_name = 'rally_sanity'
+ self.mode = 'sanity'
+ self.test_name = 'all'
+ self.smoke = True
+ self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
+
+
+class RallyFull(RallyBase):
+ def __init__(self):
+ super(RallyFull, self).__init__()
+ self.case_name = 'rally_full'
+ self.mode = 'full'
+ self.test_name = 'all'
+ self.smoke = False
+ self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
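
For reference, task_succeed() above only checks that every entry of the Rally
JSON report exposes a 'result' list whose iterations carry an empty 'error'
list. A trimmed-down, hypothetical sample illustrating both outcomes::

    from functest.opnfv_tests.openstack.rally.rally import RallyBase

    sample_ok = '[{"result": [{"error": []}, {"error": []}]}]'
    sample_ko = '[{"result": [{"error": ["timeout"]}]}]'

    print(RallyBase.task_succeed(sample_ok))   # True: no iteration reported an error
    print(RallyBase.task_succeed(sample_ko))   # False: at least one error entry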
diff --git a/functest/opnfv_tests/openstack/rally/run_rally-cert.py b/functest/opnfv_tests/openstack/rally/run_rally-cert.py
deleted file mode 100755
index b02fd4270..000000000
--- a/functest/opnfv_tests/openstack/rally/run_rally-cert.py
+++ /dev/null
@@ -1,613 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import argparse
-import json
-import os
-import re
-import subprocess
-import time
-
-import iniparse
-import yaml
-
-from functest.utils.constants import CONST
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
-parser = argparse.ArgumentParser()
-parser.add_argument("test_name",
- help="Module name to be tested. "
- "Possible values are : "
- "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
- "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
- "{d[10]} ] "
- "The 'all' value "
- "performs all possible test scenarios"
- .format(d=tests))
-
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-s", "--smoke",
- help="Smoke test mode",
- action="store_true")
-parser.add_argument("-v", "--verbose",
- help="Print verbose info about the progress",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-parser.add_argument("-z", "--sanity",
- help="Sanity test mode, execute only a subset of tests",
- action="store_true")
-
-args = parser.parse_args()
-
-
-if args.verbose:
- RALLY_STDERR = subprocess.STDOUT
-else:
- RALLY_STDERR = open(os.devnull, 'w')
-
-""" logging configuration """
-logger = ft_logger.Logger("run_rally-cert").getLogger()
-
-RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
-RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
-SANITY_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "sanity")
-FULL_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "full")
-TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
-SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
-TEMP_DIR = os.path.join(RALLY_DIR, "var")
-BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
-
-FLAVOR_NAME = "m1.tiny"
-USERS_AMOUNT = 2
-TENANTS_AMOUNT = 3
-ITERATIONS_AMOUNT = 10
-CONCURRENCY = 4
-
-RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
-TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
- 'tempest/tempest.conf')
-
-RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
-RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
-RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
-RALLY_ROUTER_NAME = CONST.rally_router_name
-
-GLANCE_IMAGE_NAME = CONST.openstack_image_name
-GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
-GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
-GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
- GLANCE_IMAGE_FILENAME)
-CINDER_VOLUME_TYPE_NAME = "volume_test"
-
-
-class GlobalVariables:
- SUMMARY = []
- neutron_client = None
- network_dict = {}
-
-
-def get_task_id(cmd_raw):
- """
- get task id from command rally result
- :param cmd_raw:
- :return: task_id as string
- """
- taskid_re = re.compile('^Task +(.*): started$')
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line)
- if match:
- return match.group(1)
- return None
-
-
-def task_succeed(json_raw):
- """
- Parse JSON from rally JSON results
- :param json_raw:
- :return: Bool
- """
- rally_report = json.loads(json_raw)
- for report in rally_report:
- if report is None or report.get('result') is None:
- return False
-
- for result in report.get('result'):
- if result is None or len(result.get('error')) > 0:
- return False
-
- return True
-
-
-def live_migration_supported():
- config = iniparse.ConfigParser()
- if (config.read(TEMPEST_CONF_FILE) and
- config.has_section('compute-feature-enabled') and
- config.has_option('compute-feature-enabled', 'live_migration')):
- return config.getboolean('compute-feature-enabled', 'live_migration')
-
- return False
-
-
-def build_task_args(test_file_name):
- task_args = {'service_list': [test_file_name]}
- task_args['image_name'] = GLANCE_IMAGE_NAME
- task_args['flavor_name'] = FLAVOR_NAME
- task_args['glance_image_location'] = GLANCE_IMAGE_PATH
- task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
- task_args['tmpl_dir'] = TEMPLATE_DIR
- task_args['sup_dir'] = SUPPORT_DIR
- task_args['users_amount'] = USERS_AMOUNT
- task_args['tenants_amount'] = TENANTS_AMOUNT
- task_args['use_existing_users'] = False
- task_args['iterations'] = ITERATIONS_AMOUNT
- task_args['concurrency'] = CONCURRENCY
-
- if args.sanity:
- task_args['smoke'] = True
- else:
- task_args['smoke'] = args.smoke
-
- ext_net = os_utils.get_external_net(GlobalVariables.neutron_client)
- if ext_net:
- task_args['floating_network'] = str(ext_net)
- else:
- task_args['floating_network'] = ''
-
- net_id = GlobalVariables.network_dict['net_id']
- task_args['netid'] = str(net_id)
-
- auth_url = CONST.OS_AUTH_URL
- if auth_url is not None:
- task_args['request_url'] = auth_url.rsplit(":", 1)[0]
- else:
- task_args['request_url'] = ''
-
- return task_args
-
-
-def get_output(proc, test_name):
- result = ""
- nb_tests = 0
- overall_duration = 0.0
- success = 0.0
- nb_totals = 0
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- if args.verbose:
- result += line
- else:
- if ("Load duration" in line or
- "started" in line or
- "finished" in line or
- " Preparing" in line or
- "+-" in line or
- "|" in line):
- result += line
- elif "test scenario" in line:
- result += "\n" + line
- elif "Full duration" in line:
- result += line + "\n\n"
-
- # parse output for summary report
- if ("| " in line and
- "| action" not in line and
- "| Starting" not in line and
- "| Completed" not in line and
- "| ITER" not in line and
- "| " not in line and
- "| total" not in line):
- nb_tests += 1
- elif "| total" in line:
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- try:
- success += float(percentage)
- except ValueError:
- logger.info('Percentage error: %s, %s' % (percentage, line))
- nb_totals += 1
- elif "Full duration" in line:
- duration = line.split(': ')[1]
- try:
- overall_duration += float(duration)
- except ValueError:
- logger.info('Duration error: %s, %s' % (duration, line))
-
- overall_duration = "{:10.2f}".format(overall_duration)
- if nb_totals == 0:
- success_avg = 0
- else:
- success_avg = "{:0.2f}".format(success / nb_totals)
-
- scenario_summary = {'test_name': test_name,
- 'overall_duration': overall_duration,
- 'nb_tests': nb_tests,
- 'success': success_avg}
- GlobalVariables.SUMMARY.append(scenario_summary)
-
- logger.debug("\n" + result)
-
- return result
-
-
-def get_cmd_output(proc):
- result = ""
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- result += line
-
- return result
-
-
-def excl_scenario():
- black_tests = []
-
- try:
- with open(BLACKLIST_FILE, 'r') as black_list_file:
- black_list_yaml = yaml.safe_load(black_list_file)
-
- installer_type = CONST.INSTALLER_TYPE
- deploy_scenario = CONST.DEPLOY_SCENARIO
- if (bool(installer_type) * bool(deploy_scenario)):
- if 'scenario' in black_list_yaml.keys():
- for item in black_list_yaml['scenario']:
- scenarios = item['scenarios']
- installers = item['installers']
- if (deploy_scenario in scenarios and
- installer_type in installers):
- tests = item['tests']
- black_tests.extend(tests)
- except:
- logger.debug("Scenario exclusion not applied.")
-
- return black_tests
-
-
-def excl_func():
- black_tests = []
- func_list = []
-
- try:
- with open(BLACKLIST_FILE, 'r') as black_list_file:
- black_list_yaml = yaml.safe_load(black_list_file)
-
- if not live_migration_supported():
- func_list.append("no_live_migration")
-
- if 'functionality' in black_list_yaml.keys():
- for item in black_list_yaml['functionality']:
- functions = item['functions']
- for func in func_list:
- if func in functions:
- tests = item['tests']
- black_tests.extend(tests)
- except:
- logger.debug("Functionality exclusion not applied.")
-
- return black_tests
-
-
-def apply_blacklist(case_file_name, result_file_name):
- logger.debug("Applying blacklist...")
- cases_file = open(case_file_name, 'r')
- result_file = open(result_file_name, 'w')
-
- black_tests = list(set(excl_func() + excl_scenario()))
-
- include = True
- for cases_line in cases_file:
- if include:
- for black_tests_line in black_tests:
- if re.search(black_tests_line, cases_line.strip().rstrip(':')):
- include = False
- break
- else:
- result_file.write(str(cases_line))
- else:
- if cases_line.isspace():
- include = True
-
- cases_file.close()
- result_file.close()
-
-
-def prepare_test_list(test_name):
- test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
- scenario_file_name = os.path.join(RALLY_SCENARIO_DIR, test_yaml_file_name)
-
- if not os.path.exists(scenario_file_name):
- if args.sanity:
- scenario_file_name = os.path.join(SANITY_MODE_DIR,
- test_yaml_file_name)
- else:
- scenario_file_name = os.path.join(FULL_MODE_DIR,
- test_yaml_file_name)
-
- if not os.path.exists(scenario_file_name):
- logger.info("The scenario '%s' does not exist."
- % scenario_file_name)
- exit(-1)
-
- logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
- test_file_name = os.path.join(TEMP_DIR, test_yaml_file_name)
-
- if not os.path.exists(TEMP_DIR):
- os.makedirs(TEMP_DIR)
-
- apply_blacklist(scenario_file_name, test_file_name)
- return test_file_name
-
-
-def file_is_empty(file_name):
- try:
- if os.stat(file_name).st_size > 0:
- return False
- except:
- pass
-
- return True
-
-
-def run_task(test_name):
- #
- # the "main" function of the script who launch rally for a task
- # :param test_name: name for the rally test
- # :return: void
- #
- logger.info('Starting test scenario "{}" ...'.format(test_name))
- start_time = time.time()
-
- task_file = os.path.join(RALLY_DIR, 'task.yaml')
- if not os.path.exists(task_file):
- logger.error("Task file '%s' does not exist." % task_file)
- exit(-1)
-
- file_name = prepare_test_list(test_name)
- if file_is_empty(file_name):
- logger.info('No tests for scenario "{}"'.format(test_name))
- return
-
- cmd_line = ("rally task start --abort-on-sla-failure "
- "--task {0} "
- "--task-args \"{1}\""
- .format(task_file, build_task_args(test_name)))
- logger.debug('running command line: {}'.format(cmd_line))
-
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=RALLY_STDERR, shell=True)
- output = get_output(p, test_name)
- task_id = get_task_id(output)
- logger.debug('task_id : {}'.format(task_id))
-
- if task_id is None:
- logger.error('Failed to retrieve task_id, validating task...')
- cmd_line = ("rally task validate "
- "--task {0} "
- "--task-args \"{1}\""
- .format(task_file, build_task_args(test_name)))
- logger.debug('running command line: {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=True)
- output = get_cmd_output(p)
- logger.error("Task validation result:" + "\n" + output)
- return
-
- # check for result directory and create it otherwise
- if not os.path.exists(RESULTS_DIR):
- logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
- os.makedirs(RESULTS_DIR)
-
- # write html report file
- report_html_name = 'opnfv-{}.html'.format(test_name)
- report_html_dir = os.path.join(RESULTS_DIR, report_html_name)
- cmd_line = "rally task report {} --out {}".format(task_id,
- report_html_dir)
-
- logger.debug('running command line: {}'.format(cmd_line))
- os.popen(cmd_line)
-
- # get and save rally operation JSON result
- cmd_line = "rally task results %s" % task_id
- logger.debug('running command line: {}'.format(cmd_line))
- cmd = os.popen(cmd_line)
- json_results = cmd.read()
- report_json_name = 'opnfv-{}.json'.format(test_name)
- report_json_dir = os.path.join(RESULTS_DIR, report_json_name)
- with open(report_json_dir, 'w') as f:
- logger.debug('saving json file')
- f.write(json_results)
-
- with open(report_json_dir) as json_file:
- json_data = json.load(json_file)
-
- """ parse JSON operation result """
- status = "FAIL"
- if task_succeed(json_results):
- logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
- status = "PASS"
- else:
- logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
-
- # Push results in payload of testcase
- if args.report:
- stop_time = time.time()
- logger.debug("Push Rally detailed results into DB")
- ft_utils.push_results_to_db("functest",
- "Rally_details",
- start_time,
- stop_time,
- status,
- json_data)
-
-
-def main():
-
- GlobalVariables.nova_client = os_utils.get_nova_client()
- GlobalVariables.neutron_client = os_utils.get_neutron_client()
- cinder_client = os_utils.get_cinder_client()
-
- start_time = time.time()
-
- # configure script
- if not (args.test_name in tests):
- logger.error('argument not valid')
- exit(-1)
-
- GlobalVariables.SUMMARY = []
-
- volume_types = os_utils.list_volume_types(cinder_client,
- private=False)
- if not volume_types:
- volume_type = os_utils.create_volume_type(
- cinder_client, CINDER_VOLUME_TYPE_NAME)
- if not volume_type:
- logger.error("Failed to create volume type...")
- exit(-1)
- else:
- logger.debug("Volume type '%s' created succesfully..."
- % CINDER_VOLUME_TYPE_NAME)
- else:
- logger.debug("Using existing volume type(s)...")
-
- image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH,
- GLANCE_IMAGE_FORMAT)
- if not image_id:
- exit(-1)
-
- logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
- GlobalVariables.network_dict = \
- os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
- RALLY_PRIVATE_SUBNET_NAME,
- RALLY_ROUTER_NAME,
- RALLY_PRIVATE_SUBNET_CIDR)
- if not GlobalVariables.network_dict:
- exit(1)
-
- if args.test_name == "all":
- for test_name in tests:
- if not (test_name == 'all' or
- test_name == 'vm'):
- run_task(test_name)
- else:
- logger.debug("Test name: " + args.test_name)
- run_task(args.test_name)
-
- report = ("\n"
- " "
- "\n"
- " Rally Summary Report\n"
- "\n"
- "+===================+============+===============+===========+"
- "\n"
- "| Module | Duration | nb. Test Run | Success |"
- "\n"
- "+===================+============+===============+===========+"
- "\n")
- payload = []
- stop_time = time.time()
-
- # for each scenario we draw a row for the table
- total_duration = 0.0
- total_nb_tests = 0
- total_success = 0.0
- for s in GlobalVariables.SUMMARY:
- name = "{0:<17}".format(s['test_name'])
- duration = float(s['overall_duration'])
- total_duration += duration
- duration = time.strftime("%M:%S", time.gmtime(duration))
- duration = "{0:<10}".format(duration)
- nb_tests = "{0:<13}".format(s['nb_tests'])
- total_nb_tests += int(s['nb_tests'])
- success = "{0:<10}".format(str(s['success']) + '%')
- total_success += float(s['success'])
- report += ("" +
- "| " + name + " | " + duration + " | " +
- nb_tests + " | " + success + "|\n" +
- "+-------------------+------------"
- "+---------------+-----------+\n")
- payload.append({'module': name,
- 'details': {'duration': s['overall_duration'],
- 'nb tests': s['nb_tests'],
- 'success': s['success']}})
-
- total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
- total_duration_str2 = "{0:<10}".format(total_duration_str)
- total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
- if len(GlobalVariables.SUMMARY):
- success_rate = total_success / len(GlobalVariables.SUMMARY)
- else:
- success_rate = 100
- success_rate = "{:0.2f}".format(success_rate)
- success_rate_str = "{0:<10}".format(str(success_rate) + '%')
- report += "+===================+============+===============+===========+"
- report += "\n"
- report += ("| TOTAL: | " + total_duration_str2 + " | " +
- total_nb_tests_str + " | " + success_rate_str + "|\n")
- report += "+===================+============+===============+===========+"
- report += "\n"
-
- logger.info("\n" + report)
- payload.append({'summary': {'duration': total_duration,
- 'nb tests': total_nb_tests,
- 'nb success': success_rate}})
-
- if args.sanity:
- case_name = "rally_sanity"
- else:
- case_name = "rally_full"
-
- # Evaluation of the success criteria
- status = ft_utils.check_success_rate(case_name, success_rate)
-
- exit_code = -1
- if status == "PASS":
- exit_code = 0
-
- if args.report:
- logger.debug("Pushing Rally summary into DB...")
- ft_utils.push_results_to_db("functest",
- case_name,
- start_time,
- stop_time,
- status,
- payload)
- if args.noclean:
- exit(exit_code)
-
- if not image_exists:
- logger.debug("Deleting image '%s' with ID '%s'..."
- % (GLANCE_IMAGE_NAME, image_id))
- if not os_utils.delete_glance_image(GlobalVariables.nova_client,
- image_id):
- logger.error("Error deleting the glance image")
-
- if not volume_types:
- logger.debug("Deleting volume type '%s'..."
- % CINDER_VOLUME_TYPE_NAME)
- if not os_utils.delete_volume_type(cinder_client, volume_type):
- logger.error("Error in deleting volume type...")
-
- exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
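
Editor's note: the removed script extracted the Rally task id straight from the 'rally task start' output before fetching the JSON results, and the same parsing survives in the class-based rally.py. A standalone sketch of that step, with the regex taken from the deleted get_task_id() and a purely illustrative output sample:

    import re

    TASKID_RE = re.compile(r'^Task +(.*): started$')


    def get_task_id(cmd_raw):
        """Return the Rally task id found in raw 'rally task start' output."""
        for line in cmd_raw.splitlines(True):
            match = TASKID_RE.match(line.strip())
            if match:
                return match.group(1)
        return None


    # Illustrative output only; a real run prints many more lines.
    sample = ("Benchmarking... This can take a while...\n"
              "Task 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996: started\n")
    print(get_task_id(sample))  # 6fd9a19f-5cf8-4f76-ab72-2e34bb1d4996
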
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index 67b527968..4c5e8663a 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -11,10 +11,12 @@ import ConfigParser
import os
import re
import shutil
+import subprocess
import opnfv.utils.constants as releng_constants
from functest.utils.constants import CONST
+import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
@@ -39,8 +41,74 @@ TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
CI_INSTALLER_TYPE = CONST.INSTALLER_TYPE
CI_INSTALLER_IP = CONST.INSTALLER_IP
+""" logging configuration """
+logger = ft_logger.Logger("Tempest").getLogger()
-def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
+
+def get_verifier_id():
+ """
+    Returns verifier id for current Tempest
+ """
+ cmd = ("rally verify list-verifiers | awk '/" +
+ CONST.tempest_deployment_name +
+ "/ {print $2}'")
+ p = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ deployment_uuid = p.stdout.readline().rstrip()
+ if deployment_uuid == "":
+ logger.error("Tempest verifier not found.")
+ raise Exception('Error with command:%s' % cmd)
+ return deployment_uuid
+
+
+def get_verifier_deployment_id():
+ """
+ Returns deployment id for active Rally deployment
+ """
+ cmd = ("rally deployment list | awk '/" +
+ CONST.rally_deployment_name +
+ "/ {print $2}'")
+ p = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ deployment_uuid = p.stdout.readline().rstrip()
+ if deployment_uuid == "":
+ logger.error("Rally deployment not found.")
+ raise Exception('Error with command:%s' % cmd)
+ return deployment_uuid
+
+
+def get_verifier_repo_dir(verifier_id):
+ """
+    Returns installed verifier repo directory for Tempest
+ """
+ if not verifier_id:
+ verifier_id = get_verifier_id()
+
+ return os.path.join(CONST.dir_rally_inst,
+ 'verification',
+ 'verifier-{}'.format(verifier_id),
+ 'repo')
+
+
+def get_verifier_deployment_dir(verifier_id, deployment_id):
+ """
+ Returns Rally deployment directory for current verifier
+ """
+ if not verifier_id:
+ verifier_id = get_verifier_id()
+
+ if not deployment_id:
+ deployment_id = get_verifier_deployment_id()
+
+ return os.path.join(CONST.dir_rally_inst,
+ 'verification',
+ 'verifier-{}'.format(verifier_id),
+ 'for-deployment-{}'.format(deployment_id))
+
+
+def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
"""
Add/update needed parameters into tempest.conf file generated by Rally
"""
@@ -82,6 +150,8 @@ def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
config.set('identity', 'password', CONST.tempest_identity_user_password)
config.set(
'validation', 'ssh_timeout', CONST.tempest_validation_ssh_timeout)
+ config.set('object-storage', 'operator_role',
+ CONST.tempest_object_storage_operator_role)
if CONST.OS_ENDPOINT_TYPE is not None:
services_list = ['compute',
@@ -108,12 +178,12 @@ def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
return releng_constants.EXIT_OK
-def configure_tempest_multisite(logger, deployment_dir):
+def configure_tempest_multisite(deployment_dir):
"""
Add/update needed parameters into tempest.conf file generated by Rally
"""
logger.debug("configure the tempest")
- configure_tempest(logger, deployment_dir)
+ configure_tempest(deployment_dir)
logger.debug("Finding tempest.conf file...")
tempest_conf_old = os.path.join(deployment_dir, 'tempest.conf')
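
Editor's note: the verifier helpers moved into conf_utils shell out to the rally CLI (piped through awk), so they only need the deployment names from CONST. A hedged sketch of how the helpers introduced in this hunk are meant to chain together when preparing a Tempest run:

    from functest.opnfv_tests.openstack.tempest import conf_utils

    # Resolve the active verifier and Rally deployment once, then derive the
    # directories holding the Tempest repo and the generated tempest.conf.
    verifier_id = conf_utils.get_verifier_id()
    deployment_id = conf_utils.get_verifier_deployment_id()

    repo_dir = conf_utils.get_verifier_repo_dir(verifier_id)
    deployment_dir = conf_utils.get_verifier_deployment_dir(verifier_id,
                                                            deployment_id)

    # configure_tempest() no longer takes a logger argument; it patches the
    # tempest.conf found under deployment_dir in place.
    conf_utils.configure_tempest(deployment_dir)
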
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
index 5c8581f66..0a4256ce6 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
@@ -74,23 +74,3 @@
- tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
--
- # https://bugs.launchpad.net/tempest/+bug/1586931
- scenarios:
- - os-odl_l2-nofeature-ha
- - os-odl_l2-nofeature-noha
- - os-odl_l2-sfc-ha
- - os-odl_l2-sfc-noha
- - os-odl_l3-nofeature-ha
- - os-odl_l3-nofeature-noha
- - os-nosdn-kvm-ha
- - os-nosdn-kvm-noha
- - os-nosdn-nofeature-ha
- - os-nosdn-nofeature-noha
- - os-nosdn-ovs-ha
- - os-nosdn-ovs-noha
- installers:
- - fuel
- tests:
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
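
Editor's note: the removed block tracked a Tempest bug that no longer needs blacklisting on the listed fuel scenarios; remaining entries keep the same shape, which the Rally exclusion helpers load with yaml.safe_load(). A small sketch of that lookup, using an entry copied from the lines deleted above (values are only illustrative of the format):

    import yaml

    blacklist_snippet = """
    scenario:
        -
            scenarios:
                - os-odl_l2-nofeature-ha
            installers:
                - fuel
            tests:
                - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
    """

    black_list_yaml = yaml.safe_load(blacklist_snippet)
    for item in black_list_yaml['scenario']:
        # Same membership test the Rally exclusion code applies per entry.
        if ('os-odl_l2-nofeature-ha' in item['scenarios'] and
                'fuel' in item['installers']):
            print(item['tests'])
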
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 0014b7187..2bdbe47f7 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -35,74 +35,15 @@ class TempestCommon(testcase_base.TestcaseBase):
self.OPTION = ""
self.FLAVOR_ID = None
self.IMAGE_ID = None
- self.VERIFIER_ID = self.get_verifier_id()
- self.VERIFIER_REPO_DIR = self.get_verifier_repo_dir()
- self.DEPLOYMENT_ID = self.get_verifier_deployment_id()
- self.DEPLOYMENT_DIR = self.get_verifier_deployment_dir()
+ self.VERIFIER_ID = conf_utils.get_verifier_id()
+ self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
+ self.VERIFIER_ID)
+ self.DEPLOYMENT_ID = conf_utils.get_verifier_deployment_id()
+ self.DEPLOYMENT_DIR = conf_utils.get_verifier_deployment_dir(
+ self.VERIFIER_ID, self.DEPLOYMENT_ID)
self.VERIFICATION_ID = None
@staticmethod
- def get_verifier_id():
- """
- Returns verifer id for current Tempest
- """
- cmd = ("rally verify list-verifiers | awk '/" +
- CONST.tempest_deployment_name +
- "/ {print $2}'")
- p = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- deployment_uuid = p.stdout.readline().rstrip()
- if deployment_uuid == "":
- logger.error("Tempest verifier not found.")
- raise Exception('Error with command:%s' % cmd)
- return deployment_uuid
-
- @staticmethod
- def get_verifier_deployment_id():
- """
- Returns deployment id for active Rally deployment
- """
- cmd = ("rally deployment list | awk '/" +
- CONST.rally_deployment_name +
- "/ {print $2}'")
- p = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- deployment_uuid = p.stdout.readline().rstrip()
- if deployment_uuid == "":
- logger.error("Rally deployment not found.")
- raise Exception('Error with command:%s' % cmd)
- return deployment_uuid
-
- def get_verifier_repo_dir(self):
- """
- Returns installed verfier repo directory for Tempest
- """
- if not self.VERIFIER_ID:
- self.VERIFIER_ID = self.get_verifier_id()
-
- return os.path.join(CONST.dir_rally_inst,
- 'verification',
- 'verifier-{}'.format(self.VERIFIER_ID),
- 'repo')
-
- def get_verifier_deployment_dir(self):
- """
- Returns Rally deployment directory for current verifier
- """
- if not self.VERIFIER_ID:
- self.VERIFIER_ID = self.get_verifier_id()
-
- if not self.DEPLOYMENT_ID:
- self.DEPLOYMENT_ID = self.get_verifier_deployment_id()
-
- return os.path.join(CONST.dir_rally_inst,
- 'verification',
- 'verifier-{}'.format(self.VERIFIER_ID),
- 'for-deployment-{}'.format(self.DEPLOYMENT_ID))
-
- @staticmethod
def read_file(filename):
with open(filename) as src:
return [line.strip() for line in src.readlines()]
@@ -321,8 +262,7 @@ class TempestCommon(testcase_base.TestcaseBase):
if res != testcase_base.TestcaseBase.EX_OK:
return res
- res = conf_utils.configure_tempest(logger,
- self.DEPLOYMENT_DIR,
+ res = conf_utils.configure_tempest(self.DEPLOYMENT_DIR,
self.IMAGE_ID,
self.FLAVOR_ID)
if res != testcase_base.TestcaseBase.EX_OK:
@@ -341,11 +281,6 @@ class TempestCommon(testcase_base.TestcaseBase):
self.stop_time = time.time()
- if self.criteria == "PASS":
- return testcase_base.TestcaseBase.EX_OK
- else:
- return testcase_base.TestcaseBase.EX_TESTCASE_FAILED
-
class TempestSmokeSerial(TempestCommon):
@@ -380,7 +315,7 @@ class TempestMultisite(TempestCommon):
self.case_name = "multisite"
self.MODE = "feature_multisite"
self.OPTION = "--concurrency 1"
- conf_utils.configure_tempest_multisite(logger, self.DEPLOYMENT_DIR)
+ conf_utils.configure_tempest_multisite(self.DEPLOYMENT_DIR)
class TempestCustom(TempestCommon):
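
Editor's note: the run() epilogue that mapped self.criteria to an exit code was dropped here, so callers now inspect the criteria themselves once run() returns. A minimal sketch of that calling pattern, assuming the TestcaseBase constants used elsewhere in this diff and that the result-parsing step sets criteria to "PASS" on success:

    from functest.core import testcase_base
    from functest.opnfv_tests.openstack.tempest import tempest

    case = tempest.TempestSmokeSerial()
    ret = case.run()

    # run() no longer converts pass/fail criteria into an exit code; the
    # caller decides how to interpret the outcome.
    if ret == testcase_base.TestcaseBase.EX_RUN_ERROR:
        print("tempest run error")
    elif case.criteria == "PASS":
        print("tempest smoke passed")
    else:
        print("tempest smoke failed")
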
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 8285d93f8..8285d93f8 100755..100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
diff --git a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
index 349b42a88..090502ba9 100644
--- a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
+++ b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
@@ -232,10 +232,8 @@ class SfcOnos:
else:
return(response.status_code)
- url = ("http://%s:8774//v2.1/%s/ports/"
- "%s/flavors?name=m1.tiny" % (self.nova_hostname,
- self.tenant_id))
-
+ url = ("http://%s:8774/v2.1/%s/flavors?"
+ "name=m1.tiny" % (self.nova_hostname, self.tenant_id))
headers = {"Accept": "application/json", "Content-Type":
"application/json", "X-Auth-Token": self.token_id}
response = requests.get(url, headers=headers)
@@ -344,7 +342,7 @@ class SfcOnos:
def getPortPair(self):
"""Query the Portpair id value."""
for p in range(0, 1):
- url = ("http://%s:9696/%s/ports/"
+ url = ("http://%s:9696/%s/"
"sfc/port_pairs?name=PP1" % (self.neutron_hostname,
self.osver))
headers = {"Accept": "application/json",
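
Editor's note: this hunk corrects two malformed REST URLs, dropping the stray /ports/ segment from both the Nova flavor lookup and the Neutron SFC port-pair query. A hedged sketch of the corrected calls; host names, tenant id and token below are placeholders:

    import requests

    nova_hostname = "192.0.2.10"
    neutron_hostname = "192.0.2.10"
    tenant_id = "<tenant-uuid>"
    osver = "v2.0"
    token_id = "<keystone-token>"
    headers = {"Accept": "application/json",
               "Content-Type": "application/json",
               "X-Auth-Token": token_id}

    # Flavor lookup no longer routes through the bogus /ports/ path segment.
    flavor_url = ("http://%s:8774/v2.1/%s/flavors?name=m1.tiny"
                  % (nova_hostname, tenant_id))
    flavors = requests.get(flavor_url, headers=headers)

    # Port-pair query likewise goes straight to the sfc resource.
    pp_url = ("http://%s:9696/%s/sfc/port_pairs?name=PP1"
              % (neutron_hostname, osver))
    port_pairs = requests.get(pp_url, headers=headers)
    print(flavors.status_code, port_pairs.status_code)
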
diff --git a/functest/opnfv_tests/sdn/onos/teston/onos.py b/functest/opnfv_tests/sdn/onos/teston/onos.py
index 213bdb7dc..2537e18de 100755
--- a/functest/opnfv_tests/sdn/onos/teston/onos.py
+++ b/functest/opnfv_tests/sdn/onos/teston/onos.py
@@ -175,7 +175,7 @@ def CreateImage():
def SfcTest():
- cmd = "python " + ONOS_SFC_PATH + "/Sfc.py"
+ cmd = "python " + ONOS_SFC_PATH + "/sfc.py"
logger.debug("Run sfc tests")
os.system(cmd)
@@ -187,7 +187,7 @@ def GetIp(type):
def Replace(before, after):
- file = "/Sfc_fun.py"
+ file = "/sfc_onos.py"
cmd = "sed -i 's/" + before + "/" + after + "/g' " + ONOS_SFC_PATH + file
os.system(cmd)
@@ -199,7 +199,7 @@ def SetSfcConf():
Replace("glance_ip", GetIp("glance"))
pwd = ft_constants.OS_PASSWORD
Replace("console", pwd)
- creds_neutron = openstack_utils.get_credentials("neutron")
+ creds_neutron = openstack_utils.get_credentials()
neutron_client = neutronclient.Client(**creds_neutron)
ext_net = openstack_utils.get_external_net(neutron_client)
Replace("admin_floating_net", ext_net)
diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py
index 8898b9fc9..f1c265f47 100644
--- a/functest/opnfv_tests/vnf/aaa/aaa.py
+++ b/functest/opnfv_tests/vnf/aaa/aaa.py
@@ -21,8 +21,7 @@ class AaaVnf(vnf_base.VnfOnBoardingBase):
logger = ft_logger.Logger("VNF AAA").getLogger()
def __init__(self):
- super(AaaVnf, self).__init__()
- self.case_name = "aaa"
+ super(AaaVnf, self).__init__(case="aaa")
def deploy_orchestrator(self):
self.logger.info("No VNFM needed to deploy a free radius here")
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index e584519b7..13a5af4fd 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -25,7 +25,8 @@ from orchestrator_cloudify import Orchestrator
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='', repo='', cmd=''):
+ def __init__(self, project='functest', case='cloudify_ims',
+ repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
self.logger = ft_logger.Logger("vIMS").getLogger()
self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
index fa8f9ec98..073a56c37 100644
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ b/functest/opnfv_tests/vnf/ims/opera_ims.py
@@ -21,7 +21,8 @@ from functest.utils.constants import CONST
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='', repo='', cmd=''):
+ def __init__(self, project='functest', case='opera_ims',
+ repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
self.logger = ft_logger.Logger("vIMS").getLogger()
self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
index ebd6c9baf..28f37f053 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
@@ -21,7 +21,8 @@ from functest.utils.constants import CONST
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='', repo='', cmd=''):
+ def __init__(self, project='functest', case='orchestra_ims',
+ repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
self.logger = ft_logger.Logger("vIMS").getLogger()
self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
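
Editor's note: the three IMS VNF wrappers (and AaaVnf above) now bake their case name into the constructor default instead of leaving it empty, so the onboarding base class can resolve the case configuration without extra assignments. A hedged sketch of what that means for a caller, assuming the base class stores the case argument as shown:

    from functest.opnfv_tests.vnf.ims import cloudify_ims

    # The case name no longer has to be patched on after construction;
    # instantiating with defaults is enough for the base class to pick up
    # the 'cloudify_ims' settings (assumption: case is kept as case_name).
    vnf = cloudify_ims.ImsVnf()
    print(vnf.case_name)
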
diff --git a/functest/tests/unit/cli/commands/test_cli_env.py b/functest/tests/unit/cli/commands/test_cli_env.py
index f70761dcd..4b6ea57a7 100644
--- a/functest/tests/unit/cli/commands/test_cli_env.py
+++ b/functest/tests/unit/cli/commands/test_cli_env.py
@@ -11,7 +11,6 @@ import unittest
from git.exc import NoSuchPathError
import mock
-mock.patch('logging.FileHandler').start() # noqa
from functest.cli.commands import cli_env
from functest.utils.constants import CONST
from functest.tests.unit import test_utils
diff --git a/functest/tests/unit/core/test_testcase_base.py b/functest/tests/unit/core/test_testcase_base.py
index 8df524b0f..94d2e966b 100644..100755
--- a/functest/tests/unit/core/test_testcase_base.py
+++ b/functest/tests/unit/core/test_testcase_base.py
@@ -12,8 +12,6 @@ import mock
import os
import unittest
-mock.patch('logging.FileHandler').start() # noqa
-
from functest.core import testcase_base
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index 59ab2c654..568fdc828 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -19,7 +19,6 @@ from robot.errors import DataError, RobotError
from robot.result import testcase
from robot.utils.robottime import timestamp_to_secs
-mock.patch('logging.FileHandler').start() # noqa
from functest.core import testcase_base
from functest.opnfv_tests.sdn.odl import odl
diff --git a/functest/tests/unit/test_logging.ini b/functest/tests/unit/test_logging.ini
new file mode 100644
index 000000000..3d5b947c8
--- /dev/null
+++ b/functest/tests/unit/test_logging.ini
@@ -0,0 +1,27 @@
+[loggers]
+keys=root,functest_logger
+
+[logger_root]
+level=DEBUG
+handlers=console
+
+[logger_functest_logger]
+level=DEBUG
+handlers=console
+qualname=functest.utils.functest_logger
+propagate=0
+
+[handlers]
+keys=console
+
+[handler_console]
+class=StreamHandler
+level=INFO
+formatter=standard
+args=(sys.stdout,)
+
+[formatters]
+keys=standard
+
+[formatter_standard]
+format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
\ No newline at end of file
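
Editor's note: the new INI routes unit-test logging to stdout only, replacing the per-test FileHandler patches deleted above. A minimal sketch of loading it directly, e.g. to reproduce the nosetests behaviour outside the runner; the path is assumed relative to the repo root:

    import logging
    import logging.config

    # Same file that run_unit_tests.sh now passes via --log-config; existing
    # loggers are kept so module-level loggers created on import still work.
    logging.config.fileConfig('functest/tests/unit/test_logging.ini',
                              disable_existing_loggers=False)

    logging.getLogger('functest.utils.functest_logger').info('console only')
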
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index c4b566608..ce9086a7a 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -18,7 +18,6 @@ import mock
import requests
from functest.tests.unit import test_utils
-mock.patch('logging.FileHandler').start() # noqa
from functest.utils import functest_utils
diff --git a/functest/utils/functest_constants.py b/functest/utils/functest_constants.py
index 7fb03e8a2..bd1097855 100644
--- a/functest/utils/functest_constants.py
+++ b/functest/utils/functest_constants.py
@@ -146,6 +146,8 @@ TEMPEST_USER_PASSWORD = get_value('tempest.identity.user_password',
'TEMPEST_USER_PASSWORD')
TEMPEST_SSH_TIMEOUT = get_value('tempest.validation.ssh_timeout',
'TEMPEST_SSH_TIMEOUT')
+TEMPEST_OPERATOR_ROLE = get_value('tempest.object_storage.operator_role',
+ 'TEMPEST_OPERATOR_ROLE')
TEMPEST_USE_CUSTOM_IMAGES = get_value('tempest.use_custom_images',
'TEMPEST_USE_CUSTOM_IMAGES')
TEMPEST_USE_CUSTOM_FLAVORS = get_value('tempest.use_custom_flavors',
@@ -218,8 +220,6 @@ PROMISE_SUBNET_CIDR = get_value('promise.subnet_cidr',
'PROMISE_SUBNET_CIDR')
PROMISE_ROUTER_NAME = get_value('promise.router_name',
'PROMISE_ROUTER_NAME')
-DOCTOR_REPO_DIR = get_value('general.dir.dir_repo_doctor',
- 'DOCTOR_REPO_DIR')
COPPER_REPO_DIR = get_value('general.dir.repo_copper',
'COPPER_REPO_DIR')
EXAMPLE_INSTANCE_NAME = get_value('example.vm_name',
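
Editor's note: TEMPEST_OPERATOR_ROLE follows the existing get_value() pattern, a dotted key into the functest configuration with an environment-variable override. The real get_value() lives earlier in functest_constants.py; the hypothetical sketch below only illustrates the assumed lookup behaviour:

    import os
    import yaml


    def get_value_sketch(config_key, env_var,
                         config_path='config_functest.yaml'):
        # Environment variable wins when exported; otherwise walk the dotted
        # key through the YAML configuration (behaviour assumed from the
        # surrounding entries, not copied from the real helper).
        if os.environ.get(env_var) is not None:
            return os.environ[env_var]
        with open(config_path) as f:
            node = yaml.safe_load(f)
        for part in config_key.split('.'):
            node = node[part]
        return node


    # e.g. get_value_sketch('tempest.object_storage.operator_role',
    #                       'TEMPEST_OPERATOR_ROLE')
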
diff --git a/functest/utils/functest_logger.py b/functest/utils/functest_logger.py
index c0fba082a..f09f56be0 100644..100755
--- a/functest/utils/functest_logger.py
+++ b/functest/utils/functest_logger.py
@@ -20,36 +20,50 @@
# logger = fl.Logger("script_name").getLogger()
# logger.info("message to be shown with - INFO - ")
# logger.debug("message to be shown with - DEBUG -")
-
import logging
+import logging.config
import os
+import json
-class Logger:
- def __init__(self, logger_name):
+from functest.utils.constants import CONST
+
+logger = logging.getLogger(__name__)
+
+
+def is_debug():
+ if CONST.CI_DEBUG and CONST.CI_DEBUG.lower() == "true":
+ return True
+ return False
- CI_DEBUG = os.getenv('CI_DEBUG')
+def setup_logging(default_path=CONST.dir_functest_logging_cfg,
+ default_level=logging.INFO,
+ env_key='LOG_CFG'):
+ path = default_path
+ value = os.getenv(env_key, None)
+ if value:
+ path = value
+ if os.path.exists(path):
+ with open(path, 'rt') as f:
+ config = json.load(f)
+ if (config['handlers'] and
+ config['handlers']['console']):
+ stream_level = logging.INFO
+ if is_debug():
+ stream_level = logging.DEBUG
+ config['handlers']['console']['level'] = stream_level
+ logging.config.dictConfig(config)
+ else:
+ logging.basicConfig(level=default_level)
+
+
+setup_logging()
+
+
+class Logger:
+ def __init__(self, logger_name):
self.logger = logging.getLogger(logger_name)
- self.logger.propagate = 0
- self.logger.setLevel(logging.DEBUG)
-
- ch = logging.StreamHandler()
- formatter = logging.Formatter('%(asctime)s - %(name)s - '
- '%(levelname)s - %(message)s')
- ch.setFormatter(formatter)
- if CI_DEBUG is not None and CI_DEBUG.lower() == "true":
- ch.setLevel(logging.DEBUG)
- self.logger.parent.level = logging.DEBUG
- else:
- ch.setLevel(logging.INFO)
- self.logger.parent.level = logging.INFO
- self.logger.addHandler(ch)
-
- hdlr = logging.FileHandler('/home/opnfv/functest/results/functest.log')
- hdlr.setFormatter(formatter)
- hdlr.setLevel(logging.DEBUG)
- self.logger.addHandler(hdlr)
def getLogger(self):
return self.logger
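
Editor's note: setup_logging() now expects a JSON dictConfig file at CONST.dir_functest_logging_cfg (the functest/ci/logging.json added in this change) and only raises the console handler to DEBUG when CI_DEBUG is true. A hedged sketch of the minimal JSON shape that code path can consume; the 'console' handler name matches what the setup code looks up, the rest is illustrative:

    import json
    import logging.config

    config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {
                "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            }
        },
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "level": "INFO",
                "formatter": "standard"
            }
        },
        "root": {
            "level": "DEBUG",
            "handlers": ["console"]
        }
    }

    # Roughly what functest/ci/logging.json is expected to contain.
    print(json.dumps(config, indent=2))
    logging.config.dictConfig(config)
    logging.getLogger("Rally").info("configured via dictConfig")
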
diff --git a/functest/utils/openstack_tacker.py b/functest/utils/openstack_tacker.py
index f17b421e8..f17b421e8 100755..100644
--- a/functest/utils/openstack_tacker.py
+++ b/functest/utils/openstack_tacker.py
diff --git a/requirements.txt b/requirements.txt
index 28b3fed3e..b5e78bb59 100755..100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,6 +11,7 @@ python-openstackclient==2.3.0
python-ceilometerclient==2.6.2
python-keystoneclient==3.5.0
python-neutronclient==6.0.0
+python-novaclient==6.0.0
python-congressclient==1.5.0
virtualenv==15.1.0
pexpect==4.0
diff --git a/run_unit_tests.sh b/run_unit_tests.sh
index 79d05d3d1..5167d78c6 100755
--- a/run_unit_tests.sh
+++ b/run_unit_tests.sh
@@ -38,6 +38,7 @@ nosetests --with-xunit \
--cover-package=functest.utils \
--cover-xml \
--cover-html \
+ --log-config=$(pwd)/functest/tests/unit/test_logging.ini \
functest/tests/unit
rc=$?
diff --git a/test-requirements.txt b/test-requirements.txt
index 2bf297ba8..2bf297ba8 100755..100644
--- a/test-requirements.txt
+++ b/test-requirements.txt