-rwxr-xr-x  docker/run_tests.sh                                              42
-rw-r--r--  docs/devguide/index.rst                                           6
-rw-r--r--  docs/userguide/description.rst                                   51
-rw-r--r--  docs/userguide/index.rst                                        215
-rw-r--r--  docs/userguide/runfunctest.rst                                  135
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py          66
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally.py                6
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_tempest.py             20
-rw-r--r--  testcases/config_functest.yaml                                    4
-rw-r--r--  testcases/tests/TestFunctestUtils.py                              7
-rw-r--r--  testcases/vIMS/CI/vIMS.py                                         5
-rw-r--r--  testcases/vPing/CI/libraries/vPing_ssh.py (renamed from testcases/vPing/CI/libraries/vPing2.py)        8
-rw-r--r--  testcases/vPing/CI/libraries/vPing_userdata.py (renamed from testcases/vPing/CI/libraries/vPing.py)    8
13 files changed, 351 insertions, 222 deletions
diff --git a/docker/run_tests.sh b/docker/run_tests.sh
index 9d5f681ca..0f9e8a30c 100755
--- a/docker/run_tests.sh
+++ b/docker/run_tests.sh
@@ -22,13 +22,15 @@ where:
-h|--help show this help text
-r|--report push results to database (false by default)
-n|--no-clean do not clean OpenStack resources after test run
+ -s|--serial run tests in one thread
-t|--test run specific set of tests
- <test_name> one or more of the following: vping,odl,rally,tempest,vims,onos,promise,ovno. Separated by comma.
+ <test_name> one or more of the following separated by comma:
+ vping_ssh,vping_userdata,odl,rally,tempest,vims,onos,promise,ovno
examples:
$(basename "$0")
- $(basename "$0") --test vping,odl
+ $(basename "$0") --test vping_ssh,odl
$(basename "$0") -t tempest,rally"
@@ -37,6 +39,8 @@ examples:
offline=false
report=""
clean=true
+serial=false
+
# Get the list of runnable tests
# Check if we are in CI mode
@@ -81,16 +85,25 @@ function run_test(){
echo " Running test case: $i"
echo "----------------------------------------------"
echo ""
+ clean_flag=""
+ if [ $clean == "false" ]; then
+ clean_flag="-n"
+ fi
+ serial_flag=""
+ if [ $serial == "true" ]; then
+ serial_flag="-s"
+ fi
+
case $test_name in
- "vping")
- info "Running vPing test..."
- python ${FUNCTEST_REPO_DIR}/testcases/vPing/CI/libraries/vPing2.py \
- --debug ${report}
+ "vping_ssh")
+ info "Running vPing-SSH test..."
+ python ${FUNCTEST_REPO_DIR}/testcases/vPing/CI/libraries/vPing_ssh.py \
+ --debug $clean_flag ${report}
;;
"vping_userdata")
- info "Running vPing test using userdata/cloudinit.... "
- python ${FUNCTEST_REPO_DIR}/testcases/vPing/CI/libraries/vPing.py \
- --debug ${report}
+ info "Running vPing-userdata test... "
+ python ${FUNCTEST_REPO_DIR}/testcases/vPing/CI/libraries/vPing_userdata.py \
+ --debug $clean_flag ${report}
;;
"odl")
info "Running ODL test..."
@@ -109,7 +122,7 @@ function run_test(){
"tempest")
info "Running Tempest tests..."
python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/run_tempest.py \
- --debug -m custom ${report}
+ --debug $serial_flag $clean_flag -m custom ${report}
# save tempest.conf for further troubleshooting
tempest_conf="${RALLY_VENV_DIR}/tempest/for-deployment-*/tempest.conf"
if [ -f ${tempest_conf} ]; then
@@ -120,13 +133,13 @@ function run_test(){
"vims")
info "Running vIMS test..."
python ${FUNCTEST_REPO_DIR}/testcases/vIMS/CI/vIMS.py \
- --debug ${report}
+ --debug $clean_flag ${report}
clean_openstack
;;
"rally")
info "Running Rally benchmark suite..."
python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py \
- --debug all ${report}
+ --debug $clean_flag all ${report}
clean_openstack
;;
@@ -207,6 +220,9 @@ while [[ $# > 0 ]]
-n|--no-clean)
clean=false
;;
+ -s|--serial)
+ serial=true
+ ;;
-t|--test|--tests)
TEST="$2"
shift
@@ -225,7 +241,7 @@ if [[ -n "$DEPLOY_SCENARIO" && "$DEPLOY_SCENARIO" != "none" ]] &&\
[[ -f $tests_file ]]; then
arr_test=($(cat $tests_file))
else
- arr_test=(vping tempest vims rally)
+ arr_test=(vping_ssh tempest vims rally)
fi
BASEDIR=`dirname $0`
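The wrapper above only translates the shell flags into per-script options before calling the
Python test cases. As a rough illustration (not part of the change), the same dispatch could be
done from Python; the repository path and script location are taken from the diff, while the
helper function itself is hypothetical::

    import os
    import subprocess

    # assumption: FUNCTEST_REPO_DIR points to the functest repository, as in run_tests.sh
    FUNCTEST_REPO_DIR = os.environ.get("FUNCTEST_REPO_DIR", "/home/opnfv/repos/functest")

    def run_vping_ssh(clean=True, report=False):
        """Call vPing_ssh.py the same way run_tests.sh does."""
        cmd = ["python",
               os.path.join(FUNCTEST_REPO_DIR,
                            "testcases/vPing/CI/libraries/vPing_ssh.py"),
               "--debug"]
        if not clean:
            cmd.append("-n")   # equivalent of the wrapper's -n|--no-clean flag
        if report:
            cmd.append("-r")   # push results to the database
        return subprocess.call(cmd)

    if __name__ == "__main__":
        run_vping_ssh(clean=False)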
diff --git a/docs/devguide/index.rst b/docs/devguide/index.rst
index 7dd5cc790..7f3233c4c 100644
--- a/docs/devguide/index.rst
+++ b/docs/devguide/index.rst
@@ -41,7 +41,8 @@ Functest can be described as follow::
Functest deals with internal and external test cases.
The Internal test cases in Brahmaputra are:
- * vPing
+ * vPing_SSH
+ * vPing_userdata
* ODL
* Tempest
* vIMS
@@ -265,7 +266,8 @@ own project::
And do not forget to update also the help line::
-t|--test run specific set of tests
- <test_name> one or more of the following: vping,odl,rally,tempest,vims,onos, promise. Separated by comma.
+ <test_name> one or more of the following separated by comma:
+ vping_ssh,vping_userdata,odl,rally,tempest,vims,onos,promise,ovno
config_funtest.yaml
diff --git a/docs/userguide/description.rst b/docs/userguide/description.rst
index 288ad95af..b47d5ef62 100644
--- a/docs/userguide/description.rst
+++ b/docs/userguide/description.rst
@@ -1,12 +1,9 @@
-Description of the test cases
+Overview of the test suites
=============================
-Functest is an OPNFV project dedicated to functional testing.
-In the continuous integration, it is launched after an OPNFV fresh installation.
-The Functest target is to verify the basic functions of the infrastructure.
-
-Functest includes different test suites which several test cases within.
-Test cases are developed in Functest and in feature projects.
+Functest is the OPNFV project primarily targeting functional testing.
+In the Continuous Integration pipeline, it is launched after an OPNFV fresh
+installation to validate and verify the basic functions of the infrastructure.
The current list of test suites can be distributed in 3 main domains::
@@ -21,8 +18,8 @@ The current list of test suites can be distributed in 3 main domains::
| +----------------+-------------------------------------------+
|(Virtualised | Tempest | OpenStack reference test suite `[2]`_ |
| Infrastructure +----------------+-------------------------------------------+
- | Manager) | Rally scenario | OpenStack testing tool testing OpenStack |
- | | | modules `[3]`_ |
+ | Manager) | Rally bench | OpenStack testing tool benchmarking |
+ | | | OpenStack modules `[3]`_ |
+----------------+----------------+-------------------------------------------+
| | OpenDaylight | Opendaylight Test suite |
| +----------------+-------------------------------------------+
@@ -30,8 +27,8 @@ The current list of test suites can be distributed in 3 main domains::
| +----------------+-------------------------------------------+
| | OpenContrail | |
+----------------+----------------+-------------------------------------------+
- | Features | vIMS | Show the capability to deploy a real NFV |
- | | | test cases. |
+ | Features | vIMS | Example of a real VNF deployment to show |
+ | | | the NFV capabilities of the platform. |
| | | The IP Multimedia Subsystem is a typical |
| | | Telco test case, referenced by ETSI. |
| | | It provides a fully functional VoIP System|
@@ -46,31 +43,33 @@ The current list of test suites can be distributed in 3 main domains::
| | SDNVPN | |
+----------------+----------------+-------------------------------------------+
-
-Most of the test suites are developed upstream.
-For example, `Tempest <http://docs.openstack.org/developer/tempest/overview.html>`_ is the
-OpenStack integration test suite.
-Functest is in charge of the integration of different functional test suites.
+Functest includes different test suites, each containing several test cases. Some
+of the tests are developed by Functest team members whereas others are integrated
+from upstream communities or other OPNFV projects. For example,
+`Tempest <http://docs.openstack.org/developer/tempest/overview.html>`_ is the
+OpenStack integration test suite and Functest is in charge of the selection,
+integration and automation of the tests that fit in OPNFV.
The Tempest suite has been customized but no new test cases have been created.
-Some OPNFV feature projects (e.g. SDNVPN) have created Tempest tests cases and
-pushed to upstream.
+Some OPNFV feature projects (e.g. SDNVPN) have written Tempest test cases
+and pushed them upstream to be used by Functest.
-The tests run from CI are pushed into a database.
-The goal is to populate the database with results and to show them on a Test
-Dashboard.
+The results produced by the tests run from CI are pushed to and collected in a NoSQL
+database. The goal is to populate the database with results from different sources
+and scenarios and to show them on a Dashboard.
-There is no real notion of Test domain or Test coverage yet.
-Basic components (VIM, controllers) are tested through their own suites.
-Feature projects also provide their own test suites.
+There is no real notion of Test domain or Test coverage. Basic components
+(VIM, controllers) are tested through their own suites. Feature projects also
+provide their own test suites with different ways of running their tests.
vIMS test case was integrated to demonstrate the capability to deploy a
relatively complex NFV scenario on top of the OPNFV infrastructure.
Functest considers OPNFV as a black box.
-OPNFV, since Brahmaputra, offers lots of possible combinations:
+OPNFV, since the Brahmaputra release, offers lots of potential combinations:
* 3 controllers (OpenDaylight, ONOS, OpenContrail)
* 4 installers (Apex, Compass, Fuel, Joid)
-However most of the tests shall be runnable on any configuration.
+Most of the tests are runnable on any combination, but some others might have
+restrictions imposed by the installers or the available deployed features.
diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst
index 39d795261..b33b3120c 100644
--- a/docs/userguide/index.rst
+++ b/docs/userguide/index.rst
@@ -11,7 +11,7 @@ Introduction
============
The goal of this documents is to describe the Functest test cases as well as
-provide a procedure about how to execute (or launch) them.
+provide a procedure about how to execute them.
A presentation has been created for the first OPNFV Summit `[4]`_.
@@ -21,79 +21,100 @@ It is assumed that Functest container has been properly installed `[1]`_.
The different scenarios are described in the section hereafter.
-VIM
+VIM (Virtualized Infrastructure Manager)
---
-vPing
-^^^^^
+vPing_SSH
+^^^^^^^^^
-The goal of this test can be described as follows::
+Given the script 'ping.sh'::
+
+ #!/bin/sh
+ while true; do
+ ping -c 1 $1 2>&1 >/dev/null
+ RES=$?
+ if [ "Z$RES" = "Z0" ] ; then
+ echo 'vPing OK'
+ break
+ else
+ echo 'vPing KO'
+ fi
+ sleep 1
+ done
+
+The goal of this test is described as follows::
vPing test case
- +-------------+ +-------------+
- | | | |
- | | Boot VM1 | |
- | +------------------>| |
- | | | |
- | | Get IP VM1 | |
- | +------------------>| |
- | Tester | | System |
- | | Boot VM2 | Under |
- | +------------------>| Test |
- | | | |
- | | Create (VM2) | |
- | | floating IP | |
- | +------------------>| |
- | | | |
- | | SCP vPing script | |
- | | to VM2 | |
- | +------------------>| |
- | | | |
- | | SSH to VM2 | |
- | +------------------>| |
- | | | |
- | | Ping VM1 | |
- | | private IP | |
- | +------------------>| |
- | | | |
- | | If ping: | |
- | | exit OK | |
- | | else (timeout) | |
- | | exit KO | |
- | | | |
- +-------------+ +-------------+
-
-This example can be considered as an "Hello World" example.
-It is the first basic example, it must work on any configuration.
+ +-------------+ +-------------+
+ | | | |
+ | | Boot VM1 with IP1 | |
+ | +------------------->| |
+ | Tester | | System |
+ | | Boot VM2 | Under |
+ | +------------------->| Test |
+ | | | |
+ | | Create floating IP | |
+ | +------------------->| |
+ | | | |
+ | | Assign floating IP | |
+ | | to VM2 | |
+ | +------------------->| |
+ | | | |
+ | | Establish SSH | |
+ | | connection to VM2 | |
+ | | through floating IP| |
+ | +------------------->| |
+ | | | |
+ | | SCP ping.sh to VM2 | |
+ | +------------------->| |
+ | | | |
+ | | VM2 executes | |
+ | | ping.sh to VM1 | |
+ | +------------------->| |
+ | | | |
+ | | If ping: | |
+ | | exit OK | |
+ | | else (timeout) | |
+ | | exit Failed | |
+ | | | |
+ +-------------+ +-------------+
+
+This test can be considered as a "Hello World" example.
+It is the first basic use case which shall work on any deployment.
vPing_userdata
^^^^^^^^^^^^^^
-The goal of this test can be described as follow::
+The goal of this test can be described as follows::
vPing_userdata test case
- +-------------+ +-------------+
- | | | |
- | | Boot VM1 | |
- | +------------------>| |
- | | | |
- | | Get IP VM1 | |
- | +------------------>| |
- | Tester | | System |
- | | Boot VM2 | Under |
- | +------------------>| Test |
- | | VM2 pings VM1 | |
- | | (cloud-init) | |
- | | Check console log | |
- | | If ping: | |
- | | exit OK | |
- | | else (timeout) | |
- | | exit KO | |
- | | | |
- +-------------+ +-------------+
+ +-------------+ +-------------+
+ | | | |
+ | | Boot VM1 with IP1 | |
+ | +------------------->| |
+ | | | |
+ | | Boot VM2 with | |
+ | | ping.sh as userdata| |
+ | | with IP1 as $1. | |
+ | +------------------->| |
+ | Tester | | System |
+ | | VM2 executes ping.sh| Under |
+ | | (ping IP1) | Test |
+ | +------------------->| |
+ | | | |
+ | | Monitor nova | |
+ | | console-log VM 2 | |
+ | | If ping: | |
+ | | exit OK | |
+ | | else (timeout) | |
+ | | exit Failed | |
+ | | | |
+ +-------------+ +-------------+
This scenario is similar to the previous one but it uses cloud-init (nova
-metadata service) instead of floating IPs and SSH.
+metadata service) instead of floating IPs and an SSH connection. When the second VM
+boots, it will execute the script automatically, and the ping result is detected by
+periodically capturing the output in the console-log of the second VM.
Tempest
@@ -107,27 +128,27 @@ Tempest has batteries of tests for:
* Scenarios
* Other specific tests useful in validating an OpenStack deployment
-We use Rally `[3]`_ to run Tempest suite.
-Rally generates automatically tempest.conf configuration file.
-Before running actual test cases Functest creates needed resources.
-Needed parameters are updated in the configuration file.
-When the Tempest suite is run, each test duration is measured.
-The full console output is stored in the tempest.log file.
+Functest uses Rally `[3]`_ to run the Tempest suite.
+Rally automatically generates the Tempest configuration file (tempest.conf).
+Before running the actual test cases, Functest creates the needed resources and
+updates the appropriate parameters to the configuration file.
+When the Tempest suite is executed, each test duration is measured and the full
+console output is stored in the tempest.log file for further analysis.
As an addition of Arno, Brahmaputra runs a customized set of Tempest test cases.
The list is specified through *--tests-file* when running Rally.
This option has been introduced in Rally in version 0.1.2.
The customized test list is available in the Functest repo `[4]`_.
-This list contains more than 200 Tempest test cases.
-The list can be divied into two main parts:
+This list contains more than 200 Tempest test cases and can be divided
+into two main sections:
1) Set of tempest smoke test cases
2) Set of test cases from DefCore list `[8]`_
-The goal of Tempest test suite is to check the basic functionalities of
+The goal of the Tempest test suite is to check the basic functionalities of
different OpenStack components on an OPNFV fresh installation using
-corresponding REST API interfaces.
+the corresponding REST API interfaces.
Rally bench test suites
@@ -137,8 +158,8 @@ Rally `[3]`_ is a benchmarking tool that answers the question::
“How does OpenStack work at scale?”.
-The goal of this test suite is to test the different modules of OpenStack and
-get significant figures that could help us to define telco Cloud KPI.
+The goal of this test suite is to benchmark the different OpenStack modules and
+get significant figures that could help to define Telco Cloud KPIs.
The OPNFV scenarios are based on the collection of the existing Rally scenarios:
@@ -152,7 +173,7 @@ The OPNFV scenarios are based on the collection of the existing Rally scenarios:
* quotas
* requests
-Basic SLA (stop test on errors) have been implemented.
+A basic SLA (stop test on errors) has been implemented.
SDN Controllers
@@ -169,11 +190,11 @@ OpenDaylight
^^^^^^^^^^^^
The OpenDaylight (ODL) test suite consists of a set of basic tests inherited
-from ODL project.
+from the ODL project using the Robot `[11]`_ framework.
The suite verifies creation and deletion of networks, subnets and ports with
OpenDaylight and Neutron.
-The list of tests can be described as follow:
+The list of tests can be described as follows:
* Restconf.basic: Get the controller modules via Restconf
* Neutron.Networks
@@ -216,14 +237,13 @@ The list of tests can be described as follow:
ONOS
^^^^
-TestON Framework is used to test ONOS function.
+TestON Framework is used to test the ONOS SDN controller functions.
The test cases deal with L2 and L3 functions.
-ONOS is configured through OPNFV scenario.
The ONOS test suite can be run on any ONOS compliant scenario.
-The test cases may be described as follow:
+The test cases may be described as follows:
- * onosfunctest: The mainly executable file contains the initialization of
+ * onosfunctest: The main executable file contains the initialization of
the docker environment and functions called by FUNCvirNetNB and
FUNCvirNetNBL3
@@ -273,7 +293,7 @@ vIMS
^^^^
The goal of this test suite consists of:
- * deploying a VNF orchestrator (cloudify)
+ * deploy a VNF orchestrator (Cloudify)
* deploy a Clearwater vIMS (IP Multimedia Subsystem) VNF from this
orchestrator based on a TOSCA blueprint defined in `[5]`_
* run suite of signaling tests on top of this VNF
@@ -290,7 +310,7 @@ Two types of information are stored in the Test Database:
* the test results
The deployment of a complete functional VNF allows the test of most of the
-essential functions needed for a NFV system.
+essential functions needed for a NFV platform.
Promise
^^^^^^^
@@ -340,6 +360,9 @@ include::
flavor_ram: 512
flavor_disk: 0
+However, these parameters must not be changed, as they are the values expected
+by the Promise test suite.
+
.. include:: ./runfunctest.rst
Test results
@@ -348,10 +371,10 @@ Test results
VIM
---
-vPing
-^^^^^
+vPing_SSH
+^^^^^^^^^
-vPing results are displayed in the console::
+vPing test case output is displayed in the console::
FUNCTEST.info: Running vPing test...
2016-01-23 03:18:20,153 - vPing- INFO - Creating neutron network vping-net...
@@ -428,8 +451,8 @@ The Tempest results are displayed in the console::
2016-01-28 08:19:32,133 - run_tempest - INFO - Pushing results to DB: 'http://testresults.opnfv.org/testapi/results'.
2016-01-28 08:19:32,278 - run_tempest - INFO - Deleting tenant and user for Tempest suite)
-In order to check all the available test cases related debug information, please
-inspect tempest.log file stored into related Rally deployment folder.
+In order to check the debug information related to all the available test cases, inspect
+the tempest.log file stored under */home/opnfv/functest/results/tempest/*.
Rally
@@ -493,9 +516,9 @@ The results of ODL tests can be seen in the console::
1 critical test, 1 passed, 0 failed
1 test total, 1 passed, 0 failed
==============================================================================
- Output: /home/jenkins-ci/workspace/functest-opnfv-jump-2/output.xml
- Log: /home/jenkins-ci/workspace/functest-opnfv-jump-2/log.html
- Report: /home/jenkins-ci/workspace/functest-opnfv-jump-2/report.html
+ Output: /home/opnfv/repos/functest/output.xml
+ Log: /home/opnfv/repos/functest/log.html
+ Report: /home/opnfv/repos/functest/report.html
..............................................................................
@@ -507,9 +530,9 @@ The results of ODL tests can be seen in the console::
18 critical tests, 18 passed, 0 failed
18 tests total, 18 passed, 0 failed
==============================================================================
- Output: /home/jenkins-ci/workspace/functest-opnfv-jump-2/output.xml
- Log: /home/jenkins-ci/workspace/functest-opnfv-jump-2/log.html
- Report: /home/jenkins-ci/workspace/functest-opnfv-jump-2/report.html
+ Output: /home/opnfv/repos/functest/output.xml
+ Log: /home/opnfv/repos/functest/log.html
+ Report: /home/opnfv/repos/functest/report.html
3 result files are generated:
* output.xml
@@ -659,7 +682,6 @@ Please note that vIMS traces can bee summarized in several steps:
* INFO - Cloudify-manager server is UP ! => orchestrator deployed
* INFO - The deployment of clearwater-opnfv is ended => VNF deployed
* Multiple Identities (UDP) - (6505550771, 6505550675) Passed => tests run
- * DEBUG - Pushing results to DB.... => tests saved
Promise
@@ -704,8 +726,8 @@ Troubleshooting
VIM
---
-vPing
-^^^^^
+vPing_SSH
+^^^^^^^^^
vPing should work on all the scenarios. In case of timeout, check your network
connectivity. The test case creates its own security group to allow SSH access,
@@ -833,6 +855,7 @@ References
.. _`[8]`: https://wiki.openstack.org/wiki/Governance/DefCoreCommittee
.. _`[9]`: https://git.opnfv.org/cgit/functest/tree/testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml
.. _`[10]`: https://git.opnfv.org/cgit/functest/tree/testcases/VIM/OpenStack/CI/rally_cert/task.yaml
+.. _`[11]`: http://robotframework.org/
OPNFV main site: opnfvmain_.
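The vPing_SSH flow documented above (floating IP, SSH connection, SCP of ping.sh) can be
pictured with a short Python sketch. This is an illustration of the mechanism only, not the
code of vPing_ssh.py; the paramiko library is used here purely as an example, and the IP
addresses and credentials are placeholders::

    import time
    import paramiko

    FLOATING_IP = "172.16.0.10"      # placeholder: floating IP assigned to VM2
    VM1_PRIVATE_IP = "192.168.0.3"   # placeholder: private IP of VM1

    # establish the SSH connection to VM2 through its floating IP
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(FLOATING_IP, username="cirros", password="cubswin:)")  # placeholder credentials

    # SCP (here via SFTP) ping.sh to VM2 ...
    sftp = ssh.open_sftp()
    sftp.put("ping.sh", "/tmp/ping.sh")
    sftp.close()

    # ... and execute it against the private IP of VM1
    stdin, stdout, stderr = ssh.exec_command("sh /tmp/ping.sh %s" % VM1_PRIVATE_IP)
    for line in iter(stdout.readline, ""):
        if "vPing OK" in line:
            print("vPing OK")
            break
        time.sleep(1)
    ssh.close()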
diff --git a/docs/userguide/runfunctest.rst b/docs/userguide/runfunctest.rst
index b186e539c..364333740 100644
--- a/docs/userguide/runfunctest.rst
+++ b/docs/userguide/runfunctest.rst
@@ -5,89 +5,120 @@ Manual testing
--------------
Once the Functest docker container is running and Functest environment ready
-(through /home/opnfv/repos/functest/docker/prepare_env.sh script), the system is
+(through */home/opnfv/repos/functest/docker/prepare_env.sh* script), the system is
ready to run the tests.
-The script *run_tests.sh* is located in $repos_dir/functest/docker and it has
+The script *run_tests.sh* launches the tests in an automated way.
+Although it is possible to execute the different tests manually, it is
+recommended to use this shell script, which calls
+the actual test scripts with the appropriate parameters.
+
+It is located in *$repos_dir/functest/docker* and it has
several options::
./run_tests.sh -h
Script to trigger the tests automatically.
usage:
- bash run_tests.sh [--offline] [-h|--help] [-t <test_name>]
+ bash run_tests.sh [-h|--help] [-r|--report] [-n|--no-clean] [-s|--serial] [-t|--test <test_name>]
where:
-h|--help show this help text
-r|--report push results to database (false by default)
-n|--no-clean do not clean up OpenStack resources after test run
+ -s|--serial run tests in one thread
-t|--test run specific set of tests
- <test_name> one or more of the following: vping,vping_userdata,odl,rally,tempest,vims,onos,promise. Separated by comma.
+ <test_name> one or more of the following separated by comma:
+ vping_ssh,vping_userdata,odl,rally,tempest,vims,onos,promise,ovno
examples:
run_tests.sh
run_tests.sh --test vping,odl
run_tests.sh -t tempest,rally --no-clean
-The *-r* option is used by the Continuous Integration in order to push the test
-results into a test collection database, see in next section for details.
-In manual mode, you must not use it, your try will be anyway probably rejected
-as your POD must be declared in the database to collect the data.
+The *-r* option is used by the OPNFV Continuous Integration automation mechanisms
+in order to push the test results into the NoSQL results collection database.
+This database is read-only for a regular user, since pushing data requires
+special rights and special conditions.
-The *-n* option is used for preserving all the existing OpenStack resources after
-execution test cases.
+The *-t* option can be used to specify the list of desired tests to be launched;
+by default Functest will launch all the test suites in the following order:
+vPing, Tempest, vIMS, Rally.
-The *-t* option can be used to specify the list of test you want to launch, by
-default Functest will try to launch all its test suites in the following order
-vPing, odl, Tempest, vIMS, Rally.
-You may launch only one single test by using *-t <the test you want to launch>*.
+A single test or a set of tests may be launched at once using *-t <test_name>*,
+specifying the test name or names separated by commas from the following list:
+*[vping_ssh,vping_userdata,odl,rally,tempest,vims,onos,promise]*.
-Within Tempest test suite you can define which test cases you want to execute in
-your environment by editing test_list.txt file before executing *run_tests.sh*
-script.
+The *-n* option is used to preserve all the OpenStack resources created
+by the tests after their execution.
Please note that Functest includes cleaning mechanism in order to remove
-everything except what was present after a fresh install.
-If you create your own VMs, tenants, networks etc. and then launch Functest,
-they all will be deleted after executing the tests. Use the *--no-clean* option with
-run_test.sh in order to preserve all the existing resources.
-However, be aware that Tempest and Rally create of lot of resources (users,
+all the VIM resources except what was present before running any test. The script
+*$repos_dir/functest/testcases/VIM/OpenStack/CI/libraries/generate_defaults.py*
+is called once by *prepare_env.sh* when setting up the Functest environment
+to snapshot all the OpenStack resources (images, networks, volumes, security groups,
+tenants, users) so that an eventual cleanup does not remove any of these defaults.
+
+The *-s* option forces execution of test cases in a single thread. Currently this
+option affects Tempest test cases only and can be used e.g. for troubleshooting
+concurrency problems.
+
+The script
+*$repos_dir/functest/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py*
+is normally called after a test execution if the *-n* option is not specified. It
+is in charge of cleaning the OpenStack resources that are not specified
+in the defaults file generated previously which is stored in
+*/home/opnfv/functest/conf/os_defaults.yaml* in the docker
+container.
+
+It is important to mention that if there are new OpenStack resources created
+manually after preparing the Functest environment, they will be removed if this
+flag is not specified in the *run_tests.sh* command.
+The reason to include this cleanup mechanism in Functest is that some
+test suites such as Tempest or Rally create a lot of resources (users,
tenants, networks, volumes etc.) that are not always properly cleaned, so this
-cleaning function has been set to keep the system as clean as possible after a
-full Functest run.
+cleaning function has been set to keep the system as clean as it was before a
+full Functest execution.
-You may also add you own test by adding a section into the function run_test().
+Within the Tempest test suite it is possible to define which test cases to execute
+by editing the *test_list.txt* file before executing the *run_tests.sh* script. This file
+is located in *$repos_dir/functest/testcases/VIM/OpenStack/CI/custom_tests/test_list.txt*.
+Although *run_tests.sh* provides an easy way to run any test, it is possible to
+do a direct call to the desired test script. For example::
+ python $repos_dir/functest/testcases/vPing/CI/libraries/vPing_ssh.py -d
Automated testing
-----------------
-As mentioned in `[1]`, the *prepare-env.sh* and *run_test.sh* can be executed within
-the container from jenkins.
-2 jobs have been created, one to run all the test and one that allows testing
-test suite by test suite.
-You thus just have to launch the acurate jenkins job on the target lab, all the
-tests shall be automatically run.
+As mentioned in `[1]`_, the *prepare_env.sh* and *run_tests.sh* scripts can be called within
+the container from Jenkins. There are 2 jobs that automate all the manual steps
+explained in the previous section. One job runs all the tests and the other one allows
+running the suites one by one by specifying the test name. The user might use one or
+the other job to execute the desired test suites.
+
+One of the most challenging tasks in the Brahmaputra release consists
+in dealing with lots of scenarios and installers. Thus, when the tests are
+automatically started from CI, a basic algorithm has been created in order to
+detect whether a given test is runnable or not on the given scenario.
+Some Functest test suites cannot be systematically run (e.g. the ODL suite cannot
+be run on an ONOS scenario).
+
-When the tests are automatically started from CI, a basic algorithm has been
-created in order to detect whether the test is runnable or not on the given
-scenario.
-In fact, one of the most challenging task in Brahmaputra consists in dealing
-with lots of scenario and installers.
-Functest test suites cannot be systematically run (e.g. run the ODL suite on an
-ONOS scenario).
-CI provides several information:
+CI provides some useful information passed to the container as environment
+variables:
- * The installer (apex|compass|fuel|joid)
- * The scenario [controller]-[feature]-[mode] with
+ * Installer (apex|compass|fuel|joid), stored in INSTALLER_TYPE
+ * Installer IP of the engine or VM running the actual deployment, stored in INSTALLER_IP
+ * The scenario [controller]-[feature]-[mode], stored in DEPLOY_SCENARIO with
* controller = (odl|onos|ocl|nosdn)
* feature = (ovs(dpdk)|kvm)
* mode = (ha|noha)
-Constraints per test case are defined in the Functest configuration file
-/home/opnfv/functest/config/config_functest.yaml::
+The constraints per test case are defined in the Functest configuration file
+*/home/opnfv/functest/config/config_functest.yaml*::
test-dependencies:
functest:
@@ -105,16 +136,17 @@ Constraints per test case are defined in the Functest configuration file
....
At the end of the Functest environment creation (prepare_env.sh see `[1]`_), a
-file (/home/opnfv/functest/conf/testcase-list.txt) is created with the list of
+file */home/opnfv/functest/conf/testcase-list.txt* is created with the list of
all the runnable tests.
-We consider the static constraints as regex and compare them with the scenario.
-For instance, odl can be run only on scenario including odl in its name.
+Functest considers the static constraints as regular expressions and compares them
+with the given scenario name.
+For instance, the ODL suite can be run only on a scenario including 'odl' in its name.
The order of execution is also described in the Functest configuration file::
test_exec_priority:
- 1: vping
+ 1: vping_ssh
2: vping_userdata
3: tempest
4: odl
@@ -130,13 +162,10 @@ The order of execution is also described in the Functest configuration file::
The tests are executed in the following order:
- * Basic scenario (vPing, vPing_userdata, Tempest)
+ * Basic scenario (vPing_ssh, vPing_userdata, Tempest)
* Controller suites: ODL or ONOS or OpenContrail
* Feature projects (promise, vIMS)
* Rally (benchmark scenario)
-At the end of an automated execution, everything is cleaned.
-Before running Functest, a snapshot of the OpenStack configuration (users,
-tenants, networks, ....) is performed. After Functest, a clean mechanism is
-launched to delete everything that would not have been properly deleted in order
-to restitute the system as it was prior to the tests.
+As explained before, at the end of an automated execution the OpenStack resources
+created by the tests are removed, unless the *--no-clean* option was specified.
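The runnable-test detection described earlier in this file boils down to a regular-expression
match between the static constraint and the scenario name. A small stand-alone illustration
(constraint and scenario values are examples only, not taken from a real configuration)::

    import re

    # example constraint, as it could appear in config_functest.yaml for the ODL suite
    constraint = "odl"
    # example value of the DEPLOY_SCENARIO environment variable passed by CI
    deploy_scenario = "os-odl_l2-nofeature-ha"

    # the test is considered runnable when the constraint regex matches the scenario
    runnable = re.search(constraint, deploy_scenario) is not None
    print(runnable)   # True: 'odl' appears in the scenario name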
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index ade4385b3..0d1992604 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -54,6 +54,9 @@ parser.add_argument("-s", "--smoke",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -208,15 +211,13 @@ def get_output(proc, test_name):
nb_tests = 0
overall_duration = 0.0
success = 0.0
+ nb_totals = 0
- if args.verbose:
- while proc.poll() is None:
- line = proc.stdout.readline()
- print line.replace('\n', '')
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if args.verbose:
result += line
- else:
- while proc.poll() is None:
- line = proc.stdout.readline()
+ else:
if "Load duration" in line or \
"started" in line or \
"finished" in line or \
@@ -224,28 +225,41 @@ def get_output(proc, test_name):
"+-" in line or \
"|" in line:
result += line
- if "| " in line and \
- "| action" not in line and \
- "| " not in line and \
- "| total" not in line:
- nb_tests += 1
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- success += float(percentage)
-
elif "test scenario" in line:
result += "\n" + line
elif "Full duration" in line:
result += line + "\n\n"
- overall_duration += float(line.split(': ')[1])
- logger.info("\n" + result)
- overall_duration = "{:10.2f}".format(overall_duration)
- success_avg = success / nb_tests
+
+ # parse output for summary report
+ if "| " in line and \
+ "| action" not in line and \
+ "| Starting" not in line and \
+ "| Completed" not in line and \
+ "| ITER" not in line and \
+ "| " not in line and \
+ "| total" not in line:
+ nb_tests += 1
+ elif "| total" in line:
+ percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+ success += float(percentage)
+ nb_totals += 1
+ elif "Full duration" in line:
+ overall_duration += float(line.split(': ')[1])
+
+ overall_duration="{:10.2f}".format(overall_duration)
+ if nb_totals == 0:
+ success_avg = 0
+ else:
+ success_avg = "{:0.2f}".format(success / nb_totals)
+
scenario_summary = {'test_name': test_name,
'overall_duration': overall_duration,
'nb_tests': nb_tests,
'success': success_avg}
-
SUMMARY.append(scenario_summary)
+
+ logger.info("\n" + result)
+
return result
@@ -255,6 +269,7 @@ def run_task(test_name):
# :param test_name: name for the rally test
# :return: void
#
+ global SUMMARY
logger.info('Starting test scenario "{}" ...'.format(test_name))
task_file = '{}task.yaml'.format(SCENARIOS_DIR)
@@ -282,12 +297,12 @@ def run_task(test_name):
logger.debug('task_id : {}'.format(task_id))
if task_id is None:
- logger.error("failed to retrieve task_id")
+ logger.error("Failed to retrieve task_id.")
exit(-1)
# check for result directory and create it otherwise
if not os.path.exists(RESULTS_DIR):
- logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+ logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
os.makedirs(RESULTS_DIR)
# write html report file
@@ -388,7 +403,7 @@ def main():
test_name == 'vm'):
run_task(test_name)
else:
- print(args.test_name)
+ logger.debug("Test name: " + args.test_name)
run_task(args.test_name)
report = "\n"\
@@ -424,7 +439,7 @@ def main():
total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
total_duration_str2 = "{0:<10}".format(total_duration_str)
total_nb_tests_str = "{0:<13}".format(total_nb_tests)
- total_success = total_success / len(SUMMARY)
+ total_success = "{:0.2f}".format(total_success / len(SUMMARY))
total_success_str = "{0:<10}".format(str(total_success)+'%')
report += "+===================+============+===============+===========+\n"
report += "| TOTAL: | " + total_duration_str2 + " | " + \
@@ -445,6 +460,9 @@ def main():
logger.debug("Pushing Rally summary into DB...")
push_results_to_db("Rally", payload)
+ if args.noclean:
+ exit(0)
+
logger.debug("Deleting image '%s' with ID '%s'..." \
% (GLANCE_IMAGE_NAME, image_id))
if not functest_utils.delete_glance_image(nova_client, image_id):
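The reworked summary parsing above keys off Rally's ASCII result table. A simplified,
stand-alone version of the same extraction, with a hard-coded sample row (the column values
are made up), behaves as follows::

    # example '| total |' row as Rally prints it (columns: action, min, median,
    # 90%ile, 95%ile, max, avg, success, count)
    line = "| total | 0.5 | 0.9 | 1.2 | 1.3 | 2.0 | 1.0 | 100.0% | 10 |"

    success = 0.0
    nb_totals = 0

    if "| total" in line:
        # the success percentage sits in the 8th '|'-separated column
        percentage = line.split('|')[8].strip(' ').strip('%')
        success += float(percentage)
        nb_totals += 1

    success_avg = "{:0.2f}".format(success / nb_totals) if nb_totals else 0
    print(success_avg)   # '100.00'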
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 18f60acc1..6b1aae2eb 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -47,6 +47,9 @@ parser.add_argument("-r", "--report",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -271,6 +274,9 @@ def main():
print(args.test_name)
run_task(args.test_name)
+ if args.noclean:
+ exit(0)
+
logger.debug("Deleting image '%s' with ID '%s'..." \
% (GLANCE_IMAGE_NAME, image_id))
if not functest_utils.delete_glance_image(nova_client, image_id):
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
index b8ed2716e..294669182 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
@@ -33,12 +33,21 @@ modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
""" tests configuration """
parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-m", "--mode", help="Tempest test mode [smoke, all]",
+parser.add_argument("-d", "--debug",
+ help="Debug mode",
+ action="store_true")
+parser.add_argument("-s", "--serial",
+ help="Run tests in one thread",
+ action="store_true")
+parser.add_argument("-m", "--mode",
+ help="Tempest test mode [smoke, all]",
default="smoke")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -289,12 +298,19 @@ def main():
else:
MODE = "--set "+args.mode
+ if args.serial:
+ MODE = "--concur 1 "+MODE
+
if not os.path.exists(TEMPEST_RESULTS_DIR):
os.makedirs(TEMPEST_RESULTS_DIR)
create_tempest_resources()
configure_tempest()
run_tempest(MODE)
+
+ if args.noclean:
+ exit(0)
+
free_tempest_resources()
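The new *--serial* option simply prepends a single-worker setting to the Rally verification
options. Condensed, the mode handling looks roughly like this (a sketch of the logic shown in
the hunk above, not the full script)::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--serial", action="store_true",
                        help="Run tests in one thread")
    parser.add_argument("-m", "--mode", default="smoke",
                        help="Tempest test mode [smoke, all]")
    args = parser.parse_args(["-s", "-m", "smoke"])

    MODE = "--set " + args.mode
    if args.serial:
        # force a single worker so Tempest test cases do not run concurrently
        MODE = "--concur 1 " + MODE
    print(MODE)   # '--concur 1 --set smoke'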
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index 2f034f940..7d5f21360 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -166,7 +166,7 @@ results:
# the execution order is important as some tests may be more destructive than others
# and if vPing is failing is usually not needed to continue...
test_exec_priority:
- 1: vping
+ 1: vping_ssh
2: vping_userdata
3: tempest
4: odl
@@ -231,7 +231,7 @@ test-dependencies:
functest:
vims:
scenario: '(ocl)|(odl)|(nosdn)'
- vping:
+ vping_ssh:
vping_userdata:
scenario: '(ocl)|(odl)|(nosdn)'
tempest:
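The *test_exec_priority* map changed earlier in this file drives the execution order; reading
it back is straightforward with PyYAML. The inline snippet below reproduces only the first
entries from the diff::

    import textwrap
    import yaml

    snippet = textwrap.dedent("""
        test_exec_priority:
          1: vping_ssh
          2: vping_userdata
          3: tempest
          4: odl
    """)

    priorities = yaml.safe_load(snippet)["test_exec_priority"]
    for rank in sorted(priorities):
        print(rank, priorities[rank])
    # 1 vping_ssh
    # 2 vping_userdata
    # 3 tempest
    # 4 odl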
diff --git a/testcases/tests/TestFunctestUtils.py b/testcases/tests/TestFunctestUtils.py
index 17bc958e3..fd83ed6f5 100644
--- a/testcases/tests/TestFunctestUtils.py
+++ b/testcases/tests/TestFunctestUtils.py
@@ -65,7 +65,10 @@ class TestFunctestUtils(unittest.TestCase):
test = isTestRunnable('functest/odl', functest_yaml)
self.assertTrue(test)
- test = isTestRunnable('functest/vping', functest_yaml)
+ test = isTestRunnable('functest/vping_ssh', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/vping_userdata', functest_yaml)
self.assertTrue(test)
test = isTestRunnable('functest/tempest', functest_yaml)
@@ -82,7 +85,7 @@ class TestFunctestUtils(unittest.TestCase):
test = generateTestcaseList(functest_yaml)
- expected_list = "vping tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
+ expected_list = "vping_ssh vping_userdata tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
self.assertEqual(test, expected_list)
def tearDown(self):
diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py
index a8ac97f5c..c50334936 100644
--- a/testcases/vIMS/CI/vIMS.py
+++ b/testcases/vIMS/CI/vIMS.py
@@ -40,6 +40,9 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
""" logging configuration """
@@ -461,6 +464,8 @@ def main():
cfy.undeploy_manager()
############### GENERAL CLEANUP ################
+ if args.noclean:
+ exit(0)
ks_creds = functest_utils.get_credentials("keystone")
diff --git a/testcases/vPing/CI/libraries/vPing2.py b/testcases/vPing/CI/libraries/vPing_ssh.py
index 1ce6dc9e5..d8b50f7e9 100644
--- a/testcases/vPing/CI/libraries/vPing2.py
+++ b/testcases/vPing/CI/libraries/vPing_ssh.py
@@ -37,12 +37,15 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
""" logging configuration """
-logger = logging.getLogger('vPing')
+logger = logging.getLogger('vPing_ssh')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
@@ -198,6 +201,9 @@ def create_private_neutron_net(neutron):
def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2, secgroup_id):
+ if args.noclean:
+ logger.debug("The OpenStack resources are not deleted.")
+ return True
# delete both VMs
logger.info("Cleaning up...")
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing_userdata.py
index 1368bbec1..c81a1fddb 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing_userdata.py
@@ -35,12 +35,15 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
""" logging configuration """
-logger = logging.getLogger('vPing')
+logger = logging.getLogger('vPing_userdata')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
@@ -192,6 +195,9 @@ def create_private_neutron_net(neutron):
def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2):
+ if args.noclean:
+ logger.debug("The OpenStack resources are not deleted.")
+ return True
# delete both VMs
logger.info("Cleaning up...")
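To complete the picture, the userdata mechanism that vPing_userdata.py relies on can be
sketched with python-novaclient: boot the second VM with ping.sh passed as userdata and poll
its console log. This is an illustration only, not the script's actual code; the credentials,
endpoint, image and flavor IDs are placeholders::

    import time
    from novaclient import client

    # placeholder credentials/endpoint -- the real script reads them from the environment
    nova = client.Client("2", "admin", "secret", "admin", "http://keystone:5000/v2.0")

    VM1_IP = "192.168.0.3"   # placeholder: IP1 of the first VM
    ping_script = (
        "#!/bin/sh\n"
        "while true; do\n"
        "  ping -c 1 %s 2>&1 >/dev/null && echo 'vPing OK' && break\n"
        "  sleep 1\n"
        "done\n" % VM1_IP
    )

    # boot VM2 with ping.sh passed as userdata; cloud-init runs it at boot time
    vm2 = nova.servers.create(name="opnfv-vping-2", image="IMAGE_ID",
                              flavor="FLAVOR_ID", userdata=ping_script)

    # monitor the nova console-log of VM2 until the marker appears or we give up
    for _ in range(60):
        if "vPing OK" in nova.servers.get_console_output(vm2):
            print("vPing OK")
            break
        time.sleep(10)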