author    Morgan Richomme <morgan.richomme@orange.com>  2015-10-01 11:03:09 +0200
committer Morgan Richomme <morgan.richomme@orange.com>  2015-10-01 11:03:09 +0200
commit    19c7d0ecfd453d5e631466401e454d4da3314f9b (patch)
tree      c1dac5451af1760f963d709772b5062ba2741983
parent    5901dd9a47b08c162acb631bf5acaeeb1b7ce745 (diff)
parent    96bf9abe9b1b26a79dcc86900e8eb33d8544e773 (diff)
Merge branch 'master' into stable/arno  (tag: arno.2015.2.0)
-rw-r--r--   INFO                                                    8
-rw-r--r--   commons/ims/readme.rst                                  0
-rw-r--r--   commons/mobile/readme.rst                               116
-rw-r--r--   commons/traffic-profile-guidelines.rst                  64
-rw-r--r--   docs/functest.rst                                       228
-rwxr-xr-x   testcases/Controllers/ODL/CI/start_tests.sh             21
-rw-r--r--   testcases/Controllers/ONOS/Teston/CI/Readme.txt         5
-rw-r--r--   testcases/Controllers/ONOS/Teston/CI/dependencies/onos  23
-rw-r--r--   testcases/Controllers/ONOS/Teston/CI/onosfunctest.py    203
-rw-r--r--   testcases/Dashboard/dashboard_utils.py                  251
-rw-r--r--   testcases/Dashboard/functest2Dashboard.py               81
-rw-r--r--   testcases/Dashboard/odl2Dashboard.py                    52
-rw-r--r--   testcases/Dashboard/rally2Dashboard.py                  52
-rw-r--r--   testcases/Dashboard/tempest2Dashboard.py                52
-rw-r--r--   testcases/Dashboard/vPing2Dashboard.py                  94
-rw-r--r--   testcases/VIM/OpenStack/CI/libraries/run_rally.py       7
-rw-r--r--   testcases/config_functest.py                            50
-rw-r--r--   testcases/config_functest.yaml                          3
-rw-r--r--   testcases/functest_utils.py                             95
-rw-r--r--   testcases/vIMS/vIMS.md                                  3
-rw-r--r--   testcases/vPing/CI/libraries/vPing.py                   318
21 files changed, 1504 insertions, 222 deletions
diff --git a/INFO b/INFO
index 9c966fe6c..b0b7db6af 100644
--- a/INFO
+++ b/INFO
@@ -3,7 +3,7 @@ Project Creation Date: January 20, 2015
Project Category: Integration & Testing
Lifecycle State: Incubation
Primary Contact: Morgan Richomme (morgan.richomme@orange.com)
-Project Lead: Trever Cooper (trevor.cooper@intel.com)
+Project Lead: Morgan Richomme (morgan.richomme@orange.com)
Jira Project Name: Base System Functionality Testing Project
Jira Project Prefix: FUNCTEST
Mailing list tag: [functest]
@@ -16,16 +16,16 @@ sama@docomolab-euro.com
jose.lausuch@ericsson.com
Andrew.Caldwell@metaswitch.com
morgan.richomme@orange.com
-Palani.Chinnakannan@gmail.com
trevor.cooper@intel.com
Prabu.Kuppuswamy@spirent.com
fuqiao@chinamobile.com
chitti.nimmagadda@ericsson.com
raja.karthik@hp.com
vivekanandan.p@hp.com
-yuyijun@huawei.com
koffirodrigue@gmail.com
-Peng.Li@huawei.com
+dk068x@att.com
+meimei@huawei.com
+valentin.boucher@orange.com
Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-01-20-14.57.html
diff --git a/commons/ims/readme.rst b/commons/ims/readme.rst
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/commons/ims/readme.rst
diff --git a/commons/mobile/readme.rst b/commons/mobile/readme.rst
new file mode 100644
index 000000000..07c872503
--- /dev/null
+++ b/commons/mobile/readme.rst
@@ -0,0 +1,116 @@
+Mobility Traffic Profiles for OPNFV
+===================================
+
+Mobility User-Plane
++++++++++++++++++++
+
+The following tables give per-session averages (as ranges) for user-plane traffic related to several classes of applications.
+
+Downlink Mobility User-Plane
+----------------------------
+
+.. list-table:: Downlink Mobility User-Plane
+ :widths: 25 25 25 25
+ :header-rows: 1
+
+ * - Service/protocol
+ - Downlink Packet Size (bytes)
+ - Downlink Flow Size (KB)
+ - Downlink per-flow Throughput (Kbps)
+
+ * - Browsing + Apps
+ - 1,220 - 1,260
+ - 40 - 55
+ - 130 - 253
+
+ * - HTTPS traffic
+ - 1,050 - 1,085
+ - 32 - 40
+ - 107 - 124
+
+ * - Video Streaming
+ - 1,360 - 1,390
+ - 545 - 650
+ - 690 - 790
+
+ * - VoLTE bearer (media, excluding signaling)
+ - 84 – 102
+ - 116 - 142
+ - 18 - 24
+
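+For example, combining the flow sizes and throughputs above, a downlink
+video-streaming flow of 545 - 650 KB at 690 - 790 Kbps lasts roughly
+5.5 - 7.5 seconds per session.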
+
+Uplink Mobility User-Plane
+--------------------------
+.. list-table:: Uplink Mobility User-Plane
+ :widths: 25 25 25 25
+ :header-rows: 1
+
+ * - Service/protocol
+ - Uplink Packet Size (bytes)
+ - Uplink Flow Size (KB)
+ - Uplink per-flow Throughput (Kbps)
+
+ * - Browsing + Apps
+ - 90 – 120
+ - 3 – 10
+ - 6 – 18
+
+ * - HTTPS traffic
+ - 140 – 200
+ - 5 – 12
+ - 8 – 15
+
+ * - Video Streaming
+ - 50 – 110
+ - 10 – 20
+ - 12 - 20
+
+ * - VoLTE bearer (media, excluding signaling)
+ - 84 – 102
+ - 112 - 135
+ - 18 - 24
+
+
+Mobility User-Plane Traffic Distribution
+----------------------------------------
+.. list-table:: Mobility User-Plane Traffic Distribution
+ :widths: 33 33 33
+ :header-rows: 1
+
+ * - Service/protocol
+ - Downlink
+ - Uplink
+
+ * - HTTP
+ - 40 - 70% (60 – 40 split between ‘browsing + apps’ and ‘streaming’)
+ - 30 - 50% (55 – 45 split between ‘browsing + apps’ and ‘streaming’)
+
+ * - HTTPS
+ - 25 - 50%
+ - 40 - 60%
+
+ * - Email
+ - 1%
+ - 3%
+
+ * - P2P
+ - 0.1%
+ - 0.5%
+
+ * - VoLTE
+ - 0-5%
+ - 5-30%
+
+ * - Others
+ - 4%
+ - 8%
+
+Mobility Control-Plane
+++++++++++++++++++++++
+
+This section will provide average per-session mobility control-plane traffic for various protocols associated with applications.
+
+Mobility Sessions per Hour
+++++++++++++++++++++++++++
+
+This section will provide per-hour average and min-max ranges for mobility application sessions.
diff --git a/commons/traffic-profile-guidelines.rst b/commons/traffic-profile-guidelines.rst
new file mode 100644
index 000000000..0b965b156
--- /dev/null
+++ b/commons/traffic-profile-guidelines.rst
@@ -0,0 +1,64 @@
+================================
+OPNFV traffic profile guidelines
+================================
+
+.. contents::
+
+.. _introduction:
+
+------------
+Introduction
+------------
+
+In order to have consistent testing profiles, it has been suggested to define and store traffic profiles.
+These profiles shall be based on representative operator scenarios.
+
+These reference profiles may be used by any test project, whether for unit, functional or performance tests.
+It is possible to adapt them to specific testcases.
+It is recommended to use them in order to avoid ending up with as many profiles as there are tests.
+They should also make it easier to compare the results of test scenarios.
+
+.. _howto:
+
+-------------------------
+How to use these profiles
+-------------------------
+
+The directory tree of the traffic profiles may be described as follows::
+
+ β”œβ”€β”€ commons
+ β”œβ”€β”€ ims
+ β”‚ └── readme.rst
+ β”œβ”€β”€ mobile
+ β”‚ └── readme.rst
+ └── traffic-profile-guidelines.rst
+
+The readme.rst file details each profile.
+
+
+.. _overview:
+
+------------------------
+Traffic profile overview
+------------------------
+
+The following profiles are currently available:
+ * Mobile traffic
+ * IMS residential traffic
+ * ...
+
+Mobile traffic
+==============
+
+IMS residential traffic
+=======================
+
+
+
+
+
+.. _reference:
+
+---------
+Reference
+---------
diff --git a/docs/functest.rst b/docs/functest.rst
index 515ab6986..ed436f586 100644
--- a/docs/functest.rst
+++ b/docs/functest.rst
@@ -71,11 +71,30 @@ In the rest of the document the OPNFV solution would be considered as the System
The installation and configuration of the tools needed to perform the tests will be described in the following sections.
-For release 1, the tools are automatically installed, but the tests are not fully automated due to the requirement that sourcing of OpenStack credentials is required on at least one machine where tests are launched. More details will be provided in the configuration section.
+For Arno SR1, the tools are automatically installed. Manual sourcing of OpenStack credentials is no longer required if you are fully integrated into the continuous integration.
+A script has been added to automatically retrieve the credentials.
+However, if you still install functest manually, you will need to source the rc file on the machine where you run the tests.
+More details will be provided in the configuration section.
.. _pharos: https://wiki.opnfv.org/pharos
It is recommended to install the different tools on the jump host server as defined in the pharos_ project.
+
+For functest, the following libraries are needed. You can install them either with yum install or apt-get install, depending on your operating system:
+ * python-pip
+ * python-dev
+ * libffi-dev
+ * libxml2-dev
+ * libxslt1-dev
+
+You will also need some Python modules:
+ * sudo pip install GitPython
+ * sudo pip install python-novaclient
+ * sudo pip install python-neutronclient
+ * sudo pip install python-glanceclient
+ * sudo pip install python-keystoneclient
+
+
The high level architecture can be described as follows::
CIMC/Lights+out management Admin Private Public Storage
@@ -98,7 +117,7 @@ The high level architecture can be described as follow::
| | | | | | | | |
| | | Tempest | | | | | |
| | +----------+ | | | | |
- | | FuncTests +-----------------------------------------+ |
+ | | FuncTest +-----------------------------------------+ |
| | | | | | |
| | +--------------------------------------------------+
| | | | | | |
@@ -161,6 +180,8 @@ The goal of this test can be described as follow::
This example, using the OpenStack Python clients, can be considered as a "Hello World" example and may be modified for future use.
+In SR1, some code has been added in order to push the results (status and duration) into a centralized test result database.
+
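+A minimal sketch of such a push, assuming the Python ``requests`` library;
+the payload field names and the pod name below are illustrative, not the
+exact database schema::
+
+    import json
+    import requests
+
+    # test_db_url from config_functest.yaml; payload fields are examples
+    url = "http://213.77.62.197/results"
+    payload = {"project_name": "functest", "case_name": "vPing",
+               "pod_name": "opnfv-jump-1",  # hypothetical pod name
+               "details": {"status": "OK", "duration": 81.0}}
+    headers = {'Content-Type': 'application/json'}
+    requests.post(url, data=json.dumps(payload), headers=headers)
+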
OpenDaylight
============
@@ -256,6 +277,8 @@ The goal of this test is to check the basic OpenStack functionality on a fre
Tooling installation
----------------------
+.. _fetch_os_creds.sh: https://git.opnfv.org/cgit/releng/tree/utils/fetch_os_creds.sh
+
2 external tools are needed for the functional tests on Arno:
* Rally
* Robot
@@ -291,82 +314,99 @@ This script will:
* Create Glance images
-When integrated in CI, the only prerequisite consists in retrieving the OpenStack credentials (rc file).
-This file shall be saved on the jumphost. It must be sourced by the user (who shall have sudo rights) executing the tests.
+When integrated in CI, there are no additional prerequisites.
+When running functest manually, the only prerequisite consists in retrieving the OpenStack credentials (rc file).
+This file shall be saved on the jumphost. It must be sourced by the user (who shall have sudo rights) executing the tests.
For the Continuous Integration we store this file under $HOME/functest/opnfv-openrc.sh on the jumphost server so CI can automatically execute the suite of tests.
The procedure to set up functional testing environment can be described as follow:
- Log on the Jumphost server
- Be sure you are no root then execute::
-
- [user@jumphost]$ mkdir <Your_functest_directory>
- [user@jumphost]$ cd <Your_functest_directory>
- [user@jumphost]$ git clone https://git.opnfv.org/functest
- [user@jumphost]$ cd testcases/
+Log on the Jumphost server. Be sure you are not root, then execute::
+
+ [user@jumphost]$ mkdir <Your_functest_directory>
+ [user@jumphost]$ cd <Your_functest_directory>
+ [user@jumphost]$ git clone https://git.opnfv.org/functest
+ [user@jumphost]$ cd testcases/
+
+Modify and adapt the needed parameters in config_functest.yaml, following the instructions below.
+
+Retrieve the OpenStack rc file (configure your `OpenRC`_ file to let Rally access your OpenStack; you can either export it from Horizon or build it manually, OpenStack credentials are required)::
- Modify and adapt needed parameters in the config_functest.yaml. Follow the instructions below.
- Retrieve OpenStack source file (configure your `OpenRC`_ file to let Rally access to your OpenStack, you can either export it from Horizon or build it manually (OpenStack credentials are required)::
+ [user@jumphost]$ source Your_OpenRC_file
+ [user@jumphost]$ python <functest_repo_directory>/config_functest.py -d <Your_functest_directory> start
+
+In SR1, a script has been created: fetch_os_creds.sh_. This script automatically retrieves the credentials of your OpenStack solution. You may run it manually::
+
+ [user@jumphost]$ /home/jenkins-ci/functest/fetch_os_creds.sh -d <destination> -i <installer_type> -a <installer_ip>
+
+where
+ * installer_type is fuel or foreman
+ * installer_ip is the IP address of your installer
+ * destination is the full path, including the file name.
- [user@jumphost]$ source Your_OpenRC_file
- [user@jumphost]$ python <functest_repo_directory>/config_functest.py -d <Your_functest_directory> start
+Examples::
+
+    [user@jumphost]$ ./fetch_os_creds.sh -d ./credentials -i foreman -a 172.30.10.73
+    [user@jumphost]$ ./fetch_os_creds.sh -d ./credentials -i fuel -a 10.20.0.2
+
At the end of the git clone, the tree of <functest_repo_directory> will have the following structure::
- β”œβ”€β”€ docs
- β”‚ β”œβ”€β”€ functest.rst
- β”‚ └── images
- β”‚ └── Ims_overview.png
- β”œβ”€β”€ INFO
- β”œβ”€β”€ LICENSE
- └── testcases
- β”œβ”€β”€ config_functest.py
- β”œβ”€β”€ config_functest.yaml
- β”œβ”€β”€ Controllers
- β”‚ └── ODL
- β”‚ β”œβ”€β”€ CI
- β”‚ β”‚ β”œβ”€β”€ create_venv.sh
- β”‚ β”‚ β”œβ”€β”€ custom_tests
- β”‚ β”‚ β”‚ └── neutron
- β”‚ β”‚ β”œβ”€β”€ integration
- β”‚ β”‚ β”‚ β”œβ”€β”€ distributions
- β”‚ β”‚ β”‚ β”œβ”€β”€ features
- β”‚ β”‚ β”‚ β”œβ”€β”€ feature-selector
- β”‚ β”‚ β”‚ β”œβ”€β”€ packaging
- β”‚ β”‚ β”‚ β”œβ”€β”€ pom.xml
- β”‚ β”‚ β”‚ β”œβ”€β”€ test
- β”‚ β”‚ β”‚ └── vm
- β”‚ β”‚ β”œβ”€β”€ logs
- β”‚ β”‚ β”œβ”€β”€ requirements.pip
- β”‚ β”‚ β”œβ”€β”€ start_tests.sh
- β”‚ β”‚ └── test_list.txt
- β”‚ └── ODL.md
- β”œβ”€β”€ functest_utils.py
- β”œβ”€β”€ VIM
- β”‚ └── OpenStack
- β”‚ β”œβ”€β”€ CI
- β”‚ β”‚ β”œβ”€β”€ libraries
- β”‚ β”‚ β”‚ └── run_rally.py
- β”‚ β”‚ └── suites
- β”‚ β”‚ β”œβ”€β”€ opnfv-authenticate.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-cinder.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-glance.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-heat.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-keystone.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-neutron.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-nova.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-quotas.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-requests.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-smoke-green.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-smoke.json
- β”‚ β”‚ β”œβ”€β”€ opnfv-tempest.json
- β”‚ β”‚ └── opnfv-vm.json
- β”‚ └── OpenStack.md
- └── vPing
- └── CI
- └── libraries
- └── vPing.py
+ |-- docs/
+ | |-- functest.rst
+ | |-- images
+ | |-- Ims_overview.png
+ |-- INFO
+ |-- LICENSE
+ |-- testcases/
+ |-- config_functest.py
+ |-- config_functest.yaml
+ |-- functest_utils.py
+ |-- Controllers/
+ | |-- ODL/
+ | |-- CI/
+ | | |-- create_venv.sh
+ | | |-- custom_tests/
+ | | | |-- neutron
+ | | |-- integration/
+ | | | |-- distributions
+ | | | |-- features
+ | | | |-- feature-selector
+ | | | |-- packaging
+ | | | |-- pom.xml
+ | | | |-- test
+ | | | |-- vm
+ | | |-- logs
+ | | |-- requirements.pip
+ | | |-- start_tests.sh
+ | | |-- test_list.txt
+ | |-- ODL.md
+ |-- functest_utils.py
+ |-- VIM/
+ | |-- OpenStack/
+ | |-- CI/
+ | | |-- libraries/
+ | | | |-- run_rally.py
+ | | |-- suites/
+ | | |-- opnfv-authenticate.json
+ | | |-- opnfv-cinder.json
+ | | |-- opnfv-glance.json
+ | | |-- opnfv-heat.json
+ | | |-- opnfv-keystone.json
+ | | |-- opnfv-neutron.json
+ | | |-- opnfv-nova.json
+ | | |-- opnfv-quotas.json
+ | | |-- opnfv-requests.json
+ | | |-- opnfv-smoke-green.json
+ | | |-- opnfv-smoke.json
+ | | |-- opnfv-tempest.json
+ | | |-- opnfv-vm.json
+ | |-- OpenStack.md
+ |-- vPing/
+ |-- CI/
+ |-- libraries/
+ |-- vPing.py
NOTE: the Rally environment will be installed under ~/.rally/; the default Tempest configuration (automatically generated by Rally based on OpenStack credentials) can be found under .rally/tempest/for-deployment-<deployment_id>/tempest.conf
@@ -492,7 +532,7 @@ The script will:
* run rally with the selected scenario
* generate the html result page into <result_folder>/<timestamp>/opnfv-[module name].html
* generate the json result page into <result_folder>/<timestamp>/opnfv-[module name].json
- * generate OK or KO per test based on json result file
+ * generate OK or NOK per test based on json result file
Tempest suite
=============
@@ -518,26 +558,29 @@ vPing
vPing result is displayed in the console::
Functest: run vPing
- 2015-06-02 21:24:55,065 - vPing - INFO - Glance image found 'functest-img'
- 2015-06-02 21:24:55,066 - vPing - INFO - Creating neutron network functest-net...
- 2015-06-02 21:24:57,672 - vPing - INFO - Flavor found 'm1.small'
- 2015-06-02 21:24:58,670 - vPing - INFO - Creating instance 'opnfv-vping-1' with IP 192.168.120.30...
- 2015-06-02 21:25:32,098 - vPing - INFO - Instance 'opnfv-vping-1' is ACTIVE.
- 2015-06-02 21:25:32,540 - vPing - INFO - Creating instance 'opnfv-vping-2' with IP 192.168.120.40...
- 2015-06-02 21:25:38,614 - vPing - INFO - Instance 'opnfv-vping-2' is ACTIVE.
- 2015-06-02 21:25:38,614 - vPing - INFO - Waiting for ping...
- 2015-06-02 21:26:42,385 - vPing - INFO - vPing detected!
- 2015-06-02 21:26:42,385 - vPing - INFO - Cleaning up...
- 2015-06-02 21:26:54,127 - vPing - INFO - Deleting network 'functest-net'...
- 2015-06-02 21:26:55,349 - vPing - INFO - vPing OK
-
+ 2015-09-13 22:11:49,502 - vPing- INFO - Glance image found 'functest-img'
+ 2015-09-13 22:11:49,502 - vPing- INFO - Creating neutron network functest-net...
+ 2015-09-13 22:11:50,275 - vPing- INFO - Flavor found 'm1.small'
+ 2015-09-13 22:11:50,318 - vPing- INFO - vPing Start Time:'2015-09-13 22:11:50'
+ 2015-09-13 22:11:50,470 - vPing- INFO - Creating instance 'opnfv-vping-1' with IP 192.168.120.30...
+ 2015-09-13 22:11:58,803 - vPing- INFO - Instance 'opnfv-vping-1' is ACTIVE.
+ 2015-09-13 22:11:58,981 - vPing- INFO - Creating instance 'opnfv-vping-2' with IP 192.168.120.40...
+ 2015-09-13 22:12:09,169 - vPing- INFO - Instance 'opnfv-vping-2' is ACTIVE.
+ 2015-09-13 22:12:09,169 - vPing- INFO - Waiting for ping...
+ 2015-09-13 22:13:11,329 - vPing- INFO - vPing detected!
+ 2015-09-13 22:13:11,329 - vPing- INFO - vPing duration:'81.0'
+ 2015-09-13 22:13:11,329 - vPing- INFO - Cleaning up...
+ 2015-09-13 22:13:18,727 - vPing- INFO - Deleting network 'functest-net'...
+    2015-09-13 22:13:19,470 - vPing- INFO - vPing OK
+
+A json file is produced and pushed into the test result database.
OpenDaylight
============
.. _`functest wiki (ODL section)`: https://wiki.opnfv.org/r1_odl_suite
-The results of ODL tests can be seen in the console::
+The results of ODL tests can be seen in the console::
==============================================================================
Basic
@@ -580,11 +623,11 @@ The results of ODL tests can be seen in the console::
ODL result page
.. figure:: ./images/functestODL.png
- :scale: 50 %
+ :width: 170mm
:align: center
:alt: ODL suite result page
-
+
Known issues
------------
@@ -635,11 +678,11 @@ You shall see the results as follow::
Total results of verification:
- +--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------+
- | UUID | Deployment UUID | Set name | Tests | Failures | Created at | Status |
- +--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------+
- | 0144c50f-ab03-45fb-9c36-242ad6440b46 | d9e1bb21-8e36-4d89-b137-0c852dbb308e | smoke | 87 | 32 | 2015-05-05 16:36:00.986003 | finished |
- +--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------+
+ +--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------------+----------+
+ | UUID | Deployment UUID | Set name | Tests | Failures | Created at | Duration | Status |
+ +--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------------+----------+
+ | 546c678a-19c4-4b2e-8f24-6f8c5ff20635 | 9c13dbbe-7a80-43db-8d6c-c4a61f257c7f | smoke | 111 | 15 | 2015-09-14 06:18:54.896224 | 0:00:51.804504 | finished |
+ +--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------------+----------+
If you run this test several times, you will see as many lines as test attempts.
@@ -675,13 +718,10 @@ Known issues
.. _`functest wiki (Tempest section)`: https://wiki.opnfv.org/r1_tempest
.. _`ODL bug lists`: https://bugs.opendaylight.org/buglist.cgi?component=General&product=neutron&resolution=---
-Several tests are declared as failed. They can be divided in 3 main categories:
- * Invalid credentials (10 errors)
+Several tests are declared as failed. They can be divided into 2 main categories:
* Multiple possible networks found, use a Network ID to be more specific.
* Network errors
-The "Invalid Credential" error is not an error. Adding "admin_domain_name=default" in the tempest.conf file generated by Rally will lead to successful tests. A `Rally patch`_ has been proposed to Rally community.
-
The "Multiple possible networks" error occurs several times and may have different origins. It indicates that the test needs a network context to run properly. A change in the `automatically generated tempest.conf`_ file could allow the network ID to be specified.
The network errors are varied and deal with all aspects of networking: create/update/delete network/subnet/port/router. Some may be due to a (possible) bug in Tempest when it tries to delete networks which should not be there for the following tests. Some may be caused by ODL bugs; several bugs related to Tempest are already reported in the `ODL bug lists`_.
diff --git a/testcases/Controllers/ODL/CI/start_tests.sh b/testcases/Controllers/ODL/CI/start_tests.sh
index 7bc0b513b..56f4d564d 100755
--- a/testcases/Controllers/ODL/CI/start_tests.sh
+++ b/testcases/Controllers/ODL/CI/start_tests.sh
@@ -56,7 +56,11 @@ else
fi
# Change openstack password for admin tenant in neutron suite
-sed -i "s/\"password\": \"admin\"/\"password\": \"${PASS}\"/" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
+sed -i "s/\"password\": \".*\"/\"password\": \"${PASS}\"/" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
+
+# Add Start Suite and Teardown Suite
+sed -i "/^Documentation.*/a Suite Teardown Stop Suite" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
+sed -i "/^Documentation.*/a Suite Setup Start Suite" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
if source $BASEDIR/venv/bin/activate; then
echo -e "${green}Python virtualenv activated.${nc}"
@@ -72,7 +76,7 @@ cp -vf $BASEDIR/custom_tests/neutron/* $BASEDIR/integration/test/csit/suites/ope
# List of tests are specified in test_list.txt
# those are relative paths to test directories from integration suite
echo -e "${green}Executing chosen tests.${nc}"
-test_num=1
+test_num=0
while read line
do
# skip comments
@@ -80,16 +84,23 @@ do
# skip empty lines
[[ -z "${line}" ]] && continue
+ ((test_num++))
echo -e "${light_green}Starting test: $line ${nc}"
pybot -v OPENSTACK:${NEUTRON_IP} -v PORT:${ODL_PORT} -v CONTROLLER:${ODL_IP} ${BASEDIR}/$line
mkdir -p $BASEDIR/logs/${test_num}
mv log.html $BASEDIR/logs/${test_num}/
mv report.html $BASEDIR/logs/${test_num}/
mv output.xml $BASEDIR/logs/${test_num}/
- ((test_num++))
done < ${BASEDIR}/test_list.txt
+# create final report which includes all partial test reports
+for i in $(seq $test_num); do
+ rebot_params="$rebot_params $BASEDIR/logs/$i/output.xml"
+done
+
+echo -e "${green}Final report is located:${nc}"
+rebot $rebot_params
+
+# deactivate venv
echo -e "${green}Deactivate venv.${nc}"
deactivate
-
-# Now we can copy output.xml, log.html and report.xml files generated by robot.
diff --git a/testcases/Controllers/ONOS/Teston/CI/Readme.txt b/testcases/Controllers/ONOS/Teston/CI/Readme.txt
new file mode 100644
index 000000000..7648b2a98
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/Readme.txt
@@ -0,0 +1,5 @@
+1. This is a basic ONOS test run; we will keep improving it.
+2. This test includes two suites:
+(1) Northbound test (network/subnet/ports create/update/delete)
+(2) Ovsdb test: default configuration, openflow connection, VMs going online.
+3. Later we will build a framework to run these tests.
\ No newline at end of file
diff --git a/testcases/Controllers/ONOS/Teston/CI/dependencies/onos b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos
new file mode 100644
index 000000000..d4d59e0f7
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos
@@ -0,0 +1,23 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# ONOS remote command-line client.
+# -----------------------------------------------------------------------------
+
+[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
+. /root/.bashrc
+. $ONOS_ROOT/tools/build/envDefaults
+. $ONOS_ROOT/tools/test/bin/find-node.sh
+
+[ "$1" = "-w" ] && shift && onos-wait-for-start $1
+
+[ -n "$1" ] && OCI=$(find_node $1) && shift
+
+if which client 1>/dev/null 2>&1 && [ -z "$ONOS_USE_SSH" ]; then
+ # Use Karaf client only if we can and are allowed to
+ unset KARAF_HOME
+ client -h $OCI -u karaf "$@" 2>/dev/null
+else
+ # Otherwise use raw ssh; strict checking is off for dev environments only
+ #ssh -p 8101 -o StrictHostKeyChecking=no $OCI "$@"
+ sshpass -p karaf ssh -l karaf -p 8101 $OCI "$@"
+fi
diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
new file mode 100644
index 000000000..72fa4ae1f
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
@@ -0,0 +1,203 @@
+"""
+Description: This test is to run onos Teston VTN scripts
+
+List of test cases:
+CASE1 - Northbound NBI test network/subnet/ports
+CASE2 - Ovsdb test & default configuration & VM going online
+
+lanqinglong@huawei.com
+"""
+import os
+import os.path
+import time
+import pexpect
+import re
+import sys
+
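+# Log in to a remote host over SSH using pexpect, answering the password
+# and host-key prompts, then hand the session over to the user.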
+def SSHlogin(ipaddr,username,password):
+ login = pexpect.spawn('ssh %s@%s'%(username,ipaddr))
+ index = 0
+ while index != 2:
+ index = login.expect(['assword:','yes/no','#|$',pexpect.EOF])
+ if index == 0:
+ login.sendline(password)
+ login.interact()
+ if index == 1:
+ login.sendline('yes')
+ print "Login Success!"
+
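+# SSH once to the ONOS karaf console (port 8101) so that the host key is
+# accepted and added to known_hosts.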
+def AddKarafUser(ipaddr,username,password):
+ print '\033[1;31;40m'
+ print "Now Adding karaf user to OC1..."
+ print "\033[0m"
+ login = pexpect.spawn("ssh -l %s -p 8101 %s"%(username,ipaddr))
+ index = 0
+ while index != 2:
+ index = login.expect(['assword:','yes/no',pexpect.EOF])
+ if index == 0:
+ login.sendline(password)
+ login.sendline("logout")
+ index = login.expect(["closed",pexpect.EOF])
+ if index == 0:
+ print "Add SSH Known Host Success!"
+ else:
+ print "Add SSH Known Host Failed! Please Check!"
+ login.interact()
+ if index == 1:
+ login.sendline('yes')
+
+def DownLoadCode():
+ print '\033[1;31;40m'
+    print "Now downloading the test code! Please be patient..."
+ print "\033[0m"
+ os.system("git clone https://github.com/sunyulin/OnosSystemTest.git")
+ time.sleep(1)
+ os.system("git clone https://gerrit.onosproject.org/onos")
+ time.sleep(1)
+ print "Done!"
+
+def CleanEnv():
+ print '\033[1;31;40m'
+ print "Now Cleaning test environment"
+ print "\033[0m"
+ os.system("sudo apt-get install -y mininet")
+ os.system("OnosSystemTest/TestON/bin/cleanup.sh")
+ time.sleep(5)
+ print "Done!"
+
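+# Run an onos-push-keys command, answering its interactive prompts.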
+def OnosPushKeys(cmd,password):
+ print '\033[1;31;40m'
+ print "Now Pushing Onos Keys:"+cmd
+ print "\033[0m"
+ Pushkeys = pexpect.spawn(cmd)
+ Result = 0
+ while Result != 2:
+ Result = Pushkeys.expect(["yes","password",pexpect.EOF,pexpect.TIMEOUT])
+ if (Result == 0):
+ Pushkeys.sendline("yes")
+ if (Result == 1):
+ Pushkeys.sendline(password)
+ if (Result == 3):
+ print("Push keys Error!")
+ print "Done!"
+
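+# Append "source /root/onos/tools/dev/bash_profile" to /etc/profile if it is
+# not already present (the 'name' argument is currently unused).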
+def AddEnvIntoBashrc(name):
+ print '\033[1;31;40m'
+ print "Now Adding bash environment"
+ print "\033[0m"
+ fileopen = open("/etc/profile",'r')
+ findContext = 1
+ while findContext:
+ findContext = fileopen.readline()
+ result = findContext.find('dev/bash_profile')
+ if result != -1:
+ break
+    fileopen.close()
+ if result == -1:
+ envAdd = open("/etc/profile",'a+')
+ envAdd.writelines("\nsource /root/onos/tools/dev/bash_profile")
+ envAdd.close()
+
+def SetEnvVar(masterpass,agentpass):
+ print '\033[1;31;40m'
+ print "Now Setting test environment"
+ print "\033[0m"
+ os.environ["OCT"] = "10.1.0.1"
+ os.environ["OC1"] = "10.1.0.50"
+ os.environ["OC2"] = "10.1.0.51"
+ os.environ["OC3"] = "10.1.0.52"
+ os.environ["OCN"] = "10.1.0.53"
+ os.environ["OCN2"] = "10.1.0.54"
+ os.environ["localhost"] = "10.1.0.1"
+ os.system("sudo pip install configobj")
+ os.system("sudo apt-get install -y sshpass")
+ OnosPushKeys("onos-push-keys 10.1.0.1",masterpass)
+ OnosPushKeys("onos-push-keys 10.1.0.50",agentpass)
+ OnosPushKeys("onos-push-keys 10.1.0.53",agentpass)
+ OnosPushKeys("onos-push-keys 10.1.0.54",agentpass)
+
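+# Regenerate the local SSH key pair (note: this removes everything under ~/.ssh).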
+def Gensshkey():
+ print '\033[1;31;40m'
+ print "Now Generating SSH keys..."
+ print "\033[0m"
+ os.system("rm -rf ~/.ssh/*")
+ keysub = pexpect.spawn("ssh-keygen -t rsa")
+ Result = 0
+ while Result != 2:
+ Result = keysub.expect(["Overwrite","Enter",pexpect.EOF,pexpect.TIMEOUT])
+ if Result == 0:
+ keysub.sendline("y")
+ if Result == 1:
+ keysub.sendline("\n")
+ if Result == 3:
+            print "Generate SSH key failed."
+ print "Done!"
+
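+# Replace the default ONOS user/group ("sdn") and password ("rocks") in
+# tools/build/envDefaults with the given credentials.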
+def ChangeOnosName(user,password):
+ print '\033[1;31;40m'
+ print "Now Changing ONOS name&password"
+ print "\033[0m"
+ line = open("onos/tools/build/envDefaults",'r').readlines()
+ lenall = len(line)-1
+ for i in range(lenall):
+ if "ONOS_USER=" in line[i]:
+ line[i]=line[i].replace("sdn",user)
+ if "ONOS_GROUP" in line[i]:
+ line[i]=line[i].replace("sdn",user)
+ if "ONOS_PWD" in line[i]:
+ line[i]=line[i].replace("rocks",password)
+ NewFile = open("onos/tools/build/envDefaults",'w')
+ NewFile.writelines(line)
+    NewFile.close()
+ print "Done!"
+
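+# Rewrite the user/password fields of each node in the TestON .topo file:
+# master/OCT entries get the given credentials, OC*/OCN* entries get root/root.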
+def ChangeTestCasePara(testcase,user,password):
+ print '\033[1;31;40m'
+ print "Now Changing " + testcase + " name&password"
+ print "\033[0m"
+ filepath = "OnosSystemTest/TestON/tests/" + testcase + "/" + testcase + ".topo"
+ line = open(filepath,'r').readlines()
+ lenall = len(line)-1
+ for i in range(lenall-2):
+ if ("localhost" in line[i]) or ("OCT" in line[i]):
+ line[i+1]=re.sub(">\w+",">"+user,line[i+1])
+ line[i+2]=re.sub(">\w+",">"+password,line[i+2])
+ if "OC1" in line [i] \
+ or "OC2" in line [i] \
+ or "OC3" in line [i] \
+ or "OCN" in line [i] \
+ or "OCN2" in line[i]:
+ line[i+1]=re.sub(">\w+",">root",line[i+1])
+ line[i+2]=re.sub(">\w+",">root",line[i+2])
+ NewFile = open(filepath,'w')
+ NewFile.writelines(line)
+    NewFile.close()
+ print "Done!"
+
+def RunScript(testname,masterusername,masterpassword):
+ ChangeTestCasePara(testname,masterusername,masterpassword)
+ runtest = "OnosSystemTest/TestON/bin/cli.py run " + testname
+ os.system(runtest)
+ print "Done!"
+
+if __name__=="__main__":
+
+    # This is the user & password of the Compass master machine; modify as needed
+ masterusername = "root"
+ masterpassword = "root"
+
+    # You do not need to change the config below
+ agentusername = "root"
+ agentpassword = "root"
+
+ print "Test Begin....."
+ Gensshkey()
+ AddKarafUser("10.1.0.50","karaf","karaf")
+ AddEnvIntoBashrc("source onos/tools/dev/bash_profile")
+ SSHlogin("10.1.0.1",masterusername,masterpassword)
+ ChangeOnosName(agentusername,agentpassword)
+ DownLoadCode()
+ CleanEnv()
+ SetEnvVar(masterpassword,agentpassword)
+ RunScript("FUNCvirNetNB",masterusername,masterpassword)
+ RunScript("FUNCovsdbtest",masterusername,masterpassword)
diff --git a/testcases/Dashboard/dashboard_utils.py b/testcases/Dashboard/dashboard_utils.py
new file mode 100644
index 000000000..90562855a
--- /dev/null
+++ b/testcases/Dashboard/dashboard_utils.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to get data from test DB
+# and format them into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import json
+import requests
+from vPing2Dashboard import format_vPing_for_dashboard
+
+
+class TestCriteria:
+
+ """ describes the test criteria platform """
+ def __init__(self):
+ self.project = ''
+ self.testcase = ''
+ self.pod_id = -1
+ self.duration = 'all'
+ self.version = 'all'
+ self.installer = 'all'
+
+ def setCriteria(self, project, testcase, pod_id,
+ duration, version, installer):
+ self.project = project
+ self.testcase = testcase
+ self.pod_id = pod_id
+ self.duration = duration
+ self.version = version
+ self.installer = installer
+
+ def format_criteria(self, name):
+ if(name == 'all' or name == 0):
+ return ""
+ else:
+ if(type(name) == int):
+ return "-" + str(name)
+ else:
+ return "-" + name
+
+ def format(self):
+ pod_name = self.format_criteria(self.pod_id)
+ version_name = self.format_criteria(self.version)
+ installer_name = self.format_criteria(self.installer)
+ duration_name = self.format_criteria(self.duration)
+ try:
+ fileName = "result-" + self.project + "-" + self.testcase + \
+ pod_name + version_name + installer_name + \
+ duration_name + ".json"
+        except:
+            print "Impossible to format json file name"
+            return ""
+        return fileName
+
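+# Example: criteria (functest, vPing, pod 1, 90 days, ArnoSR1, fuel) formats
+# to "result-functest-vPing-1-ArnoSR1-fuel-90.json".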
+
+def get_pods(db_url):
+ # retrieve the list of pods
+ url = db_url + "/pods"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ pods_data = json.loads(db_data.text)
+ # Get results
+ pods = pods_data['pods']
+ pods_table = []
+ for pod in pods:
+            # cast to int because otherwise the API returns 1.0
+ # TODO check format with API
+ pods_table.append(int(pod['_id']))
+
+ pods_table.append(0) # 0 means all the pods here
+ return pods_table
+ except:
+ print "Error retrieving the list of PODs"
+ return None
+
+
+def get_versions(db_url):
+ # retrieve the list of versions
+ # TODO not supported in API yet
+ url = db_url + "/versions"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ versions_data = json.loads(db_data.text)
+ # Get results
+ versions = versions_data['versions']
+
+ versions_table = []
+ for version in versions:
+ versions_table.append(version['version'])
+
+ versions_table.append('all')
+
+ return versions_table
+ except:
+ print "Error retrieving the list of OPNFV versions"
+ return None
+
+
+def get_installers(db_url):
+ # retrieve the list of installers
+ # TODO not supported in API yet
+ url = db_url + "/installers"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ installers_data = json.loads(db_data.text)
+ # Get results
+ installers = installers_data['installers']
+
+ installers_table = []
+ for installer in installers:
+ installers_table.append(installer['installer'])
+
+ installers_table.append('all')
+
+        return installers_table
+ except:
+ print "Error retrieving the list of OPNFV installers"
+ return None
+
+
+def get_testcases(db_url, project):
+ # retrieve the list of pods
+ url = db_url + "/test_projects/" + project + "/cases"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ testcases_data = json.loads(db_data.text)
+ # Get results
+ testcases = testcases_data['test_cases']
+ testcases_table = []
+ for testcase in testcases:
+ testcases_table.append(testcase['name'])
+
+ testcases_table.append('all')
+
+ return testcases_table
+ except:
+ print "Error retrieving the list of testcases"
+ return None
+
+
+def get_results(db_url, test_criteria):
+
+ # use param to filter request to result DB
+    # if not specified => no filter
+ # filter criteria:
+ # - POD
+ # - versions
+ # - installers
+ # - testcase
+ # - test projects
+ # - timeframe (last 30 days, 365 days, since beginning of the project)
+ # e.g.
+ # - vPing tests since 2 months
+ # - Tempest tests on LF POD2 fuel based / Arno stable since the beginning
+ # - yardstick tests on any POD since 30 days
+ # - Qtip tests on dell-test1 POD
+ #
+ # params = {"pod_id":pod_id, "testcase":testcase}
+ # filter_date = days # data from now - days
+
+ # test_project = test_criteria.project
+ testcase = test_criteria.testcase
+ # duration_frame = test_criteria.duration
+ # version = test_criteria.version
+ # installer_type = test_criteria.installer
+ pod_id = test_criteria.pod_id
+
+ pod_criteria = ""
+ if (pod_id > 0):
+ pod_criteria = "&pod=" + str(pod_id)
+
+ # TODO complete params (installer type, testcase, version )
+ # need API to be up to date
+ # we assume that criteria could be used at the API level
+    # no need for extra processing on dates, for instance
+ params = {"pod_id": pod_id}
+
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ url = db_url + "/results?case=" + testcase + pod_criteria
+
+ # Send Request to Test DB
+ myData = requests.get(url, data=json.dumps(params), headers=headers)
+ # Get result as a json object
+ myNewData = json.loads(myData.text)
+
+ # Get results
+ myDataResults = myNewData['test_results']
+
+ return myDataResults
+
+
+def generateJson(test_name, test_case, db_url):
+ # pod_id = 1
+ # test_version = 'Arno master'
+ # test_installer = 'fuel'
+ # test_retention = 30
+
+ pods = get_pods(db_url)
+ versions = ['ArnoR1', 'ArnoSR1', 'all'] # not available in the API yet
+ installers = ['fuel', 'foreman', 'all'] # not available in the API yet
+ test_durations = [90, 365, 'all'] # not available through the API yet
+
+ # For all the PoDs
+ for pod in pods:
+ # all the versions
+ for version in versions:
+ # all the installers
+ for installer in installers:
+ # all the retention time
+ for test_duration in test_durations:
+
+ criteria = TestCriteria()
+ criteria.setCriteria(test_name, test_case, pod,
+ test_duration, version, installer)
+ format_data_for_dashboard(criteria)
+
+
+def format_data_for_dashboard(criteria):
+
+ # Depending on the use case, json for dashboarding is customized
+ # depending on the graph you want to show
+
+ if (criteria.testcase == "vPing"):
+ format_vPing_for_dashboard(criteria)
diff --git a/testcases/Dashboard/functest2Dashboard.py b/testcases/Dashboard/functest2Dashboard.py
new file mode 100644
index 000000000..c03ddbd14
--- /dev/null
+++ b/testcases/Dashboard/functest2Dashboard.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to get data from test DB
+# and format them into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+ch = logging.StreamHandler()
+if args.debug:
+ ch.setLevel(logging.DEBUG)
+else:
+ ch.setLevel(logging.INFO)
+
+formatter = logging.Formatter('%(asctime)s - %(name)s -\
+ %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def main():
+ try:
+ logger.info("Functest test result generation for dashboard")
+
+ # TODO create the loop to provide all the json files
+ logger.debug("Retrieve all the testcases from DB")
+ test_cases = dashboard_utils.get_testcases(TEST_DB, "functest")
+
+        # TODO to be refactored once graphs for Tempest, Rally and ODL are ready
+ # Do it only for vPing in first stage
+ for case in test_cases:
+ logger.debug("Generate " + case + " json files")
+ dashboard_utils.generateJson('functest', case, TEST_DB)
+
+ logger.info("Functest json files for dashboard successfully generated")
+ except:
+ logger.error("Impossible to generate json files for dashboard")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testcases/Dashboard/odl2Dashboard.py b/testcases/Dashboard/odl2Dashboard.py
new file mode 100644
index 000000000..12247663e
--- /dev/null
+++ b/testcases/Dashboard/odl2Dashboard.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the ODL test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+# import json
+# import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_odl_for_dashboard(criteria):
+    logger.debug("generate dashboard json files for the ODL suite")
diff --git a/testcases/Dashboard/rally2Dashboard.py b/testcases/Dashboard/rally2Dashboard.py
new file mode 100644
index 000000000..20e597468
--- /dev/null
+++ b/testcases/Dashboard/rally2Dashboard.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the rally test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+# import json
+# import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_rally_for_dashboard(criteria):
+    logger.debug("generate dashboard json files for Rally")
diff --git a/testcases/Dashboard/tempest2Dashboard.py b/testcases/Dashboard/tempest2Dashboard.py
new file mode 100644
index 000000000..8cbecbbc8
--- /dev/null
+++ b/testcases/Dashboard/tempest2Dashboard.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the tempest test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+# import json
+# import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_tempest_for_dashboard(criteria):
+ logger.debug("generate dashboard json files for Tempest")
diff --git a/testcases/Dashboard/vPing2Dashboard.py b/testcases/Dashboard/vPing2Dashboard.py
new file mode 100644
index 000000000..f799e280f
--- /dev/null
+++ b/testcases/Dashboard/vPing2Dashboard.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the vPing test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+import json
+import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_vPing_for_dashboard(criteria):
+
+ # Get results
+ myDataResults = dashboard_utils.get_results(TEST_DB, criteria)
+
+ # Depending on the use case, json for dashboarding is customized
+ # depending on the graph you want to show
+
+ test_data = [{'description': 'vPing results for Dashboard'}]
+
+ # Graph 1: Duration = f(time)
+ # ***************************
+ new_element = []
+ for data in myDataResults:
+ new_element.append({'x': data['creation_date'],
+ 'y': data['details']['duration']})
+
+ test_data.append({'name': "vPing duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in myDataResults:
+ nbTest += 1
+ if data['details']['status'] == "OK":
+ nbTestOk += 1
+
+ test_data.append({'name': "vPing status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ # Generate json file
+ fileName = criteria.format()
+ logger.debug("Generate json file:" + fileName)
+
+ with open(fileName, "w") as outfile:
+ json.dump(test_data, outfile, indent=4)
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 341281a2c..61bbaaeb7 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -12,11 +12,6 @@ import re, json, os, urllib2, argparse, logging, yaml
-""" get the date """
-cmd = os.popen("date '+%d%m%Y_%H%M'")
-test_date = cmd.read().rstrip()
-
-
""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'vm', 'tempest', 'all', 'smoke']
parser = argparse.ArgumentParser()
@@ -57,7 +52,7 @@ f.close()
HOME = os.environ['HOME']+"/"
REPO_PATH = args.repo_path
SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_rally_scn")
-RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res") + test_date + "/"
+RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res") + "/rally/"
diff --git a/testcases/config_functest.py b/testcases/config_functest.py
index e618d2dd4..7fbd06042 100644
--- a/testcases/config_functest.py
+++ b/testcases/config_functest.py
@@ -8,9 +8,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml, sys
+import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml, sys, getpass
import functest_utils
from git import Repo
+from os import stat
+from pwd import getpwuid
actions = ['start', 'check', 'clean']
parser = argparse.ArgumentParser()
@@ -53,6 +55,7 @@ REPO_PATH = args.repo_path
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_rally")
RALLY_REPO_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_repo")
RALLY_INSTALLATION_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_inst")
+RALLY_RESULT_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res")
VPING_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_vping")
ODL_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_odl")
@@ -70,6 +73,10 @@ def action_start():
"""
Start the functest environment installation
"""
+ if not check_permissions():
+ logger.error("Bad Python cache directory ownership.")
+ exit(-1)
+
if not functest_utils.check_internet_connectivity():
logger.error("There is no Internet connectivity. Please check the network configuration.")
exit(-1)
@@ -83,11 +90,10 @@ def action_start():
logger.debug("Cleaning possible functest environment leftovers.")
action_clean()
- logger.info("Starting installation of functest environment")
- logger.info("Installing Rally...")
- if not install_rally():
- logger.error("There has been a problem while installing Rally.")
- action_clean()
+ logger.info("Installing needed libraries on the host")
+ cmd = "sudo yum -y install gcc libffi-devel python-devel openssl-devel gmp-devel libxml2-devel libxslt-devel postgresql-devel git wget"
+ if not functest_utils.execute_command(cmd, logger):
+ logger.error("There has been a problem while installing software packages.")
exit(-1)
logger.info("Installing ODL environment...")
@@ -96,6 +102,16 @@ def action_start():
action_clean()
exit(-1)
+ logger.info("Starting installation of functest environment")
+ logger.info("Installing Rally...")
+ if not install_rally():
+ logger.error("There has been a problem while installing Rally.")
+ action_clean()
+ exit(-1)
+
+ # Create result folder under functest if necessary
+ if not os.path.exists(RALLY_RESULT_DIR):
+ os.makedirs(RALLY_RESULT_DIR)
logger.info("Downloading image...")
if not functest_utils.download_url(IMAGE_URL, IMAGE_DIR):
@@ -206,10 +222,27 @@ def action_clean():
cmd = "glance image-delete " + image_id
functest_utils.execute_command(cmd,logger)
+ if os.path.exists(RALLY_RESULT_DIR):
+ logger.debug("Removing Result directory")
+ shutil.rmtree(RALLY_RESULT_DIR,ignore_errors=True)
+
+
logger.info("Functest environment clean!")
+def check_permissions():
+ current_user = getpass.getuser()
+ cache_dir = HOME+".cache/pip"
+ logger.info("Checking permissions of '%s'..." %cache_dir)
+ logger.debug("Current user is '%s'" %current_user)
+ cache_user = getpwuid(stat(cache_dir).st_uid).pw_name
+ logger.debug("Cache directory owner is '%s'" %cache_user)
+ if cache_user != current_user:
+ logger.info("The owner of '%s' is '%s'. Please run 'sudo chown -R %s %s'." %(cache_dir, cache_user, current_user, cache_dir))
+ return False
+
+ return True
def install_rally():
@@ -221,8 +254,9 @@ def install_rally():
Repo.clone_from(url, RALLY_REPO_DIR)
logger.debug("Executing %s./install_rally.sh..." %RALLY_REPO_DIR)
- install_script = RALLY_REPO_DIR + "install_rally.sh"
- functest_utils.execute_command(install_script,logger)
+ install_script = RALLY_REPO_DIR + "install_rally.sh --yes"
+ cmd = 'sudo ' + install_script
+ functest_utils.execute_command(cmd,logger)
logger.debug("Creating Rally environment...")
cmd = "rally deployment create --fromenv --name=opnfv-arno-rally"
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index 40eb024ad..c38b46066 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -37,3 +37,6 @@ vping:
vm_name_2: opnfv-vping-2
ip_1: 192.168.120.30
ip_2: 192.168.120.40
+
+results:
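+  # centralized test result database, used by vPing and the Dashboard scripts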
+ test_db_url: http://213.77.62.197
diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py
index 26c1f478f..6af55f7a7 100644
--- a/testcases/functest_utils.py
+++ b/testcases/functest_utils.py
@@ -31,10 +31,6 @@ def check_credentials():
os.environ['OS_TENANT_NAME']
except KeyError:
return False
- try:
- os.environ['OS_REGION_NAME']
- except KeyError:
- return False
return True
@@ -194,16 +190,103 @@ def check_neutron_net(neutron_client, net_name):
return True
return False
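+# The helpers below are small lookup/creation wrappers around the OpenStack
+# python clients; the lookups return the matching id, or '' when not found.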
+def get_image_id(glance_client, image_name):
+ images = glance_client.images.list()
+ id = ''
+ for i in images:
+ if i.name == image_name:
+ id = i.id
+ break
+ return id
+
+def create_glance_image(glance_client, image_name, file_path):
+ try:
+ with open(file_path) as fimage:
+ image = glance_client.images.create(name=image_name, is_public=True, disk_format="qcow2",
+ container_format="bare", data=fimage)
+ return image.id
+ except:
+ return False
+
+def get_flavor_id(nova_client, flavor_name):
+ flavors = nova_client.flavors.list(detailed=True)
+ id = ''
+ for f in flavors:
+ if f.name == flavor_name:
+ id = f.id
+ break
+ return id
+
+def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
+ flavors = nova_client.flavors.list(detailed=True)
+ id = ''
+ for f in flavors:
+ if min_ram <= f.ram and f.ram <= max_ram:
+ id = f.id
+ break
+ return id
+
+
+def get_tenant_id(keystone_client, tenant_name):
+ tenants = keystone_client.tenants.list()
+ id = ''
+ for t in tenants:
+ if t.name == tenant_name:
+ id = t.id
+ break
+ return id
+
+def get_role_id(keystone_client, role_name):
+ roles = keystone_client.roles.list()
+ id = ''
+ for r in roles:
+ if r.name == role_name:
+ id = r.id
+ break
+ return id
+
+def get_user_id(keystone_client, user_name):
+ users = keystone_client.users.list()
+ id = ''
+ for u in users:
+ if u.name == user_name:
+ id = u.id
+ break
+ return id
+
+def create_tenant(keystone_client, tenant_name, tenant_description):
+ try:
+ tenant = keystone_client.tenants.create(tenant_name, tenant_description, enabled=True)
+ return tenant.id
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+def delete_tenant(keystone_client, tenant_id):
+ try:
+ tenant = keystone_client.tenants.delete(tenant_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+def add_role_user(keystone_client, user_id, role_id, tenant_id):
+ try:
+ keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
-def check_internet_connectivity(url='http://www.google.com/'):
+def check_internet_connectivity(url='http://www.opnfv.org/'):
"""
Check if there is access to the internet
"""
try:
urllib2.urlopen(url, timeout=5)
return True
- except urllib.request.URLError:
+    except urllib2.URLError:
return False
diff --git a/testcases/vIMS/vIMS.md b/testcases/vIMS/vIMS.md
new file mode 100644
index 000000000..68f86d9fa
--- /dev/null
+++ b/testcases/vIMS/vIMS.md
@@ -0,0 +1,3 @@
+# vIMS README
+
+
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing.py
index 1cc73922c..5d68f2229 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing.py
@@ -1,49 +1,69 @@
#!/usr/bin/python
#
-# Copyright (c) 2015 All rights reserved. This program and the accompanying materials
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# This script boots the VM1 and allocates IP address from Nova
+# 0.1: This script boots VM1 and allocates an IP address from Nova
# Later, VM2 boots and executes cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
+# 0.2: measures the test duration and publishes the results in JSON format
#
-# Note: this is script works only with Ubuntu image, not with Cirros image
#
-import os, time, subprocess, logging, argparse, yaml, pprint, sys
+import os
+import time
+import argparse
+import pprint
+import sys
+import json
+import logging
+import yaml
+import datetime
+import requests
import novaclient.v2.client as novaclient
from neutronclient.v2_0 import client as neutronclient
pp = pprint.PrettyPrinter(indent=4)
-
parser = argparse.ArgumentParser()
+
parser.add_argument("repo_path", help="Path to the repository")
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+
args = parser.parse_args()
sys.path.append(args.repo_path + "testcases/")
+
import functest_utils
""" logging configuration """
+
logger = logging.getLogger('vPing')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
+
if args.debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+formatter = logging.Formatter('%(asctime)s - %(name)s'
+ ' - %(levelname)s - %(message)s')
+
ch.setFormatter(formatter)
logger.addHandler(ch)
-HOME = os.environ['HOME']+"/"
+HOME = os.environ['HOME'] + "/"
-with open(args.repo_path+"testcases/config_functest.yaml") as f:
+with open(args.repo_path + "testcases/config_functest.yaml") as f:
functest_yaml = yaml.safe_load(f)
f.close()
@@ -51,151 +71,203 @@ f.close()
VM_BOOT_TIMEOUT = 180
VM_DELETE_TIMEOUT = 100
PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
+TEST_DB = functest_yaml.get("results").get("test_db_url")
NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
IP_1 = functest_yaml.get("vping").get("ip_1")
IP_2 = functest_yaml.get("vping").get("ip_2")
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get("image_name")
+GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
+ get("openstack").get("image_name")
FLAVOR = functest_yaml.get("vping").get("vm_flavor")
# NEUTRON Private Network parameters
-NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("general").get("openstack").get("neutron_private_net_name")
-NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("general").get("openstack").get("neutron_private_subnet_name")
-NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("general").get("openstack").get("neutron_private_subnet_cidr")
-NEUTRON_ROUTER_NAME = functest_yaml.get("general").get("openstack").get("neutron_router_name")
+
+NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_net_name")
+
+NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_subnet_name")
+
+NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_subnet_cidr")
+
+NEUTRON_ROUTER_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_router_name")
def pMsg(value):
+
"""pretty printing"""
pp.pprint(value)
-def waitVmActive(nova,vm):
+def waitVmActive(nova, vm):
+
# sleep and wait for VM status change
sleep_time = 3
count = VM_BOOT_TIMEOUT / sleep_time
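+ # i.e. poll every sleep_time seconds, give up after VM_BOOT_TIMEOUT seconds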
while True:
- status = functest_utils.get_instance_status(nova,vm)
+ status = functest_utils.get_instance_status(nova, vm)
logger.debug("Status: %s" % status)
if status == "ACTIVE":
return True
if status == "ERROR" or count == 0:
return False
- count-=1
+ count -= 1
time.sleep(sleep_time)
return False
-def waitVmDeleted(nova,vm):
+
+def waitVmDeleted(nova, vm):
+
# sleep and wait for VM status change
sleep_time = 3
count = VM_DELETE_TIMEOUT / sleep_time
while True:
- status = functest_utils.get_instance_status(nova,vm)
+ status = functest_utils.get_instance_status(nova, vm)
if not status:
return True
elif count == 0:
logger.debug("Timeout")
return False
else:
- #return False
- count-=1
+ # return False
+ count -= 1
time.sleep(sleep_time)
return False
def create_private_neutron_net(neutron):
+
neutron.format = 'json'
logger.info('Creating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME)
- network_id = functest_utils.create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
+ network_id = functest_utils. \
+ create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
+
if not network_id:
return False
logger.debug("Network '%s' created successfully" % network_id)
-
logger.debug('Creating Subnet....')
- subnet_id = functest_utils.create_neutron_subnet(neutron, NEUTRON_PRIVATE_SUBNET_NAME, NEUTRON_PRIVATE_SUBNET_CIDR, network_id)
+ subnet_id = functest_utils. \
+ create_neutron_subnet(neutron,
+ NEUTRON_PRIVATE_SUBNET_NAME,
+ NEUTRON_PRIVATE_SUBNET_CIDR,
+ network_id)
if not subnet_id:
return False
logger.debug("Subnet '%s' created successfully" % subnet_id)
-
logger.debug('Creating Router...')
- router_id = functest_utils.create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
+ router_id = functest_utils. \
+ create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
+
if not router_id:
return False
- logger.debug("Router '%s' created successfully" % router_id)
+ logger.debug("Router '%s' created successfully" % router_id)
logger.debug('Adding router to subnet...')
+
result = functest_utils.add_interface_router(neutron, router_id, subnet_id)
+
if not result:
return False
- logger.debug("Interface added successfully.")
- network_dic = {'net_id' : network_id,
- 'subnet_id' : subnet_id,
- 'router_id' : router_id}
+ logger.debug("Interface added successfully.")
+ network_dic = {'net_id': network_id,
+ 'subnet_id': subnet_id,
+ 'router_id': router_id}
return network_dic
-def cleanup(nova,neutron,network_dic):
+def cleanup(nova, neutron, network_dic):
+
# delete both VMs
logger.info("Cleaning up...")
vm1 = functest_utils.get_instance_by_name(nova, NAME_VM_1)
if vm1:
- logger.debug("Deleting '%s'..." %NAME_VM_1)
+ logger.debug("Deleting '%s'..." % NAME_VM_1)
nova.servers.delete(vm1)
- #wait until VMs are deleted
- if not waitVmDeleted(nova,vm1):
- logger.error("Instance '%s' with cannot be deleted. Status is '%s'" % (NAME_VM_1,functest_utils.get_instance_status(nova_client,vm1)))
+ # wait until VMs are deleted
+ if not waitVmDeleted(nova, vm1):
+ logger.error(
+ "Instance '%s' with cannot be deleted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova, vm1)))
else:
logger.debug("Instance %s terminated." % NAME_VM_1)
vm2 = functest_utils.get_instance_by_name(nova, NAME_VM_2)
+
if vm2:
- logger.debug("Deleting '%s'..." %NAME_VM_2)
+ logger.debug("Deleting '%s'..." % NAME_VM_2)
vm2 = nova.servers.find(name=NAME_VM_2)
nova.servers.delete(vm2)
- if not waitVmDeleted(nova,vm2):
- logger.error("Instance '%s' with cannot be deleted. Status is '%s'" % (NAME_VM_2,functest_utils.get_instance_status(nova_client,vm2)))
+
+ if not waitVmDeleted(nova, vm2):
+ logger.error(
+ "Instance '%s' with cannot be deleted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova, vm2)))
else:
logger.debug("Instance %s terminated." % NAME_VM_2)
# delete created network
logger.info("Deleting network '%s'..." % NEUTRON_PRIVATE_NET_NAME)
- net_id=network_dic["net_id"]
- subnet_id=network_dic["subnet_id"]
- router_id=network_dic["router_id"]
- if not functest_utils.remove_interface_router(neutron, router_id, subnet_id):
- logger.error("Unable to remove subnet '%s' from router '%s'" %(subnet_id,router_id))
- return False
+ net_id = network_dic["net_id"]
+ subnet_id = network_dic["subnet_id"]
+ router_id = network_dic["router_id"]
+
+ if not functest_utils.remove_interface_router(neutron, router_id,
+ subnet_id):
+ logger.error("Unable to remove subnet '%s' from router '%s'" % (
+ subnet_id, router_id))
+ return False
+
logger.debug("Interface removed successfully")
if not functest_utils.delete_neutron_router(neutron, router_id):
- logger.error("Unable to delete router '%s'" %router_id)
+ logger.error("Unable to delete router '%s'" % router_id)
return False
+
logger.debug("Router deleted successfully")
+
if not functest_utils.delete_neutron_subnet(neutron, subnet_id):
- logger.error("Unable to delete subnet '%s'" %subnet_id)
+ logger.error("Unable to delete subnet '%s'" % subnet_id)
return False
- logger.debug("Subnet '%s' deleted successfully" %NEUTRON_PRIVATE_SUBNET_NAME)
+
+ logger.debug(
+ "Subnet '%s' deleted successfully" % NEUTRON_PRIVATE_SUBNET_NAME)
+
if not functest_utils.delete_neutron_net(neutron, net_id):
- logger.error("Unable to delete network '%s'" %net_id)
+ logger.error("Unable to delete network '%s'" % net_id)
return False
- logger.debug("Network '%s' deleted successfully" %NEUTRON_PRIVATE_NET_NAME)
+
+ logger.debug(
+ "Network '%s' deleted successfully" % NEUTRON_PRIVATE_NET_NAME)
return True
+def push_results_to_db(payload):
+
+ # TODO move DB creds into config file
+ url = TEST_DB + "/results"
+ params = {"project_name": "functest", "case_name": "vPing", "pod_id": 1,
+ "details": payload}
+ headers = {'Content-Type': 'application/json'}
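+ # The collector expects a JSON envelope; 'details' carries the
+ # case-specific payload, e.g. {'timestart': <ts>, 'duration': 12.5,
+ # 'status': 'OK'} for vPing.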
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ logger.debug(r)
+
def main():
+
creds_nova = functest_utils.get_credentials("nova")
nova_client = novaclient.Client(**creds_nova)
creds_neutron = functest_utils.get_credentials("neutron")
neutron_client = neutronclient.Client(**creds_neutron)
EXIT_CODE = -1
+
image = None
- network = None
flavor = None
# Check if the given image exists
try:
- image = nova_client.images.find(name = GLANCE_IMAGE_NAME)
+ image = nova_client.images.find(name=GLANCE_IMAGE_NAME)
logger.info("Glance image found '%s'" % GLANCE_IMAGE_NAME)
except:
logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
@@ -204,15 +276,18 @@ def main():
exit(-1)
network_dic = create_private_neutron_net(neutron_client)
+
if not network_dic:
- logger.error("There has been a problem when creating the neutron network")
+ logger.error(
+ "There has been a problem when creating the neutron network")
exit(-1)
network_id = network_dic["net_id"]
# Check if the given flavor exists
+
try:
- flavor = nova_client.flavors.find(name = FLAVOR)
+ flavor = nova_client.flavors.find(name=FLAVOR)
logger.info("Flavor found '%s'" % FLAVOR)
except:
logger.error("Flavor '%s' not found." % FLAVOR)
@@ -220,77 +295,102 @@ def main():
pMsg(nova_client.flavors.list())
exit(-1)
-
# Deleting instances if they exist
- servers=nova_client.servers.list()
+
+ servers = nova_client.servers.list()
for server in servers:
if server.name == NAME_VM_1 or server.name == NAME_VM_2:
- logger.info("Instance %s found. Deleting..." %server.name)
+ logger.info("Instance %s found. Deleting..." % server.name)
server.delete()
-
# boot VM 1
# basic boot
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
+ # tune (e.g. flavor, images, network) to your specific
+ # openstack configuration here
+ # we consider start time at VM1 booting
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("vPing Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
# create VM
- logger.debug("Creating port 'vping-port-1' with IP %s..." %IP_1)
- port_id=functest_utils.create_neutron_port(neutron_client, "vping-port-1", network_id, IP_1)
+ logger.debug("Creating port 'vping-port-1' with IP %s..." % IP_1)
+ port_id = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-1", network_id,
+ IP_1)
if not port_id:
logger.error("Unable to create port.")
exit(-1)
- logger.info("Creating instance '%s' with IP %s..." %(NAME_VM_1,IP_1))
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n" %(NAME_VM_1,flavor,image,network_id))
+
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_1, IP_1))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
+ "network=%s \n" % (NAME_VM_1, flavor, image, network_id))
vm1 = nova_client.servers.create(
- name = NAME_VM_1,
- flavor = flavor,
- image = image,
- #nics = [{"net-id": network_id, "v4-fixed-ip": IP_1}]
- nics = [{"port-id": port_id}]
+ name=NAME_VM_1,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_1}]
+ nics=[{"port-id": port_id}]
)
- #wait until VM status is active
- if not waitVmActive(nova_client,vm1):
- logger.error("Instance '%s' cannot be booted. Status is '%s'" % (NAME_VM_1,functest_utils.get_instance_status(nova_client,vm1)))
- cleanup(nova_client,neutron_client,network_dic)
+
+ # wait until VM status is active
+ if not waitVmActive(nova_client, vm1):
+
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova_client, vm1)))
+ cleanup(nova_client, neutron_client, network_dic)
return (EXIT_CODE)
else:
logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)
- #retrieve IP of first VM
- #logger.debug("Fetching IP...")
- #server = functest_utils.get_instance_by_name(nova_client, NAME_VM_1)
- # theoretically there is only one IP address so we take the first element of the table
+ # Retrieve IP of first VM
+ # logger.debug("Fetching IP...")
+ # server = functest_utils.get_instance_by_name(nova_client, NAME_VM_1)
+ # theoretically there is only one IP address so we take the
+ # first element of the table
# Dangerous! To be improved!
- #test_ip = server.networks.get(NEUTRON_PRIVATE_NET_NAME)[0]
- test_ip=IP_1
- logger.debug("Instance '%s' got %s" %(NAME_VM_1,test_ip))
+ # test_ip = server.networks.get(NEUTRON_PRIVATE_NET_NAME)[0]
+ test_ip = IP_1
+ logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))
# boot VM 2
# we boot it and then execute a ping script via cloud-init
# the userdata below is a plain shell script that pings VM1 in a loop
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
- u = "#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n"%test_ip
+ # tune (e.g. flavor, images, network) to your specific openstack
+ # configuration here
+ u = "#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n " \
+ "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n " \
+ "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip
+
# create VM
+ logger.debug("Creating port 'vping-port-2' with IP %s..." % IP_2)
+ port_id = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-2", network_id,
+ IP_2)
- logger.debug("Creating port 'vping-port-2' with IP %s..." %IP_2)
- port_id=functest_utils.create_neutron_port(neutron_client, "vping-port-2", network_id, IP_2)
if not port_id:
logger.error("Unable to create port.")
exit(-1)
- logger.info("Creating instance '%s' with IP %s..." %(NAME_VM_2,IP_2))
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n userdata= \n%s" %(NAME_VM_2,flavor,image,network_id,u))
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_2, IP_2))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
+ "\n userdata= \n%s" % (
+ NAME_VM_2, flavor, image, network_id, u))
vm2 = nova_client.servers.create(
- name = NAME_VM_2,
- flavor = flavor,
- image = image,
- #nics = [{"net-id": network_id, "v4-fixed-ip": IP_2}],
- nics = [{"port-id": port_id}],
- userdata = u
+ name=NAME_VM_2,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_2}],
+ nics=[{"port-id": port_id}],
+ userdata=u
)
- if not waitVmActive(nova_client,vm2):
- logger.error("Instance '%s' cannot be booted. Status is '%s'" % (NAME_VM_2,functest_utils.get_instance_status(nova_client,vm2)))
- cleanup(nova_client,neutron_client,network_dic)
+ if not waitVmActive(nova_client, vm2):
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova_client, vm2)))
+ cleanup(nova_client, neutron_client, network_dic)
return (EXIT_CODE)
else:
logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)
@@ -298,13 +398,19 @@ def main():
logger.info("Waiting for ping...")
sec = 0
+ duration = 0  # default, in case the ping never succeeds
console_log = vm2.get_console_output()
+
while True:
time.sleep(1)
console_log = vm2.get_console_output()
- #print "--"+console_log
+ # print "--"+console_log
# report if the test is failed
if "vPing OK" in console_log:
logger.info("vPing detected!")
+
+ # we consider end time when the ping is detected
+ end_time_ts = time.time()
+ duration = round(end_time_ts - start_time_ts, 1)
+ logger.info("vPing duration:'%s'" % duration)
EXIT_CODE = 0
break
elif sec == PING_TIMEOUT:
@@ -312,17 +418,31 @@ def main():
break
else:
logger.debug("No vPing detected...")
- sec+=1
+ sec += 1
- cleanup(nova_client,neutron_client,network_dic)
+ cleanup(nova_client, neutron_client, network_dic)
+ test_status = "NOK"
if EXIT_CODE == 0:
logger.info("vPing OK")
+ test_status = "OK"
else:
logger.error("vPing FAILED")
- exit(EXIT_CODE)
+ try:
+ if args.report:
+ logger.debug("Push result into DB")
+ # TODO check path result for the file
+ push_results_to_db(
+ {'timestart': start_time_ts, 'duration': duration,
+ 'status': test_status})
+ # with open("vPing-result.json", "w") as outfile:
+ # json.dump({'timestart': start_time_ts, 'duration': duration,
+ # 'status': test_status}, outfile, indent=4)
+ except Exception:
+ logger.error("Error pushing results into the database")
+ exit(EXIT_CODE)
if __name__ == '__main__':
main()
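
(For reference, a hedged invocation of the reworked script; the repository
path is a placeholder, and the trailing slash matters because repo_path is
concatenated directly with "testcases/":)

    python testcases/vPing/CI/libraries/vPing.py -d -r /home/opnfv/repos/functest/

With -r, a payload of the form {'timestart': <ts>, 'duration': <seconds>,
'status': 'OK'|'NOK'} is POSTed to <test_db_url>/results; without it the
result is only logged.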