-rw-r--r--  INFO  5
-rw-r--r--  docker/Dockerfile  16
-rw-r--r--  docker/docker_remote_api/docs/TLS-intro.rst  214
-rwxr-xr-x [-rw-r--r--]  docker/docker_remote_api/enable_remote_api.sh  102
-rw-r--r--  docs/internship/security_group/index.rst  70
-rw-r--r--  docs/internship/testapi_evolution/index.rst  70
-rw-r--r--  docs/internship/unit_tests/index.rst  70
-rw-r--r--  docs/internship/vnf_catalog/index.rst  170
-rwxr-xr-x  functest/ci/config_functest.yaml  417
-rwxr-xr-x  functest/ci/exec_test.sh  37
-rwxr-xr-x [-rw-r--r--]  functest/ci/run_tests.py  8
-rwxr-xr-x  functest/ci/testcases.yaml  75
-rw-r--r--  functest/cli/commands/cli_testcase.py  16
-rw-r--r--  functest/core/feature_base.py  58
-rw-r--r--  functest/core/pytest_suite_runner.py  55
-rw-r--r--  functest/core/testcase_base.py  10
-rwxr-xr-x  functest/opnfv_tests/features/copper.py  72
-rw-r--r--  functest/opnfv_tests/features/odl_sfc.py  20
-rwxr-xr-x [-rw-r--r--]  functest/opnfv_tests/features/sdnvpn.py  0
-rw-r--r--  functest/opnfv_tests/openstack/snaps/__init__.py  0
-rw-r--r--  functest/opnfv_tests/openstack/snaps/api_check.py  32
-rw-r--r--  functest/opnfv_tests/openstack/snaps/connection_check.py  32
-rw-r--r--  functest/opnfv_tests/openstack/snaps/smoke.py  41
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_utils.py  22
-rw-r--r--  functest/opnfv_tests/openstack/tempest/__init__.py  0
-rw-r--r-- [-rwxr-xr-x]  functest/opnfv_tests/openstack/tempest/conf_utils.py (renamed from functest/opnfv_tests/openstack/tempest/gen_tempest_conf.py)  118
-rwxr-xr-x  functest/opnfv_tests/openstack/tempest/run_tempest.py  451
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py  316
-rwxr-xr-x [-rw-r--r--]  functest/opnfv_tests/openstack/vping/vping_ssh.py  0
-rwxr-xr-x [-rw-r--r--]  functest/opnfv_tests/openstack/vping/vping_userdata.py  0
-rwxr-xr-x  functest/opnfv_tests/sdn/odl/odl.py  51
-rw-r--r--  functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py  2
-rw-r--r--  functest/opnfv_tests/vnf/rnc/parser.py  65
-rw-r--r--  functest/tests/unit/core/test_testcase_base.py  19
-rw-r--r--  functest/tests/unit/odl/test_odl.py  140
-rw-r--r--  functest/utils/functest_utils.py  36
-rwxr-xr-x  functest/utils/openstack_clean.py  12
-rwxr-xr-x  functest/utils/openstack_snapshot.py  4
-rwxr-xr-x [-rw-r--r--]  functest/utils/openstack_tacker.py  2
-rwxr-xr-x  functest/utils/openstack_utils.py  374
-rwxr-xr-x [-rw-r--r--]  requirements.txt  4
-rwxr-xr-x  run_unit_tests.sh  1
-rw-r--r--  setup.py  50
-rwxr-xr-x [-rw-r--r--]  test-requirements.txt  2
44 files changed, 1980 insertions, 1279 deletions
diff --git a/INFO b/INFO
index 1f34d8cdf..07145bd13 100644
--- a/INFO
+++ b/INFO
@@ -3,14 +3,15 @@ Project Creation Date: January 20, 2015
Project Category: Integration & Testing
Lifecycle State: Incubation
Primary Contact: Jose Lausuch (jose.lausuch@ericsson.com)
-Project Lead: Jose lausuch (jose.lausuch@ericsson.com)
+Project Lead: Jose Lausuch (jose.lausuch@ericsson.com)
Jira Project Name: Base System Functionality Testing Project
Jira Project Prefix: FUNCTEST
Mailing list tag: [functest]
-IRC: Server:freenode.net Channel:#opnfv-testperf
+IRC: Server:freenode.net Channel:#opnfv-functest
Repository: functest
Committers:
+yaohelan@huawei.com
serena.feng.711@gmail.com
ollivier.cedric@gmail.com
jose.lausuch@ericsson.com
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6bdfe5ce6..5105fbbd1 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -32,7 +32,7 @@ LABEL version="0.1" description="OPNFV Functest Docker container"
ARG BRANCH=master
ARG TEMPEST_TAG=12.2.0
ARG RALLY_TAG=0.7.0
-ARG ODL_TAG=release/beryllium-sr3
+ARG ODL_TAG=release/beryllium-sr4
ARG OPENSTACK_TAG=stable/mitaka
ARG KINGBIRD_TAG=0.2.2
ARG VIMS_TAG=stable
@@ -43,6 +43,7 @@ ARG FUNCTEST_DATA_DIR=${FUNCTEST_BASE_DIR}/data
ARG FUNCTEST_RESULTS_DIR=${FUNCTEST_BASE_DIR}/results
ARG FUNCTEST_REPO_DIR=${REPOS_DIR}/functest
ARG FUNCTEST_TEST_DIR=${FUNCTEST_REPO_DIR}/functest/opnfv_tests
+ARG RELENG_MODULE_DIR=${REPOS_DIR}/releng/modules
# Environment variables
ENV HOME /home/opnfv
@@ -117,11 +118,14 @@ RUN cd ${FUNCTEST_REPO_DIR} \
&& pip install -r requirements.txt \
&& pip install .
+RUN cd ${RELENG_MODULE_DIR} \
+ && pip install .
+
RUN pip install -r ${REPOS_DIR}/rally/requirements.txt
RUN pip install -r ${REPOS_DIR}/tempest/requirements.txt
RUN find ${FUNCTEST_REPO_DIR} -name "*.py" \
- -not -path *unit_tests* |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 \
+ -not -path "*tests/unit*" |xargs grep __main__ |cut -d\: -f 1 |xargs chmod -c 755 \
&& find ${FUNCTEST_REPO_DIR} -name "*.sh" |xargs grep \#\! |cut -d\: -f 1 |xargs chmod -c 755
RUN /bin/bash ${REPOS_DIR}/parser/tests/parser_install.sh ${REPOS_DIR}
@@ -138,7 +142,13 @@ RUN curl -L https://get.rvm.io | bash -s stable
RUN git clone --depth 1 https://gerrit.cablelabs.com/snaps-provisioning ${REPOS_DIR}/snaps
RUN pip install -e ${REPOS_DIR}/snaps/
-RUN /bin/bash -c ". ${REPOS_DIR}/sfc/tests/functest/odl-sfc/tacker_client_install.sh"
+# SFC integration
+RUN /bin/bash -c ". ${REPOS_DIR}/sfc/sfc/tests/functest/setup_scripts/tacker_client_install.sh"
+RUN cd ${REPOS_DIR}/sfc && pip install .
+
+# SDNVPN integration
+RUN cd ${REPOS_DIR}/sdnvpn && pip install .
+
RUN cd ${REPOS_DIR}/bgpvpn && pip install .
#RUN cd ${REPOS_DIR}/kingbird && pip install -e .
RUN cd ${REPOS_DIR}/moon/moonclient/ && python setup.py install
diff --git a/docker/docker_remote_api/docs/TLS-intro.rst b/docker/docker_remote_api/docs/TLS-intro.rst
index 934f99a8b..44fdd4aed 100644
--- a/docker/docker_remote_api/docs/TLS-intro.rst
+++ b/docker/docker_remote_api/docs/TLS-intro.rst
@@ -1,107 +1,107 @@
-Encrypt the docker remote API via TLS for Ubuntu and CentOS
-
-[Introduction]
-The Docker daemon can listen to Docker Remote API requests via three types of
-Socket: unix, tcp and fd. By default, a unix domain socket (or IPC socket) is
-created at /var/run/docker.sock, requiring either root permission, or docker
-group membership.
-
-Port 2375 is conventionally used for un-encrypted communition with Docker daemon
-remotely, where docker server can be accessed by any docker client via tcp socket
-in local area network. You can listen to port 2375 on all network interfaces with
--H tcp://0.0.0.0:2375, where 0.0.0.0 means any available IP address on host, and
-tcp://0.0.0.0:2375 indicates that port 2375 is listened on any IP of daemon host.
-If we want to make docker server open on the Internet via TCP port, and only trusted
-clients have the right to access the docker server in a safe manner, port 2376 for
-encrypted communication with the daemon should be listened. It can be achieved to
-create certificate and distribute it to the trusted clients.
-
-Through creating self-signed certificate, and using --tlsverify command when running
-Docker daemon, Docker daemon opens the TLS authentication. Thus only the clients
-with related private key files can have access to the Docker daemon's server. As
-long as the key files for encryption are secure between docker server and client,
-the Docker daemon can keep secure.
-In summary,
-Firstly we should create docker server certificate and related key files, which
-are distributed to the trusted clients.
-Then the clients with related key files can access docker server.
-
-[Steps]
-1.0. Create a CA, server and client keys with OpenSSL.
- OpenSSL is used to generate certificate, and can be installed as follows.
- apt-get install openssl openssl-devel
-
-1.1 First generate CA private and public keys.
- openssl genrsa -aes256 -out ca-key.pem 4096
- openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem
-
- You are about to be asked to enter information that will be incorporated
- into your certificate request, where the instance of $HOST should be replaced
- with the DNS name of your Docker daemon's host, here the DNS name of my Docker
- daemon is ly.
- Common Name (e.g. server FQDN or YOUR name) []:$HOST
-
-1.2 Now we have a CA (ca-key.pem and ca.pem), you can create a server key and
-certificate signing request.
- openssl genrsa -out server-key.pem 4096
- openssl req -subj "/CN=$HOST" -sha256 -new -key server-key.pem -out server.csr
-
-1.3 Sign the public key with our CA.
- TLS connections can be made via IP address as well as DNS name, they need to be
- specified when creating the certificate.
-
- echo subjectAltName = IP:172.16.10.121,IP:127.0.0.1 > extfile.cnf
- openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem \
- -CAcreateserial -out server-cert.pem -extfile extfile.cnf
-
-1.4 For client authentication, create a client key and certificate signing request.
- openssl genrsa -out key.pem 4096
- openssl req -subj '/CN=client' -new -key key.pem -out client.csr
-
-1.5 To make the key suitable for client authentication, create an extensions config file.
- echo extendedKeyUsage = clientAuth > extfile.cnf
-
-1.6 Sign the public key and after generating cert.pem and server-cert.pem, two certificate
- signing requests can be removed.
- openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem \
- -CAcreateserial -out cert.pem -extfile extfile.cnf
-
-1.7 In order to protect your keys from accidental damage, you may change file modes to
- be only readable.
- chmod -v 0400 ca-key.pem key.pem server-key.pem
- chmod -v 0444 ca.pem server-cert.pem cert.pem
-
-1.8 Build docker server
- dockerd --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
- -H=0.0.0.0:2376
- Then, it can be seen from the command 'netstat -ntlp' that port 2376 has been listened
- and the Docker daemon only accept connections from clients providing a certificate
- trusted by our CA.
-
-1.9 Distribute the keys to the client
- scp /etc/docker/ca.pem wwl@172.16.10.121:/etc/docker
- scp /etc/docker/cert.pem wwl@172.16.10.121:/etc/docker
- scp /etc/docker/key.pem wwl@172.16.10.121:/etc/docker
- Where, wwl and 172.16.10.121 is the username and IP of the client respectively.
- And the password of the client is needed when you distribute the keys to the client.
-
-1.10 To access Docker daemon from the client via keys.
- docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
- -H=$HOST:2376 version
-
- Then we can operate docker in the Docker daemon from the client vis keys, for example:
- 1) create container from the client
- docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 run -d \
- -it --name w1 grafana/grafana
- 2) list containers from the client
- docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 pa -a
- 3) stop/start containers from the client
- docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 stop w1
- docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 start w1
-
-
-
-
-
-
-
+Encrypt the docker remote API via TLS for Ubuntu and CentOS
+
+[Introduction]
+The Docker daemon can listen to Docker Remote API requests via three types of
+Socket: unix, tcp and fd. By default, a unix domain socket (or IPC socket) is
+created at /var/run/docker.sock, requiring either root permission, or docker
+group membership.
+
+Port 2375 is conventionally used for unencrypted remote communication with the
+Docker daemon, where the docker server can be accessed by any docker client over
+a tcp socket in the local area network. You can listen on port 2375 on all
+network interfaces with -H tcp://0.0.0.0:2375, where 0.0.0.0 means any available
+IP address on the host, so tcp://0.0.0.0:2375 makes the daemon listen on port
+2375 on any of its IPs. If we want to expose the docker server on the Internet
+via a TCP port while only trusted clients have the right to access it in a safe
+manner, the daemon should instead listen on port 2376 for encrypted
+communication. This is achieved by creating a certificate and distributing it to
+the trusted clients.
+
+By creating a self-signed certificate and running the Docker daemon with the
+--tlsverify flag, the daemon enables TLS authentication. Thus only the clients
+with the related private key files have access to the Docker daemon's server. As
+long as the key files used for encryption are kept secure between the docker
+server and the client, the Docker daemon stays secure.
+In summary:
+first, we create the docker server certificate and the related key files, which
+are distributed to the trusted clients;
+then the clients with the related key files can access the docker server.
+
+[Steps]
+1.0. Create a CA, server and client keys with OpenSSL.
+ OpenSSL is used to generate certificate, and can be installed as follows.
+ apt-get install openssl openssl-devel
+
+1.1 First generate CA private and public keys.
+ openssl genrsa -aes256 -out ca-key.pem 4096
+ openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem
+
+    You are about to be asked to enter information that will be incorporated
+    into your certificate request. Replace each instance of $HOST with the DNS
+    name of your Docker daemon's host; here the DNS name of the Docker daemon
+    is ly.
+    Common Name (e.g. server FQDN or YOUR name) []:$HOST
+
+1.2 Now that we have a CA (ca-key.pem and ca.pem), you can create a server key
+and a certificate signing request.
+ openssl genrsa -out server-key.pem 4096
+ openssl req -subj "/CN=$HOST" -sha256 -new -key server-key.pem -out server.csr
+
+1.3 Sign the public key with our CA.
+    TLS connections can be made via IP address as well as DNS name; both need to
+    be specified when creating the certificate.
+
+ echo subjectAltName = IP:172.16.10.121,IP:127.0.0.1 > extfile.cnf
+ openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem \
+ -CAcreateserial -out server-cert.pem -extfile extfile.cnf
+
+1.4 For client authentication, create a client key and certificate signing request.
+ openssl genrsa -out key.pem 4096
+ openssl req -subj '/CN=client' -new -key key.pem -out client.csr
+
+1.5 To make the key suitable for client authentication, create an extensions config file.
+ echo extendedKeyUsage = clientAuth > extfile.cnf
+
+1.6 Sign the public key. After cert.pem and server-cert.pem have been generated,
+    the two certificate signing requests can be removed.
+ openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem \
+ -CAcreateserial -out cert.pem -extfile extfile.cnf
+
+1.7 In order to protect your keys from accidental damage, you may change file modes to
+ be only readable.
+ chmod -v 0400 ca-key.pem key.pem server-key.pem
+ chmod -v 0444 ca.pem server-cert.pem cert.pem
+
+1.8 Start the docker server
+ dockerd --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
+ -H=0.0.0.0:2376
+    Then, the command 'netstat -ntlp' shows that port 2376 is being listened on,
+    and the Docker daemon only accepts connections from clients providing a
+    certificate trusted by our CA.
+
+1.9 Distribute the keys to the client
+ scp /etc/docker/ca.pem wwl@172.16.10.121:/etc/docker
+ scp /etc/docker/cert.pem wwl@172.16.10.121:/etc/docker
+ scp /etc/docker/key.pem wwl@172.16.10.121:/etc/docker
+    Here, wwl and 172.16.10.121 are the username and IP of the client respectively.
+    The client's password is needed when you distribute the keys to the client.
+
+1.10 To access Docker daemon from the client via keys.
+ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
+ -H=$HOST:2376 version
+
+    Then we can operate docker on the Docker daemon from the client via keys, for example:
+ 1) create container from the client
+ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 run -d \
+ -it --name w1 grafana/grafana
+ 2) list containers from the client
+    docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 ps -a
+ 3) stop/start containers from the client
+ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 stop w1
+ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=ly:2376 start w1
+
+
+
+
+
+
+
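For reference, the client-side check from step 1.10 can also be driven from Python. A minimal sketch (Python 3, standard library only), assuming the ca.pem, cert.pem and key.pem files generated above and the example daemon host "ly" listening on 2376; the names are placeholders taken from this guide:

    import json
    import ssl
    import http.client

    HOST = "ly"    # placeholder: the DNS name used as CN for the server certificate
    PORT = 2376    # TLS-protected Docker Remote API port

    # Trust our CA and present the client certificate/key created in steps 1.4-1.6.
    context = ssl.create_default_context(cafile="ca.pem")
    context.load_cert_chain(certfile="cert.pem", keyfile="key.pem")

    conn = http.client.HTTPSConnection(HOST, PORT, context=context)
    conn.request("GET", "/version")          # same check as 'docker ... version'
    reply = conn.getresponse()
    print(json.loads(reply.read().decode()))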
diff --git a/docker/docker_remote_api/enable_remote_api.sh b/docker/docker_remote_api/enable_remote_api.sh
index 6867eeddf..76e59b850 100644..100755
--- a/docker/docker_remote_api/enable_remote_api.sh
+++ b/docker/docker_remote_api/enable_remote_api.sh
@@ -1,51 +1,51 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-
-# ******************************
-# Script to update the docker host configuration
-# to enable Docker Remote API
-# ******************************
-
-if [ -f /etc/lsb-release ]; then
- #tested on ubuntu 14.04 and 16.04
- if grep -q "#DOCKER_OPTS=" "/etc/default/docker"; then
- cp /etc/default/docker /etc/default/docker.bak
- sed -i 's/^#DOCKER_OPTS.*$/DOCKER_OPTS=\"-H unix:\/\/\/var\/run\/docker.sock -H tcp:\/\/0.0.0.0:2375\"/g' /etc/default/docker
- else
- echo DOCKER_OPTS=\"-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375\" >> /etc/default/docker
- fi
- service docker restart
- #docker start $(docker ps -aq)
-elif [ -f /etc/system-release ]; then
- #tested on centos 7.2
- if grep -q "ExecStart=\/usr\/bin\/docker-current daemon" "/lib/systemd/system/docker.service"; then
- cp /lib/systemd/system/docker.service /lib/systemd/system/docker.service.bak
- sed -i 's/^ExecStart=.*$/ExecStart=\/usr\/bin\/docker daemon -H tcp:\/\/0.0.0.0:2375 -H unix:\/\/\/var\/run\/docker.sock \\/g' /lib/systemd/system/docker.service
- systemctl daemon-reload
- systemctl restart docker
- else
- echo "to be implemented"
- fi
-else
- echo "OS is not supported"
-fi
-
-# Issue Note for Ubuntu
-# 1. If the configuration of the file /etc/default/docker does not take effect after restarting docker service,
-# you may try to modify /lib/systemd/system/docker.service
-# commands:
-# cp /lib/systemd/system/docker.service /lib/systemd/system/docker.service.bak
-# sed -i '/^ExecStart/i\EnvironmentFile=-/etc/default/docker' /lib/systemd/system/docker.service
-# sed -i '/ExecStart=\/usr\/bin\/dockerd/{;s/$/ \$DOCKER_OPTS/}' /lib/systemd/system/docker.service
-# systemctl daemon-reload
-# service docker restart
-# 2. Systemd is a system and session manager for Linux, where systemctl is one tool for systemd to view and control systemd.
-# If the file /lib/systemd/system/docker.service is modified, systemd has to be reloaded to scan new or changed units.
-# 1) systemd and related packages are available on the PPA. To use the PPA, first add it to your software sources list as follows.
-# add-apt-repository ppa:pitti/systemd
-# apt-get update
-# 2) system can be installed from the PPS as follows.
-# apt-get install systemd libpam-systemd systemd-ui
-
-
-
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+
+# ******************************
+# Script to update the docker host configuration
+# to enable Docker Remote API
+# ******************************
+
+if [ -f /etc/lsb-release ]; then
+ #tested on ubuntu 14.04 and 16.04
+ if grep -q "#DOCKER_OPTS=" "/etc/default/docker"; then
+ cp /etc/default/docker /etc/default/docker.bak
+ sed -i 's/^#DOCKER_OPTS.*$/DOCKER_OPTS=\"-H unix:\/\/\/var\/run\/docker.sock -H tcp:\/\/0.0.0.0:2375\"/g' /etc/default/docker
+ else
+ echo DOCKER_OPTS=\"-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375\" >> /etc/default/docker
+ fi
+ service docker restart
+ #docker start $(docker ps -aq)
+elif [ -f /etc/system-release ]; then
+ #tested on centos 7.2
+ if grep -q "ExecStart=\/usr\/bin\/docker-current daemon" "/lib/systemd/system/docker.service"; then
+ cp /lib/systemd/system/docker.service /lib/systemd/system/docker.service.bak
+ sed -i 's/^ExecStart=.*$/ExecStart=\/usr\/bin\/docker daemon -H tcp:\/\/0.0.0.0:2375 -H unix:\/\/\/var\/run\/docker.sock \\/g' /lib/systemd/system/docker.service
+ systemctl daemon-reload
+ systemctl restart docker
+ else
+ echo "to be implemented"
+ fi
+else
+ echo "OS is not supported"
+fi
+
+# Issue Note for Ubuntu
+# 1. If the configuration of the file /etc/default/docker does not take effect after restarting docker service,
+# you may try to modify /lib/systemd/system/docker.service
+# commands:
+# cp /lib/systemd/system/docker.service /lib/systemd/system/docker.service.bak
+# sed -i '/^ExecStart/i\EnvironmentFile=-/etc/default/docker' /lib/systemd/system/docker.service
+# sed -i '/ExecStart=\/usr\/bin\/dockerd/{;s/$/ \$DOCKER_OPTS/}' /lib/systemd/system/docker.service
+# systemctl daemon-reload
+# service docker restart
+# 2. Systemd is a system and session manager for Linux; systemctl is the tool used to view and control systemd.
+# If the file /lib/systemd/system/docker.service is modified, systemd has to be reloaded to scan new or changed units.
+# 1) systemd and related packages are available on the PPA. To use the PPA, first add it to your software sources list as follows.
+# add-apt-repository ppa:pitti/systemd
+# apt-get update
+#    2) systemd can be installed from the PPA as follows.
+# apt-get install systemd libpam-systemd systemd-ui
+
+
+
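A minimal sketch (Python 3) to confirm that the unencrypted remote API answers on port 2375 after the script above has restarted the daemon; the host address is a placeholder:

    import json
    import urllib.request

    DOCKER_HOST = "127.0.0.1"   # placeholder: address of the docker host

    # GET /version is the simplest Docker Remote API call to verify connectivity.
    with urllib.request.urlopen("http://%s:2375/version" % DOCKER_HOST, timeout=5) as reply:
        info = json.loads(reply.read().decode())
    print("Docker daemon version:", info.get("Version"))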
diff --git a/docs/internship/security_group/index.rst b/docs/internship/security_group/index.rst
new file mode 100644
index 000000000..d1cdbdd8f
--- /dev/null
+++ b/docs/internship/security_group/index.rst
@@ -0,0 +1,70 @@
+=======
+License
+=======
+
+Functest Docs are licensed under a Creative Commons Attribution 4.0
+International License.
+You should have received a copy of the license along with this.
+If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+==================================
+Functest Security group test cases
+==================================
+
+Author: Girish Sukhatankar
+Mentors: D.Blaisonneau, J.Lausuch, M.Richomme
+
+Abstract
+========
+
+
+Version history
+===============
+
++------------+----------+------------------+------------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++------------+----------+------------------+------------------------+
+| 2016-??-?? | 0.0.1 | Morgan Richomme | Beginning of the |
+| | | (Orange) | Internship |
++------------+----------+------------------+------------------------+
+
+
+Overview:
+=========
+
+
+
+
+Problem Statement:
+------------------
+
+
+
+Curation Phase
+--------------
+
+
+
+
+
+Schedule:
+=========
+
+
+
++--------------------------+------------------------------------------+
+| **Date** | **Comment** |
+| | |
++--------------------------+------------------------------------------+
+| December - January | ........ |
++--------------------------+------------------------------------------+
+| January - february | ........ |
++--------------------------+------------------------------------------+
+
+
+References:
+===========
+
+.. _`[1]` : https://wiki.opnfv.org/display/DEV/Intern+Project%3A+Security+groups+test+case+in+Functest
+
diff --git a/docs/internship/testapi_evolution/index.rst b/docs/internship/testapi_evolution/index.rst
new file mode 100644
index 000000000..f2583e2f0
--- /dev/null
+++ b/docs/internship/testapi_evolution/index.rst
@@ -0,0 +1,70 @@
+=======
+License
+=======
+
+Functest Docs are licensed under a Creative Commons Attribution 4.0
+International License.
+You should have received a copy of the license along with this.
+If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+==================
+Test API evolution
+==================
+
+Author: Rohit Sakala
+Mentors: S. Feng, J.Lausuch, M.Richomme
+
+Abstract
+========
+
+
+Version history
+===============
+
++------------+----------+------------------+------------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++------------+----------+------------------+------------------------+
+| 2016-??-?? | 0.0.1 | Morgan Richomme | Beginning of the |
+| | | (Orange) | Internship |
++------------+----------+------------------+------------------------+
+
+
+Overview:
+=========
+
+
+
+
+Problem Statement:
+------------------
+
+
+
+Curation Phase
+--------------
+
+
+
+
+
+Schedule:
+=========
+
+
+
++--------------------------+------------------------------------------+
+| **Date** | **Comment** |
+| | |
++--------------------------+------------------------------------------+
+| December - January | ........ |
++--------------------------+------------------------------------------+
+| January - february | ........ |
++--------------------------+------------------------------------------+
+
+
+References:
+===========
+
+.. _`[1]` : https://wiki.opnfv.org/display/DEV/Intern+Project%3A+testapi+evolution
+
diff --git a/docs/internship/unit_tests/index.rst b/docs/internship/unit_tests/index.rst
new file mode 100644
index 000000000..f969aa72d
--- /dev/null
+++ b/docs/internship/unit_tests/index.rst
@@ -0,0 +1,70 @@
+=======
+License
+=======
+
+Functest Docs are licensed under a Creative Commons Attribution 4.0
+International License.
+You should have received a copy of the license along with this.
+If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+===================
+Functest Unit tests
+===================
+
+Author: Ashish Kumar
+Mentors: H.Yao, J.Lausuch, M.Richomme
+
+Abstract
+========
+
+
+Version history
+===============
+
++------------+----------+------------------+------------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++------------+----------+------------------+------------------------+
+| 2016-??-?? | 0.0.1 | Morgan Richomme | Beginning of the |
+| | | (Orange) | Internship |
++------------+----------+------------------+------------------------+
+
+
+Overview:
+=========
+
+
+
+
+Problem Statement:
+------------------
+
+
+
+Curation Phase
+--------------
+
+
+
+
+
+Schedule:
+=========
+
+
+
++--------------------------+------------------------------------------+
+| **Date** | **Comment** |
+| | |
++--------------------------+------------------------------------------+
+| December - January | ........ |
++--------------------------+------------------------------------------+
+| January - february | ........ |
++--------------------------+------------------------------------------+
+
+
+References:
+===========
+
+.. _`[1]` : https://wiki.opnfv.org/display/DEV/Intern+Project%3A+Functest+unit+tests
+
diff --git a/docs/internship/vnf_catalog/index.rst b/docs/internship/vnf_catalog/index.rst
new file mode 100644
index 000000000..df7633391
--- /dev/null
+++ b/docs/internship/vnf_catalog/index.rst
@@ -0,0 +1,170 @@
+=======
+License
+=======
+
+Functest Docs are licensed under a Creative Commons Attribution 4.0
+International License.
+You should have received a copy of the license along with this.
+If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+=======================
+Open Source VNF Catalog
+=======================
+
+Author: Kumar Rishabh
+Mentors: B.Souville, M.Richomme, J.Lausuch
+
+Abstract
+========
+
+
+
+Version history
+===============
+
++------------+----------+------------------+------------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++------------+----------+------------------+------------------------+
+| 2016-12-12 | 0.0.1 | Morgan Richomme | Beginning of the |
+| | | (Orange) | Internship |
++------------+----------+------------------+------------------------+
+
+
+Overview:
+=========
+
+
+This project aims to create an Open Source catalog for the reference and
+classification of Virtual Network Functions (VNFs) available on the
+Internet. The proposed classification method will be in sync with the
+requirements of Telcos active in the NFV landscape. The project aims to have a
+running web platform similar to `[1]`_ by the middle of the internship (2nd
+week of March). By the penultimate month of the internship I aim to have a
+fully functional implementation of an Open Source VNF in functest.
+
+
+Problem Statement:
+------------------
+
+OPNFV aims to be the reference platform for development,
+standardization and integration of Open Source NFV components across
+various Open Source Platforms. It mainly deals with the infrastructure
+through the Network Function Virtualization Infrastructure (NFVI) and
+Virtual Infrastructure manager (VIM). The MANO (Management and
+orchestration) stacks have been introduced recently. VNFs are not
+directly in OPNFV scope, however VNFs are needed to test and qualify the
+infrastructure. In this regard having a common curated Open Source
+Reference VNF catalog would be of immense importance to the community.
+
+Since the major focus of OPNFV is Telcos, a curated platform targeted from an
+industry point of view would be very useful. We plan to divide the
+entire project into three major phases (with some iterative improvements
+and overlaps).
+
+
+Curation Phase
+--------------
+This phase pertains to studying various Open Source VNFs available and
+classification of them based on certain parameters. The parameters that
+I currently have in mind are:
+ * Developer Metrics: These pertain to repo characteristics of VNF under
+ study
+ * Usage Statistics - Activity, Number of Commits, stars
+ * Maturity Statistics - For instance if an NFV community decides code
+ coverage is important for them, it shows the NFV community is serious
+ about taking the project forward
+ * Technical Tagging: These are the tags that pertain to technical
+ characteristics of a VNF
+ * Broad Use Cases - Whether the VNF fits strictly in IaaS, PaaS or
+ SaaS layer or is an hybrid of two/all.
+ * Generic Use Cases - This in my opinion is the broadest
+ classification category. For instance a VNF could be built with a
+ broad idea of powering IOT devices at home or from usage perspective
+ of Telco Operators (vFW, vEPC, vIMS, vCDN, vAAA, vCPE,...).`[2]`_
+ * Fields of Application
+ * Library Status - Whether APIs are standardized, support RESTful
+ services.
+ * Dependency Forwarding Graph - This is pretty complex tagging
+ mechanism. It essentially tries to establish a graph relationship
+ between the VNFs (elementary VNFs are used in Service Function
+ Chaining chains such as Firewall, DPI, content enrichment,..). In my
+ opinion this is useful immensely. This will allow users to go to
+ platform and ask a question like - “I have this X tech stack to
+ support, Y and Z are my use cases, which NFVs should I use to support
+ this.
+ * Visitor Score - Based on `[1]`_ I plan to evolve a visitor score for
+ the platform. This will allow users to score an NFV on certain
+ parameters, may be post comments.
+
+**I plan to use the above three scores and evolve a cumulative score which
+will be displayed next to each VNF on the platform.**
+
+ * Platform building phase - This will involve erecting a Web Platform
+ which will be similar to this `[1]`_. I am decently familiar with
+ Django and hence I will write the platform in Django. There are two
+ action plans that I have in mind right now. Either I can start writing
+ the platform simultaneously which will help keep track of my progress
+ or I can write the platform after 1.5 - 2 months into the internship.
+ Either way I aim to have the Web Platform ready by March 12.
+
+ * Functest VNF implementation phase - This is the last phase that will
+ involve writing a fully functional implementation of an Open Source VNF
+ into Functest. I will undertake this after I am 3 months into the
+ internship. I have a decent familiarity with python and hence I think
+ it shouldn’t be too difficult. I need to decide how complex the VNFI
+ should undertake this exercise for (e.g. AAA such as free radius sounds
+ relatively easy, vCDN is much more challenging).
+ This will be decided in consent with my mentors.
+
+
+
+
+Schedule:
+=========
+I plan to take this project in a 6-month time frame, as I want to use it
+as a chance to read more about NFV in particular and SDN in general.
+
+
++--------------------------+------------------------------------------+
+| **Date** | **Comment** |
+| | |
++--------------------------+------------------------------------------+
+| December 12 - January 12 | Study the above mentioned metrics |
+| | Decide which of them are important for |
+| | community (and which are not). |
++--------------------------+------------------------------------------+
+| January 12 - January 27 | Make a database for the above studied |
+| | metrics and evolve it further based on |
+| | Mentors’ input. + associated API |
++--------------------------+------------------------------------------+
+| January 27 - February 5 | Compile the data collected above and make|
+| | it public. Although I can keep everything|
+| | public from the beginning too. My |
+| | rationale of not making the entire data |
+| | public in initial stage as the errors |
+| | caused by me could be misleading for |
+| | developers. |
++--------------------------+------------------------------------------+
+| February 5 - March 5 | Erect the Web Platform and release it |
+| | for restricted group for alpha testing. |
++--------------------------+------------------------------------------+
+| March 5 - March 12 | Make it public. Release it to public for |
+| | beta testing. Fix Bugs. |
++--------------------------+------------------------------------------+
+| March 12 - April 12 | Start working on implementation of an |
+| | Open Source VNF in Functest. |
++--------------------------+------------------------------------------+
+| April 12 - May 12        | I will decide what to do here based on   |
+| | discussion with mentors. |
++--------------------------+------------------------------------------+
+
+
+References:
+===========
+
+.. _`[1]` : Openhub: https://www.openhub.net/explore/projects
+
+.. _`[2]` : ETSI NFV White Paper: https://portal.etsi.org/Portals/0/TBpages/NFV/Docs/NFV_White_Paper3.pdf
+
+.. _`[3]` : https://wiki.opnfv.org/display/DEV/Intern+Project%3A+Open+Source+VNF+catalog
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index c75afdafe..11ff7fdb5 100755
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -1,206 +1,211 @@
-general:
- directories:
- # Relative to the path where the repo is cloned:
- dir_vping: functest/opnfv_tests/openstack/vping
- dir_odl: functest/opnfv_tests/sdn/odl
- dir_rally: functest/opnfv_tests/openstack/rally
- dir_tempest_cases: functest/opnfv_tests/openstack/tempest/custom_tests
- dir_vIMS: functest/opnfv_tests/vnf/ims
- dir_onos: functest/opnfv_tests/sdn/onos/teston
- dir_onos_sfc: functest/opnfv_tests/sdn/onos/sfc
-
- # Absolute path
- dir_home: /home/opnfv
- dir_repos: /home/opnfv/repos
- dir_repo_functest: /home/opnfv/repos/functest
- dir_repo_rally: /home/opnfv/repos/rally
- dir_repo_tempest: /home/opnfv/repos/tempest
- dir_repo_releng: /home/opnfv/repos/releng
- dir_repo_vims_test: /home/opnfv/repos/vims-test
- dir_repo_sdnvpn: /home/opnfv/repos/sdnvpn
- dir_repo_sfc: /home/opnfv/repos/sfc
- dir_repo_onos: /home/opnfv/repos/onos
- dir_repo_promise: /home/opnfv/repos/promise
- dir_repo_doctor: /home/opnfv/repos/doctor
- dir_repo_copper: /home/opnfv/repos/copper
- dir_repo_ovno: /home/opnfv/repos/ovno
- dir_repo_parser: /home/opnfv/repos/parser
- dir_repo_domino: /home/opnfv/repos/domino
- dir_functest: /home/opnfv/functest
- dir_functest_test: /home/opnfv/repos/functest/functest/opnfv_tests
- dir_results: /home/opnfv/functest/results
- dir_functest_conf: /home/opnfv/functest/conf
- dir_functest_data: /home/opnfv/functest/data
- dir_vIMS_data: /home/opnfv/functest/data/vIMS/
- dir_rally_inst: /home/opnfv/.rally
-
- openstack:
- creds: /home/opnfv/functest/conf/openstack.creds
- snapshot_file: /home/opnfv/functest/conf/openstack_snapshot.yaml
-
- image_name: Cirros-0.3.4
- image_file_name: cirros-0.3.4-x86_64-disk.img
- image_disk_format: qcow2
-
- flavor_name: opnfv_flavor
- flavor_ram: 512
- flavor_disk: 1
- flavor_vcpus: 1
-
- # Private network for functest. Will be created by config_functest.py
- neutron_private_net_name: functest-net
- neutron_private_subnet_name: functest-subnet
- neutron_private_subnet_cidr: 192.168.120.0/24
- neutron_private_subnet_start: 192.168.120.2
- neutron_private_subnet_end: 192.168.120.254
- neutron_private_subnet_gateway: 192.168.120.254
- neutron_router_name: functest-router
-
- functest:
- testcases_yaml: /home/opnfv/repos/functest/functest/ci/testcases.yaml
-
-healthcheck:
- disk_image: /home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img
- disk_format: qcow2
- wait_time: 60
-
-vping:
- ping_timeout: 200
- vm_flavor: m1.tiny # adapt to your environment
- vm_name_1: opnfv-vping-1
- vm_name_2: opnfv-vping-2
- image_name: functest-vping
- vping_private_net_name: vping-net
- vping_private_subnet_name: vping-subnet
- vping_private_subnet_cidr: 192.168.130.0/24
- vping_router_name: vping-router
- vping_sg_name: vPing-sg
- vping_sg_descr: Security group for vPing test case
-
-onos_sfc:
- image_base_url: http://artifacts.opnfv.org/sfc/demo
- image_name: TestSfcVm
- image_file_name: firewall_block_image.img
-
-tempest:
- identity:
- tenant_name: tempest
- tenant_description: Tenant for Tempest test suite
- user_name: tempest
- user_password: tempest
- validation:
- ssh_timeout: 130
- private_net_name: tempest-net
- private_subnet_name: tempest-subnet
- private_subnet_cidr: 192.168.150.0/24
- router_name: tempest-router
- use_custom_images: False
- use_custom_flavors: False
-
-rally:
- deployment_name: opnfv-rally
- network_name: rally-net
- subnet_name: rally-subnet
- subnet_cidr: 192.168.140.0/24
- router_name: rally-router
-
-vIMS:
- general:
- tenant_name: vIMS
- tenant_description: vIMS Functionality Testing
- images:
- ubuntu:
- image_url: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- image_name: ubuntu_14.04
- centos:
- image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1510.qcow2
- image_name: centos_7
- cloudify:
- blueprint:
- url: https://github.com/boucherv-orange/cloudify-manager-blueprints.git
- branch: "3.3.1-build"
- requierments:
- ram_min: 3000
- os_image: centos_7
- inputs:
- keystone_username: ""
- keystone_password: ""
- keystone_tenant_name: ""
- keystone_url: ""
- manager_public_key_name: 'manager-kp'
- agent_public_key_name: 'agent-kp'
- image_id: ""
- flavor_id: "3"
- external_network_name: ""
- ssh_user: centos
- agents_user: ubuntu
- clearwater:
- blueprint:
- file_name: 'openstack-blueprint.yaml'
- name: "clearwater-opnfv"
- destination_folder: "opnfv-cloudify-clearwater"
- url: https://github.com/Orange-OpenSource/opnfv-cloudify-clearwater.git
- branch: "stable"
- deployment-name: 'clearwater-opnfv'
- requierments:
- ram_min: 1700
- os_image: ubuntu_14.04
- inputs:
- image_id: ''
- flavor_id: ''
- agent_user: 'ubuntu'
- external_network_name: ''
- public_domain: clearwater.opnfv
-ONOS:
- general:
- onosbench_username: 'root'
- onosbench_password: 'root'
- onoscli_username: 'root'
- onoscli_password: 'root'
- runtimeout: 300
- environment:
- OCT: '10.20.0.1'
- OC1: '10.20.0.7'
- OC2: '10.20.0.7'
- OC3: '10.20.0.7'
- OCN: '10.20.0.4'
- OCN2: '10.20.0.5'
- installer_master: '10.20.0.2'
- installer_master_username: 'root'
- installer_master_password: 'r00tme'
-multisite:
- fuel_environment:
- installer_username: 'root'
- installer_password: 'r00tme'
- compass_environment:
- installer_username: 'root'
- installer_password: 'root'
- multisite_controller_ip: '10.1.0.50'
-promise:
- tenant_name: promise
- tenant_description: promise Functionality Testing
- user_name: promiser
- user_pwd: test
- image_name: promise-img
- flavor_name: promise-flavor
- flavor_vcpus: 1
- flavor_ram: 128
- flavor_disk: 0
- network_name: promise-net
- subnet_name: promise-subnet
- subnet_cidr: 192.168.121.0/24
- router_name: promise-router
-
-example:
- example_vm_name: example-vm
- example_flavor: m1.small
- example_image_name: functest-example-vm
- example_private_net_name: example-net
- example_private_subnet_name: example-subnet
- example_private_subnet_cidr: 192.168.170.0/24
- example_router_name: example-router
- example_sg_name: example-sg
- example_sg_descr: Example Security group
-
-results:
- test_db_url: http://testresults.opnfv.org/test/api/v1
+general:
+ directories:
+ # Relative to the path where the repo is cloned:
+ dir_vping: functest/opnfv_tests/openstack/vping
+ dir_odl: functest/opnfv_tests/sdn/odl
+ dir_rally: functest/opnfv_tests/openstack/rally
+ dir_tempest_cases: functest/opnfv_tests/openstack/tempest/custom_tests
+ dir_vIMS: functest/opnfv_tests/vnf/ims
+ dir_onos: functest/opnfv_tests/sdn/onos/teston
+ dir_onos_sfc: functest/opnfv_tests/sdn/onos/sfc
+
+ # Absolute path
+ dir_home: /home/opnfv
+ dir_repos: /home/opnfv/repos
+ dir_repo_functest: /home/opnfv/repos/functest
+ dir_repo_rally: /home/opnfv/repos/rally
+ dir_repo_tempest: /home/opnfv/repos/tempest
+ dir_repo_releng: /home/opnfv/repos/releng
+ dir_repo_vims_test: /home/opnfv/repos/vims-test
+ dir_repo_sdnvpn: /home/opnfv/repos/sdnvpn
+ dir_repo_sfc: /home/opnfv/repos/sfc
+ dir_repo_onos: /home/opnfv/repos/onos
+ dir_repo_promise: /home/opnfv/repos/promise
+ dir_repo_doctor: /home/opnfv/repos/doctor
+ dir_repo_copper: /home/opnfv/repos/copper
+ dir_repo_ovno: /home/opnfv/repos/ovno
+ dir_repo_parser: /home/opnfv/repos/parser
+ dir_repo_domino: /home/opnfv/repos/domino
+ dir_repo_snaps: /home/opnfv/repos/snaps
+ dir_functest: /home/opnfv/functest
+ dir_functest_test: /home/opnfv/repos/functest/functest/opnfv_tests
+ dir_results: /home/opnfv/functest/results
+ dir_functest_conf: /home/opnfv/functest/conf
+ dir_functest_data: /home/opnfv/functest/data
+ dir_vIMS_data: /home/opnfv/functest/data/vIMS/
+ dir_rally_inst: /home/opnfv/.rally
+
+ openstack:
+ creds: /home/opnfv/functest/conf/openstack.creds
+ snapshot_file: /home/opnfv/functest/conf/openstack_snapshot.yaml
+
+ image_name: Cirros-0.3.4
+ image_file_name: cirros-0.3.4-x86_64-disk.img
+ image_disk_format: qcow2
+
+ flavor_name: opnfv_flavor
+ flavor_ram: 512
+ flavor_disk: 1
+ flavor_vcpus: 1
+
+ # Private network for functest. Will be created by config_functest.py
+ neutron_private_net_name: functest-net
+ neutron_private_subnet_name: functest-subnet
+ neutron_private_subnet_cidr: 192.168.120.0/24
+ neutron_private_subnet_start: 192.168.120.2
+ neutron_private_subnet_end: 192.168.120.254
+ neutron_private_subnet_gateway: 192.168.120.254
+ neutron_router_name: functest-router
+
+ functest:
+ testcases_yaml: /home/opnfv/repos/functest/functest/ci/testcases.yaml
+
+healthcheck:
+ disk_image: /home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img
+ disk_format: qcow2
+ wait_time: 60
+
+snaps:
+ use_keystone: True
+ use_floating_ips: False
+
+vping:
+ ping_timeout: 200
+ vm_flavor: m1.tiny # adapt to your environment
+ vm_name_1: opnfv-vping-1
+ vm_name_2: opnfv-vping-2
+ image_name: functest-vping
+ vping_private_net_name: vping-net
+ vping_private_subnet_name: vping-subnet
+ vping_private_subnet_cidr: 192.168.130.0/24
+ vping_router_name: vping-router
+ vping_sg_name: vPing-sg
+ vping_sg_descr: Security group for vPing test case
+
+onos_sfc:
+ image_base_url: http://artifacts.opnfv.org/sfc/demo
+ image_name: TestSfcVm
+ image_file_name: firewall_block_image.img
+
+tempest:
+ identity:
+ tenant_name: tempest
+ tenant_description: Tenant for Tempest test suite
+ user_name: tempest
+ user_password: tempest
+ validation:
+ ssh_timeout: 130
+ private_net_name: tempest-net
+ private_subnet_name: tempest-subnet
+ private_subnet_cidr: 192.168.150.0/24
+ router_name: tempest-router
+ use_custom_images: False
+ use_custom_flavors: False
+
+rally:
+ deployment_name: opnfv-rally
+ network_name: rally-net
+ subnet_name: rally-subnet
+ subnet_cidr: 192.168.140.0/24
+ router_name: rally-router
+
+vIMS:
+ general:
+ tenant_name: vIMS
+ tenant_description: vIMS Functionality Testing
+ images:
+ ubuntu:
+ image_url: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ image_name: ubuntu_14.04
+ centos:
+ image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1510.qcow2
+ image_name: centos_7
+ cloudify:
+ blueprint:
+ url: https://github.com/boucherv-orange/cloudify-manager-blueprints.git
+ branch: "3.3.1-build"
+ requierments:
+ ram_min: 3000
+ os_image: centos_7
+ inputs:
+ keystone_username: ""
+ keystone_password: ""
+ keystone_tenant_name: ""
+ keystone_url: ""
+ manager_public_key_name: 'manager-kp'
+ agent_public_key_name: 'agent-kp'
+ image_id: ""
+ flavor_id: "3"
+ external_network_name: ""
+ ssh_user: centos
+ agents_user: ubuntu
+ clearwater:
+ blueprint:
+ file_name: 'openstack-blueprint.yaml'
+ name: "clearwater-opnfv"
+ destination_folder: "opnfv-cloudify-clearwater"
+ url: https://github.com/Orange-OpenSource/opnfv-cloudify-clearwater.git
+ branch: "stable"
+ deployment-name: 'clearwater-opnfv'
+ requierments:
+ ram_min: 1700
+ os_image: ubuntu_14.04
+ inputs:
+ image_id: ''
+ flavor_id: ''
+ agent_user: 'ubuntu'
+ external_network_name: ''
+ public_domain: clearwater.opnfv
+ONOS:
+ general:
+ onosbench_username: 'root'
+ onosbench_password: 'root'
+ onoscli_username: 'root'
+ onoscli_password: 'root'
+ runtimeout: 300
+ environment:
+ OCT: '10.20.0.1'
+ OC1: '10.20.0.7'
+ OC2: '10.20.0.7'
+ OC3: '10.20.0.7'
+ OCN: '10.20.0.4'
+ OCN2: '10.20.0.5'
+ installer_master: '10.20.0.2'
+ installer_master_username: 'root'
+ installer_master_password: 'r00tme'
+multisite:
+ fuel_environment:
+ installer_username: 'root'
+ installer_password: 'r00tme'
+ compass_environment:
+ installer_username: 'root'
+ installer_password: 'root'
+ multisite_controller_ip: '10.1.0.50'
+promise:
+ tenant_name: promise
+ tenant_description: promise Functionality Testing
+ user_name: promiser
+ user_pwd: test
+ image_name: promise-img
+ flavor_name: promise-flavor
+ flavor_vcpus: 1
+ flavor_ram: 128
+ flavor_disk: 0
+ network_name: promise-net
+ subnet_name: promise-subnet
+ subnet_cidr: 192.168.121.0/24
+ router_name: promise-router
+
+example:
+ example_vm_name: example-vm
+ example_flavor: m1.small
+ example_image_name: functest-example-vm
+ example_private_net_name: example-net
+ example_private_subnet_name: example-subnet
+ example_private_subnet_cidr: 192.168.170.0/24
+ example_router_name: example-router
+ example_sg_name: example-sg
+ example_sg_descr: Example Security group
+
+results:
+ test_db_url: http://testresults.opnfv.org/test/api/v1
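The new snaps section is read at run time through the same helper the framework uses elsewhere in this change (functest_utils.get_functest_config, as used by feature_base.py below). A minimal sketch, assuming the dotted-path lookup behaves as in that usage:

    import functest.utils.functest_utils as ft_utils

    # Assumed to resolve dotted paths into config_functest.yaml, as feature_base.py
    # does for 'general.directories.*'.
    use_keystone = ft_utils.get_functest_config('snaps.use_keystone')
    use_floating_ips = ft_utils.get_functest_config('snaps.use_floating_ips')
    print(use_keystone, use_floating_ips)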
diff --git a/functest/ci/exec_test.sh b/functest/ci/exec_test.sh
index 913ce08ec..2b4cd8b45 100755
--- a/functest/ci/exec_test.sh
+++ b/functest/ci/exec_test.sh
@@ -61,17 +61,7 @@ function odl_tests(){
fi
}
-function sfc_prepare(){
- ids=($(neutron security-group-list|grep default|awk '{print $2}'))
- for id in ${ids[@]}; do
- if ! neutron security-group-show $id|grep "22/tcp" &>/dev/null; then
- neutron security-group-rule-create --protocol tcp \
- --port-range-min 22 --port-range-max 22 --direction ingress $id
- neutron security-group-rule-create --protocol tcp \
- --port-range-min 22 --port-range-max 22 --direction egress $id
- fi
- done
-}
+
function run_test(){
test_name=$1
@@ -93,14 +83,6 @@ function run_test(){
--ospassword ${OS_PASSWORD} \
--odlip $odl_ip --odlwebport $odl_port ${args}
;;
- "tempest_smoke_serial")
- python ${FUNCTEST_TEST_DIR}/openstack/tempest/run_tempest.py \
- $clean_flag -s -m smoke $report
- ;;
- "tempest_full_parallel")
- python ${FUNCTEST_TEST_DIR}/openstack/tempest/run_tempest.py \
- $serial_flag $clean_flag -m full $report
- ;;
"vims")
python ${FUNCTEST_TEST_DIR}/vnf/ims/vims.py $clean_flag $report
;;
@@ -140,19 +122,6 @@ function run_test(){
"moon")
python ${REPOS_DIR}/moon/tests/run_tests.py $report
;;
- "multisite")
- python ${FUNCTEST_TEST_DIR}/openstack/tempest/gen_tempest_conf.py
- python ${FUNCTEST_TEST_DIR}/openstack/tempest/run_tempest.py \
- $clean_flag -s -m feature_multisite $report \
- -c ${FUNCTEST_TEST_DIR}/openstack/tempest/tempest_multisite.conf
- ;;
- "odl-sfc")
- ODL_SFC_DIR=${REPOS_DIR}/sfc/tests/functest/odl-sfc
- # pass FUNCTEST_REPO_DIR inside prepare_odl_sfc.bash
- FUNCTEST_REPO_DIR=${FUNCTEST_REPO_DIR} python ${ODL_SFC_DIR}/prepare_odl_sfc.py || exit $?
- source ${ODL_SFC_DIR}/tackerc
- python ${ODL_SFC_DIR}/sfc.py $report
- ;;
*)
echo "The test case '${test_name}' does not exist."
exit 1
@@ -197,10 +166,6 @@ done
echo "Sourcing Credentials ${creds} to run the test.."
source ${creds}
-# ODL Boron workaround to create additional flow rules to allow port 22 TCP
-if [[ $DEPLOY_SCENARIO == *"odl_l2-sfc"* ]]; then
- sfc_prepare
-fi
# Run test
run_test $TEST
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index d2a64aea1..557ba08dd 100644..100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -124,6 +124,7 @@ def run_test(test, tier_name):
logger.info("Running test case '%s'..." % test_name)
print_separator("=")
logger.debug("\n%s" % test)
+ source_rc_file()
if GlobalVariables.CLEAN_FLAG:
generate_os_snapshot()
@@ -140,9 +141,10 @@ def run_test(test, tier_name):
cls = getattr(module, run_dict['class'])
test_case = cls()
result = test_case.run()
- if (result == testcase_base.TestcaseBase.EX_OK and
- GlobalVariables.REPORT_FLAG):
- result = test_case.push_to_db()
+ if result == testcase_base.TestcaseBase.EX_OK:
+ if GlobalVariables.REPORT_FLAG:
+ test_case.push_to_db()
+ result = test_case.check_criteria()
except ImportError:
logger.exception("Cannot import module {}".format(
run_dict['module']))
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index d483e589e..6f57c7030 100755
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -67,7 +67,9 @@ tiers:
dependencies:
installer: ''
scenario: ''
-
+ run:
+ module: 'functest.opnfv_tests.openstack.tempest.tempest'
+ class: 'TempestSmokeSerial'
-
name: rally_sanity
criteria: 'success_rate == 100%'
@@ -106,6 +108,62 @@ tiers:
installer: ''
scenario: 'onos'
+ -
+ name: connection_check
+ criteria: 'status == "PASS"'
+ blocking: false
+ description: >-
+ This test case verifies the retrieval of OpenStack clients:
+ Keystone, Glance, Neutron and Nova and may perform some
+ simple queries. When the config value of
+ snaps.use_keystone is True, functest must have access to
+ the cloud's private network.
+
+ dependencies:
+ installer: ''
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.snaps.connection_check'
+ class: 'ConnectionCheck'
+
+ -
+ name: api_check
+ criteria: 'status == "PASS"'
+ blocking: false
+ description: >-
+ This test case verifies the retrieval of OpenStack clients:
+ Keystone, Glance, Neutron and Nova and may perform some
+ simple queries. When the config value of
+ snaps.use_keystone is True, functest must have access to
+ the cloud's private network.
+
+ dependencies:
+ installer: ''
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.snaps.api_check'
+ class: 'ApiCheck'
+
+ -
+ name: snaps_smoke
+ criteria: 'status == "PASS"'
+ blocking: false
+ description: >-
+ This test case contains tests that setup and destroy
+ environments with VMs with and without Floating IPs
+ with a newly created user and project. Set the config
+ value snaps.use_floating_ips (True|False) to toggle
+ this functionality. When the config value of
+ snaps.use_keystone is True, functest must have access to
+ the cloud's private network.
+
+ dependencies:
+ installer: ''
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.snaps.smoke'
+ class: 'SnapsSmoke'
+
-
name: features
order: 2
@@ -166,6 +224,10 @@ tiers:
dependencies:
installer: '(apex)|(joid)'
scenario: '^((?!fdio|lxd).)*$'
+ run:
+ module: 'functest.opnfv_tests.features.copper'
+ class: 'Copper'
+
-
name: moon
criteria: 'status == "PASS"'
@@ -184,6 +246,9 @@ tiers:
dependencies:
installer: '(fuel)|(compass)'
scenario: 'multisite'
+ run:
+ module: 'functest.opnfv_tests.openstack.tempest.tempest'
+ class: 'TempestMultisite'
-
name: odl-sfc
criteria: 'status == "PASS"'
@@ -193,6 +258,9 @@ tiers:
dependencies:
installer: '(apex)|(fuel)'
scenario: 'odl_l2-sfc'
+ run:
+ module: 'functest.opnfv_tests.features.odl_sfc'
+ class: 'OpenDaylightSFC'
-
name: onos_sfc
criteria: 'status == "PASS"'
@@ -215,7 +283,7 @@ tiers:
module: 'functest.opnfv_tests.vnf.rnc.parser'
class: 'Parser'
-
- name: openstack
+ name: components
order: 3
ci_loop: 'weekly'
description : >-
@@ -232,6 +300,9 @@ tiers:
dependencies:
installer: ''
scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.tempest.tempest'
+ class: 'TempestFullParallel'
-
name: rally_full
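Each new run: block names the Python module and class that implements the case. For reference, a minimal sketch of how such an entry resolves into a test object; the importlib call is an assumption, while the getattr/run logic appears in the run_tests.py hunk above:

    import importlib

    # One 'run:' entry from testcases.yaml, as a plain dict.
    run_dict = {
        'module': 'functest.opnfv_tests.openstack.tempest.tempest',
        'class': 'TempestSmokeSerial',
    }

    module = importlib.import_module(run_dict['module'])   # assumed import mechanism
    cls = getattr(module, run_dict['class'])
    test_case = cls()
    result = test_case.run()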
diff --git a/functest/cli/commands/cli_testcase.py b/functest/cli/commands/cli_testcase.py
index efe177d52..70a77a142 100644
--- a/functest/cli/commands/cli_testcase.py
+++ b/functest/cli/commands/cli_testcase.py
@@ -50,10 +50,12 @@ class CliTestcase:
click.echo("Functest environment is not ready. "
"Run first 'functest env prepare'")
else:
- if noclean:
- cmd = ("python %s/functest/ci/run_tests.py "
- "-n -t %s" % (ft_constants.FUNCTEST_REPO_DIR, testname))
- else:
- cmd = ("python %s/functest/ci/run_tests.py "
- "-t %s" % (ft_constants.FUNCTEST_REPO_DIR, testname))
- ft_utils.execute_command(cmd)
+ tests = testname.split(",")
+ for test in tests:
+ if noclean:
+ cmd = ("python %s/functest/ci/run_tests.py "
+ "-n -t %s" % (ft_constants.FUNCTEST_REPO_DIR, test))
+ else:
+ cmd = ("python %s/functest/ci/run_tests.py "
+ "-t %s" % (ft_constants.FUNCTEST_REPO_DIR, test))
+ ft_utils.execute_command(cmd)
diff --git a/functest/core/feature_base.py b/functest/core/feature_base.py
new file mode 100644
index 000000000..01a27f305
--- /dev/null
+++ b/functest/core/feature_base.py
@@ -0,0 +1,58 @@
+import time
+
+import testcase_base as base
+import functest.utils.functest_utils as ft_utils
+import functest.utils.functest_logger as ft_logger
+
+
+class FeatureBase(base.TestcaseBase):
+ def __init__(self, project='functest', case='', repo='', cmd=''):
+ super(FeatureBase, self).__init__()
+ self.project_name = project
+ self.case_name = case
+ self.cmd = cmd
+ self.repo = self.get_conf('general.directories.{}'.format(repo))
+ self.result_file = self.get_result_file()
+ self.logger = ft_logger.Logger(project).getLogger()
+
+ def run(self, **kwargs):
+ self.prepare()
+ self.start_time = time.time()
+ ret = ft_utils.execute_command(self.cmd, output_file=self.result_file)
+ self.stop_time = time.time()
+ self.post()
+ self.parse_results(ret)
+ self.log_results()
+ return base.TestcaseBase.EX_OK
+
+ def prepare(self, **kwargs):
+ pass
+
+ def post(self, **kwargs):
+ pass
+
+ def parse_results(self, ret):
+ exit_code = base.TestcaseBase.EX_OK
+ if ret == 0:
+ self.logger.info("{} OK".format(self.project_name))
+ self.criteria = 'PASS'
+ else:
+ self.logger.info("{} FAILED".format(self.project_name))
+ exit_code = base.TestcaseBase.EX_RUN_ERROR
+ self.criteria = "FAIL"
+
+ return exit_code
+
+ def get_result_file(self):
+ dir = self.get_conf('general.directories.dir_results')
+ return "{}/{}.log".format(dir, self.project_name)
+
+ def log_results(self):
+ ft_utils.logger_test_results(self.project_name,
+ self.case_name,
+ self.criteria,
+ self.details)
+
+ @staticmethod
+ def get_conf(parameter):
+ return ft_utils.get_functest_config(parameter)
diff --git a/functest/core/pytest_suite_runner.py b/functest/core/pytest_suite_runner.py
new file mode 100644
index 000000000..2d5b2667b
--- /dev/null
+++ b/functest/core/pytest_suite_runner.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import testcase_base as base
+import unittest
+import time
+
+
+class PyTestSuiteRunner(base.TestcaseBase):
+ """
+ This superclass is designed to execute pre-configured unittest.TestSuite()
+ objects
+ """
+ def __init__(self):
+ super(PyTestSuiteRunner, self).__init__()
+ self.suite = None
+
+ def run(self, **kwargs):
+ """
+ Starts test execution from the functest framework
+ """
+ self.start_time = time.time()
+ result = unittest.TextTestRunner(verbosity=2).run(self.suite)
+ self.stop_time = time.time()
+
+ if result.errors:
+ self.logger.error('Number of errors in test suite - ' +
+ str(len(result.errors)))
+ for test, message in result.errors:
+ self.logger.error(str(test) + " ERROR with " + message)
+
+ if result.failures:
+ self.logger.error('Number of failures in test suite - ' +
+ str(len(result.failures)))
+ for test, message in result.failures:
+ self.logger.error(str(test) + " FAILED with " + message)
+
+ if (result.errors and len(result.errors) > 0) \
+ or (result.failures and len(result.failures) > 0):
+ self.logger.info("%s FAILED" % self.case_name)
+ self.criteria = 'FAIL'
+ exit_code = base.TestcaseBase.EX_RUN_ERROR
+ else:
+ self.logger.info("%s OK" % self.case_name)
+ exit_code = base.TestcaseBase.EX_OK
+ self.criteria = 'PASS'
+
+ self.details = {}
+ return exit_code
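PyTestSuiteRunner leaves self.suite empty; a minimal sketch of a hypothetical subclass that feeds it a pre-configured unittest suite (MyApiTests and my_api_check are placeholders, and case_name is assumed to be an attribute set up by TestcaseBase):

    import unittest

    import functest.core.pytest_suite_runner as suite_runner


    class MyApiTests(unittest.TestCase):
        """Placeholder unittest.TestCase executed by the runner."""
        def test_something(self):
            self.assertTrue(True)


    class MyApiCheck(suite_runner.PyTestSuiteRunner):
        def __init__(self):
            super(MyApiCheck, self).__init__()
            self.case_name = "my_api_check"   # assumed attribute from TestcaseBase
            # Load the placeholder tests into the suite run by PyTestSuiteRunner.run()
            self.suite = unittest.TestLoader().loadTestsFromTestCase(MyApiTests)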
diff --git a/functest/core/testcase_base.py b/functest/core/testcase_base.py
index e869803d8..838b63983 100644
--- a/functest/core/testcase_base.py
+++ b/functest/core/testcase_base.py
@@ -18,6 +18,7 @@ class TestcaseBase(object):
EX_OK = os.EX_OK
EX_RUN_ERROR = os.EX_SOFTWARE
EX_PUSH_TO_DB_ERROR = os.EX_SOFTWARE - 1
+ EX_TESTCASE_FAILED = os.EX_SOFTWARE - 2
logger = ft_logger.Logger(__name__).getLogger()
@@ -29,6 +30,15 @@ class TestcaseBase(object):
self.start_time = ""
self.stop_time = ""
+ def check_criteria(self):
+ try:
+ assert self.criteria
+ if self.criteria == 'PASS':
+ return TestcaseBase.EX_OK
+ except:
+ self.logger.error("Please run test before checking the results")
+ return TestcaseBase.EX_TESTCASE_FAILED
+
def run(self, **kwargs):
self.logger.error("Run must be implemented")
return TestcaseBase.EX_RUN_ERROR
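
check_criteria() maps the criteria string recorded by run() onto a process exit code, so the runner can report failures without every test calling sys.exit(). A rough sketch of the calling convention (run_and_report is illustrative, not the actual run_tests.py code):

    from functest.core import testcase_base


    def run_and_report(test):
        # run() executes the case and sets test.criteria;
        # check_criteria() then maps it to an exit code.
        if test.run() != testcase_base.TestcaseBase.EX_OK:
            return testcase_base.TestcaseBase.EX_RUN_ERROR
        return test.check_criteria()
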
diff --git a/functest/opnfv_tests/features/copper.py b/functest/opnfv_tests/features/copper.py
index d003779e8..8d5393c95 100755
--- a/functest/opnfv_tests/features/copper.py
+++ b/functest/opnfv_tests/features/copper.py
@@ -14,70 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import argparse
-import sys
-import time
+import functest.core.feature_base as base
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-import functest.utils.functest_constants as ft_constants
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-COPPER_REPO_DIR = ft_constants.COPPER_REPO_DIR
-RESULTS_DIR = ft_constants.FUNCTEST_RESULTS_DIR
-
-logger = ft_logger.Logger("copper").getLogger()
-
-
-def main():
- cmd = "%s/tests/run.sh %s/tests" % (COPPER_REPO_DIR, COPPER_REPO_DIR)
-
- start_time = time.time()
-
- log_file = RESULTS_DIR + "/copper.log"
- ret_val = functest_utils.execute_command(cmd,
- output_file=log_file)
-
- stop_time = time.time()
- duration = round(stop_time - start_time, 1)
- if ret_val == 0:
- logger.info("COPPER PASSED")
- test_status = 'PASS'
- else:
- logger.info("COPPER FAILED")
- test_status = 'FAIL'
-
- details = {
- 'timestart': start_time,
- 'duration': duration,
- 'status': test_status,
- }
- functest_utils.logger_test_results("Copper",
- "copper-notification",
- details['status'], details)
- try:
- if args.report:
- functest_utils.push_results_to_db("copper",
- "copper-notification",
- start_time,
- stop_time,
- details['status'],
- details)
- logger.info("COPPER results pushed to DB")
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
-
- if ret_val != 0:
- sys.exit(-1)
-
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
+class Copper(base.FeatureBase):
+ def __init__(self):
+ super(Copper, self).__init__(project='copper',
+ case='copper-notification',
+ repo='dir_repo_copper')
+ self.cmd = "%s/tests/run.sh %s/tests" % (self.repo, self.repo)
diff --git a/functest/opnfv_tests/features/odl_sfc.py b/functest/opnfv_tests/features/odl_sfc.py
new file mode 100644
index 000000000..b194b2840
--- /dev/null
+++ b/functest/opnfv_tests/features/odl_sfc.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import functest.core.feature_base as base
+
+
+class OpenDaylightSFC(base.FeatureBase):
+
+ def __init__(self):
+ super(OpenDaylightSFC, self).__init__(project='sfc',
+ case='functest-odl-sfc',
+ repo='dir_repo_sfc')
+ self.cmd = 'cd %s/tests/functest && python ./run_tests.py' % self.repo
diff --git a/functest/opnfv_tests/features/sdnvpn.py b/functest/opnfv_tests/features/sdnvpn.py
index 451299eb3..451299eb3 100644..100755
--- a/functest/opnfv_tests/features/sdnvpn.py
+++ b/functest/opnfv_tests/features/sdnvpn.py
diff --git a/functest/opnfv_tests/openstack/snaps/__init__.py b/functest/opnfv_tests/openstack/snaps/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/snaps/__init__.py
diff --git a/functest/opnfv_tests/openstack/snaps/api_check.py b/functest/opnfv_tests/openstack/snaps/api_check.py
new file mode 100644
index 000000000..278892094
--- /dev/null
+++ b/functest/opnfv_tests/openstack/snaps/api_check.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import functest.utils.functest_utils as ft_utils
+from functest.core.pytest_suite_runner import PyTestSuiteRunner
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+from snaps import test_suite_builder
+import unittest
+
+
+class ApiCheck(PyTestSuiteRunner):
+ """
+ This test executes the Python Tests included with the SNAPS libraries
+ that exercise many of the OpenStack APIs within Keystone, Glance, Neutron,
+ and Nova
+ """
+ def __init__(self):
+ super(ApiCheck, self).__init__()
+
+ self.suite = unittest.TestSuite()
+ self.case_name = "api_check"
+ creds_file = ft_utils.get_functest_config('general.openstack.creds')
+ use_key = ft_utils.get_functest_config('snaps.use_keystone')
+ ext_net_name = snaps_utils.get_ext_net_name()
+
+ test_suite_builder.add_openstack_api_tests(self.suite, creds_file,
+ ext_net_name,
+ use_keystone=use_key)
diff --git a/functest/opnfv_tests/openstack/snaps/connection_check.py b/functest/opnfv_tests/openstack/snaps/connection_check.py
new file mode 100644
index 000000000..c2f5b1027
--- /dev/null
+++ b/functest/opnfv_tests/openstack/snaps/connection_check.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import functest.utils.functest_utils as ft_utils
+from functest.core.pytest_suite_runner import PyTestSuiteRunner
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+from snaps import test_suite_builder
+import unittest
+
+
+class ConnectionCheck(PyTestSuiteRunner):
+ """
+ This test executes the Python Tests included with the SNAPS libraries
+ that simply obtain the different OpenStack clients and may perform
+ simple queries
+ """
+ def __init__(self):
+ super(ConnectionCheck, self).__init__()
+
+ self.suite = unittest.TestSuite()
+ self.case_name = "connection_check"
+ creds_file = ft_utils.get_functest_config('general.openstack.creds')
+ use_key = ft_utils.get_functest_config('snaps.use_keystone')
+ ext_net_name = snaps_utils.get_ext_net_name()
+
+ test_suite_builder.add_openstack_client_tests(self.suite, creds_file,
+ ext_net_name,
+ use_keystone=use_key)
diff --git a/functest/opnfv_tests/openstack/snaps/smoke.py b/functest/opnfv_tests/openstack/snaps/smoke.py
new file mode 100644
index 000000000..f66c17ff2
--- /dev/null
+++ b/functest/opnfv_tests/openstack/snaps/smoke.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import functest.utils.functest_utils as ft_utils
+from functest.core.pytest_suite_runner import PyTestSuiteRunner
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+from snaps import test_suite_builder
+import unittest
+import os
+
+
+class SnapsSmoke(PyTestSuiteRunner):
+ """
+ This test executes the Python Tests included with the SNAPS libraries
+ that exercise many of the OpenStack APIs within Keystone, Glance, Neutron,
+ and Nova
+ """
+ def __init__(self):
+ super(SnapsSmoke, self).__init__()
+
+ self.suite = unittest.TestSuite()
+ self.case_name = "snaps_smoke"
+ creds_file = ft_utils.get_functest_config('general.openstack.creds')
+ use_key = ft_utils.get_functest_config('snaps.use_keystone')
+ use_fip = ft_utils.get_functest_config('snaps.use_floating_ips')
+ ext_net_name = snaps_utils.get_ext_net_name()
+
+ # Tests requiring floating IPs leverage files contained within the
+ # SNAPS repository and are found relative to that path
+ if use_fip:
+ snaps_dir = ft_utils.get_functest_config(
+ 'general.directories.dir_repo_snaps') + '/snaps'
+ os.chdir(snaps_dir)
+
+ test_suite_builder.add_openstack_integration_tests(
+ self.suite, creds_file, ext_net_name, use_keystone=use_key,
+ use_floating_ips=use_fip)
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
new file mode 100644
index 000000000..a25ad3e0d
--- /dev/null
+++ b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import functest.utils.functest_utils as ft_utils
+from snaps.openstack.tests import openstack_tests
+from snaps.openstack.utils import neutron_utils
+
+
+def get_ext_net_name():
+ """
+ Returns the first external network name
+ :return:
+ """
+ os_env_file = ft_utils.get_functest_config('general.openstack.creds')
+ os_creds = openstack_tests.get_credentials(os_env_file=os_env_file)
+ neutron = neutron_utils.neutron_client(os_creds)
+ ext_nets = neutron_utils.get_external_networks(neutron)
+ return ext_nets[0]['network']['name']
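
get_ext_net_name() builds credentials from the functest RC file and returns the name of the first external network reported by Neutron; if no external network exists, the ext_nets[0] lookup raises. A small usage sketch, assuming a valid general.openstack.creds entry:

    from functest.opnfv_tests.openstack.snaps import snaps_utils

    # Requires the RC file referenced by general.openstack.creds and at
    # least one external network; otherwise this call will fail.
    ext_net_name = snaps_utils.get_ext_net_name()
    print("Using external network: %s" % ext_net_name)
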
diff --git a/functest/opnfv_tests/openstack/tempest/__init__.py b/functest/opnfv_tests/openstack/tempest/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/__init__.py
diff --git a/functest/opnfv_tests/openstack/tempest/gen_tempest_conf.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index 1216a671d..38b97e74e 100755..100644
--- a/functest/opnfv_tests/openstack/tempest/gen_tempest_conf.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -7,38 +7,117 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-# Execute Multisite Tempest test cases
-##
-
import ConfigParser
import os
import re
import shutil
-import functest.utils.functest_utils as ft_utils
-import functest.utils.functest_logger as ft_logger
-from run_tempest import configure_tempest
-from run_tempest import TEMPEST_RESULTS_DIR
-import functest.utils.functest_constants as ft_constants
-logger = ft_logger.Logger("gen_tempest_conf").getLogger()
+import functest.utils.functest_constants as ft_constants
+import functest.utils.functest_utils as ft_utils
+import opnfv.utils.constants as releng_constants
+
+
+IMAGE_ID_ALT = None
+FLAVOR_ID_ALT = None
+REPO_PATH = ft_constants.FUNCTEST_REPO_DIR
+GLANCE_IMAGE_PATH = os.path.join(ft_constants.FUNCTEST_DATA_DIR,
+ ft_constants.GLANCE_IMAGE_FILENAME)
+TEMPEST_TEST_LIST_DIR = ft_constants.TEMPEST_TEST_LIST_DIR
+TEMPEST_RESULTS_DIR = os.path.join(ft_constants.FUNCTEST_RESULTS_DIR,
+ 'tempest')
+TEMPEST_CUSTOM = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
+ 'test_list.txt')
+TEMPEST_BLACKLIST = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
+ 'blacklist.txt')
+TEMPEST_DEFCORE = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
+ 'defcore_req.txt')
+TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
+TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
CI_INSTALLER_TYPE = ft_constants.CI_INSTALLER_TYPE
CI_INSTALLER_IP = ft_constants.CI_INSTALLER_IP
-def configure_tempest_multisite(deployment_dir):
+def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
+ """
+ Add/update needed parameters into tempest.conf file generated by Rally
+ """
+ tempest_conf_file = deployment_dir + "/tempest.conf"
+ if os.path.isfile(tempest_conf_file):
+ logger.debug("Deleting old tempest.conf file...")
+ os.remove(tempest_conf_file)
+
+ logger.debug("Generating new tempest.conf file...")
+ cmd = "rally verify genconfig"
+ ft_utils.execute_command(cmd)
+
+ logger.debug("Finding tempest.conf file...")
+ if not os.path.isfile(tempest_conf_file):
+ logger.error("Tempest configuration file %s NOT found."
+ % tempest_conf_file)
+ return releng_constants.EXIT_RUN_ERROR
+
+ logger.debug("Updating selected tempest.conf parameters...")
+ config = ConfigParser.RawConfigParser()
+ config.read(tempest_conf_file)
+ config.set(
+ 'compute',
+ 'fixed_network_name',
+ ft_constants.TEMPEST_PRIVATE_NET_NAME)
+ if ft_constants.TEMPEST_USE_CUSTOM_IMAGES:
+ if IMAGE_ID is not None:
+ config.set('compute', 'image_ref', IMAGE_ID)
+ if IMAGE_ID_ALT is not None:
+ config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
+ if ft_constants.TEMPEST_USE_CUSTOM_FLAVORS:
+ if FLAVOR_ID is not None:
+ config.set('compute', 'flavor_ref', FLAVOR_ID)
+ if FLAVOR_ID_ALT is not None:
+ config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
+ config.set('identity', 'tenant_name', ft_constants.TEMPEST_TENANT_NAME)
+ config.set('identity', 'username', ft_constants.TEMPEST_USER_NAME)
+ config.set('identity', 'password', ft_constants.TEMPEST_USER_PASSWORD)
+ config.set(
+ 'validation', 'ssh_timeout', ft_constants.TEMPEST_SSH_TIMEOUT)
+
+ if ft_constants.OS_ENDPOINT_TYPE is not None:
+ services_list = ['compute',
+ 'volume',
+ 'image',
+ 'network',
+ 'data-processing',
+ 'object-storage',
+ 'orchestration']
+ sections = config.sections()
+ for service in services_list:
+ if service not in sections:
+ config.add_section(service)
+ config.set(service, 'endpoint_type',
+ ft_constants.OS_ENDPOINT_TYPE)
+
+ with open(tempest_conf_file, 'wb') as config_file:
+ config.write(config_file)
+
+ # Copy tempest.conf to /home/opnfv/functest/results/tempest/
+ shutil.copyfile(
+ tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf')
+
+ return releng_constants.EXIT_OK
+
+
+def configure_tempest_multisite(logger, deployment_dir):
"""
Add/update needed parameters into tempest.conf file generated by Rally
"""
logger.debug("configure the tempest")
- configure_tempest(deployment_dir)
+ configure_tempest(logger, deployment_dir)
logger.debug("Finding tempest.conf file...")
tempest_conf_old = os.path.join(deployment_dir, '/tempest.conf')
if not os.path.isfile(tempest_conf_old):
logger.error("Tempest configuration file %s NOT found."
% tempest_conf_old)
- exit(-1)
+ return releng_constants.EXIT_RUN_ERROR
# Copy tempest.conf to /home/opnfv/functest/results/tempest/
cur_path = os.path.split(os.path.realpath(__file__))[0]
@@ -110,17 +189,4 @@ def configure_tempest_multisite(deployment_dir):
with open(tempest_conf_file, 'wb') as config_file:
config.write(config_file)
- return True
-
-
-def main():
-
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
- deployment_dir = ft_utils.get_deployment_dir()
- configure_tempest_multisite(deployment_dir)
-
-
-if __name__ == '__main__':
- main()
+ return releng_constants.EXIT_OK
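
configure_tempest() now receives the logger and optional image/flavor IDs from its caller and reports problems through the releng exit constants instead of calling exit(). A sketch of the expected call site (the logger name and error handling are illustrative):

    import functest.utils.functest_logger as ft_logger
    import functest.utils.functest_utils as ft_utils
    import opnfv.utils.constants as releng_constants

    from functest.opnfv_tests.openstack.tempest import conf_utils

    logger = ft_logger.Logger("tempest_conf").getLogger()

    # The deployment dir is the Rally/Tempest deployment directory; the
    # image and flavor IDs are only honoured when the corresponding
    # TEMPEST_USE_CUSTOM_* constants are set.
    deployment_dir = ft_utils.get_deployment_dir()
    res = conf_utils.configure_tempest(logger, deployment_dir,
                                       IMAGE_ID=None, FLAVOR_ID=None)
    if res != releng_constants.EXIT_OK:
        raise RuntimeError("tempest.conf generation failed")
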
diff --git a/functest/opnfv_tests/openstack/tempest/run_tempest.py b/functest/opnfv_tests/openstack/tempest/run_tempest.py
deleted file mode 100755
index 6406cd193..000000000
--- a/functest/opnfv_tests/openstack/tempest/run_tempest.py
+++ /dev/null
@@ -1,451 +0,0 @@
-#!/usr/bin/env python
-#
-# Description:
-# Runs tempest and pushes the results to the DB
-#
-# Authors:
-# morgan.richomme@orange.com
-# jose.lausuch@ericsson.com
-# viktor.tikkanen@nokia.com
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import ConfigParser
-import os
-import re
-import shutil
-import subprocess
-import sys
-import time
-
-import argparse
-import yaml
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-import functest.utils.functest_constants as ft_constants
-
-modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
- 'identity', 'image', 'network', 'object_storage', 'orchestration',
- 'telemetry', 'volume', 'custom', 'defcore', 'feature_multisite']
-
-""" tests configuration """
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug",
- help="Debug mode",
- action="store_true")
-parser.add_argument("-s", "--serial",
- help="Run tests in one thread",
- action="store_true")
-parser.add_argument("-m", "--mode",
- help="Tempest test mode [smoke, all]",
- default="smoke")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-parser.add_argument("-c", "--conf",
- help="User-specified Tempest config file location",
- default="")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("run_tempest").getLogger()
-
-GLANCE_IMAGE_NAME = ft_constants.GLANCE_IMAGE_NAME
-GLANCE_IMAGE_FILENAME = ft_constants.GLANCE_IMAGE_FILENAME
-GLANCE_IMAGE_FORMAT = ft_constants.GLANCE_IMAGE_FORMAT
-GLANCE_IMAGE_PATH = os.path.join(ft_constants.FUNCTEST_DATA_DIR,
- GLANCE_IMAGE_FILENAME)
-
-IMAGE_ID_ALT = None
-
-FLAVOR_NAME = ft_constants.FLAVOR_NAME
-FLAVOR_RAM = ft_constants.FLAVOR_RAM
-FLAVOR_DISK = ft_constants.FLAVOR_DISK
-FLAVOR_VCPUS = ft_constants.FLAVOR_VCPUS
-FLAVOR_ID_ALT = None
-
-TEMPEST_PRIVATE_NET_NAME = ft_constants.TEMPEST_PRIVATE_NET_NAME
-TEMPEST_PRIVATE_SUBNET_NAME = ft_constants.TEMPEST_PRIVATE_SUBNET_NAME
-TEMPEST_PRIVATE_SUBNET_CIDR = ft_constants.TEMPEST_PRIVATE_SUBNET_CIDR
-TEMPEST_ROUTER_NAME = ft_constants.TEMPEST_ROUTER_NAME
-TEMPEST_TENANT_NAME = ft_constants.TEMPEST_TENANT_NAME
-TEMPEST_TENANT_DESCRIPTION = ft_constants.TEMPEST_TENANT_DESCRIPTION
-TEMPEST_USER_NAME = ft_constants.TEMPEST_USER_NAME
-TEMPEST_USER_PASSWORD = ft_constants.TEMPEST_USER_PASSWORD
-TEMPEST_SSH_TIMEOUT = ft_constants.TEMPEST_SSH_TIMEOUT
-TEMPEST_USE_CUSTOM_IMAGES = ft_constants.TEMPEST_USE_CUSTOM_IMAGES
-TEMPEST_USE_CUSTOM_FLAVORS = ft_constants.TEMPEST_USE_CUSTOM_FLAVORS
-
-RESULTS_DIR = ft_constants.FUNCTEST_RESULTS_DIR
-TEMPEST_RESULTS_DIR = os.path.join(RESULTS_DIR, 'tempest')
-
-REPO_PATH = ft_constants.FUNCTEST_REPO_DIR
-TEMPEST_TEST_LIST_DIR = ft_constants.TEMPEST_TEST_LIST_DIR
-TEMPEST_CUSTOM = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
- 'test_list.txt')
-TEMPEST_BLACKLIST = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
- 'blacklist.txt')
-TEMPEST_DEFCORE = os.path.join(REPO_PATH, TEMPEST_TEST_LIST_DIR,
- 'defcore_req.txt')
-TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
-TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
-
-
-class GlobalVariables:
- IMAGE_ID = None
- FLAVOR_ID = None
- MODE = "smoke"
-
-
-def get_info(file_result):
- test_run = ""
- duration = ""
- test_failed = ""
-
- p = subprocess.Popen('cat tempest.log',
- shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- for line in p.stdout.readlines():
- # print line,
- if (len(test_run) < 1):
- test_run = re.findall("[0-9]*\.[0-9]*s", line)
- if (len(duration) < 1):
- duration = re.findall("[0-9]*\ tests", line)
- regexp = r"(failures=[0-9]+)"
- if (len(test_failed) < 1):
- test_failed = re.findall(regexp, line)
-
- logger.debug("test_run:" + test_run)
- logger.debug("duration:" + duration)
-
-
-def create_tempest_resources():
- keystone_client = os_utils.get_keystone_client()
-
- logger.debug("Creating tenant and user for Tempest suite")
- tenant_id = os_utils.create_tenant(keystone_client,
- TEMPEST_TENANT_NAME,
- TEMPEST_TENANT_DESCRIPTION)
- if not tenant_id:
- logger.error("Error : Failed to create %s tenant"
- % TEMPEST_TENANT_NAME)
-
- user_id = os_utils.create_user(keystone_client, TEMPEST_USER_NAME,
- TEMPEST_USER_PASSWORD,
- None, tenant_id)
- if not user_id:
- logger.error("Error : Failed to create %s user" % TEMPEST_USER_NAME)
-
- logger.debug("Creating private network for Tempest suite")
- network_dic = \
- os_utils.create_shared_network_full(TEMPEST_PRIVATE_NET_NAME,
- TEMPEST_PRIVATE_SUBNET_NAME,
- TEMPEST_ROUTER_NAME,
- TEMPEST_PRIVATE_SUBNET_CIDR)
- if not network_dic:
- exit(1)
-
- if TEMPEST_USE_CUSTOM_IMAGES:
- # adding alternative image should be trivial should we need it
- logger.debug("Creating image for Tempest suite")
- _, GlobalVariables.IMAGE_ID = os_utils.get_or_create_image(
- GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH, GLANCE_IMAGE_FORMAT)
- if not GlobalVariables.IMAGE_ID:
- exit(-1)
-
- if TEMPEST_USE_CUSTOM_FLAVORS:
- # adding alternative flavor should be trivial should we need it
- logger.debug("Creating flavor for Tempest suite")
- _, GlobalVariables.FLAVOR_ID = os_utils.get_or_create_flavor(
- FLAVOR_NAME, FLAVOR_RAM, FLAVOR_DISK, FLAVOR_VCPUS)
- if not GlobalVariables.FLAVOR_ID:
- exit(-1)
-
-
-def configure_tempest(deployment_dir):
- """
- Add/update needed parameters into tempest.conf file generated by Rally
- """
-
- tempest_conf_file = deployment_dir + "/tempest.conf"
- if os.path.isfile(tempest_conf_file):
- logger.debug("Deleting old tempest.conf file...")
- os.remove(tempest_conf_file)
-
- logger.debug("Generating new tempest.conf file...")
- cmd = "rally verify genconfig"
- ft_utils.execute_command(cmd)
-
- logger.debug("Finding tempest.conf file...")
- if not os.path.isfile(tempest_conf_file):
- logger.error("Tempest configuration file %s NOT found."
- % tempest_conf_file)
- exit(-1)
-
- logger.debug("Updating selected tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
- config.set('compute', 'fixed_network_name', TEMPEST_PRIVATE_NET_NAME)
- if TEMPEST_USE_CUSTOM_IMAGES:
- if GlobalVariables.IMAGE_ID is not None:
- config.set('compute', 'image_ref', GlobalVariables.IMAGE_ID)
- if IMAGE_ID_ALT is not None:
- config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
- if TEMPEST_USE_CUSTOM_FLAVORS:
- if GlobalVariables.FLAVOR_ID is not None:
- config.set('compute', 'flavor_ref', GlobalVariables.FLAVOR_ID)
- if FLAVOR_ID_ALT is not None:
- config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- config.set('identity', 'tenant_name', TEMPEST_TENANT_NAME)
- config.set('identity', 'username', TEMPEST_USER_NAME)
- config.set('identity', 'password', TEMPEST_USER_PASSWORD)
- config.set('validation', 'ssh_timeout', TEMPEST_SSH_TIMEOUT)
-
- if ft_constants.OS_ENDPOINT_TYPE is not None:
- services_list = ['compute', 'volume', 'image', 'network',
- 'data-processing', 'object-storage', 'orchestration']
- sections = config.sections()
- for service in services_list:
- if service not in sections:
- config.add_section(service)
- config.set(service, 'endpoint_type',
- ft_constants.OS_ENDPOINT_TYPE)
-
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- # Copy tempest.conf to /home/opnfv/functest/results/tempest/
- shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf')
- return True
-
-
-def read_file(filename):
- with open(filename) as src:
- return [line.strip() for line in src.readlines()]
-
-
-def generate_test_list(deployment_dir, mode):
- logger.debug("Generating test case list...")
- if mode == 'defcore':
- shutil.copyfile(TEMPEST_DEFCORE, TEMPEST_RAW_LIST)
- elif mode == 'custom':
- if os.path.isfile(TEMPEST_CUSTOM):
- shutil.copyfile(TEMPEST_CUSTOM, TEMPEST_RAW_LIST)
- else:
- logger.error("Tempest test list file %s NOT found."
- % TEMPEST_CUSTOM)
- exit(-1)
- else:
- if mode == 'smoke':
- testr_mode = "smoke"
- elif mode == 'feature_multisite':
- testr_mode = " | grep -i kingbird "
- elif mode == 'full':
- testr_mode = ""
- else:
- testr_mode = 'tempest.api.' + mode
- cmd = ("cd " + deployment_dir + ";" + "testr list-tests " +
- testr_mode + ">" + TEMPEST_RAW_LIST + ";cd")
- ft_utils.execute_command(cmd)
-
-
-def apply_tempest_blacklist():
- logger.debug("Applying tempest blacklist...")
- cases_file = read_file(TEMPEST_RAW_LIST)
- result_file = open(TEMPEST_LIST, 'w')
- black_tests = []
- try:
- installer_type = ft_constants.CI_INSTALLER_TYPE
- deploy_scenario = ft_constants.CI_SCENARIO
- if (bool(installer_type) * bool(deploy_scenario)):
- # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the file
- black_list_file = open(TEMPEST_BLACKLIST)
- black_list_yaml = yaml.safe_load(black_list_file)
- black_list_file.close()
- for item in black_list_yaml:
- scenarios = item['scenarios']
- installers = item['installers']
- if (deploy_scenario in scenarios and
- installer_type in installers):
- tests = item['tests']
- for test in tests:
- black_tests.append(test)
- break
- except:
- black_tests = []
- logger.debug("Tempest blacklist file does not exist.")
-
- for cases_line in cases_file:
- for black_tests_line in black_tests:
- if black_tests_line in cases_line:
- break
- else:
- result_file.write(str(cases_line) + '\n')
- result_file.close()
-
-
-def run_tempest(OPTION):
- #
- # the "main" function of the script which launches Rally to run Tempest
- # :param option: tempest option (smoke, ..)
- # :return: void
- #
- logger.info("Starting Tempest test suite: '%s'." % OPTION)
- start_time = time.time()
- stop_time = start_time
- cmd_line = "rally verify start " + OPTION + " --system-wide"
-
- header = ("Tempest environment:\n"
- " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (ft_constants.CI_INSTALLER_TYPE,
- ft_constants.CI_SCENARIO,
- ft_constants.CI_NODE,
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+')
- f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+')
- f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+')
- f_env.write(header)
-
- # subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr)
- p = subprocess.Popen(
- cmd_line, shell=True,
- stdout=subprocess.PIPE,
- stderr=f_stderr,
- bufsize=1)
-
- with p.stdout:
- for line in iter(p.stdout.readline, b''):
- if re.search("\} tempest\.", line):
- logger.info(line.replace('\n', ''))
- f_stdout.write(line)
- p.wait()
-
- f_stdout.close()
- f_stderr.close()
- f_env.close()
-
- cmd_line = "rally verify show"
- output = ""
- p = subprocess.Popen(
- cmd_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- for line in p.stdout:
- if re.search("Tests\:", line):
- break
- output += line
- logger.info(output)
-
- cmd_line = "rally verify list"
- cmd = os.popen(cmd_line)
- output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
- # Format:
- # | UUID | Deployment UUID | smoke | tests | failures | Created at |
- # Duration | Status |
- num_tests = output[4]
- num_failures = output[5]
- time_start = output[6]
- duration = output[7]
- # Compute duration (lets assume it does not take more than 60 min)
- dur_min = int(duration.split(':')[1])
- dur_sec_float = float(duration.split(':')[2])
- dur_sec_int = int(round(dur_sec_float, 0))
- dur_sec_int = dur_sec_int + 60 * dur_min
- stop_time = time.time()
-
- try:
- diff = (int(num_tests) - int(num_failures))
- success_rate = 100 * diff / int(num_tests)
- except:
- success_rate = 0
-
- if 'smoke' in args.mode:
- case_name = 'tempest_smoke_serial'
- elif 'feature' in args.mode:
- case_name = args.mode.replace("feature_", "")
- else:
- case_name = 'tempest_full_parallel'
-
- status = ft_utils.check_success_rate(case_name, success_rate)
- logger.info("Tempest %s success_rate is %s%%, is marked as %s"
- % (case_name, success_rate, status))
-
- # Push results in payload of testcase
- if args.report:
- # add the test in error in the details sections
- # should be possible to do it during the test
- logger.debug("Pushing tempest results into DB...")
- with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
- output = myfile.read()
- error_logs = ""
-
- for match in re.findall('(.*?)[. ]*FAILED', output):
- error_logs += match
-
- # Generate json results for DB
- json_results = {"timestart": time_start, "duration": dur_sec_int,
- "tests": int(num_tests), "failures": int(num_failures),
- "errors": error_logs}
- logger.info("Results: " + str(json_results))
- # split Tempest smoke and full
-
- try:
- ft_utils.push_results_to_db("functest",
- case_name,
- start_time,
- stop_time,
- status,
- json_results)
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
-
- if status == "PASS":
- return 0
- else:
- return -1
-
-
-def main():
-
- if not (args.mode in modes):
- logger.error("Tempest mode not valid. "
- "Possible values are:\n" + str(modes))
- exit(-1)
-
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
- deployment_dir = ft_utils.get_deployment_dir()
- create_tempest_resources()
-
- if "" == args.conf:
- GlobalVariables.MODE = ""
- configure_tempest(deployment_dir)
- else:
- GlobalVariables.MODE = " --tempest-config " + args.conf
-
- generate_test_list(deployment_dir, args.mode)
- apply_tempest_blacklist()
-
- GlobalVariables.MODE += " --tests-file " + TEMPEST_LIST
- if args.serial:
- GlobalVariables.MODE += " --concur 1"
-
- ret_val = run_tempest(GlobalVariables.MODE)
- if ret_val != 0:
- sys.exit(-1)
-
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
new file mode 100644
index 000000000..ec0ca766b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import os
+import re
+import shutil
+import subprocess
+import time
+
+import yaml
+
+import conf_utils
+import functest.core.testcase_base as testcase_base
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import functest.utils.functest_constants as ft_constants
+import opnfv.utils.constants as releng_constants
+
+""" logging configuration """
+logger = ft_logger.Logger("Tempest").getLogger()
+
+
+class TempestCommon(testcase_base.TestcaseBase):
+
+ def __init__(self):
+ self.case_name = ""
+ self.MODE = ""
+ self.OPTION = ""
+ self.FLAVOR_ID = None
+ self.IMAGE_ID = None
+ self.DEPLOYMENT_DIR = ft_utils.get_deployment_dir()
+
+ def read_file(self, filename):
+ with open(filename) as src:
+ return [line.strip() for line in src.readlines()]
+
+ def create_tempest_resources(self):
+ keystone_client = os_utils.get_keystone_client()
+
+ logger.debug("Creating tenant and user for Tempest suite")
+ tenant_id = os_utils.create_tenant(
+ keystone_client,
+ ft_constants.TEMPEST_TENANT_NAME,
+ ft_constants.TEMPEST_TENANT_DESCRIPTION)
+ if not tenant_id:
+ logger.error("Error : Failed to create %s tenant"
+ % ft_constants.TEMPEST_TENANT_NAME)
+
+ user_id = os_utils.create_user(keystone_client,
+ ft_constants.TEMPEST_USER_NAME,
+ ft_constants.TEMPEST_USER_PASSWORD,
+ None, tenant_id)
+ if not user_id:
+ logger.error("Error : Failed to create %s user" %
+ ft_constants.TEMPEST_USER_NAME)
+
+ logger.debug("Creating private network for Tempest suite")
+ network_dic = \
+ os_utils.create_shared_network_full(
+ ft_constants.TEMPEST_PRIVATE_NET_NAME,
+ ft_constants.TEMPEST_PRIVATE_SUBNET_NAME,
+ ft_constants.TEMPEST_ROUTER_NAME,
+ ft_constants.TEMPEST_PRIVATE_SUBNET_CIDR)
+ if not network_dic:
+ return releng_constants.EXIT_RUN_ERROR
+
+ if ft_constants.TEMPEST_USE_CUSTOM_IMAGES:
+ # adding alternative image should be trivial should we need it
+ logger.debug("Creating image for Tempest suite")
+ _, self.IMAGE_ID = os_utils.get_or_create_image(
+ ft_constants.GLANCE_IMAGE_NAME, conf_utils.GLANCE_IMAGE_PATH,
+ ft_constants.GLANCE_IMAGE_FORMAT)
+ if not self.IMAGE_ID:
+ return releng_constants.EXIT_RUN_ERROR
+
+ if ft_constants.TEMPEST_USE_CUSTOM_FLAVORS:
+ # adding alternative flavor should be trivial should we need it
+ logger.debug("Creating flavor for Tempest suite")
+ _, self.FLAVOR_ID = os_utils.get_or_create_flavor(
+ ft_constants.FLAVOR_NAME,
+ ft_constants.FLAVOR_RAM,
+ ft_constants.FLAVOR_DISK,
+ ft_constants.FLAVOR_VCPUS)
+ if not self.FLAVOR_ID:
+ return releng_constants.EXIT_RUN_ERROR
+
+ return releng_constants.EXIT_OK
+
+ def generate_test_list(self, DEPLOYMENT_DIR):
+ logger.debug("Generating test case list...")
+ if self.MODE == 'defcore':
+ shutil.copyfile(
+ conf_utils.TEMPEST_DEFCORE, conf_utils.TEMPEST_RAW_LIST)
+ elif self.MODE == 'custom':
+ if os.path.isfile(conf_utils.TEMPEST_CUSTOM):
+ shutil.copyfile(
+ conf_utils.TEMPEST_CUSTOM, conf_utils.TEMPEST_RAW_LIST)
+ else:
+ logger.error("Tempest test list file %s NOT found."
+ % conf_utils.TEMPEST_CUSTOM)
+ return releng_constants.EXIT_RUN_ERROR
+ else:
+ if self.MODE == 'smoke':
+ testr_mode = "smoke"
+ elif self.MODE == 'feature_multisite':
+ testr_mode = " | grep -i kingbird "
+ elif self.MODE == 'full':
+ testr_mode = ""
+ else:
+ testr_mode = 'tempest.api.' + self.MODE
+ cmd = ("cd " + DEPLOYMENT_DIR + ";" + "testr list-tests " +
+ testr_mode + ">" + conf_utils.TEMPEST_RAW_LIST + ";cd")
+ ft_utils.execute_command(cmd)
+
+ return releng_constants.EXIT_OK
+
+ def apply_tempest_blacklist(self):
+ logger.debug("Applying tempest blacklist...")
+ cases_file = self.read_file(conf_utils.TEMPEST_RAW_LIST)
+ result_file = open(conf_utils.TEMPEST_LIST, 'w')
+ black_tests = []
+ try:
+ installer_type = ft_constants.CI_INSTALLER_TYPE
+ deploy_scenario = ft_constants.CI_SCENARIO
+ if (bool(installer_type) * bool(deploy_scenario)):
+ # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the
+ # file
+ black_list_file = open(conf_utils.TEMPEST_BLACKLIST)
+ black_list_yaml = yaml.safe_load(black_list_file)
+ black_list_file.close()
+ for item in black_list_yaml:
+ scenarios = item['scenarios']
+ installers = item['installers']
+ if (deploy_scenario in scenarios and
+ installer_type in installers):
+ tests = item['tests']
+ for test in tests:
+ black_tests.append(test)
+ break
+ except:
+ black_tests = []
+ logger.debug("Tempest blacklist file does not exist.")
+
+ for cases_line in cases_file:
+ for black_tests_line in black_tests:
+ if black_tests_line in cases_line:
+ break
+ else:
+ result_file.write(str(cases_line) + '\n')
+ result_file.close()
+ return releng_constants.EXIT_OK
+
+ def run(self):
+ if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
+ os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
+
+ # Pre-configuration
+ res = self.create_tempest_resources()
+ if res != releng_constants.EXIT_OK:
+ return res
+
+ res = conf_utils.configure_tempest(logger,
+ self.DEPLOYMENT_DIR,
+ self.IMAGE_ID,
+ self.FLAVOR_ID)
+ if res != releng_constants.EXIT_OK:
+ return res
+
+ res = self.generate_test_list(self.DEPLOYMENT_DIR)
+ if res != releng_constants.EXIT_OK:
+ return res
+
+ res = self.apply_tempest_blacklist()
+ if res != releng_constants.EXIT_OK:
+ return res
+
+ self.OPTION += (" --tests-file %s " % conf_utils.TEMPEST_LIST)
+
+ cmd_line = "rally verify start " + self.OPTION + " --system-wide"
+ logger.info("Starting Tempest test suite: '%s'." % cmd_line)
+
+ header = ("Tempest environment:\n"
+ " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
+ (ft_constants.CI_INSTALLER_TYPE,
+ ft_constants.CI_SCENARIO,
+ ft_constants.CI_NODE,
+ time.strftime("%a %b %d %H:%M:%S %Z %Y")))
+
+ f_stdout = open(conf_utils.TEMPEST_RESULTS_DIR + "/tempest.log", 'w+')
+ f_stderr = open(
+ conf_utils.TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+')
+ f_env = open(conf_utils.TEMPEST_RESULTS_DIR + "/environment.log", 'w+')
+ f_env.write(header)
+
+ # subprocess.call(cmd_line, shell=True,
+ # stdout=f_stdout, stderr=f_stderr)
+ p = subprocess.Popen(
+ cmd_line, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=f_stderr,
+ bufsize=1)
+
+ with p.stdout:
+ for line in iter(p.stdout.readline, b''):
+ if re.search("\} tempest\.", line):
+ logger.info(line.replace('\n', ''))
+ f_stdout.write(line)
+ p.wait()
+
+ f_stdout.close()
+ f_stderr.close()
+ f_env.close()
+
+ cmd_line = "rally verify show"
+ output = ""
+ p = subprocess.Popen(cmd_line,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ for line in p.stdout:
+ if re.search("Tests\:", line):
+ break
+ output += line
+ logger.info(output)
+
+ cmd_line = "rally verify list"
+ cmd = os.popen(cmd_line)
+ output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
+ # Format:
+ # | UUID | Deployment UUID | smoke | tests | failures | Created at |
+ # Duration | Status |
+ num_tests = output[4]
+ num_failures = output[5]
+ duration = output[7]
+ # Compute duration (let's assume it does not take more than 60 min)
+ dur_min = int(duration.split(':')[1])
+ dur_sec_float = float(duration.split(':')[2])
+ dur_sec_int = int(round(dur_sec_float, 0))
+ dur_sec_int = dur_sec_int + 60 * dur_min
+
+ try:
+ diff = (int(num_tests) - int(num_failures))
+ success_rate = 100 * diff / int(num_tests)
+ except:
+ success_rate = 0
+
+ if 'smoke' in self.MODE:
+ self.CASE_NAME = 'tempest_smoke_serial'
+ elif 'feature' in self.MODE:
+ self.CASE_NAME = self.MODE.replace(
+ "feature_", "")
+ else:
+ self.CASE_NAME = 'tempest_full_parallel'
+
+ status = ft_utils.check_success_rate(
+ self.CASE_NAME, success_rate)
+ logger.info("Tempest %s success_rate is %s%%, is marked as %s"
+ % (self.CASE_NAME, success_rate, status))
+
+ if status == "PASS":
+ return releng_constants.EXIT_OK
+ else:
+ return releng_constants.EXIT_RUN_ERROR
+
+
+class TempestSmokeSerial(TempestCommon):
+
+ def __init__(self):
+ TempestCommon.__init__(self)
+ self.case_name = "tempest_smoke_serial"
+ self.MODE = "smoke"
+ self.OPTION = "--concur 1"
+
+
+class TempestSmokeParallel(TempestCommon):
+
+ def __init__(self):
+ TempestCommon.__init__(self)
+ self.case_name = "tempest_smoke_parallel"
+ self.MODE = "smoke"
+ self.OPTION = ""
+
+
+class TempestFullParallel(TempestCommon):
+
+ def __init__(self):
+ TempestCommon.__init__(self)
+ self.case_name = "tempest_full_parallel"
+ self.MODE = "full"
+
+
+class TempestMultisite(TempestCommon):
+
+ def __init__(self):
+ TempestCommon.__init__(self)
+ self.case_name = "multisite"
+ self.MODE = "feature_multisite"
+ self.OPTION = "--concur 1"
+ conf_utils.configure_tempest_multisite(logger, self.DEPLOYMENT_DIR)
+
+
+class TempestCustom(TempestCommon):
+
+ def __init__(self, mode, option):
+ TempestCommon.__init__(self)
+ self.case_name = "tempest_custom"
+ self.MODE = mode
+ self.OPTION = option
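
The command-line entry point is gone: each Tempest variant is now a plain class that the framework instantiates and drives through run(). A minimal sketch, assuming a configured OpenStack environment:

    import opnfv.utils.constants as releng_constants

    from functest.opnfv_tests.openstack.tempest import tempest

    # Build the serial smoke variant and run it; run() returns the
    # releng exit constants instead of calling sys.exit() itself.
    case = tempest.TempestSmokeSerial()
    if case.run() != releng_constants.EXIT_OK:
        print("tempest_smoke_serial failed")
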
diff --git a/functest/opnfv_tests/openstack/vping/vping_ssh.py b/functest/opnfv_tests/openstack/vping/vping_ssh.py
index 8ae590eda..8ae590eda 100644..100755
--- a/functest/opnfv_tests/openstack/vping/vping_ssh.py
+++ b/functest/opnfv_tests/openstack/vping/vping_ssh.py
diff --git a/functest/opnfv_tests/openstack/vping/vping_userdata.py b/functest/opnfv_tests/openstack/vping/vping_userdata.py
index fa91c12a6..fa91c12a6 100644..100755
--- a/functest/opnfv_tests/openstack/vping/vping_userdata.py
+++ b/functest/opnfv_tests/openstack/vping/vping_userdata.py
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index 95440746c..0905e55cc 100755
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -20,10 +20,9 @@ from robot.errors import RobotError
import robot.run
from robot.utils.robottime import timestamp_to_secs
-import functest.core.testcase_base as testcase_base
+from functest.core import testcase_base
import functest.utils.functest_logger as ft_logger
import functest.utils.openstack_utils as op_utils
-import functest.utils.functest_constants as ft_constants
class ODLResultVisitor(ResultVisitor):
@@ -36,7 +35,7 @@ class ODLResultVisitor(ResultVisitor):
output['name'] = test.name
output['parent'] = test.parent.name
output['status'] = test.status
- output['startime'] = test.starttime
+ output['starttime'] = test.starttime
output['endtime'] = test.endtime
output['critical'] = test.critical
output['text'] = test.message
@@ -49,17 +48,17 @@ class ODLResultVisitor(ResultVisitor):
class ODLTests(testcase_base.TestcaseBase):
- repos = ft_constants.REPOS_DIR
+ repos = "/home/opnfv/repos/"
odl_test_repo = os.path.join(repos, "odl_test")
neutron_suite_dir = os.path.join(odl_test_repo,
"csit/suites/openstack/neutron")
basic_suite_dir = os.path.join(odl_test_repo,
"csit/suites/integration/basic")
- res_dir = os.path.join(ft_constants.FUNCTEST_RESULTS_DIR, "odl")
-
+ res_dir = '/home/opnfv/functest/results/odl/'
logger = ft_logger.Logger("opendaylight").getLogger()
def __init__(self):
+ testcase_base.TestcaseBase.__init__(self)
self.case_name = "odl"
@classmethod
@@ -79,8 +78,8 @@ class ODLTests(testcase_base.TestcaseBase):
return False
def parse_results(self):
- output_dir = os.path.join(self.res_dir, 'output.xml')
- result = ExecutionResult(output_dir)
+ xml_file = os.path.join(self.res_dir, 'output.xml')
+ result = ExecutionResult(xml_file)
visitor = ODLResultVisitor()
result.visit(visitor)
self.criteria = result.suite.status
@@ -89,7 +88,6 @@ class ODLTests(testcase_base.TestcaseBase):
self.details = {}
self.details['description'] = result.suite.name
self.details['tests'] = visitor.get_data()
- return self.criteria
def main(self, **kwargs):
dirs = [self.basic_suite_dir, self.neutron_suite_dir]
@@ -128,10 +126,8 @@ class ODLTests(testcase_base.TestcaseBase):
self.logger.info("\n" + stdout.read())
self.logger.info("ODL results were successfully generated")
try:
- test_res = self.parse_results()
+ self.parse_results()
self.logger.info("ODL results were successfully parsed")
- if test_res is not "PASS":
- return self.EX_RUN_ERROR
except RobotError as e:
self.logger.error("Run tests before publishing: %s" %
e.message)
@@ -146,11 +142,8 @@ class ODLTests(testcase_base.TestcaseBase):
def run(self):
try:
- kclient = op_utils.get_keystone_client()
- keystone_url = kclient.service_catalog.url_for(
- service_type='identity', endpoint_type='publicURL')
- neutron_url = kclient.service_catalog.url_for(
- service_type='network', endpoint_type='publicURL')
+ keystone_url = op_utils.get_endpoint(service_type='identity')
+ neutron_url = op_utils.get_endpoint(service_type='network')
kwargs = {'keystoneip': urlparse.urlparse(keystone_url).hostname}
kwargs['neutronip'] = urlparse.urlparse(neutron_url).hostname
kwargs['odlip'] = kwargs['neutronip']
@@ -158,29 +151,23 @@ class ODLTests(testcase_base.TestcaseBase):
kwargs['odlrestconfport'] = '8181'
kwargs['odlusername'] = 'admin'
kwargs['odlpassword'] = 'admin'
-
- installer_type = ft_constants.CI_INSTALLER_TYPE
- kwargs['osusername'] = ft_constants.OS_USERNAME
- kwargs['ostenantname'] = ft_constants.OS_TENANT_NAME
- kwargs['ospassword'] = ft_constants.OS_PASSWORD
-
+ installer_type = None
+ if 'INSTALLER_TYPE' in os.environ:
+ installer_type = os.environ['INSTALLER_TYPE']
+ kwargs['osusername'] = os.environ['OS_USERNAME']
+ kwargs['ostenantname'] = os.environ['OS_TENANT_NAME']
+ kwargs['ospassword'] = os.environ['OS_PASSWORD']
if installer_type == 'fuel':
kwargs['odlwebport'] = '8282'
elif installer_type == 'apex':
- if ft_constants.SDN_CONTROLLER_IP is None:
- return self.EX_RUN_ERROR
- kwargs['odlip'] = ft_constants.SDN_CONTROLLER_IP
+ kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
kwargs['odlwebport'] = '8181'
elif installer_type == 'joid':
- if ft_constants.SDN_CONTROLLER is None:
- return self.EX_RUN_ERROR
- kwargs['odlip'] = ft_constants.SDN_CONTROLLER
+ kwargs['odlip'] = os.environ['SDN_CONTROLLER']
elif installer_type == 'compass':
kwargs['odlwebport'] = '8181'
else:
- if ft_constants.SDN_CONTROLLER_IP is None:
- return self.EX_RUN_ERROR
- kwargs['odlip'] = ft_constants.SDN_CONTROLLER_IP
+ kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
except KeyError as e:
self.logger.error("Cannot run ODL testcases. "
"Please check env var: "
diff --git a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
index 8ca32e9bb..349b42a88 100644
--- a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
+++ b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
@@ -1,4 +1,4 @@
-import os
+import os
import re
import time
import json
diff --git a/functest/opnfv_tests/vnf/rnc/parser.py b/functest/opnfv_tests/vnf/rnc/parser.py
index a50d4f1eb..1cff72209 100644
--- a/functest/opnfv_tests/vnf/rnc/parser.py
+++ b/functest/opnfv_tests/vnf/rnc/parser.py
@@ -14,66 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import os
-import sys
-import time
-import argparse
+import functest.core.feature_base as base
-import functest.core.testcase_base as testcase_base
-import functest.utils.functest_constants as ft_constants
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-
-
-class Parser(testcase_base.TestcaseBase):
+class Parser(base.FeatureBase):
def __init__(self):
- super(Parser, self).__init__()
- self.project_name = "parser"
- self.case_name = "parser-basics"
- self.logger = ft_logger.Logger("parser").getLogger()
- self.log_file = os.path.join(
- ft_constants.FUNCTEST_RESULTS_DIR, "parser.log")
-
- def run(self, **kwargs):
- cmd = 'cd %s/tests && ./functest_run.sh' % ft_constants.PARSER_REPO_DIR
-
- self.start_time = time.time()
- ret = ft_utils.execute_command(cmd,
- info=True,
- output_file=self.log_file)
- self.stop_time = time.time()
-
- self.criteria, details = ft_utils.check_test_result(self.project_name,
- ret,
- self.start_time,
- self.stop_time)
-
- ft_utils.logger_test_results(self.project_name,
- self.case_name,
- self.criteria,
- details)
-
- return ret
-
- @staticmethod
- def get_conf(parameter):
- return ft_utils.get_functest_config(parameter)
-
-
-if __name__ == '__main__':
- args_parser = argparse.ArgumentParser()
- args_parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
- args = vars(args_parser.parse_args())
- parser = Parser()
- try:
- result = parser.run(**args)
- if result != testcase_base.TestcaseBase.EX_OK:
- sys.exit(result)
- if args['report']:
- sys.exit(parser.push_to_db())
- except Exception:
- sys.exit(testcase_base.TestcaseBase.EX_RUN_ERROR)
+ super(Parser, self).__init__(project='parser',
+ case='parser-basics',
+ repo='dir_repo_parser')
+ self.cmd = 'cd %s/tests && ./functest_run.sh' % self.repo
diff --git a/functest/tests/unit/core/test_testcase_base.py b/functest/tests/unit/core/test_testcase_base.py
index fe7b0d054..b7c81d87c 100644
--- a/functest/tests/unit/core/test_testcase_base.py
+++ b/functest/tests/unit/core/test_testcase_base.py
@@ -11,7 +11,7 @@ import logging
import mock
import unittest
-import functest.core.testcase_base as testcase_base
+from functest.core import testcase_base
class TestcaseBaseTesting(unittest.TestCase):
@@ -24,7 +24,7 @@ class TestcaseBaseTesting(unittest.TestCase):
self.test.case_name = "base"
self.test.start_time = "1"
self.test.stop_time = "2"
- self.test.criteria = "100"
+ self.test.criteria = "PASS"
self.test.details = {"Hello": "World"}
def test_run_unimplemented(self):
@@ -82,6 +82,21 @@ class TestcaseBaseTesting(unittest.TestCase):
self.test.project, self.test.case_name, self.test.start_time,
self.test.stop_time, self.test.criteria, self.test.details)
+ def test_check_criteria_missing(self):
+ self.test.criteria = None
+ self.assertEqual(self.test.check_criteria(),
+ testcase_base.TestcaseBase.EX_TESTCASE_FAILED)
+
+ def test_check_criteria_failed(self):
+ self.test.criteria = 'FAILED'
+ self.assertEqual(self.test.check_criteria(),
+ testcase_base.TestcaseBase.EX_TESTCASE_FAILED)
+
+ def test_check_criteria_pass(self):
+ self.test.criteria = 'PASS'
+ self.assertEqual(self.test.check_criteria(),
+ testcase_base.TestcaseBase.EX_OK)
+
if __name__ == "__main__":
unittest.main(verbosity=2)
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index ef18016bf..d8c7f84ec 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -13,11 +13,12 @@ import mock
import os
import unittest
+from keystoneauth1.exceptions import auth_plugins
from robot.errors import RobotError
+from robot.result import testcase
-import functest.core.testcase_base as testcase_base
+from functest.core import testcase_base
from functest.opnfv_tests.sdn.odl import odl
-from functest.utils import functest_constants as ft_constants
class ODLTesting(unittest.TestCase):
@@ -36,11 +37,41 @@ class ODLTesting(unittest.TestCase):
_odl_password = "admin"
def setUp(self):
- ft_constants.OS_USERNAME = self._os_username
- ft_constants.OS_PASSWORD = self._os_password
- ft_constants.OS_TENANT_NAME = self._os_tenantname
+ for var in ("INSTALLER_TYPE", "SDN_CONTROLLER", "SDN_CONTROLLER_IP"):
+ if var in os.environ:
+ del os.environ[var]
+ os.environ["OS_USERNAME"] = self._os_username
+ os.environ["OS_PASSWORD"] = self._os_password
+ os.environ["OS_TENANT_NAME"] = self._os_tenantname
self.test = odl.ODLTests()
+ def test_empty_visitor(self):
+ visitor = odl.ODLResultVisitor()
+ self.assertFalse(visitor.get_data())
+
+ def test_visitor(self):
+ visitor = odl.ODLResultVisitor()
+ data = {'name': 'foo',
+ 'parent': 'bar',
+ 'status': 'PASS',
+ 'starttime': "20161216 16:00:00.000",
+ 'endtime': "20161216 16:00:01.000",
+ 'elapsedtime': 1000,
+ 'text': 'Hello, World!',
+ 'critical': True}
+ test = testcase.TestCase(name=data['name'],
+ status=data['status'],
+ message=data['text'],
+ starttime=data['starttime'],
+ endtime=data['endtime'])
+ test.parent = mock.Mock()
+ config = {'name': data['parent'],
+ 'criticality.test_is_critical.return_value': data[
+ 'critical']}
+ test.parent.configure_mock(**config)
+ visitor.visit_test(test)
+ self.assertEqual(visitor.get_data(), [data])
+
@mock.patch('fileinput.input', side_effect=Exception())
def test_set_robotframework_vars_failed(self, *args):
self.assertFalse(self.test.set_robotframework_vars())
@@ -59,14 +90,6 @@ class ODLTesting(unittest.TestCase):
else:
return None
- @classmethod
- def _get_fake_keystone_client(cls):
- kclient = mock.Mock()
- kclient.service_catalog = mock.Mock()
- kclient.service_catalog.url_for = mock.Mock(
- side_effect=cls._fake_url_for)
- return kclient
-
def _get_main_kwargs(self, key=None):
kwargs = {'odlusername': self._odl_username,
'odlpassword': self._odl_password,
@@ -85,9 +108,9 @@ class ODLTesting(unittest.TestCase):
def _test_main(self, status, *args):
kwargs = self._get_main_kwargs()
self.assertEqual(self.test.main(**kwargs), status)
- odl_res_dir = odl.ODLTests.res_dir
if len(args) > 0:
- args[0].assert_called_once_with(odl_res_dir)
+ args[0].assert_called_once_with(
+ odl.ODLTests.res_dir)
if len(args) > 1:
variable = ['KEYSTONE:{}'.format(self._keystone_ip),
'NEUTRON:{}'.format(self._neutron_ip),
@@ -97,18 +120,17 @@ class ODLTesting(unittest.TestCase):
'ODL_SYSTEM_IP:{}'.format(self._sdn_controller_ip),
'PORT:{}'.format(self._odl_webport),
'RESTCONFPORT:{}'.format(self._odl_restconfport)]
- output_file = os.path.join(odl_res_dir, 'output.xml')
args[1].assert_called_once_with(
odl.ODLTests.basic_suite_dir,
odl.ODLTests.neutron_suite_dir,
log='NONE',
- output=output_file,
+ output=os.path.join(odl.ODLTests.res_dir, 'output.xml'),
report='NONE',
stdout=mock.ANY,
variable=variable)
if len(args) > 2:
- stdout_file = os.path.join(odl_res_dir, 'stdout.txt')
- args[2].assert_called_with(stdout_file)
+ args[2].assert_called_with(
+ os.path.join(odl.ODLTests.res_dir, 'stdout.txt'))
def _test_main_missing_keyword(self, key):
kwargs = self._get_main_kwargs(key)
@@ -200,8 +222,7 @@ class ODLTesting(unittest.TestCase):
def test_main(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(self.test, 'parse_results',
- return_value="PASS"):
+ mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase_base.TestcaseBase.EX_OK, *args)
@mock.patch('os.remove')
@@ -210,8 +231,7 @@ class ODLTesting(unittest.TestCase):
def test_main_makedirs_oserror17(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(self.test, 'parse_results',
- return_value="PASS"):
+ mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase_base.TestcaseBase.EX_OK, *args)
@mock.patch('os.remove')
@@ -220,8 +240,7 @@ class ODLTesting(unittest.TestCase):
def test_main_testcases_in_failure(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(self.test, 'parse_results',
- return_value="PASS"):
+ mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase_base.TestcaseBase.EX_OK, *args)
@mock.patch('os.remove', side_effect=OSError)
@@ -230,25 +249,20 @@ class ODLTesting(unittest.TestCase):
def test_main_remove_oserror(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
- mock.patch.object(self.test, 'parse_results',
- return_value="PASS"):
+ mock.patch.object(self.test, 'parse_results'):
self._test_main(testcase_base.TestcaseBase.EX_OK, *args)
def _test_run_missing_env_var(self, var):
- if var == 'OS_USERNAME':
- ft_constants.OS_USERNAME = None
- elif var == 'OS_PASSWORD':
- ft_constants.OS_PASSWORD = None
- elif var == 'OS_TENANT_NAME':
- ft_constants.OS_TENANT_NAME = None
-
- self.assertEqual(self.test.run(),
- testcase_base.TestcaseBase.EX_RUN_ERROR)
+ with mock.patch('functest.utils.openstack_utils.get_endpoint',
+ side_effect=self._fake_url_for):
+ del os.environ[var]
+ self.assertEqual(self.test.run(),
+ testcase_base.TestcaseBase.EX_RUN_ERROR)
def _test_run(self, status=testcase_base.TestcaseBase.EX_OK,
exception=None, odlip="127.0.0.3", odlwebport="8080"):
- with mock.patch('functest.utils.openstack_utils.get_keystone_client',
- return_value=self._get_fake_keystone_client()):
+ with mock.patch('functest.utils.openstack_utils.get_endpoint',
+ side_effect=self._fake_url_for):
if exception:
self.test.main = mock.Mock(side_effect=exception)
else:
@@ -262,6 +276,12 @@ class ODLTesting(unittest.TestCase):
ospassword=self._os_password, ostenantname=self._os_tenantname,
osusername=self._os_username)
+ def test_run_exception(self):
+ with mock.patch('functest.utils.openstack_utils.get_endpoint',
+ side_effect=auth_plugins.MissingAuthPlugin()):
+ self.assertEqual(self.test.run(),
+ testcase_base.TestcaseBase.EX_RUN_ERROR)
+
def test_run_missing_os_username(self):
self._test_run_missing_env_var("OS_USERNAME")
@@ -272,72 +292,64 @@ class ODLTesting(unittest.TestCase):
self._test_run_missing_env_var("OS_TENANT_NAME")
def test_run_main_false(self):
- ft_constants.CI_INSTALLER_TYPE = None
- ft_constants.SDN_CONTROLLER_IP = self._sdn_controller_ip
+ os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_run(testcase_base.TestcaseBase.EX_RUN_ERROR,
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_run_main_exception(self):
- ft_constants.CI_INSTALLER_TYPE = None
- ft_constants.SDN_CONTROLLER_IP = self._sdn_controller_ip
with self.assertRaises(Exception):
+ os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_run(status=testcase_base.TestcaseBase.EX_RUN_ERROR,
exception=Exception(),
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_run_missing_sdn_controller_ip(self):
- with mock.patch('functest.utils.openstack_utils.get_keystone_client',
- return_value=self._get_fake_keystone_client()):
- ft_constants.CI_INSTALLER_TYPE = None
- ft_constants.SDN_CONTROLLER_IP = None
+ with mock.patch('functest.utils.openstack_utils.get_endpoint',
+ side_effect=self._fake_url_for):
self.assertEqual(self.test.run(),
testcase_base.TestcaseBase.EX_RUN_ERROR)
def test_run_without_installer_type(self):
- ft_constants.SDN_CONTROLLER_IP = self._sdn_controller_ip
- ft_constants.CI_INSTALLER_TYPE = None
+ os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_run(testcase_base.TestcaseBase.EX_OK,
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_run_fuel(self):
- ft_constants.CI_INSTALLER_TYPE = "fuel"
+ os.environ["INSTALLER_TYPE"] = "fuel"
self._test_run(testcase_base.TestcaseBase.EX_OK,
odlip=self._neutron_ip, odlwebport='8282')
def test_run_apex_missing_sdn_controller_ip(self):
- with mock.patch('functest.utils.openstack_utils.get_keystone_client',
- return_value=self._get_fake_keystone_client()):
- ft_constants.CI_INSTALLER_TYPE = "apex"
- ft_constants.SDN_CONTROLLER_IP = None
+ with mock.patch('functest.utils.openstack_utils.get_endpoint',
+ side_effect=self._fake_url_for):
+ os.environ["INSTALLER_TYPE"] = "apex"
self.assertEqual(self.test.run(),
testcase_base.TestcaseBase.EX_RUN_ERROR)
def test_run_apex(self):
- ft_constants.SDN_CONTROLLER_IP = self._sdn_controller_ip
- ft_constants.CI_INSTALLER_TYPE = "apex"
+ os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
+ os.environ["INSTALLER_TYPE"] = "apex"
self._test_run(testcase_base.TestcaseBase.EX_OK,
odlip=self._sdn_controller_ip, odlwebport='8181')
def test_run_joid_missing_sdn_controller(self):
- with mock.patch('functest.utils.openstack_utils.get_keystone_client',
- return_value=self._get_fake_keystone_client()):
- ft_constants.CI_INSTALLER_TYPE = "joid"
- ft_constants.SDN_CONTROLLER = None
+ with mock.patch('functest.utils.openstack_utils.get_endpoint',
+ side_effect=self._fake_url_for):
+ os.environ["INSTALLER_TYPE"] = "joid"
self.assertEqual(self.test.run(),
testcase_base.TestcaseBase.EX_RUN_ERROR)
def test_run_joid(self):
- ft_constants.SDN_CONTROLLER = self._sdn_controller_ip
- ft_constants.CI_INSTALLER_TYPE = "joid"
+ os.environ["SDN_CONTROLLER"] = self._sdn_controller_ip
+ os.environ["INSTALLER_TYPE"] = "joid"
self._test_run(testcase_base.TestcaseBase.EX_OK,
- odlip=self._sdn_controller_ip,
- odlwebport=self._odl_webport)
+ odlip=self._sdn_controller_ip, odlwebport='8080')
def test_run_compass(self, *args):
- ft_constants.CI_INSTALLER_TYPE = "compass"
+ os.environ["INSTALLER_TYPE"] = "compass"
self._test_run(testcase_base.TestcaseBase.EX_OK,
odlip=self._neutron_ip, odlwebport='8181')
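
The ODL unit tests above drop the fake keystone client and instead stub functest.utils.openstack_utils.get_endpoint, driving installer detection through environment variables. A minimal sketch of that stubbing pattern, assuming the functest package and the mock library are importable in the test environment (names below are illustrative, not part of the patch):

    import unittest

    import mock

    from functest.utils import openstack_utils


    class StubEndpointExample(unittest.TestCase):
        """Illustrative sketch of the stubbing pattern used in test_odl.py."""

        def _fake_url_for(self, service_type, *args, **kwargs):
            # Every catalog lookup resolves to a fixed neutron-style endpoint.
            return "http://127.0.0.2:9696"

        def test_endpoint_is_stubbed(self):
            with mock.patch('functest.utils.openstack_utils.get_endpoint',
                            side_effect=self._fake_url_for):
                self.assertEqual(openstack_utils.get_endpoint('network'),
                                 "http://127.0.0.2:9696")


    if __name__ == '__main__':
        unittest.main()
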
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index a25967b69..b1e4d3cdb 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -21,6 +21,9 @@ import requests
import yaml
from git import Repo
+import time
+import functools
+
import functest.utils.functest_logger as ft_logger
logger = ft_logger.Logger("functest_utils").getLogger()
@@ -418,25 +421,8 @@ def merge_dicts(dict1, dict2):
yield (k, dict2[k])
-def check_test_result(test_name, ret, start_time, stop_time):
- def get_criteria_value():
- return get_criteria_by_test(test_name).split('==')[1].strip()
-
- status = 'FAIL'
- if str(ret) == get_criteria_value():
- status = 'PASS'
-
- details = {
- 'timestart': start_time,
- 'duration': round(stop_time - start_time, 1),
- 'status': status,
- }
-
- return status, details
-
-
def get_testcases_file_dir():
- return "/home/opnfv/repos/functest/functest/ci/testcases.yaml"
+ return get_functest_config('general.functest.testcases_yaml')
def get_functest_yaml():
@@ -448,3 +434,17 @@ def get_functest_yaml():
def print_separator():
logger.info("==============================================")
+
+
+def timethis(func):
+ """Measure the time it takes for a function to complete"""
+ @functools.wraps(func)
+ def timed(*args, **kwargs):
+ ts = time.time()
+ result = func(*args, **kwargs)
+ te = time.time()
+ elapsed = '{0}'.format(te - ts)
+ logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
+ f=func.__name__, a=args, kw=kwargs, t=elapsed))
+ return result, elapsed
+ return timed
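
One point worth noting about the new timethis helper: the wrapped function no longer returns its own result directly but a (result, elapsed) tuple, so callers have to unpack it. A short usage sketch (the decorated function is made up for illustration):

    import time

    from functest.utils.functest_utils import timethis


    @timethis
    def fetch_flavors():
        # Stand-in for a slow OpenStack call.
        time.sleep(0.2)
        return ['m1.tiny', 'm1.small']


    flavors, elapsed = fetch_flavors()   # note the tuple: (result, elapsed)
    print(flavors, elapsed)
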
diff --git a/functest/utils/openstack_clean.py b/functest/utils/openstack_clean.py
index 949eee90f..c08568bde 100755
--- a/functest/utils/openstack_clean.py
+++ b/functest/utils/openstack_clean.py
@@ -9,6 +9,8 @@
# - Neutron networks, subnets and ports
# - Routers
# - Users and tenants
+# - Tacker VNFDs and VNFs
+# - Tacker SFCs and SFC classifiers
#
# Author:
# jose.lausuch@ericsson.com
@@ -105,7 +107,7 @@ def remove_volumes(cinder_client, default_volumes):
for volume in volumes:
volume_id = getattr(volume, 'id')
- volume_name = getattr(volume, 'display_name')
+ volume_name = getattr(volume, 'name')
logger.debug("'%s', ID=%s " % (volume_name, volume_id))
if (volume_id not in default_volumes and
volume_name not in default_volumes.values()):
@@ -393,7 +395,7 @@ def main():
default_security_groups = snapshot_yaml.get('secgroups')
default_floatingips = snapshot_yaml.get('floatingips')
default_users = snapshot_yaml.get('users')
- default_tenants = snapshot_yaml.get('tenants')
+ # default_tenants = snapshot_yaml.get('tenants')
if not os_utils.check_credentials():
logger.error("Please source the openrc credentials and run "
@@ -414,8 +416,10 @@ def main():
separator()
remove_users(keystone_client, default_users)
separator()
- remove_tenants(keystone_client, default_tenants)
- separator()
+ # TODO (Helen) tenant does not exist in V3
+ # need to figure out another general verification point
+ # remove_tenants(keystone_client, default_tenants)
+ # separator()
if __name__ == '__main__':
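
The switch from display_name to name tracks the cinder v2 API, where the v1 display_name attribute was renamed. A defensive lookup that copes with either attribute (an illustrative sketch, not part of the patch) could look like:

    def volume_label(volume):
        # cinder v1 exposes 'display_name'; v2 renames it to 'name'.
        return getattr(volume, 'name', None) or getattr(volume, 'display_name', None)
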
diff --git a/functest/utils/openstack_snapshot.py b/functest/utils/openstack_snapshot.py
index 4be1af443..5b50ffa50 100755
--- a/functest/utils/openstack_snapshot.py
+++ b/functest/utils/openstack_snapshot.py
@@ -62,7 +62,7 @@ def get_volumes(cinder_client):
volumes = os_utils.get_volumes(cinder_client)
if volumes is not None:
for volume in volumes:
- dic_volumes.update({volume.id: volume.display_name})
+ dic_volumes.update({volume.id: volume.name})
return {'volumes': dic_volumes}
@@ -149,7 +149,7 @@ def main():
snapshot.update(get_security_groups(neutron_client))
snapshot.update(get_floatinips(nova_client))
snapshot.update(get_users(keystone_client))
- snapshot.update(get_tenants(keystone_client))
+ # snapshot.update(get_tenants(keystone_client))
with open(OS_SNAPSHOT_FILE, 'w+') as yaml_file:
yaml_file.write(yaml.safe_dump(snapshot, default_flow_style=False))
diff --git a/functest/utils/openstack_tacker.py b/functest/utils/openstack_tacker.py
index 6ab056683..f17b421e8 100644..100755
--- a/functest/utils/openstack_tacker.py
+++ b/functest/utils/openstack_tacker.py
@@ -21,7 +21,7 @@ logger = ft_logger.Logger("tacker_utils").getLogger()
def get_tacker_client():
- creds_tacker = os_utils.get_credentials('tacker')
+ creds_tacker = os_utils.get_credentials()
return tackerclient.Client(**creds_tacker)
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
index df6fb5d1a..ec784121b 100755
--- a/functest/utils/openstack_utils.py
+++ b/functest/utils/openstack_utils.py
@@ -14,16 +14,21 @@ import subprocess
import sys
import time
+from keystoneauth1 import loading
+from keystoneauth1 import session
from cinderclient import client as cinderclient
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
from glanceclient import client as glanceclient
-from keystoneclient.v2_0 import client as keystoneclient
-from neutronclient.v2_0 import client as neutronclient
from novaclient import client as novaclient
+from keystoneclient import client as keystoneclient
+from neutronclient.neutron import client as neutronclient
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
logger = ft_logger.Logger("openstack_utils").getLogger()
+DEFAULT_API_VERSION = '2'
+
# *********************************************
# CREDENTIALS
@@ -37,68 +42,63 @@ class MissingEnvVar(Exception):
return str.format("Please set the mandatory env var: {}", self.var)
+def is_keystone_v3():
+ keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
+ if (keystone_api_version is None or
+ keystone_api_version == '2'):
+ return False
+ else:
+ return True
+
+
+def get_rc_env_vars():
+ keystone_v3 = is_keystone_v3()
+ env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
+ if keystone_v3 is False:
+ env_vars.extend(['OS_TENANT_NAME'])
+ else:
+ env_vars.extend(['OS_PROJECT_NAME',
+ 'OS_USER_DOMAIN_NAME',
+ 'OS_PROJECT_DOMAIN_NAME'])
+ return env_vars
+
+
def check_credentials():
"""
Check if the OpenStack credentials (openrc) are sourced
"""
- env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME']
+ env_vars = get_rc_env_vars()
return all(map(lambda v: v in os.environ and os.environ[v], env_vars))
-def get_credentials(service):
- """Returns a creds dictionary filled with the following keys:
- * username
- * password/api_key (depending on the service)
- * tenant_name/project_id (depending on the service)
- * auth_url
- :param service: a string indicating the name of the service
- requesting the credentials.
+def get_env_cred_dict():
+ env_cred_dict = {
+ 'OS_USERNAME': 'username',
+ 'OS_PASSWORD': 'password',
+ 'OS_AUTH_URL': 'auth_url',
+ 'OS_TENANT_NAME': 'tenant_name',
+ 'OS_USER_DOMAIN_NAME': 'user_domain_name',
+ 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
+ 'OS_PROJECT_NAME': 'project_name',
+ 'OS_ENDPOINT_TYPE': 'endpoint_type',
+ 'OS_REGION_NAME': 'region_name'
+ }
+ return env_cred_dict
+
+
+def get_credentials():
+ """Returns a creds dictionary filled with parsed from env
"""
creds = {}
+ env_vars = get_rc_env_vars()
+ env_cred_dict = get_env_cred_dict()
- # Check that the env vars exists:
- envvars = ('OS_USERNAME', 'OS_PASSWORD', 'OS_AUTH_URL', 'OS_TENANT_NAME')
- for envvar in envvars:
+ for envvar in env_vars:
if os.getenv(envvar) is None:
raise MissingEnvVar(envvar)
-
- # Unfortunately, each of the OpenStack client will request slightly
- # different entries in their credentials dict.
- if service.lower() in ("nova", "cinder"):
- password = "api_key"
- tenant = "project_id"
- else:
- password = "password"
- tenant = "tenant_name"
-
- # The most common way to pass these info to the script is to do it through
- # environment variables.
- creds.update({
- "username": os.environ.get("OS_USERNAME"),
- password: os.environ.get("OS_PASSWORD"),
- "auth_url": os.environ.get("OS_AUTH_URL"),
- tenant: os.environ.get("OS_TENANT_NAME")
- })
- if os.getenv('OS_ENDPOINT_TYPE') is not None:
- creds.update({
- "endpoint_type": os.environ.get("OS_ENDPOINT_TYPE")
- })
- if os.getenv('OS_REGION_NAME') is not None:
- creds.update({
- "region_name": os.environ.get("OS_REGION_NAME")
- })
- cacert = os.environ.get("OS_CACERT")
- if cacert is not None:
- # each openstack client uses differnt kwargs for this
- creds.update({"cacert": cacert,
- "ca_cert": cacert,
- "https_ca_cert": cacert,
- "https_cacert": cacert,
- "ca_file": cacert})
- creds.update({"insecure": "True", "https_insecure": "True"})
- if not os.path.isfile(cacert):
- logger.info("WARNING: The 'OS_CACERT' environment variable is "
- "set to %s but the file does not exist." % cacert)
+ else:
+ creds_key = env_cred_dict.get(envvar)
+ creds.update({creds_key: os.getenv(envvar)})
return creds
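
With the service argument gone, get_credentials() now simply maps whatever openrc variables the detected identity version requires onto keystoneauth option names. Roughly, for a keystone v3 openrc (all values below are placeholders):

    import os

    from functest.utils import openstack_utils

    # Simulate a sourced keystone v3 openrc (placeholder values).
    os.environ.update({
        'OS_IDENTITY_API_VERSION': '3',
        'OS_AUTH_URL': 'http://192.0.2.10:5000/v3',
        'OS_USERNAME': 'admin',
        'OS_PASSWORD': 'secret',
        'OS_PROJECT_NAME': 'admin',
        'OS_USER_DOMAIN_NAME': 'Default',
        'OS_PROJECT_DOMAIN_NAME': 'Default',
    })

    # Expected shape: {'auth_url': ..., 'username': ..., 'password': ...,
    #                  'project_name': ..., 'user_domain_name': ...,
    #                  'project_domain_name': ...}
    print(openstack_utils.get_credentials())
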
@@ -112,59 +112,121 @@ def source_credentials(rc_file):
def get_credentials_for_rally():
- creds = get_credentials("keystone")
- admin_keys = ['username', 'tenant_name', 'password']
- endpoint_types = [('internalURL', 'internal'),
- ('publicURL', 'public'), ('adminURL', 'admin')]
- if 'endpoint_type' in creds.keys():
- for k, v in endpoint_types:
- if creds['endpoint_type'] == k:
- creds['endpoint_type'] = v
+ creds = get_credentials()
+ env_cred_dict = get_env_cred_dict()
rally_conf = {"type": "ExistingCloud", "admin": {}}
for key in creds:
- if key in admin_keys:
- rally_conf['admin'][key] = creds[key]
- else:
+ if key == 'auth_url':
rally_conf[key] = creds[key]
+ else:
+ rally_conf['admin'][key] = creds[key]
+
+ endpoint_types = [('internalURL', 'internal'),
+ ('publicURL', 'public'), ('adminURL', 'admin')]
+
+ endpoint_type = os.getenv('OS_ENDPOINT_TYPE')
+ if endpoint_type is not None:
+ cred_key = env_cred_dict.get('OS_ENDPOINT_TYPE')
+ for k, v in endpoint_types:
+ if endpoint_type == k:
+ rally_conf[cred_key] = v
+
+ region_name = os.getenv('OS_REGION_NAME')
+ if region_name is not None:
+ cred_key = env_cred_dict.get('OS_REGION_NAME')
+ rally_conf[cred_key] = region_name
return rally_conf
+def get_session_auth():
+ loader = loading.get_plugin_loader('password')
+ creds = get_credentials()
+ auth = loader.load_from_options(**creds)
+ return auth
+
+
+def get_endpoint(service_type, endpoint_type='publicURL'):
+ auth = get_session_auth()
+ return get_session().get_endpoint(auth=auth,
+ service_type=service_type,
+ endpoint_type=endpoint_type)
+
+
+def get_session():
+ auth = get_session_auth()
+ return session.Session(auth=auth)
+
+
# *********************************************
# CLIENTS
# *********************************************
+def get_keystone_client_version():
+ api_version = os.getenv('OS_IDENTITY_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_IDENTITY_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_API_VERSION
+
+
def get_keystone_client():
- creds_keystone = get_credentials("keystone")
- return keystoneclient.Client(**creds_keystone)
+ sess = get_session()
+ return keystoneclient.Client(get_keystone_client_version(), session=sess)
+
+
+def get_nova_client_version():
+ api_version = os.getenv('OS_COMPUTE_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_COMPUTE_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def get_nova_client():
- creds_nova = get_credentials("nova")
- return novaclient.Client('2', **creds_nova)
+ sess = get_session()
+ return novaclient.Client(get_nova_client_version(), session=sess)
+
+
+def get_cinder_client_version():
+ api_version = os.getenv('OS_VOLUME_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_VOLUME_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def get_cinder_client():
- creds_cinder = get_credentials("cinder")
- creds_cinder.update({
- "service_type": "volume"
- })
- return cinderclient.Client('2', **creds_cinder)
+ sess = get_session()
+ return cinderclient.Client(get_cinder_client_version(), session=sess)
+
+
+def get_neutron_client_version():
+ api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_NETWORK_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def get_neutron_client():
- creds_neutron = get_credentials("neutron")
- return neutronclient.Client(**creds_neutron)
+ sess = get_session()
+ return neutronclient.Client(get_neutron_client_version(), session=sess)
+
+
+def get_glance_client_version():
+ api_version = os.getenv('OS_IMAGE_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def get_glance_client():
- keystone_client = get_keystone_client()
- glance_endpoint_type = 'publicURL'
- os_endpoint_type = os.getenv('OS_ENDPOINT_TYPE')
- if os_endpoint_type is not None:
- glance_endpoint_type = os_endpoint_type
- glance_endpoint = keystone_client.service_catalog.url_for(
- service_type='image', endpoint_type=glance_endpoint_type)
- return glanceclient.Client(1, glance_endpoint,
- token=keystone_client.auth_token)
+ sess = get_session()
+ return glanceclient.Client(get_glance_client_version(), session=sess)
# *********************************************
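
All clients now share a single keystoneauth session instead of per-service credential dicts, and the *_client_version helpers only consult the optional OS_*_API_VERSION overrides. A condensed sketch of the same pattern outside functest, assuming the usual OS_* variables are sourced:

    import os

    from keystoneauth1 import loading, session
    from novaclient import client as novaclient


    def make_session():
        loader = loading.get_plugin_loader('password')
        auth = loader.load_from_options(
            auth_url=os.environ['OS_AUTH_URL'],
            username=os.environ['OS_USERNAME'],
            password=os.environ['OS_PASSWORD'],
            project_name=os.environ.get('OS_PROJECT_NAME',
                                        os.environ.get('OS_TENANT_NAME')),
            user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME'),
            project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME'))
        return session.Session(auth=auth)


    nova = novaclient.Client(os.environ.get('OS_COMPUTE_API_VERSION', '2'),
                             session=make_session())
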
@@ -218,6 +280,45 @@ def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
return id
+def get_aggregates(nova_client):
+ try:
+ aggregates = nova_client.aggregates.list()
+ return aggregates
+ except Exception, e:
+ logger.error("Error [get_aggregates(nova_client)]: %s" % e)
+ return None
+
+
+def get_aggregate_id(nova_client, aggregate_name):
+ try:
+ aggregates = get_aggregates(nova_client)
+ _id = [ag.id for ag in aggregates if ag.name == aggregate_name][0]
+ return _id
+ except Exception, e:
+ logger.error("Error [get_aggregate_id(nova_client, %s)]:"
+ " %s" % (aggregate_name, e))
+ return None
+
+
+def get_availability_zones(nova_client):
+ try:
+ availability_zones = nova_client.availability_zones.list()
+ return availability_zones
+ except Exception, e:
+ logger.error("Error [get_availability_zones(nova_client)]: %s" % e)
+ return None
+
+
+def get_availability_zone_names(nova_client):
+ try:
+ az_names = [az.zoneName for az in get_availability_zones(nova_client)]
+ return az_names
+ except Exception, e:
+ logger.error("Error [get_availability_zone_names(nova_client)]:"
+ " %s" % e)
+ return None
+
+
def create_flavor(nova_client, flavor_name, ram, disk, vcpus, public=True):
try:
flavor = nova_client.flavors.create(
@@ -281,6 +382,40 @@ def get_hypervisors(nova_client):
return None
+def create_aggregate(nova_client, aggregate_name, av_zone):
+ try:
+ nova_client.aggregates.create(aggregate_name, av_zone)
+ return True
+ except Exception, e:
+ logger.error("Error [create_aggregate(nova_client, %s, %s)]: %s"
+ % (aggregate_name, av_zone, e))
+ return None
+
+
+def add_host_to_aggregate(nova_client, aggregate_name, compute_host):
+ try:
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ nova_client.aggregates.add_host(aggregate_id, compute_host)
+ return True
+ except Exception, e:
+ logger.error("Error [add_host_to_aggregate(nova_client, %s, %s)]: %s"
+ % (aggregate_name, compute_host, e))
+ return None
+
+
+def create_aggregate_with_host(
+ nova_client, aggregate_name, av_zone, compute_host):
+ try:
+ create_aggregate(nova_client, aggregate_name, av_zone)
+ add_host_to_aggregate(nova_client, aggregate_name, compute_host)
+ return True
+ except Exception, e:
+ logger.error("Error [create_aggregate_with_host("
+ "nova_client, %s, %s, %s)]: %s"
+ % (aggregate_name, av_zone, compute_host, e))
+ return None
+
+
def create_instance(flavor_name,
image_id,
network_id,
@@ -373,13 +508,13 @@ def create_floating_ip(neutron_client):
return {'fip_addr': fip_addr, 'fip_id': fip_id}
-def add_floating_ip(nova_client, server_id, floatingip_id):
+def add_floating_ip(nova_client, server_id, floatingip_addr):
try:
- nova_client.servers.add_floating_ip(server_id, floatingip_id)
+ nova_client.servers.add_floating_ip(server_id, floatingip_addr)
return True
except Exception, e:
logger.error("Error [add_floating_ip(nova_client, '%s', '%s')]: %s"
- % (server_id, floatingip_id, e))
+ % (server_id, floatingip_addr, e))
return False
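
add_floating_ip now expects the floating IP address itself (the fip_addr returned by create_floating_ip) rather than the neutron floating-IP id. A short sketch against a running cloud (the server id is a placeholder):

    from functest.utils import openstack_utils as os_utils

    nova = os_utils.get_nova_client()
    neutron = os_utils.get_neutron_client()

    server_id = 'REPLACE-WITH-SERVER-UUID'   # placeholder

    fip = os_utils.create_floating_ip(neutron)   # {'fip_addr': ..., 'fip_id': ...}
    os_utils.add_floating_ip(nova, server_id, fip['fip_addr'])   # address, not id
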
@@ -403,6 +538,36 @@ def delete_floating_ip(nova_client, floatingip_id):
return False
+def remove_host_from_aggregate(nova_client, aggregate_name, compute_host):
+ try:
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ nova_client.aggregates.remove_host(aggregate_id, compute_host)
+ return True
+ except Exception, e:
+ logger.error("Error [remove_host_from_aggregate(nova_client, %s, %s)]:"
+ " %s" % (aggregate_name, compute_host, e))
+ return False
+
+
+def remove_hosts_from_aggregate(nova_client, aggregate_name):
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ hosts = nova_client.aggregates.get(aggregate_id).hosts
+ assert(
+ all(remove_host_from_aggregate(nova_client, aggregate_name, host)
+ for host in hosts))
+
+
+def delete_aggregate(nova_client, aggregate_name):
+ try:
+ remove_hosts_from_aggregate(nova_client, aggregate_name)
+ nova_client.aggregates.delete(aggregate_name)
+ return True
+ except Exception, e:
+ logger.error("Error [delete_aggregate(nova_client, %s)]: %s"
+ % (aggregate_name, e))
+ return False
+
+
# *********************************************
# NEUTRON
# *********************************************
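
The new aggregate helpers wrap the standard novaclient aggregate calls. A typical create/use/teardown cycle with them, assuming a deployed cloud (aggregate, zone and host names below are examples only):

    from functest.utils import openstack_utils as os_utils

    nova = os_utils.get_nova_client()

    # Put one compute node into its own availability zone.
    os_utils.create_aggregate_with_host(nova, 'agg-demo', 'az-demo',
                                        'compute-node-1')
    print(os_utils.get_availability_zone_names(nova))

    # Teardown: hosts are detached before the aggregate itself is deleted.
    os_utils.delete_aggregate(nova, 'agg-demo')
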
@@ -940,38 +1105,29 @@ def get_image_id(glance_client, image_name):
def create_glance_image(glance_client, image_name, file_path, disk="qcow2",
- container="bare", public=True):
+ container="bare", public="public"):
if not os.path.isfile(file_path):
logger.error("Error: file %s does not exist." % file_path)
return None
try:
image_id = get_image_id(glance_client, image_name)
if image_id != '':
- if logger:
- logger.info("Image %s already exists." % image_name)
+ logger.info("Image %s already exists." % image_name)
else:
- if logger:
- logger.info("Creating image '%s' from '%s'..." % (image_name,
- file_path))
- try:
- properties = ft_utils.get_functest_config(
- 'general.image_properties')
- except ValueError:
- # image properties are not configured
- # therefore don't add any properties
- properties = {}
- with open(file_path) as fimage:
- image = glance_client.images.create(name=image_name,
- is_public=public,
- disk_format=disk,
- container_format=container,
- properties=properties,
- data=fimage)
+ logger.info("Creating image '%s' from '%s'..." % (image_name,
+ file_path))
+
+ image = glance_client.images.create(name=image_name,
+ visibility=public,
+ disk_format=disk,
+ container_format=container)
image_id = image.id
+ with open(file_path) as image_data:
+ glance_client.images.upload(image_id, image_data)
return image_id
except Exception, e:
logger.error("Error [create_glance_image(glance_client, '%s', '%s', "
- "'%s')]: %s" % (image_name, file_path, str(public), e))
+ "'%s')]: %s" % (image_name, file_path, public, e))
return None
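
The image helper now follows the glance v2 flow: the image record is created first (with a visibility string instead of the v1 is_public boolean), then the bits are uploaded in a second call. A stripped-down usage sketch (image name and path are placeholders):

    from functest.utils import openstack_utils as os_utils

    glance = os_utils.get_glance_client()

    image_id = os_utils.create_glance_image(
        glance,
        image_name='cirros-demo',                # placeholder name
        file_path='/path/to/cirros-disk.img',    # placeholder path
        disk='qcow2',
        container='bare',
        public='public')   # glance v2 takes a visibility string, not a boolean
    print(image_id)
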
diff --git a/requirements.txt b/requirements.txt
index e4d2877c0..28b3fed3e 100644..100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,9 +12,9 @@ python-ceilometerclient==2.6.2
python-keystoneclient==3.5.0
python-neutronclient==6.0.0
python-congressclient==1.5.0
-virtualenv==1.11.4
+virtualenv==15.1.0
pexpect==4.0
-requests==2.8.0
+requests>=2.8.0
robotframework==2.9.1
robotframework-requests==0.3.8
robotframework-sshlibrary==2.1.1
diff --git a/run_unit_tests.sh b/run_unit_tests.sh
index ecd57d8ae..71d21c9db 100755
--- a/run_unit_tests.sh
+++ b/run_unit_tests.sh
@@ -53,6 +53,7 @@ export CONFIG_FUNCTEST_YAML=$(pwd)/functest/ci/config_functest.yaml
nosetests --with-xunit \
--with-coverage \
--cover-erase \
+ --cover-tests \
--cover-package=functest.core.testcase_base \
--cover-package=functest.opnfv_tests.sdn.odl.odl \
--cover-xml \
diff --git a/setup.py b/setup.py
index 58a9a4886..0c53ffbc9 100644
--- a/setup.py
+++ b/setup.py
@@ -1,25 +1,25 @@
-##############################################################################
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-from setuptools import setup, find_packages
-
-
-setup(
- name="functest",
- version="master",
- py_modules=['cli_base'],
- packages=find_packages(),
- include_package_data=True,
- package_data={
- },
- url="https://www.opnfv.org",
- entry_points={
- 'console_scripts': [
- 'functest=functest.cli.cli_base:cli'
- ],
- },
-)
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from setuptools import setup, find_packages
+
+
+setup(
+ name="functest",
+ version="master",
+ py_modules=['cli_base'],
+ packages=find_packages(),
+ include_package_data=True,
+ package_data={
+ },
+ url="https://www.opnfv.org",
+ entry_points={
+ 'console_scripts': [
+ 'functest=functest.cli.cli_base:cli'
+ ],
+ },
+)
diff --git a/test-requirements.txt b/test-requirements.txt
index d65e12f6e..8be8e2033 100644..100755
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -20,4 +20,4 @@ requests==2.8.0
robotframework==2.9.1
robotframework-requests==0.3.8
robotframework-sshlibrary==2.1.1
-virtualenv==1.11.4
\ No newline at end of file
+virtualenv==15.1.0
\ No newline at end of file