-rw-r--r-- build.sh | 21
-rw-r--r-- docker/Dockerfile | 41
-rw-r--r-- docker/Dockerfile.aarch64 | 128
-rw-r--r-- docker/Dockerfile.aarch64.patch | 62
-rw-r--r-- docker/add_images.sh | 20
-rw-r--r-- docker/components/Dockerfile | 4
-rw-r--r-- docker/components/hooks/post_checkout | 6
-rw-r--r-- docker/components/testcases.yaml | 56
-rw-r--r-- docker/core/Dockerfile | 2
-rw-r--r-- docker/features/Dockerfile | 21
-rw-r--r-- docker/features/hooks/post_checkout | 6
-rw-r--r-- docker/features/testcases.yaml | 108
-rw-r--r-- docker/features/thirdparty-requirements.txt | 5
-rw-r--r-- docker/healthcheck/Dockerfile | 2
-rw-r--r-- docker/healthcheck/hooks/post_checkout | 6
-rw-r--r-- docker/smoke/Dockerfile | 10
-rw-r--r-- docker/smoke/hooks/post_checkout | 6
-rw-r--r-- docker/smoke/testcases.yaml | 61
-rw-r--r-- docker/smoke/thirdparty-requirements.txt | 7
-rw-r--r-- docker/thirdparty-requirements.txt | 2
-rw-r--r-- docker/vnf/Dockerfile | 12
-rw-r--r-- docker/vnf/hooks/post_checkout | 6
-rw-r--r-- docker/vnf/testcases.yaml | 49
-rw-r--r-- docs/release/release-notes/functest-release.rst | 239
-rw-r--r-- docs/release/release-notes/index.rst | 2
-rw-r--r-- docs/testing/developer/devguide/index.rst | 979
-rw-r--r-- docs/testing/user/configguide/ci.rst | 50
-rw-r--r-- docs/testing/user/configguide/configguide.rst | 366
-rw-r--r-- docs/testing/user/configguide/index.rst | 167
-rw-r--r-- docs/testing/user/configguide/prerequisites.rst | 104
-rw-r--r-- docs/testing/user/userguide/index.rst | 604
-rw-r--r-- docs/testing/user/userguide/reporting.rst | 90
-rw-r--r-- docs/testing/user/userguide/runfunctest.rst | 512
-rw-r--r-- docs/testing/user/userguide/test_details.rst | 539
-rw-r--r-- docs/testing/user/userguide/test_overview.rst (renamed from docs/testing/user/userguide/introduction.rst) | 27
-rw-r--r-- docs/testing/user/userguide/test_results.rst | 50
-rw-r--r-- functest/api/__init__.py (renamed from functest/opnfv_tests/vnf/aaa/__init__.py) | 0
-rw-r--r-- functest/api/base.py | 66
-rw-r--r-- functest/api/common/__init__.py (renamed from functest/tests/unit/vnf/rnc/__init__.py) | 0
-rw-r--r-- functest/api/common/api_utils.py | 101
-rw-r--r-- functest/api/common/thread.py | 52
-rw-r--r-- functest/api/database/__init__.py | 0
-rw-r--r-- functest/api/database/db.py | 26
-rw-r--r-- functest/api/database/v1/__init__.py | 0
-rw-r--r-- functest/api/database/v1/handlers.py | 43
-rw-r--r-- functest/api/database/v1/models.py | 33
-rw-r--r-- functest/api/resources/__init__.py | 0
-rw-r--r-- functest/api/resources/v1/__init__.py | 0
-rw-r--r-- functest/api/resources/v1/creds.py | 67
-rw-r--r-- functest/api/resources/v1/envs.py | 40
-rw-r--r-- functest/api/resources/v1/tasks.py | 58
-rw-r--r-- functest/api/resources/v1/testcases.py | 115
-rw-r--r-- functest/api/resources/v1/tiers.py | 67
-rw-r--r-- functest/api/server.py | 103
-rw-r--r-- functest/api/urls.py | 66
-rw-r--r-- functest/ci/config_aarch64_patch.yaml | 21
-rw-r--r-- functest/ci/config_functest.yaml | 58
-rw-r--r-- functest/ci/config_patch.yaml | 3
-rw-r--r-- functest/ci/download_images.sh | 55
-rw-r--r-- functest/ci/logging.ini | 7
-rw-r--r-- functest/ci/prepare_env.py | 2
-rw-r--r-- functest/ci/run_tests.py | 152
-rw-r--r-- functest/ci/testcases.yaml | 125
-rw-r--r-- functest/ci/tier_builder.py | 9
-rw-r--r-- functest/ci/tier_handler.py | 89
-rw-r--r-- functest/cli/commands/cli_env.py | 37
-rw-r--r-- functest/cli/commands/cli_os.py | 19
-rw-r--r-- functest/cli/commands/cli_testcase.py | 27
-rw-r--r-- functest/cli/commands/cli_tier.py | 44
-rw-r--r-- functest/energy/energy.py | 227
-rw-r--r-- functest/opnfv_tests/openstack/rally/blacklist.txt | 32
-rw-r--r-- functest/opnfv_tests/openstack/rally/rally.py | 4
-rw-r--r-- functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml | 458
-rw-r--r-- functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml | 247
-rw-r--r-- functest/opnfv_tests/openstack/rally/task.yaml | 4
-rw-r--r-- functest/opnfv_tests/openstack/refstack_client/refstack_client.py | 126
-rw-r--r-- functest/opnfv_tests/openstack/refstack_client/tempest_conf.py | 20
-rw-r--r-- functest/opnfv_tests/openstack/snaps/snaps_test_runner.py | 7
-rw-r--r-- functest/opnfv_tests/openstack/snaps/snaps_utils.py | 2
-rw-r--r-- functest/opnfv_tests/openstack/tempest/conf_utils.py | 283
-rw-r--r-- functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml | 13
-rw-r--r-- functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt | 4
-rw-r--r-- functest/opnfv_tests/openstack/tempest/tempest.py | 269
-rw-r--r-- functest/opnfv_tests/openstack/vping/ping.sh | 19
-rw-r--r-- functest/opnfv_tests/openstack/vping/vping_base.py | 35
-rw-r--r-- functest/opnfv_tests/openstack/vping/vping_userdata.py | 2
-rw-r--r-- functest/opnfv_tests/sdn/odl/odl.py | 5
-rw-r--r-- functest/opnfv_tests/sdn/onos/teston/adapters/connection.py | 2
-rw-r--r-- functest/opnfv_tests/sdn/onos/teston/adapters/environment.py | 22
-rw-r--r-- functest/opnfv_tests/vnf/aaa/aaa.py | 41
-rw-r--r-- functest/opnfv_tests/vnf/ims/clearwater_ims_base.py | 36
-rw-r--r-- functest/opnfv_tests/vnf/ims/cloudify_ims.py | 66
-rw-r--r-- functest/opnfv_tests/vnf/ims/cloudify_ims.yaml | 6
-rw-r--r-- functest/opnfv_tests/vnf/ims/opera_ims.py | 131
-rw-r--r-- functest/opnfv_tests/vnf/ims/orchestra.yaml | 61
-rw-r--r-- functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py | 682
-rw-r--r-- functest/opnfv_tests/vnf/ims/orchestra_ims.py | 487
-rw-r--r-- functest/opnfv_tests/vnf/ims/orchestra_ims.yaml | 21
-rw-r--r-- functest/opnfv_tests/vnf/ims/orchestra_openims.py | 718
-rw-r--r-- functest/tests/unit/ci/test_run_tests.py | 171
-rw-r--r-- functest/tests/unit/ci/test_tier_builder.py | 3
-rw-r--r-- functest/tests/unit/core/test_feature.py | 4
-rw-r--r-- functest/tests/unit/energy/test_functest_energy.py | 74
-rw-r--r-- functest/tests/unit/odl/test_odl.py | 2
-rw-r--r-- functest/tests/unit/openstack/rally/test_rally.py | 553
-rw-r--r-- functest/tests/unit/openstack/refstack_client/test_refstack_client.py | 82
-rw-r--r-- functest/tests/unit/openstack/tempest/test_conf_utils.py | 249
-rw-r--r-- functest/tests/unit/openstack/tempest/test_tempest.py | 26
-rw-r--r-- functest/tests/unit/openstack/vping/test_vping.py | 6
-rw-r--r-- functest/tests/unit/utils/test_functest_utils.py | 42
-rw-r--r-- functest/tests/unit/utils/test_openstack_utils.py | 17
-rw-r--r-- functest/tests/unit/vnf/ims/test_cloudify_ims.py | 9
-rw-r--r-- functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py | 227
-rw-r--r-- functest/tests/unit/vnf/ims/test_orchestra_openims.py | 229
-rw-r--r-- functest/utils/env.py | 3
-rw-r--r-- functest/utils/functest_utils.py | 27
-rw-r--r-- functest/utils/openstack_utils.py | 91
-rw-r--r-- requirements.txt | 3
-rw-r--r-- setup.cfg | 2
-rw-r--r-- tox.ini | 12
-rw-r--r-- upper-constraints.txt | 3
121 files changed, 7146 insertions, 4574 deletions
diff --git a/build.sh b/build.sh
new file mode 100644
index 000000000..77ea98beb
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+repo=${repo:-opnfv}
+dirs="\
+docker/core \
+docker/healthcheck \
+docker/smoke \
+docker/features \
+docker/components"
+
+(cd docker && docker build -t "${repo}/functest" .)
+docker push "${repo}/functest"
+
+for dir in ${dirs}; do
+ (cd ${dir} && docker build -t "${repo}/functest-${dir##**/}" .)
+ docker push "${repo}/functest-${dir##**/}"
+done
+
+exit $?
diff --git a/docker/Dockerfile b/docker/Dockerfile
index accbf5e30..0e896d6d2 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -14,9 +14,8 @@ LABEL version="0.1" description="OPNFV Functest Docker container"
# Environment variables
ARG BRANCH=master
ARG RALLY_TAG=0.8.1
-ARG ODL_TAG=release/beryllium-sr4
+ARG ODL_TAG=release/carbon
ARG OPENSTACK_TAG=stable/ocata
-ARG KINGBIRD_TAG=1.1.0
ARG VIMS_TAG=stable
ARG VROUTER_TAG=stable
ARG REPOS_DIR=/home/opnfv/repos
@@ -39,6 +38,7 @@ build-essential \
bundler \
crudini \
curl \
+dnsmasq \
gcc \
git \
libffi-dev \
@@ -52,7 +52,9 @@ python-dev \
python-mock \
python-pip \
postgresql \
-ruby1.9.1-dev \
+ruby \
+ruby-dev \
+ruby-bundler \
ssh \
sshpass \
wget \
@@ -84,24 +86,16 @@ RUN wget -q -O- https://git.openstack.org/cgit/openstack/requirements/plain/uppe
rm thirdparty-requirements.txt upper-constraints.txt
# OPNFV repositories
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/copper ${REPOS_DIR}/copper
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/domino ${REPOS_DIR}/domino
+RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/domino /src/domino
RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/doctor ${REPOS_DIR}/doctor
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/promise ${REPOS_DIR}/promise
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/netready ${REPOS_DIR}/netready
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/fds ${REPOS_DIR}/fds
+RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/fds /src/fds
# other repositories
-RUN git clone --depth 1 -b $ODL_TAG https://git.opendaylight.org/gerrit/p/integration/test.git ${REPOS_DIR}/odl_test
-RUN git clone --depth 1 -b $VIMS_TAG https://github.com/boucherv-orange/clearwater-live-test ${REPOS_VNFS_DIR}/vims-test
+RUN git clone --depth 1 -b $ODL_TAG https://git.opendaylight.org/gerrit/p/integration/test.git /src/odl_test
+RUN git clone --depth 1 -b $VIMS_TAG https://github.com/boucherv-orange/clearwater-live-test /src/vims-test
RUN git clone --depth 1 -b $VROUTER_TAG https://github.com/oolorg/opnfv-functest-vrouter.git ${REPOS_VNFS_DIR}/vrouter
RUN git clone --depth 1 https://github.com/wuwenbin2/OnosSystemTest.git ${REPOS_DIR}/onos
-RUN add_images.sh
-
-RUN gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
-RUN curl -L https://get.rvm.io | bash -s stable
-
# SFC integration
RUN /bin/bash -c ". /usr/local/lib/python2.7/dist-packages/sfc/tests/functest/setup_scripts/tacker_client_install.sh"
@@ -109,22 +103,11 @@ RUN /bin/bash -c ". /usr/local/lib/python2.7/dist-packages/sfc/tests/functest/se
RUN ln -s /src/tempest /src/refstack-client/.tempest \
&& virtualenv --system-site-packages /src/tempest/.venv
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm autolibs enable"
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm install 1.9.3"
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm use 1.9.3"
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && bundle install"
+RUN cd /src/vims-test && bundle install
RUN sh -c 'curl -sL https://deb.nodesource.com/setup_4.x | sudo -E bash -' \
&& sudo apt-get install -y nodejs \
- && cd ${REPOS_DIR}/promise && sudo npm -g install npm@latest \
- && cd ${REPOS_DIR}/promise/source && npm install
+ && cd /src/promise && sudo npm -g install npm@latest \
+ && cd /src/promise/source && npm install
RUN echo ". ${FUNCTEST_DIR}/cli/functest-complete.sh" >> /root/.bashrc
diff --git a/docker/Dockerfile.aarch64 b/docker/Dockerfile.aarch64
deleted file mode 100644
index abd4d1afb..000000000
--- a/docker/Dockerfile.aarch64
+++ /dev/null
@@ -1,128 +0,0 @@
-########################################
-# Aarch64 Docker container for FUNCTEST
-########################################
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-FROM aarch64/ubuntu:14.04
-MAINTAINER Armband team <armband@enea.com>
-LABEL version="0.1" description="OPNFV Functest Aarch64 Docker container"
-
-# Environment variables
-ARG BRANCH=master
-ARG RALLY_TAG=0.8.1
-ARG ODL_TAG=release/beryllium-sr4
-ARG OPENSTACK_TAG=stable/ocata
-ARG KINGBIRD_TAG=0.2.2
-ARG VIMS_TAG=stable
-ARG REPOS_DIR=/home/opnfv/repos
-ARG FUNCTEST_BASE_DIR=/home/opnfv/functest
-ARG FUNCTEST_CONF_DIR=${FUNCTEST_BASE_DIR}/conf
-ARG FUNCTEST_DATA_DIR=${FUNCTEST_BASE_DIR}/data
-ARG FUNCTEST_IMAGES_DIR=${FUNCTEST_BASE_DIR}/images
-ARG FUNCTEST_RESULTS_DIR=${FUNCTEST_BASE_DIR}/results
-ARG FUNCTEST_DIR=/usr/local/lib/python2.7/dist-packages/functest/
-ARG REPOS_VNFS_DIR=${REPOS_DIR}/vnfs
-
-# Environment variables
-ENV CONFIG_FUNCTEST_YAML ${FUNCTEST_DIR}/functest/ci/config_functest.yaml
-ENV REPOS_DIR ${REPOS_DIR}
-ENV creds ${FUNCTEST_CONF_DIR}/openstack.creds
-
-# Packaged dependencies
-RUN apt-get update && apt-get install -y \
-build-essential \
-bundler \
-crudini \
-curl \
-gcc \
-git \
-libffi-dev \
-libgmp3-dev \
-libjpeg-dev \
-libpq-dev \
-libssl-dev \
-libxml2-dev \
-libxslt-dev \
-python-dev \
-python-mock \
-python-pip \
-postgresql \
-ruby1.9.1-dev \
-ssh \
-sshpass \
-wget \
---no-install-recommends
-
-RUN pip install --upgrade pip && easy_install -U setuptools==30.0.0
-
-RUN mkdir -p ${REPOS_VNFS_DIR} \
- && mkdir -p ${FUNCTEST_BASE_DIR}/results \
- && mkdir -p ${FUNCTEST_BASE_DIR}/conf \
- && mkdir -p ${FUNCTEST_DATA_DIR} \
- && mkdir -p ${FUNCTEST_IMAGES_DIR} \
- && mkdir -p /root/.ssh \
- && chmod 700 /root/.ssh
-
-RUN git config --global http.sslVerify false
-
-COPY thirdparty-requirements.txt thirdparty-requirements.txt
-RUN wget -q -O- https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=$OPENSTACK_TAG | \
- sed -E s/^tempest==+\(.*\)$/-e\ git+https:\\/\\/github.com\\/openstack\\/tempest@\\1#egg=tempest/ \
- > upper-constraints.txt && \
- pip install --src /src -cupper-constraints.txt \
- -chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH \
- git+https://gerrit.opnfv.org/gerrit/functest@$BRANCH#egg=functest \
- -rthirdparty-requirements.txt && \
- mkdir -p /etc/rally && \
- printf "[database]\nconnection = 'sqlite:////var/lib/rally/database/rally.sqlite'" > /etc/rally/rally.conf && \
- mkdir -p /var/lib/rally/database && rally-manage db create && \
- rm thirdparty-requirements.txt upper-constraints.txt
-
-# OPNFV repositories
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/copper ${REPOS_DIR}/copper
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/domino ${REPOS_DIR}/domino
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/doctor ${REPOS_DIR}/doctor
-RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/promise ${REPOS_DIR}/promise
-
-# other repositories
-RUN git clone --depth 1 -b $ODL_TAG https://git.opendaylight.org/gerrit/p/integration/test.git ${REPOS_DIR}/odl_test
-RUN git clone --depth 1 -b $VIMS_TAG https://github.com/boucherv-orange/clearwater-live-test ${REPOS_VNFS_DIR}/vims-test
-RUN git clone --depth 1 https://github.com/wuwenbin2/OnosSystemTest.git ${REPOS_DIR}/onos
-
-RUN add_images.sh
-
-RUN gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
-RUN curl -L https://get.rvm.io | bash -s stable
-
-# SFC integration
-RUN /bin/bash -c ". /usr/local/lib/python2.7/dist-packages/sfc/tests/functest/setup_scripts/tacker_client_install.sh"
-
-# Install tempest venv and create symlink for running refstack-client
-RUN ln -s /src/tempest /src/refstack-client/.tempest \
- && virtualenv --system-site-packages /src/tempest/.venv
-
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm autolibs enable"
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm install 1.9.3"
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm use 1.9.3"
-RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && gem install bundler \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && bundle config build.nokogiri --use-system-libraries \
- && bundle install"
-
-RUN sh -c 'wget -qO- https://nodejs.org/dist/v4.7.2/node-v4.7.2-linux-arm64.tar.gz | \
- tar -xz -C /usr/local --exclude=CHANGELOG.md --exclude=LICENSE --exclude=README.md --strip-components 1 '\
- && cd ${REPOS_DIR}/promise && sudo npm -g install npm@latest \
- && cd ${REPOS_DIR}/promise/source && npm install
-
-RUN echo ". ${FUNCTEST_DIR}/cli/functest-complete.sh" >> /root/.bashrc
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
new file mode 100644
index 000000000..1257206d2
--- /dev/null
+++ b/docker/Dockerfile.aarch64.patch
@@ -0,0 +1,62 @@
+diff --git a/docker/Dockerfile b/docker/Dockerfile
+index 0e896d6d..2a8f2b66 100644
+--- a/docker/Dockerfile
++++ b/docker/Dockerfile
+@@ -1,5 +1,5 @@
+ ########################################
+-# Docker container for FUNCTEST
++# Aarch64 Docker container for FUNCTEST
+ ########################################
+ # All rights reserved. This program and the accompanying materials
+ # are made available under the terms of the Apache License, Version 2.0
+@@ -7,9 +7,9 @@
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+
+-FROM ubuntu:14.04
+-MAINTAINER Jose Lausuch <jose.lausuch@ericsson.com>
+-LABEL version="0.1" description="OPNFV Functest Docker container"
++FROM aarch64/ubuntu:14.04
++MAINTAINER Armband team <armband@enea.com>
++LABEL version="0.1" description="OPNFV Functest Aarch64 Docker container"
+
+ # Environment variables
+ ARG BRANCH=master
+@@ -43,6 +43,7 @@ gcc \
+ git \
+ libffi-dev \
+ libgmp3-dev \
++libjpeg-dev \
+ libpq-dev \
+ libssl-dev \
+ libxml2-dev \
+@@ -103,10 +104,26 @@ RUN /bin/bash -c ". /usr/local/lib/python2.7/dist-packages/sfc/tests/functest/se
+ RUN ln -s /src/tempest /src/refstack-client/.tempest \
+ && virtualenv --system-site-packages /src/tempest/.venv
+
+-RUN cd /src/vims-test && bundle install
++RUN gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
++RUN curl -L https://get.rvm.io | bash -s stable
+
+-RUN sh -c 'curl -sL https://deb.nodesource.com/setup_4.x | sudo -E bash -' \
+- && sudo apt-get install -y nodejs \
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && cd /src/vims-test \
++ && rvm autolibs enable"
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && cd /src/vims-test \
++ && rvm install 1.9.3"
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && cd /src/vims-test \
++ && rvm use 1.9.3"
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && gem install bundler \
++ && cd /src/vims-test \
++ && bundle config build.nokogiri --use-system-libraries \
++ && bundle install"
++
++RUN sh -c 'wget -qO- https://nodejs.org/dist/v4.7.2/node-v4.7.2-linux-arm64.tar.gz | \
++ tar -xz -C /usr/local --exclude=CHANGELOG.md --exclude=LICENSE --exclude=README.md --strip-components 1 '\
+ && cd /src/promise && sudo npm -g install npm@latest \
+ && cd /src/promise/source && npm install
+
diff --git a/docker/add_images.sh b/docker/add_images.sh
deleted file mode 100644
index 93afbd252..000000000
--- a/docker/add_images.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script downloads the images that are used for testing
-# and places them in the functest docker image
-set -e
-
-FUNCTEST_IMAGES_DIR=${FUNCTEST_IMAGES_DIR:-/home/opnfv/functest/images}
-
-CIRROS_REPO_URL=https://download.cirros-cloud.net
-CIRROS_AARCH64_TAG=161201
-CIRROS_X86_64_TAG=0.3.5
-
-wget ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${FUNCTEST_IMAGES_DIR}
-wget ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${FUNCTEST_IMAGES_DIR}
-wget http://artifacts.opnfv.org/onosfw/images/firewall_block_image.img -P ${FUNCTEST_IMAGES_DIR}
-
-# Add the 3-part image for aarch64, since functest can be run from an x86 machine to test an aarch64 POD
-wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${FUNCTEST_IMAGES_DIR}
-wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${FUNCTEST_IMAGES_DIR}
-wget ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${FUNCTEST_IMAGES_DIR}
diff --git a/docker/components/Dockerfile b/docker/components/Dockerfile
new file mode 100644
index 000000000..8923e4cd7
--- /dev/null
+++ b/docker/components/Dockerfile
@@ -0,0 +1,4 @@
+FROM opnfv/functest-core
+
+COPY testcases.yaml /usr/lib/python2.7/site-packages/functest/ci/testcases.yaml
+CMD ["bash","-c","prepare_env start && run_tests -t all"]
diff --git a/docker/components/hooks/post_checkout b/docker/components/hooks/post_checkout
new file mode 100644
index 000000000..20a6d4b95
--- /dev/null
+++ b/docker/components/hooks/post_checkout
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+from="${DOCKER_REPO%/*}/functest-core"
+sed -i "s|^FROM.*$|FROM ${from}|" Dockerfile
+
+exit $?
diff --git a/docker/components/testcases.yaml b/docker/components/testcases.yaml
new file mode 100644
index 000000000..1604161ac
--- /dev/null
+++ b/docker/components/testcases.yaml
@@ -0,0 +1,56 @@
+tiers:
+ -
+ name: components
+ order: 3
+ ci_loop: 'weekly'
+ description : >-
+ Extensive testing of OpenStack API.
+ testcases:
+ -
+ case_name: tempest_full_parallel
+ project_name: functest
+ criteria: 80
+ blocking: false
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ dependencies:
+ installer: '^((?!netvirt).)*$'
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.tempest.tempest'
+ class: 'TempestFullParallel'
+
+ -
+ case_name: tempest_custom
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ The test case allows running a customized list of tempest
+ test cases defined in a file under
+ <dir_functest_repo>/functest/opnfv_tests/openstack/
+ /tempest/custom_tests/test_list.txt
+ The file is empty and can be customized with the desired tests.
+ dependencies:
+ installer: 'unknown'
+ scenario: 'unknown'
+ run:
+ module: 'functest.opnfv_tests.openstack.tempest.tempest'
+ class: 'TempestCustom'
+
+ -
+ case_name: rally_full
+ project_name: functest
+ criteria: 90
+ blocking: false
+ description: >-
+ This test case runs the full suite of scenarios of the OpenStack
+ Rally suite using several threads and iterations.
+ dependencies:
+ installer: '^((?!netvirt).)*$'
+ scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.rally.rally'
+ class: 'RallyFull' \ No newline at end of file
diff --git a/docker/core/Dockerfile b/docker/core/Dockerfile
index 574de9bae..ebd76a262 100644
--- a/docker/core/Dockerfile
+++ b/docker/core/Dockerfile
@@ -12,7 +12,7 @@ RUN apk --no-cache add --update \
wget -q -O- https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=$OPENSTACK_TAG | \
sed -E s/^tempest==+\(.*\)$/-e\ git+https:\\/\\/github.com\\/openstack\\/tempest@\\1#egg=tempest/ \
> upper-constraints.txt && \
- pip install --src /src -cupper-constraints.txt \
+ pip install --no-cache-dir --src /src -cupper-constraints.txt \
-chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH \
git+https://gerrit.opnfv.org/gerrit/functest@$BRANCH#egg=functest && \
rm upper-constraints.txt && \
diff --git a/docker/features/Dockerfile b/docker/features/Dockerfile
new file mode 100644
index 000000000..54bffe827
--- /dev/null
+++ b/docker/features/Dockerfile
@@ -0,0 +1,21 @@
+FROM opnfv/functest-core
+
+ARG BRANCH=master
+ARG OPENSTACK_TAG=stable/ocata
+
+COPY thirdparty-requirements.txt thirdparty-requirements.txt
+RUN apk --no-cache add --update nodejs nodejs-npm && \
+ apk --no-cache add --virtual .build-deps --update \
+ python-dev build-base linux-headers libffi-dev \
+ openssl-dev libjpeg-turbo-dev git && \
+ pip install --no-cache-dir --src /src \
+ -chttps://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=$OPENSTACK_TAG \
+ -chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH \
+ -rthirdparty-requirements.txt && \
+ npm -g install npm@latest && \
+ (cd /src/promise/source && npm install) && \
+ git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/domino /src/domino && \
+ rm -r thirdparty-requirements.txt /src/domino/.git && \
+ apk del .build-deps
+COPY testcases.yaml /usr/lib/python2.7/site-packages/functest/ci/testcases.yaml
+CMD ["bash","-c","prepare_env start && run_tests -t all"]
diff --git a/docker/features/hooks/post_checkout b/docker/features/hooks/post_checkout
new file mode 100644
index 000000000..20a6d4b95
--- /dev/null
+++ b/docker/features/hooks/post_checkout
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+from="${DOCKER_REPO%/*}/functest-core"
+sed -i "s|^FROM.*$|FROM ${from}|" Dockerfile
+
+exit $?
diff --git a/docker/features/testcases.yaml b/docker/features/testcases.yaml
new file mode 100644
index 000000000..69da9350e
--- /dev/null
+++ b/docker/features/testcases.yaml
@@ -0,0 +1,108 @@
+tiers:
+ -
+ name: features
+ order: 2
+ ci_loop: '(daily)|(weekly)'
+ description : >-
+ Test suites from feature projects
+ integrated in functest
+ testcases:
+ -
+ case_name: promise
+ project_name: promise
+ criteria: 100
+ blocking: false
+ description: >-
+ Test suite from Promise project.
+ dependencies:
+ installer: '(fuel)|(joid)'
+ scenario: ''
+ run:
+ module: 'functest.core.feature'
+ class: 'BashFeature'
+ args:
+ cmd: 'run_promise_tests.py'
+
+ -
+ case_name: bgpvpn
+ project_name: sdnvpn
+ criteria: 100
+ blocking: false
+ description: >-
+ Test suite from SDNVPN project.
+ dependencies:
+ installer: '(fuel)|(apex)|(netvirt)'
+ scenario: 'bgpvpn'
+ run:
+ module: 'functest.core.feature'
+ class: 'BashFeature'
+ args:
+ cmd: 'run_sdnvpn_tests.py'
+
+ -
+ case_name: security_scan
+ enabled: false
+ project_name: securityscanning
+ criteria: 100
+ blocking: false
+ description: >-
+ Simple Security Scan
+ dependencies:
+ installer: 'apex'
+ scenario: '^((?!fdio).)*$'
+ run:
+ module: 'functest.core.feature'
+ class: 'BashFeature'
+ args:
+ cmd: '. /home/opnfv/functest/conf/stackrc && security_scan --config /usr/local/etc/securityscanning/config.ini'
+
+ -
+ case_name: functest-odl-sfc
+ enabled: false
+ project_name: sfc
+ criteria: 100
+ blocking: false
+ description: >-
+ Test suite for odl-sfc to test two chains and two SFs
+ dependencies:
+ installer: '(apex)|(fuel)'
+ scenario: 'odl_l2-sfc'
+ run:
+ module: 'functest.core.feature'
+ class: 'BashFeature'
+ args:
+ cmd: 'run_sfc_tests.py'
+
+ -
+ case_name: domino-multinode
+ enabled: false
+ project_name: domino
+ criteria: 100
+ blocking: false
+ description: >-
+ Test suite from Domino project.
+ dependencies:
+ installer: ''
+ scenario: ''
+ run:
+ module: 'functest.core.feature'
+ class: 'BashFeature'
+ args:
+ cmd: 'cd /src/domino && ./tests/run_multinode.sh'
+
+ -
+ case_name: barometercollectd
+ enabled: true
+ project_name: barometer
+ criteria: 100
+ blocking: false
+ description: >-
+ Test suite for the Barometer project. Separate tests verify
+ the proper configuration and basic functionality of all the
+ collectd plugins as described in the Project Release Plan
+ dependencies:
+ installer: 'apex'
+ scenario: 'bar'
+ run:
+ module: 'baro_tests.barometer'
+ class: 'BarometerCollectd'
diff --git a/docker/features/thirdparty-requirements.txt b/docker/features/thirdparty-requirements.txt
new file mode 100644
index 000000000..0fa9be36c
--- /dev/null
+++ b/docker/features/thirdparty-requirements.txt
@@ -0,0 +1,5 @@
+baro_tests
+sdnvpn
+securityscanning
+sfc
+promise
diff --git a/docker/healthcheck/Dockerfile b/docker/healthcheck/Dockerfile
index 6dfea7f8a..8923e4cd7 100644
--- a/docker/healthcheck/Dockerfile
+++ b/docker/healthcheck/Dockerfile
@@ -1,4 +1,4 @@
-FROM ollivier/functest-core
+FROM opnfv/functest-core
COPY testcases.yaml /usr/lib/python2.7/site-packages/functest/ci/testcases.yaml
CMD ["bash","-c","prepare_env start && run_tests -t all"]
diff --git a/docker/healthcheck/hooks/post_checkout b/docker/healthcheck/hooks/post_checkout
new file mode 100644
index 000000000..20a6d4b95
--- /dev/null
+++ b/docker/healthcheck/hooks/post_checkout
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+from="${DOCKER_REPO%/*}/functest-core"
+sed -i "s|^FROM.*$|FROM ${from}|" Dockerfile
+
+exit $?
diff --git a/docker/smoke/Dockerfile b/docker/smoke/Dockerfile
index b6f84b64a..103854703 100644
--- a/docker/smoke/Dockerfile
+++ b/docker/smoke/Dockerfile
@@ -1,19 +1,23 @@
-FROM ollivier/functest-core
+FROM opnfv/functest-core
ARG BRANCH=master
ARG OPENSTACK_TAG=stable/ocata
+ARG ODL_TAG=release/carbon
COPY thirdparty-requirements.txt thirdparty-requirements.txt
RUN apk --no-cache add --virtual .build-deps --update \
python-dev build-base linux-headers libffi-dev \
openssl-dev libjpeg-turbo-dev git && \
- pip install --src /src \
+ pip install --no-cache-dir --src /src \
-chttps://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=$OPENSTACK_TAG \
-chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH \
-rthirdparty-requirements.txt && \
+ git clone --depth 1 -b $ODL_TAG https://git.opendaylight.org/gerrit/p/integration/test.git /src/odl_test && \
+ git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/fds /src/fds && \
ln -s /src/tempest /src/refstack-client/.tempest && \
virtualenv --system-site-packages /src/tempest/.venv && \
- rm thirdparty-requirements.txt && \
+ rm -r thirdparty-requirements.txt /src/refstack-client/.git /src/odl_test/.git \
+ /src/fds/.git && \
apk del .build-deps
COPY testcases.yaml /usr/lib/python2.7/site-packages/functest/ci/testcases.yaml
CMD ["bash","-c","prepare_env start && run_tests -t all"]
diff --git a/docker/smoke/hooks/post_checkout b/docker/smoke/hooks/post_checkout
new file mode 100644
index 000000000..20a6d4b95
--- /dev/null
+++ b/docker/smoke/hooks/post_checkout
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+from="${DOCKER_REPO%/*}/functest-core"
+sed -i "s|^FROM.*$|FROM ${from}|" Dockerfile
+
+exit $?
diff --git a/docker/smoke/testcases.yaml b/docker/smoke/testcases.yaml
index 69ea038a4..b3d4f3b96 100644
--- a/docker/smoke/testcases.yaml
+++ b/docker/smoke/testcases.yaml
@@ -85,6 +85,67 @@ tiers:
class: 'RefstackClient'
-
+ case_name: odl
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ Test Suite for the OpenDaylight SDN Controller. It
+ integrates some test suites from upstream using
+ Robot as the test framework.
+ dependencies:
+ installer: ''
+ scenario: 'odl'
+ run:
+ module: 'functest.opnfv_tests.sdn.odl.odl'
+ class: 'ODLTests'
+ args:
+ suites:
+ - /src/odl_test/csit/suites/integration/basic
+ - /src/odl_test/csit/suites/openstack/neutron
+
+ -
+ case_name: odl_netvirt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ Test Suite for the OpenDaylight SDN Controller when
+ the NetVirt features are installed. It integrates
+ some test suites from upstream using Robot as the
+ test framework.
+ dependencies:
+ installer: 'apex'
+ scenario: 'os-odl_l3-nofeature'
+ run:
+ module: 'functest.opnfv_tests.sdn.odl.odl'
+ class: 'ODLTests'
+ args:
+ suites:
+ - /src/odl_test/csit/suites/integration/basic
+ - /src/odl_test/csit/suites/openstack/neutron
+ - /src/odl_test/csit/suites/openstack/connectivity
+
+ -
+ case_name: fds
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ Test Suite for the OpenDaylight SDN Controller when GBP features are
+ installed. It integrates some test suites from upstream using
+ Robot as the test framework.
+ dependencies:
+ installer: 'apex'
+ scenario: 'odl.*-fdio'
+ run:
+ module: 'functest.opnfv_tests.sdn.odl.odl'
+ class: 'ODLTests'
+ args:
+ suites:
+ - /src/fds/testing/robot
+
+ -
case_name: snaps_smoke
project_name: functest
criteria: 100
diff --git a/docker/smoke/thirdparty-requirements.txt b/docker/smoke/thirdparty-requirements.txt
index be1980f2c..b298601b7 100644
--- a/docker/smoke/thirdparty-requirements.txt
+++ b/docker/smoke/thirdparty-requirements.txt
@@ -1,8 +1 @@
-baro_tests
-sdnvpn
-opera
-securityscanning
-sfc
-tosca-parser>=0.7.0 # Apache-2.0
-heat-translator>=0.4.0 # Apache-2.0
refstack-client
diff --git a/docker/thirdparty-requirements.txt b/docker/thirdparty-requirements.txt
index be1980f2c..773af7588 100644
--- a/docker/thirdparty-requirements.txt
+++ b/docker/thirdparty-requirements.txt
@@ -1,8 +1,8 @@
baro_tests
sdnvpn
-opera
securityscanning
sfc
+promise
tosca-parser>=0.7.0 # Apache-2.0
heat-translator>=0.4.0 # Apache-2.0
refstack-client
diff --git a/docker/vnf/Dockerfile b/docker/vnf/Dockerfile
new file mode 100644
index 000000000..d4f18c476
--- /dev/null
+++ b/docker/vnf/Dockerfile
@@ -0,0 +1,12 @@
+FROM opnfv/functest-core
+
+ARG VIMS_TAG=stable
+
+RUN apk --no-cache add --update \
+ ruby ruby-dev ruby-bundler ruby-irb ruby-rdoc dnsmasq \
+ procps git g++ make libxslt-dev libxml2-dev zlib-dev libffi-dev && \
+ git clone --depth 1 -b $VIMS_TAG https://github.com/boucherv-orange/clearwater-live-test /src/vims-test && \
+ rm -r /src/vims-test/.git && \
+ cd /src/vims-test && bundle config build.nokogiri --use-system-libraries && bundle install --system
+COPY testcases.yaml /usr/lib/python2.7/site-packages/functest/ci/testcases.yaml
+CMD ["bash","-c","prepare_env start && run_tests -t all"]
diff --git a/docker/vnf/hooks/post_checkout b/docker/vnf/hooks/post_checkout
new file mode 100644
index 000000000..20a6d4b95
--- /dev/null
+++ b/docker/vnf/hooks/post_checkout
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+from="${DOCKER_REPO%/*}/functest-core"
+sed -i "s|^FROM.*$|FROM ${from}|" Dockerfile
+
+exit $?
diff --git a/docker/vnf/testcases.yaml b/docker/vnf/testcases.yaml
new file mode 100644
index 000000000..9f6533930
--- /dev/null
+++ b/docker/vnf/testcases.yaml
@@ -0,0 +1,49 @@
+tiers:
+ -
+ name: vnf
+ order: 4
+ ci_loop: '(daily)|(weekly)'
+ description : >-
+ Collection of VNF test cases.
+ testcases:
+ -
+ case_name: cloudify_ims
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case deploys an OpenSource vIMS solution from Clearwater
+ using the Cloudify orchestrator. It also runs some signaling traffic.
+ dependencies:
+ installer: ''
+ scenario: 'os-nosdn-nofeature-ha'
+ run:
+ module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
+ class: 'CloudifyIms'
+ -
+ case_name: orchestra_openims
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ OpenIMS VNF deployment with Open Baton (Orchestra)
+ dependencies:
+ installer: ''
+ scenario: 'os-nosdn-nofeature-ha'
+ run:
+ module: 'functest.opnfv_tests.vnf.ims.orchestra_openims'
+ class: 'OpenImsVnf'
+
+ -
+ case_name: orchestra_clearwaterims
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ ClearwaterIMS VNF deployment with Open Baton (Orchestra)
+ dependencies:
+ installer: ''
+ scenario: 'os-nosdn-nofeature-ha'
+ run:
+ module: 'functest.opnfv_tests.vnf.ims.orchestra_clearwaterims'
+ class: 'ClearwaterImsVnf'
diff --git a/docs/release/release-notes/functest-release.rst b/docs/release/release-notes/functest-release.rst
index 22393c3a7..602861579 100644
--- a/docs/release/release-notes/functest-release.rst
+++ b/docs/release/release-notes/functest-release.rst
@@ -14,7 +14,7 @@ You should have received a copy of the license along with this.
If not, see <http://creativecommons.org/licenses/by/4.0/>.
===========================================
-OPNFV Danube1.0 release note for Functest
+OPNFV Danube2.0 release note for Functest
===========================================
Abstract
@@ -30,13 +30,24 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+------------+----------+------------------+------------------------+
-| 2016-08-17 | 1.0.0 | Morgan Richomme | Functest for Colorado |
-| | | (Orange) | release |
+| 2016-08-17 | 1.0.0 | Morgan Richomme | Functest for |
+| | | (Orange) | Colorado.1.0 release |
+------------+----------+------------------+------------------------+
-| 2017-03-29 | 4.0.0 | Jose Lausuch | Functest for Danube |
-| | | (Ericsson) | release |
+| 2016-10-24 | 2.0.0 | Morgan Richomme | Functest for |
+| | | (Orange) | Colorado.2.0 release |
++------------+----------+------------------+------------------------+
+| 2016-08-17 | 3.0.0 | Morgan Richomme | Functest for |
+| | | (Orange) | Colorado.3.0 release |
++------------+----------+------------------+------------------------+
+| 2017-03-29 | 4.0.0 | Jose Lausuch | Functest for |
+| | | (Ericsson) | Danube.1.0 release |
++------------+----------+------------------+------------------------+
+| 2017-05-04 | 5.0.0 | Jose Lausuch | Functest for |
+| | | (Ericsson) | Danube.2.0 release |
++------------+----------+------------------+------------------------+
+| 2017-07-12 | 6.0.0 | Jose Lausuch | Functest for |
+| | | (Ericsson) | Danube.3.0 release |
+------------+----------+------------------+------------------------+
-
OPNFV Danube Release
======================
@@ -69,7 +80,6 @@ The OPNFV projects integrated into Functest framework for automation are:
* domino
* fds
* multisite
- * netready
* onos
* odl-sfc
* odl-netvirt
@@ -93,16 +103,16 @@ Release Data
| **Project** | functest |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | danube.1.0 |
+| **Repo/tag** | danube.3.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube base release |
+| **Release designation** | Danube service release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | March 31st 2017 |
+| **Release date** | July 14th 2017 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Danube base release |
+| **Purpose of the delivery** | Danube third release |
| | |
+--------------------------------------+--------------------------------------+
@@ -112,87 +122,42 @@ Deliverables
Software
--------
- - The Functest Docker image: https://hub.docker.com/r/opnfv/functest (tag: danube.1.0)
+ - The Functest Docker image: https://hub.docker.com/r/opnfv/functest (tag: danube.3.0)
- - The TestAPI Docker image: https://hub.docker.com/r/opnfv/testapi (tag:danube.1.0)
+ - The TestAPI Docker image: https://hub.docker.com/r/opnfv/testapi (tag:danube.3.0)
Documents
---------
- - Installation/configuration guide: http://docs.opnfv.org/en/latest/submodules/functest/docs/testing/user/configguide/index.html
+ - Installation/configuration guide: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/configguide/index.html
- - User Guide: http://docs.opnfv.org/en/latest/submodules/functest/docs/testing/user/userguide/index.html
+ - User Guide: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/userguide/index.html
- - Developer Guide: http://docs.opnfv.org/en/latest/submodules/functest/docs/testing/developer/devguide/index.html
+ - Developer Guide: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/developer/devguide/index.html
Version change
==============
-Feature evolution
------------------
-
-- Adoption of SNAPS as middleware in 4 new test cases (connection_check, api_check,
- snaps_health_check and snaps_smoke)
-
-- Introduction of refstack suite
-
-- Support new odl suites (odl-netvirt, fds)
-- Introduction of VNF onboarding capabilities
+- Bugfixes in refstack client
-- Support of new feature projects (fds, netready, barometer, orchestra, vyos_vrouter)
+- Fixed vIMS onboarding and ims deployment
+- Increased timeout of OpenBaton installation
+- Switched to mitaka-eol in OpenStack repositories, as stable/mitaka no longer exists
-Framework
----------
-
- - Harmonization of the naming, better adoption of OpenStack coding conventions
-
- - Enhanced code to be more Object Oriented, removed bash scripts
-
- - Introduction of abstraction classes to ease and harmonize the integration of
- test cases (internal or from feature projects)
+- Minor fixes in logging and test cases
- - New management of logger, env variables and configuration files
-
- - Creation of unit tests on the whole framework to ensure stability
-
- - Creation or ARM Functest docker
-
-
-Test API
----------
-
-- Automatic documentation (html & pdf)
-
-- Full dockerization and automation of the deployment on testresults.opnfv.org
-
-- Automation of test database backup on artifact
-
-
-New internal tests cases
-------------------------
-
-- connection_check
-
-- api_check
-
-- snaps_health_check (replacing shell script healtcheck)
-
-- refstack_defcore
-
-- snaps_smoke
-
-- vyos_vrouter
+- 3 scenarios have been successfully executed on aarch64 architectures on Fuel deployments (nosdn, odl_l2 and odl_l3)
Scenario Matrix
===============
-For Danube 1.0, Functest was tested on the following HA scenarios (new
+For Danube 3.0, Functest was tested on the following HA scenarios (new
Danube scenarios in bold):
+---------------------+---------+---------+---------+---------+
@@ -291,8 +256,8 @@ test [0-3]. The scoring method is described in https://wiki.opnfv.org/pages/view
e.g.
apex/odl_l2-nofeature-ha
- tests = vping_ssh+vping_userdata+tempest+rally+odl+doctor+copper
- Scoring = 21/21 = 7 * 3
+ tests = vping_ssh+vping_userdata+tempest+rally+odl+doctor
+ Scoring = 18/18 = 6 * 3
By default, if not specified, the scenarios are HA.
HA means OpenStack High Availability (main services). Note that not
@@ -311,23 +276,23 @@ Apex
| Scenario | Scoring | Success | Results |
| | | rate | |
+==================+=========+=========+=================+
-| nosdn | 33/33 | 100% | `apex-res-1`_ |
+| nosdn | 20/30 | 67% | `apex-res-1`_ |
+------------------+---------+---------+-----------------+
-| odl_l3 | 27/33 | 82% | `apex-res-2`_ |
+| odl_l3 | 16/30 | 53% | `apex-res-2`_ |
+------------------+---------+---------+-----------------+
-| odl-bgpvpn | 26/30 | 87% | `apex-res-3`_ |
+| odl-bgpvpn | 18/33 | 56% | `apex-res-3`_ |
+------------------+---------+---------+-----------------+
-| odl-gluon | 30/36 | 83% | `apex-res-4`_ |
+| odl-gluon | 20/33 | 61% | `apex-res-4`_ |
+------------------+---------+---------+-----------------+
-| kvm | 32/33 | 97% | `apex-res-5`_ |
+| kvm | 20/30 | 67% | `apex-res-5`_ |
+------------------+---------+---------+-----------------+
-| odl_l2-fdio | 28/36 | 78% | `apex-res-6`_ |
+| odl_l2-fdio | 25/33 | 76% | `apex-res-6`_ |
+------------------+---------+---------+-----------------+
-| odl_l2-fdio-noha | 30/36 | 83% | `apex-res-7`_ |
+| odl_l2-fdio-noha | 24/33 | 73% | local push |
+------------------+---------+---------+-----------------+
-| odl_l3-fdio-noha | 26/30 | 87% | `apex-res-8`_ |
+| odl_l3-fdio-noha | 18/30 | 60% | local push |
+------------------+---------+---------+-----------------+
-| fdio | 6/30 | 20% | `apex-res-9`_ |
+| fdio | 25/27 | 93% | local push |
+------------------+---------+---------+-----------------+
Compass
@@ -339,15 +304,13 @@ Compass
+==================+=========+=========+==================+
| nosdn | 29/30 | 97% | `compass-res-1`_ |
+------------------+---------+---------+------------------+
-| odl_l2 | 28/33 | 84% | `compass-res-2`_ |
+| odl_l2 | 29/33 | 88% | `compass-res-2`_ |
+------------------+---------+---------+------------------+
-| odl_l3 | 21/30 | 70% | `compass-res-3`_ |
+| odl_l3 | 23/30 | 77% | `compass-res-3`_ |
+------------------+---------+---------+------------------+
-| onos | 28/33 | 84% | `compass-res-4`_ |
+| onos | 29/30 | 97% | `compass-res-4`_ |
+------------------+---------+---------+------------------+
-| openo | 28/30 | 93% | `compass-res-5`_ |
-+------------------+---------+---------+------------------+
-| ocl | 4/30 | 13% | `compass-res-6`_ |
+| onos-sfc | 20/36 | 56% | `compass-res-5`_ |
+------------------+---------+---------+------------------+
Note: all the Compass tests for Danube have been executed on virtual
@@ -361,37 +324,37 @@ Fuel
| Scenario | Scoring | Success | Results |
| | | rate | |
+======================+=========+=========+================+
-| nosdn | 37/39 | 95% | `fuel-res-1`_ |
+| nosdn | 39/39 | 100% | `fuel-res-1`_ |
+----------------------+---------+---------+----------------+
| nosdn-noha | 36/36 | 100% | `fuel-res-2`_ |
+----------------------+---------+---------+----------------+
-| nosdn-kvm | 37/39 | 95% | `fuel-res-3`_ |
+| nosdn-kvm | 39/39 | 100% | `fuel-res-3`_ |
+----------------------+---------+---------+----------------+
| nosdn-kvm-noha | 36/36 | 100% | `fuel-res-4`_ |
+----------------------+---------+---------+----------------+
-| nosdn-ovs | 38/39 | 97% | `fuel-res-5`_ |
+| nosdn-ovs | 39/39 | 100% | `fuel-res-5`_ |
+----------------------+---------+---------+----------------+
| nosdn-ovs-noha | 36/36 | 100% | `fuel-res-6`_ |
+----------------------+---------+---------+----------------+
| odl_l2 | 42/42 | 100% | `fuel-res-7`_ |
+----------------------+---------+---------+----------------+
-| odl_l2-noha | 36/39 | 92% | `fuel-res-8`_ |
+| odl_l2-noha | 39/39 | 100% | `fuel-res-8`_ |
+----------------------+---------+---------+----------------+
-| odl_l2-sfc | 40/45 | 89% | `fuel-res-11`_ |
+| odl_l2-sfc | 45/45 | 100% | `fuel-res-9`_ |
+----------------------+---------+---------+----------------+
-| odl_l2-sfc-noha | 36/42 | 86% | `fuel-res-12`_ |
+| odl_l2-sfc-noha | 35/42 | 83% | `fuel-res-10`_ |
+----------------------+---------+---------+----------------+
-| odl_l3 | 34/39 | 87% | `fuel-res-13`_ |
+| odl_l3 | 36/39 | 92% | `fuel-res-11`_ |
+----------------------+---------+---------+----------------+
-| odl_l3-noha | 34/36 | 94% | `fuel-res-14`_ |
+| odl_l3-noha | 36/36 | 100% | `fuel-res-12`_ |
+----------------------+---------+---------+----------------+
-| kvm_ovs_dpdk | 6/39 | 15% | `fuel-res-15`_ |
+| kvm_ovs_dpdk | 39/39 | 100% | `fuel-res-13`_ |
+----------------------+---------+---------+----------------+
-| kvm_ovs_dpdk_noha | 36/36 | 100% | `fuel-res-16`_ |
+| kvm_ovs_dpdk_noha | 35/36 | 97% | `fuel-res-14`_ |
+----------------------+---------+---------+----------------+
-| kvm_ovs_dpdk_bar | 6/42 | 14% | `fuel-res-17`_ |
+| kvm_ovs_dpdk_bar | 42/42 | 100% | `fuel-res-15`_ |
+----------------------+---------+---------+----------------+
-| kvm_ovs_dpdk_bar_noha| 38/39 | 97% | `fuel-res-18`_ |
+| kvm_ovs_dpdk_bar_noha| 36/39 | 92% | `fuel-res-16`_ |
+----------------------+---------+---------+----------------+
@@ -404,15 +367,15 @@ Joid
| Scenario | Scoring | Success | Results |
| | | rate | |
+=====================+=========+=========+===============+
-| nosdn | 32/33 | 97% | `joid-res-1`_ |
+| nosdn | 30/30 | 100% | `joid-res-1`_ |
+---------------------+---------+---------+---------------+
-| nosdn-noha | 31/33 | 94% | `joid-res-2`_ |
+| nosdn-noha | 10/30 | 33% | `joid-res-2`_ |
+---------------------+---------+---------+---------------+
-| nosdn-lxd | 18/24 | 75% | `joid-res-3`_ |
+| nosdn-lxd | 19/21 | 90% | `joid-res-3`_ |
+---------------------+---------+---------+---------------+
-| nosdn-lxd-noha | 17/24 | 71% | `joid-res-4`_ |
+| nosdn-lxd-noha | 15/21 | 71% | `joid-res-4`_ |
+---------------------+---------+---------+---------------+
-| odl_l2 | 9/36 | 25% | `joid-res-5`_ |
+| odl_l2 | 6/33 | 18% | `joid-res-5`_ |
+---------------------+---------+---------+---------------+
It is highly recommended to install a json viewer in your browser
@@ -495,7 +458,7 @@ Open JIRA tickets
All the tickets that are not blocking have been fixed or postponed
the next release.
-Functest Danube 1.0 is released without known bugs.
+Functest Danube 2.0 is released without known bugs.
@@ -522,78 +485,70 @@ Useful links
.. _`gluon-bug`: https://bugs.opendaylight.org/show_bug.cgi?id=5586
-.. _`apex-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-68
-
-.. _`apex-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-69
-.. _`apex-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-70
+.. _`apex-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-baremetal-daily-danube-455
-.. _`apex-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-66
+.. _`apex-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-baremetal-daily-danube-466
-.. _`apex-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-60
+.. _`apex-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-baremetal-daily-danube-449
-.. _`apex-res-6`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-73
+.. _`apex-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-baremetal-daily-danube-450
-.. _`apex-res-7`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-72
+.. _`apex-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-baremetal-daily-danube-461
-.. _`apex-res-8`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-69
+.. _`apex-res-6`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-baremetal-daily-danube-485
-.. _`apex-res-9`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-apex-apex-daily-danube-daily-danube-62
-.. _`compass-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-virtual-daily-danube-60
+.. _`compass-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-virtual-daily-danube-484
-.. _`compass-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-virtual-daily-danube-59
+.. _`compass-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-virtual-daily-danube-454
-.. _`compass-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-baremetal-daily-danube-69
+.. _`compass-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-baremetal-daily-danube-482
-.. _`compass-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-virtual-daily-danube-57
+.. _`compass-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-virtual-daily-danube-481
-.. _`compass-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-baremetal-daily-danube-67
+.. _`compass-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-baremetal-daily-danube-453
-.. _`compass-res-6`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-compass-baremetal-daily-danube-65
.. _`fuel-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-54
-.. _`fuel-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-46
-
-.. _`fuel-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-53
+.. _`fuel-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-937
-.. _`fuel-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-44
+.. _`fuel-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-873
-.. _`fuel-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-55
+.. _`fuel-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-935
-.. _`fuel-res-6`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-45
+.. _`fuel-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-875
-.. _`fuel-res-7`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-zte-pod1-daily-danube-4
+.. _`fuel-res-6`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-936
-.. _`fuel-res-8`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-48
+.. _`fuel-res-7`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-867
-.. _`fuel-res-9`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-52
+.. _`fuel-res-8`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-939
-.. _`fuel-res-10`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-43
+.. _`fuel-res-9`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-870
-.. _`fuel-res-11`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-50
+.. _`fuel-res-10`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-933
-.. _`fuel-res-12`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-42
+.. _`fuel-res-11`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-868
-.. _`fuel-res-13`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-48
+.. _`fuel-res-12`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-940
-.. _`fuel-res-14`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-50
+.. _`fuel-res-13`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-871
-.. _`fuel-res-15`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-51
+.. _`fuel-res-14`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-938
-.. _`fuel-res-16`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-49
+.. _`fuel-res-15`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-869
-.. _`fuel-res-17`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-baremetal-daily-danube-49
+.. _`fuel-res-16`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-fuel-virtual-daily-danube-941
-.. _`fuel-res-18`: http://testresults.opnfv.org/test/api/v1/results?build_tag= jenkins-functest-fuel-virtual-daily-danube-51
-.. _`joid-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-54
+.. _`joid-res-1`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-298
-.. _`joid-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-55
+.. _`joid-res-2`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-260
-.. _`joid-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-56
+.. _`joid-res-3`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-281
-.. _`joid-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-57
+.. _`joid-res-4`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-301
-.. _`joid-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-46
+.. _`joid-res-5`: http://testresults.opnfv.org/test/api/v1/results?build_tag=jenkins-functest-joid-baremetal-daily-danube-291
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index a1a2aa1a7..d0ad9bde5 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -15,6 +15,6 @@ Functest Release Notes
Revision: _sha1_
-:Author: Morgan Richomme (morgan.richomme@orange.com)
+:Author: Jose Lausuch (jose.lausuch@ericsson.com)
Build date: |today|
diff --git a/docs/testing/developer/devguide/index.rst b/docs/testing/developer/devguide/index.rst
index 551edec6f..6bc46081f 100644
--- a/docs/testing/developer/devguide/index.rst
+++ b/docs/testing/developer/devguide/index.rst
@@ -9,6 +9,23 @@ OPNFV FUNCTEST developer guide
:numbered:
:maxdepth: 2
+Version history
+===============
++------------+----------+------------------+----------------------------------+
+| **Date**   | **Ver.** | **Author**       | **Comment**                      |
+|            |          |                  |                                  |
++------------+----------+------------------+----------------------------------+
+| 2017-01-23 | 1.0.0    | Morgan Richomme  | Creation for Danube              |
++------------+----------+------------------+----------------------------------+
+| 2017-08-16 | 1.0.1    | Morgan Richomme  | Adaptations for Euphrates        |
+|            |          |                  | - move generic part to Testing   |
+|            |          |                  |   developer guide                |
+|            |          |                  | - move reporting part to functest|
+|            |          |                  |   user guide                     |
+|            |          |                  | - update test case list          |
+|            |          |                  | - include auto generated core    |
+|            |          |                  |   documentation                  |
++------------+----------+------------------+----------------------------------+
============
Introduction
@@ -17,16 +34,18 @@ Introduction
Functest is a project dealing with functional testing.
Functest produces its own internal test cases but can also be considered
as a framework to support feature and VNF onboarding project testing.
-Functest developed a TestAPI and defined a test collection framework
-that can be used by any OPNFV project.
Therefore there are many ways to contribute to Functest. You can:
* Develop new internal test cases
* Integrate the tests from your feature project
* Develop the framework to ease the integration of external test cases
- * Develop the API / Test collection framework
- * Develop dashboards or automatic reporting portals
+
+Additional tasks involving Functest but addressing all the test projects
+may also be mentioned:
+
+ * Develop the API / Test collection framework
+ * Develop dashboards or automatic reporting portals
This document describes how, as a developer, you may interact with the
Functest project. The first section details the main working areas of
@@ -42,8 +61,23 @@ Functest developer areas
Functest High level architecture
================================
-Functest is project delivering a test container dedicated to OPNFV.
+Functest is a project delivering test containers dedicated to OPNFV.
It includes the tools, the scripts and the test scenarios.
+Until Danube, Functest produced two Docker images based on Ubuntu 14.04:
+
+ * x86 Functest: https://hub.docker.com/r/opnfv/functest/
+ * aarch64 Functest: https://hub.docker.com/r/opnfv/functest_aarch64/
+
+In Euphrates, Alpine containers have been introduced in order to lighten the
+containers and to manage test slicing. The new containers are created according
+to the different tiers:
+
+ * functest-core: https://hub.docker.com/r/opnfv/functest-core/
+ * functest-healthcheck: https://hub.docker.com/r/opnfv/functest-healthcheck/
+ * functest-smoke: https://hub.docker.com/r/opnfv/functest-smoke/
+ * functest-features: TODO
+ * functest-components: TODO
+ * functest-vnf: TODO
Functest can be described as follow::
@@ -63,7 +97,7 @@ Functest can be described as follow::
Functest internal test cases
============================
-The internal test cases in Danube are:
+The internal test cases in Euphrates are:
* api_check
@@ -72,33 +106,37 @@ The internal test cases in Danube are:
* vping_ssh
* vping_userdata
* odl
+ * odl-netvirt
+ * odl-fds
* rally_full
* rally_sanity
* snaps_health_check
* tempest_full_parallel
* tempest_smoke_serial
+ * cloudify_ims
-By internal, we mean that this particular test cases have been
-developped and/or integrated by functest contributors and the associated
-code is hosted in the Functest repository.
+By internal, we mean that these particular test cases have been developed and/or
+integrated by Functest contributors and that the associated code is hosted in
+the Functest repository.
An internal case can be fully developed or a simple integration of
-upstream suites (e.g. Tempest/Rally developped in OpenStack are just
-integrated in Functest).
+upstream suites (e.g. Tempest/Rally developed in OpenStack, or the ODL suites,
+which are just integrated in Functest).
+
The structure of this repository is detailed in `[1]`_.
The main internal test cases are in the opnfv_tests subfolder of the
repository, the internal test cases are:
- * sdn: odl, onos
- * openstack: api_check, connection_check, snaps_health_check, vping_ssh, vping_userdata, tempest_*, rally_*, snaps_smoke
+ * sdn: odl, odl_netvirt, odl_fds, onos
+ * openstack: api_check, connection_check, snaps_health_check, vping_ssh, vping_userdata, tempest_*, rally_*
* vnf: cloudify_ims
-If you want to create a new test case you will have to create a new
-folder under the testcases directory.
+If you want to create a new test case you will have to create a new folder under
+the testcases directory (See next section for details).
Functest external test cases
============================
-The external test cases are inherited from other OPNFV projects,
-especially the feature projects.
+The external test cases are inherited from other OPNFV projects, especially the
+feature projects.
The external test cases are:
@@ -106,34 +144,36 @@ The external test cases are:
* bgpvpn
* doctor
* domino
- * odl-netvirt
* onos
* fds
- * multisite
- * netready
* orchestra_ims
* parser
* promise
* refstack_defcore
- * security_scan
* snaps_smoke
- * sfc-odl
+ * functest-odl-sfc
* vyos_vrouter
+External test cases integrated in previous versions but not released in
+Euphrates:
+
+ * copper
+ * netready
+ * security_scan
+
+
+The code to run these test cases is hosted in the repository of the project.
-The code to run these test cases may be directly in the repository of
-the project. We have also a **features** sub directory under opnfv_tests
-directory that may be used (it can be useful if you want to reuse
-Functest library).
Functest framework
==================
-Functest can be considered as a framework.
-Functest is release as a docker file, including tools, scripts and a CLI
-to prepare the environment and run tests.
-It simplifies the integration of external test suites in CI pipeline
-and provide commodity tools to collect and display results.
+Functest is a framework.
+
+Historically, Functest has been released as a Docker container, including tools,
+scripts and a CLI to prepare the environment and run tests.
+It simplifies the integration of external test suites in the CI pipeline and
+provides commodity tools to collect and display results.
Since Colorado, test categories also known as tiers have been created to
group similar tests, provide consistent sub-lists and at the end optimize
@@ -150,19 +190,23 @@ The tiers are:
* vnf
* stress
+Note that Functest deals with the healthcheck, smoke, features, components and
+vnf tiers. Performance and stress tiers are out of scope.
+
Functest abstraction classes
============================
-In order to harmonize test integration, 3 abstraction classes have been
-introduced in Danube:
+In order to harmonize test integration, abstraction classes have been
+introduced:
* testcase: base for any test case
+ * unit: run unit tests as a test case
* feature: abstraction for feature project
- * vnf_base: abstraction for vnf onboarding
+ * vnf: abstraction for vnf onboarding
-The goal is to unify the way to run test from Functest.
+The goal is to unify the way to run tests in Functest.
-feature and vnf_base inherit from testcase::
+Feature, unit and vnf inherit from testcase::
+-----------------------------------------+
| |
@@ -174,787 +218,94 @@ feature and vnf_base inherit from testcase::
| - check_criteria() |
| |
+-----------------------------------------+
- | |
- V V
- +--------------------+ +--------------------------+
- | | | |
- | feature | | vnf_base |
- | | | |
- | - prepare() | | - prepare() |
- | - execute() | | - deploy_orchestrator() |
- | - post() | | - deploy_vnf() |
- | - parse_results() | | - test_vnf() |
- | | | - clean() |
- | | | - execute() |
- | | | |
- +--------------------+ +--------------------------+
+ | |
+ V V
+ +--------------------+ +--------------+ +--------------------------+
+ | | | | | |
+ | feature | | unit | | vnf |
+ | | | | | |
+ | | | | | - prepare() |
+ | - execute() | | | | - deploy_orchestrator() |
+ | BashFeature class | | | | - deploy_vnf() |
+ | | | | | - test_vnf() |
+ | | | | | - clean() |
+ +--------------------+ +--------------+ +--------------------------+
+
+
+Testcase
+========
+.. raw:: html
+ :url: http://artifacts.opnfv.org/functest/docs/apidoc/functest.core.testcase.html
+
+Feature
+=======
+.. raw:: html
+ :url: http://artifacts.opnfv.org/functest/docs/apidoc/functest.core.feature.html
+
+Unit
+====
+.. raw:: html
+ :url: http://artifacts.opnfv.org/functest/docs/apidoc/functest.core.unit.html
+
+VNF
+===
+.. raw:: html
+ :url: http://artifacts.opnfv.org/functest/docs/apidoc/functest.core.vnf.html
+
+
+See `Functest framework overview`_ to get code samples.
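+
+A minimal sketch of a feature test case built on these abstraction classes is
+given below. It is illustrative only: the module path ``functest.core.feature``
+matches the apidoc above, but the class, logger and suite names are
+hypothetical::
+
+    import logging
+
+    from functest.core import feature
+
+
+    class MyFeature(feature.Feature):
+        """Hypothetical feature test case."""
+
+        _logger = logging.getLogger(__name__)
+
+        def execute(self, **kwargs):
+            # Run the real test suite here; in this sketch returning 0 is
+            # assumed to be reported as a success by the framework.
+            self._logger.info("running my hypothetical feature suite")
+            return 0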
Functest util classes
=====================
-In order to simplify the creation of test cases, Functest develops some
+In order to simplify the creation of test cases, Functest also provides some
functions that can be used by any feature or internal test cases.
Several features are supported such as logger, configuration management and
Openstack capabilities (snapshot, clean, tacker,..).
These functions can be found under <repo>/functest/utils and can be described as
-follows:
-
-functest/utils/
-|-- config.py
-|-- constants.py
-|-- env.py
-|-- functest_logger.py
-|-- functest_utils.py
-|-- openstack_clean.py
-|-- openstack_snapshot.py
-|-- openstack_tacker.py
-`-- openstack_utils.py
-
-Note that for Openstack, keystone v3 is now deployed by default by compass,
-fuel and joid in Danube. All installers still support keystone v2 (deprecated in
-next version).
-
-Test collection framework
-=========================
-
-The OPNFV testing group created a test collection database to collect
-the test results from CI:
-
-
- http://testresults.opnfv.org/test/swagger/spec.html
-
- Authentication: opnfv/api@opnfv
-
-Any test project running on any lab integrated in CI can push the
-results to this database.
-This database can be used to see the evolution of the tests and compare
-the results versus the installers, the scenarios or the labs.
-
-
-Overall Architecture
---------------------
-The Test result management can be summarized as follows::
-
- +-------------+ +-------------+ +-------------+
- | | | | | |
- | Test | | Test | | Test |
- | Project #1 | | Project #2 | | Project #N |
- | | | | | |
- +-------------+ +-------------+ +-------------+
- | | |
- V V V
- +-----------------------------------------+
- | |
- | Test Rest API front end |
- | http://testresults.opnfv.org/test |
- | |
- +-----------------------------------------+
- A |
- | V
- | +-------------------------+
- | | |
- | | Test Results DB |
- | | Mongo DB |
- | | |
- | +-------------------------+
- |
- |
- +----------------------+
- | |
- | test Dashboard |
- | |
- +----------------------+
-
-TestAPI description
--------------------
-The TestAPI is used to declare pods, projects, test cases and test
-results. Pods are the pods used to run the tests.
-The results pushed in the database are related to pods, projects and
-cases. If you try to push results of test done on non referenced pod,
-the API will return an error message.
-
-An additional method dashboard has been added to post-process
-the raw results in release Brahmaputra (deprecated in Colorado).
-
-The data model is very basic, 5 objects are created:
-
- * Pods
- * Projects
- * Testcases
- * Results
- * Scenarios
-
-The code of the API is hosted in the releng repository `[6]`_.
-The static documentation of the API can be found at `[17]`_.
-The TestAPI has been dockerized and may be installed locally in your
-lab. See `[15]`_ for details.
-
-The deployment of the TestAPI has been automated.
-A jenkins job manages:
- * the unit tests of the TestAPI
- * the creation of a new docker file
- * the deployment of the new TestAPI
- * the archive of the old TestAPI
- * the backup of the Mongo DB
-
-TestAPI Authorization
-~~~~~~~~~~~~~~~~~~~~~
-
-PUT/DELETE/POST operations of the TestAPI now require token based authorization. The token needs
-to be added in the request using a header 'X-Auth-Token' for access to the database.
-
-e.g::
- headers['X-Auth-Token']
-
-The value of the header i.e the token can be accessed in the jenkins environment variable
-*TestApiToken*. The token value is added as a masked password.
-
-.. code-block:: python
-
- headers['X-Auth-Token'] = os.environ.get('TestApiToken')
-
-The above example is in Python. Token based authentication has been added so that only ci pods
-jenkins job can have access to the database.
-
-Please note that currently token authorization is implemented but is not yet enabled.
-
- Automatic reporting
- ===================
-
- An automatic reporting page has been created in order to provide a
- consistent view of the scenarios.
- In this page, each scenario is evaluated according to test criteria.
- The code for the automatic reporting is available at `[8]`_.
-
- The results are collected from the centralized database every day and,
- per scenario. A score is calculated based on the results from the last
- 10 days. This score is the addition of single test scores. Each test
- case has a success criteria reflected in the criteria field from the
- results.
-
- Considering an instance of a scenario os-odl_l2-nofeature-ha, the
- scoring is the addition of the scores of all the runnable tests from the
- categories (tiers healthcheck, smoke and features)
- corresponding to this scenario.
-
-
- +---------------------+---------+---------+---------+---------+
- | Test | Apex | Compass | Fuel | Joid |
- +=====================+=========+=========+=========+=========+
- | vPing_ssh | X | X | X | X |
- +---------------------+---------+---------+---------+---------+
- | vPing_userdata | X | X | X | X |
- +---------------------+---------+---------+---------+---------+
- | tempest_smoke_serial| X | X | X | X |
- +---------------------+---------+---------+---------+---------+
- | rally_sanity | X | X | X | X |
- +---------------------+---------+---------+---------+---------+
- | odl | X | X | X | X |
- +---------------------+---------+---------+---------+---------+
- | promise | | | X | X |
- +---------------------+---------+---------+---------+---------+
- | doctor | X | | X | |
- +---------------------+---------+---------+---------+---------+
- | security_scan | X | | | |
- +---------------------+---------+---------+---------+---------+
- | parser | | | X | |
- +---------------------+---------+---------+---------+---------+
- | copper | X | | | X |
- +---------------------+---------+---------+---------+---------+
- src: colorado (see release note for the last matrix version)
-
- All the testcases listed in the table are runnable on
- os-odl_l2-nofeature scenarios.
- If no result is available or if all the results are failed, the test
- case get 0 point.
- If it was successful at least once but not anymore during the 4 runs,
- the case get 1 point (it worked once).
- If at least 3 of the last 4 runs were successful, the case get 2 points.
- If the last 4 runs of the test are successful, the test get 3 points.
-
- In the example above, the target score for fuel/os-odl_l2-nofeature-ha
- is 3x6 = 18 points.
-
- The scenario is validated per installer when we got 3 points for all
- individual test cases (e.g 18/18).
- Please note that complex or long duration tests are not considered for
- the scoring. The success criteria are not always easy to define and may
- require specific hardware configuration. These results however provide
- a good level of trust on the scenario.
-
- A web page is automatically generated every day to display the status.
- This page can be found at `[9]`_. For the status, click on Status menu,
- you may also get feedback for vims and tempest_smoke_serial test cases.
-
- Any validated scenario is stored in a local file on the web server. In
- fact as we are using a sliding windows to get results, it may happen
- that a successful scenarios is no more run (because considered as
- stable) and then the number of iterations (4 needed) would not be
- sufficient to get the green status.
-
- Please note that other test cases (e.g. sfc_odl, bgpvpn) need also
- ODL configuration addons and as a consequence specific scenario.
- There are not considered as runnable on the generic odl_l2 scenario.
-
-Dashboard
-=========
+follows::
-Dashboard is used to provide a consistent view of the results collected
-in CI.
-The results showed on the dashboard are post processed from the Database,
-which only contains raw results.
+ functest/utils/
+ |-- config.py
+ |-- constants.py
+ |-- decorators.py
+ |-- env.py
+ |-- functest_utils.py
+ |-- openstack_clean.py
+ |-- openstack_snapshot.py
+ |-- openstack_tacker.py
+ `-- openstack_utils.py
-In Brahmaputra, we created a basic dashboard.
-Since Colorado, it was decided to adopt ELK framework. Mongo DB results
-are extracted to feed Elasticsearch database (`[7]`_).
+Please note that it is also possible to use the SNAPS utils. SNAPS `[4]`_ is an
+OPNFV project providing OpenStack utilities.
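+
+As an illustration, a test case may reuse ``functest_utils`` to execute an
+external command and keep its output (a minimal sketch: the command and the log
+path are examples only)::
+
+    from functest.utils import functest_utils
+
+    # Run an external command and store its output under the results directory.
+    ret = functest_utils.execute_command(
+        "echo Hello Functest",
+        output_file="/home/opnfv/functest/results/my_feature.log")
+    assert ret == 0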
-A script was developed to build elasticsearch data set. This
-script can be found in `[16]`_.
-For next versions, it was decided to integrated bitergia dashboard.
-Bitergia already provides a dashboard for code and infrastructure.
-A new Test tab will be added. The dataset will be built by consuming
-the TestAPI.
-
-
-=======
-How TOs
+TestAPI
=======
+Functest uses the Test collection framework and the TestAPI developed by
+the OPNFV community. See `OPNFV Test collection framework`_ for details.
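+
+As a quick illustration, a result can be pushed to the TestAPI with a single
+HTTP POST (a minimal sketch modelled on the former push_results_to_db helper;
+the field values are examples only and an X-Auth-Token header may be required
+where token authorization is enabled)::
+
+    import json
+
+    import requests
+
+    URL = "http://testresults.opnfv.org/test/api/v1/results"
+    PARAMS = {"project_name": "functest", "case_name": "vping_ssh",
+              "installer": "fuel", "pod_name": "my-pod",
+              "version": "euphrates", "details": {}}
+    requests.post(URL, data=json.dumps(PARAMS),
+                  headers={'Content-Type': 'application/json'})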
-How Functest works?
-===================
-
-The installation and configuration of the Functest docker image is
-described in `[1]`_.
-
-The procedure to start tests is described in `[2]`_
-
-
-How can I contribute to Functest?
-=================================
-
-If you are already a contributor of any OPNFV project, you can
-contribute to functest. If you are totally new to OPNFV, you must first
-create your Linux Foundation account, then contact us in order to
-declare you in the repository database.
-
-We distinguish 2 levels of contributors:
-
- * the standard contributor can push patch and vote +1/0/-1 on any Functest patch
- * The commitor can vote -2/-1/0/+1/+2 and merge
-
-Functest commitors are promoted by the Functest contributors.
-
-
-Where can I find some help to start?
-====================================
-
-This guide is made for you. You can also have a look at the project wiki
-page `[10]`_.
-There are references on documentation, video tutorials, tips...
-
-You can also directly contact us by mail with [Functest] prefix in the
-title at opnfv-tech-discuss@lists.opnfv.org or on the IRC chan
-#opnfv-functest.
-
-
-What kind of testing do you do in Functest?
-===========================================
-
-Functest is focusing on Functional testing. The results must be PASS or
-FAIL. We do not deal with performance and/or qualification tests.
-We consider OPNFV as a black box and execute our tests from the jumphost
-according to Pharos reference technical architecture.
-
-Upstream test suites are integrated (Rally/Tempest/ODL/ONOS,...).
-If needed Functest may bootstrap temporarily testing activities if they
-are identified but not covered yet by an existing testing project (e.g
-security_scan before the creation of the security repository)
-
-
-How are test constraints defined?
-=================================
-
-Test constraints are defined according to 2 parameters:
-
- * The scenario (DEPLOY_SCENARIO env variable)
- * The installer (INSTALLER_TYPE env variable)
-
-A scenario is a formal description of the system under test.
-The rules to define a scenario are described in `[4]`_
-
-These 2 constraints are considered to determinate if the test is runnable
-or not (e.g. no need to run onos suite on odl scenario).
-
-In the test declaration for CI, the test owner shall indicate these 2
-constraints. The file testcases.yaml `[5]`_ must be patched in git to
-include new test cases. A more elaborated system based on template is
-planned for next releases
-
-For each dependency, it is possible to define a regex::
-
- name: promise
- criteria: 'success_rate == 100%'
- description: >-
- Test suite from Promise project.
- dependencies:
- installer: '(fuel)|(joid)'
- scenario: ''
-
-In the example above, it means that promise test will be runnable only
-with joid or fuel installers on any scenario.
-
-The vims criteria means any installer and exclude onos and odl with
-bgpvpn scenarios::
-
- name: vims
- criteria: 'status == "PASS"'
- description: >-
- This test case deploys an OpenSource vIMS solution from Clearwater
- using the Cloudify orchestrator. It also runs some signaling traffic.
- dependencies:
- installer: ''
- scenario: '(ocl)|(nosdn)|^(os-odl)((?!bgpvpn).)*$'
-
-
-How to write and check constraint regex?
-=======================================
-
-Regex are standard regex. You can have a look at `[11]`_
-
-You can also easily test your regex via an online regex checker such as `[12]`_.
-Put your scenario in the TEST STRING window (e.g. os-odl_l3-ovs-ha), put
-your regex in the REGULAR EXPRESSION window, then you can test your rule.
-
-
-How to know which test I can run?
-=================================
-
-You can use the API `[13]`_. The static declaration is in git `[5]`_
-
-If you are in a Functest docker container (assuming that the
-environment has been prepared): just use the CLI.
-
-You can get the list per Test cases or by Tier::
-
- # functest testcase list
- healthcheck
- vping_ssh
- vping_userdata
- tempest_smoke_serial
- rally_sanity
- odl
- doctor
- security_scan
- tempest_full_parallel
- rally_full
- vims
- # functest tier list
- - 0. healthcheck:
- ['healthcheck']
- - 1. smoke:
- ['vping_ssh', 'vping_userdata', 'tempest_smoke_serial', 'rally_sanity']
- - 2. sdn_suites:
- ['odl']
- - 3. features:
- ['doctor', 'security_scan']
- - 4. openstack:
- ['tempest_full_parallel', 'rally_full']
- - 5. vnf:
- ['vims']
-
-
-How to manually start Functest tests?
-=====================================
-
-Assuming that you are connected on the jumphost and that the system is
-"Pharos compliant", i.e the technical architecture is compatible with
-the one defined in the Pharos project::
-
- # docker pull opnfv/functest:latest
- # envs="-e INSTALLER_TYPE=fuel -e INSTALLER_IP=10.20.0.2 -e DEPLOY_SCENARIO=os-odl_l2-nofeature-ha -e CI_DEBUG=true"
- # sudo docker run --privileged=true -id ${envs} opnfv/functest:latest /bin/bash
-
-
-Then you must connect to the docker container and source the
-credentials::
-
- # docker ps (copy the id)
- # docker exec -ti <container_id> bash
- # source $creds
-
-
-You must first check if the environment is ready::
-
- # functest env status
- Functest environment ready to run tests.
-
-
-If not ready, prepare the env by launching::
-
- # functest env prepare
- Functest environment ready to run tests.
-
-Once the Functest env is ready, you can use the CLI to start tests.
-
-You can run test cases per test case or per tier:
- # functest testcase run <case name> or # functest tier run <tier name>
-
-
-e.g::
-
- # functest testcase run tempest_smoke_serial
- # functest tier run features
-
-
-If you want to run all the tests you can type::
-
- # functest testcase run all
-
-
-If you want to run all the tiers (same at the end that running all the
-test cases) you can type::
-
- # functest tier run all
-
-
-How to declare my tests in Functest?
-====================================
-
-If you want to add new internal test cases, you can submit patch under
-the testcases directory of Functest repository.
-
-For feature test integration, the code can be kept into your own
-repository. The Functest files to be modified are:
-
- * functest/docker/Dockerfile: get your code in Functest container
- * functest/ci/testcases.yaml: reference your test and its associated constraints
-
-
-Dockerfile
-----------
-
-This file lists the repositories (internal or external) to be cloned in
-the Functest container. You can also add external packages::
-
- RUN git clone https://gerrit.opnfv.org/gerrit/<your project> ${REPOS_DIR}/<your project>
-
-testcases.yaml
---------------
-
-All the test cases that must be run from CI / CLI must be declared in
-ci/testcases.yaml.
-This file is used to get the constraints related to the test::
-
- name: <my_super_test_case>
- criteria: <not used yet in Colorado, could be > 'PASS', 'rate > 90%'
- description: >-
- <the description of your super test suite>
- dependencies:
- installer: regex related to installer e.g. 'fuel', '(apex)||(joid)'
- scenario: regex related to the scenario e.g. 'ovs*no-ha'
-
-
-You must declare your test case in one of the category (tier).
-
-If you are integrating test suites from a feature project, the default
-category is **features**.
-
-
-How to select my list of tests for CI?
-======================================
-
-Functest can be run automatically from CI, a jenkins job is usually
-called after an OPNFV fresh installation.
-By default we try to run all the possible tests (see `[14]` called from
-Functest jenkins job)::
-
- cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t all ${flags}"
-
-
-Each case can be configured as daily and/or weekly task.
-Weekly tasks are used for long duration or experimental tests.
-Daily tasks correspond to the minimum set of test suites to validate a scenario.
-
-When executing run_tests.py, a check based on the jenkins build tag will
-be considered to detect whether it is a daily and/or a weekly test.
-
-in your CI you can customize the list of test you want to run by case or
-by tier, just change the line::
-
- cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t <whatever you want> ${flags}"
-
-e.g.::
-
- cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t healthcheck,smoke ${flags}"
-
-This command will run all the test cases of the first 2 tiers, i.e.
-healthcheck, connection_check, api_check, vping_ssh, vping_userdata,
-snaps_smoke, tempest_smoke_serial and rally_sanity.
-
-
-How to push your results into the Test Database
-===============================================
-
-The test database is used to collect test results. By default it is
-enabled only for CI tests from Production CI pods.
-
-The architecture and associated API is described in previous chapter.
-If you want to push your results from CI, you just have to call the API
-at the end of your script.
-
-You can also reuse a python function defined in functest_utils.py::
-
- def push_results_to_db(db_url, case_name, logger, pod_name,version, payload):
- """
- POST results to the Result target DB
- """
- url = db_url + "/results"
- installer = get_installer_type(logger)
- params = {"project_name": "functest", "case_name": case_name,
- "pod_name": pod_name, "installer": installer,
- "version": version, "details": payload}
-
- headers = {'Content-Type': 'application/json'}
- try:
- r = requests.post(url, data=json.dumps(params), headers=headers)
- if logger:
- logger.debug(r)
- return True
- except Exception, e:
- print "Error [push_results_to_db('%s', '%s', '%s', '%s', '%s')]:" \
- % (db_url, case_name, pod_name, version, payload), e
- return False
-
-
-Where can I find the documentation on the test API?
-===================================================
-
-http://artifacts.opnfv.org/releng/docs/testapi.html
-
-
-How to exclude Tempest case from default Tempest smoke suite?
-=============================================================
-
-Tempest default smoke suite deals with 165 test cases.
-Since Colorado the success criteria is 100%, i.e. if 1 test is failed the
-success criteria is not matched for the scenario.
-
-It is necessary to exclude some test cases that are expected to fail due to
-known upstream bugs (see release notes).
-
-A file has been created for such operation: https://git.opnfv.org/cgit/functest/tree/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt.
-
-It can be described as follows::
-
- -
- scenarios:
- - os-odl_l2-bgpvpn-ha
- - os-odl_l2-bgpvpn-noha
- installers:
- - fuel
- - apex
- tests:
- - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
- - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details
- - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
- - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details
- - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
- - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
-Please note that each exclusion must be justified. the goal is not to exclude
-test cases because they do not pass. Several scenarios reached the 100% criteria.
-So it is expected in the patch submitted to exclude the cases to indicate the
-reasons of the exclusion.
-
-
-How do I know the Functest status of a scenario?
-================================================
-
-A Functest automatic reporting page is generated daily.
-This page is dynamically created through a cron job and is based on the results
-stored in the Test DB.
-You can access this reporting page: http://testresults.opnfv.org/reporting
-
-See https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617 for details.
-
-
-I have tests, to which category should I declare them?
-======================================================
-
-CATEGORIES/TIERS description:
-
-+----------------+-------------------------------------------------------------+
-| healthcheck | Simple OpenStack healthcheck tests case that validates the |
-| | basic operations in OpenStack |
-+----------------+-------------------------------------------------------------+
-| Smoke | Set of smoke test cases/suites to validate the most common |
-| | OpenStack and SDN Controller operations |
-+----------------+-------------------------------------------------------------+
-| Features | Test cases that validate a specific feature on top of OPNFV.|
-| | Those come from Feature projects and need a bit of support |
-| | for integration |
-+----------------+-------------------------------------------------------------+
-| Components | Advanced OpenStack tests: Full Tempest, Full Rally |
-+----------------+-------------------------------------------------------------+
-| Performance | Out of Functest Scope |
-+----------------+-------------------------------------------------------------+
-| VNF | Test cases related to deploy an open source VNF including |
-| | an orchestrator |
-+----------------+-------------------------------------------------------------+
-
-The main ambiguity could be between features and VNF.
-In fact sometimes you have to spawn VMs to demonstrate the capabilities of the
-feature you introduced.
-We recommend to declare your test in the feature category.
-
-VNF category is really dedicated to test including:
-
- * creation of resources
- * deployment of an orchestrator/VNFM
- * deployment of the VNF
- * test of the VNFM
- * free resources
-
-The goal is not to study a particular feature on the infrastructure but to have
-a whole end to end test of a VNF automatically deployed in CI.
-Moreover VNF are run in weekly jobs (one a week), feature tests are in daily
-jobs and use to get a scenario score.
-
-Where are the logs?
-===================
-
-Functest deals with internal and external testcases. Each testcase can generate
-logs.
-
-Since Colorado we introduce the possibility to push the logs to the artifact.
-A new script (https://git.opnfv.org/releng/tree/utils/push-test-logs.sh) has
-been created for CI.
-
-When called, and assuming that the POD is authorized to push the logs to
-artifacts, the script will push all the results or logs locally stored under
-/home/opnfv/functest/results/.
-
-If the POD is not connected to CI, logs are not pushed.
-But in both cases, logs are stored in /home/opnfv/functest/results in the
-container.
-Projects are encouraged to push their logs here.
-
-Since Colorado it is also easy for feature project to integrate this feature by
-adding the log file as output_file parameter when calling execute_command from
-functest_utils library
-
- ret_val = functest_utils.execute_command(cmd, output_file=log_file)
-
-
-How does Functest deal with VNF onboarding?
-===========================================
-
-VNF onboarding has been introduced in Brahmaputra through the automation of a
-clearwater vIMS deployed thanks to cloudify orchestrator.
-
-This automation has been described at OpenStack summit Barcelona:
-https://youtu.be/Jr4nG74glmY
-
-The goal of Functest consists in testing OPNFV from a functional perspective:
-the NFVI and/or the features developed in OPNFV. Feature test suites are
-provided by the feature project. Functest just simplifies the integration of
-the suite into the CI and gives a consolidated view of the tests per scenario.
-
-Functest does not develop VNFs.
-
-Functest does not test any MANO stack.
-
-OPNFV projects dealing with VNF onboarding
-------------------------------------------
-
-Testing VNF is not the main goal however it gives interesting and realistic
-feedback on OPNFV as a Telco cloud.
-
-Onboarding VNF also allows to test a full stack: orchestrator + VNF.
-
-Functest is VNF and MANO stack agnostic.
-
-An internship has been initiated to reference the Open Source VNF: Intern
-Project Open Source VNF catalog
-
-New projects dealing with orchestrators or VNFs are candidate for Danube.
-
-The 2 projects dealing with orchestration are:
-
- * orchestra (Openbaton)
- * opera (Open-O)
-
-The Models project address various goals for promoting availability and
-convergence of information and/or data models related to NFV service/VNF
-management, as being defined in standards (SDOs) and as developed in open
-source projects.
-
-Functest VNF onboarding
------------------------
-
-In order to simplify VNF onboarding a new abstraction class has been developed
-in Functest.
-
-This class is based on vnf_base and can be described as follow:
-
- +------------+ +--------------+
- | test_base |------------>| vnf_base |
- +------------+ +--------------+
- |_ prepare
- |_ deploy_orchestrator (optional)
- |_ deploy_vnf
- |_ test_vnf
- |_ clean
-
-
-Several methods are declared in vnf_base:
-
- * prepare
- * deploy_orchestrator
- * deploy_vnf
- * test_vnf
- * clean
-
-deploy_vnf and test_vnf are mandatory.
-
-prepare will create a user and a project.
-
-How to declare your orchestrator/VNF?
--------------------------------------
-1) test declaration
-
-You must declare your testcase in the file <Functest repo>/functest/ci/testcases.yaml
-
-2) configuration
-
-You can precise some configuration parameters in config_functest.yaml
-
-3) implement your test
+Reporting
+=========
+A web page is automatically generated every day to display the status, based on
+Jinja2 templates `[3]`_.
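+
+A minimal illustration of that templating step (the names and the inline
+template are purely hypothetical; the real templates live in the reporting
+repository `[3]`_)::
+
+    from jinja2 import Template
+
+    # Render a tiny status page from a scenario name and its score.
+    TEMPLATE = Template(u"<h1>{{ scenario }}</h1><p>score: {{ score }}</p>")
+    with open("status.html", "w") as page:
+        page.write(TEMPLATE.render(scenario="os-nosdn-nofeature-ha",
+                                   score="18/18"))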
-Create your own VnfOnboarding file
-you must create your entry point through a python class as referenced in the
-configuration file
+Dashboard
+=========
-e.g. aaa => creation of the file <Functest repo>/functest/opnfv_tests/vnf/aaa/aaa.py
+Additional dashboarding is managed at the testing group level; see the
+`OPNFV Testing dashboard`_.
-the class shall inherit vnf_base.
-You must implement the methods deploy_vnf() and test_vnf() and may implement
-deploy_orchestrator()
-you can call the code from your repo (but need to add the repo in Functest if
-it is not the case)
+=======
+How TOs
+=======
-4) success criteria
+See the `How to section`_ on the Functest wiki.
-So far we considered the test as PASS if vnf_deploy and test_vnf is PASS
-(see example in aaa).
==========
References
@@ -964,44 +315,16 @@ _`[1]`: http://artifacts.opnfv.org/functest/docs/configguide/index.html Functest
_`[2]`: http://artifacts.opnfv.org/functest/docs/userguide/index.html functest user guide
-_`[3]`: https://wiki.opnfv.org/opnfv_test_dashboard Brahmaputra dashboard
-
-_`[4]`: https://wiki.opnfv.org/display/INF/CI+Scenario+Naming
-
-_`[5]`: https://git.opnfv.org/cgit/functest/tree/ci/testcases.yaml
+_`[3]`: https://git.opnfv.org/cgit/releng/tree/utils/test/reporting
-_`[6]`: https://git.opnfv.org/cgit/releng/tree/utils/test/result_collection_api
+_`[4]`: https://git.opnfv.org/snaps/
-_`[7]`: https://git.opnfv.org/cgit/releng/tree/utils/test/scripts
+_`Functest framework overview`: http://testresults.opnfv.org/functest/framework/index.html
-_`[8]`: https://git.opnfv.org/cgit/releng/tree/utils/test/reporting/functest
+_`OPNFV Test collection framework`: TODO
-_`[9]`: http://testresults.opnfv.org/reporting/
+_`OPNFV Testing dashboard`: https://opnfv.biterg.io/goto/283dba93ca18e95964f852c63af1d1ba
-_`[10]`: https://wiki.opnfv.org/opnfv_functional_testing
-
-_`[11]`: https://docs.python.org/2/howto/regex.html
-
-_`[12]`: https://regex101.com/
-
-_`[13]`: http://testresults.opnfv.org/test/api/v1/projects/functest/cases
-
-_`[14]`: https://git.opnfv.org/cgit/releng/tree/jjb/functest/functest-daily.sh
-
-_`[15]`: https://git.opnfv.org/cgit/releng/tree/utils/test/result_collection_api/README.rst
-
-_`[16]`: https://git.opnfv.org/cgit/releng/tree/utils/test/scripts/mongo_to_elasticsearch.py
-
-_`[17]`: http://artifacts.opnfv.org/releng/docs/testapi.html
-
-OPNFV main site: http://www.opnfv.org
-
-OPNFV functional test page: https://wiki.opnfv.org/opnfv_functional_testing
+_`How to section`: https://wiki.opnfv.org/pages/viewpage.action?pageId=7768932
IRC support chan: #opnfv-functest
-
-_`OpenRC`: http://docs.openstack.org/user-guide/common/cli_set_environment_variables_using_openstack_rc.html
-
-_`Rally installation procedure`: https://rally.readthedocs.org/en/latest/tutorial/step_0_installation.html
-
-_`config_functest.yaml` : https://git.opnfv.org/cgit/functest/tree/functest/ci/config_functest.yaml
diff --git a/docs/testing/user/configguide/ci.rst b/docs/testing/user/configguide/ci.rst
new file mode 100644
index 000000000..384bc34e5
--- /dev/null
+++ b/docs/testing/user/configguide/ci.rst
@@ -0,0 +1,50 @@
+Integration in CI
+=================
+In CI we use the Docker image and execute the appropriate commands within the
+container from Jenkins.
+
+Docker creation in set-functest-env builder `[3]`_::
+
+ envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
+ [...]
+ docker pull opnfv/functest:$DOCKER_TAG >/dev/null
+ cmd="sudo docker run -id ${envs} ${volumes} ${custom_params} ${TESTCASE_OPTIONS} opnfv/functest:${DOCKER_TAG} /bin/bash"
+ echo "Functest: Running docker run command: ${cmd}"
+ ${cmd} >${redirect}
+ sleep 5
+ container_id=$(docker ps | grep "opnfv/functest:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+ echo "Container ID=${container_id}"
+ if [ -z ${container_id} ]; then
+ echo "Cannot find opnfv/functest container ID ${container_id}. Please check if it is existing."
+ docker ps -a
+ exit 1
+ fi
+ echo "Starting the container: docker start ${container_id}"
+ docker start ${container_id}
+ sleep 5
+ docker ps >${redirect}
+ if [ $(docker ps | grep "opnfv/functest:${DOCKER_TAG}" | wc -l) == 0 ]; then
+ echo "The container opnfv/functest with ID=${container_id} has not been properly started. Exiting..."
+ exit 1
+ fi
+
+ cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+ echo "Executing command inside the docker: ${cmd}"
+ docker exec ${container_id} ${cmd}
+
+
+Test execution in functest-all builder `[3]`_::
+
+ branch=${GIT_BRANCH##*/}
+ echo "Functest: run $FUNCTEST_SUITE_NAME on branch ${branch}"
+ cmd="functest testcase run $FUNCTEST_SUITE_NAME"
+ fi
+ container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
+ docker exec $container_id $cmd
+ ret_value=$?
+ exit $ret_value
+
+Docker cleanup in the functest-cleanup builder `[3]`_, calling docker rm and docker rmi
+
+
+.. _`[3]`: https://git.opnfv.org/releng/tree/jjb/functest/functest-daily-jobs.yml
diff --git a/docs/testing/user/configguide/configguide.rst b/docs/testing/user/configguide/configguide.rst
index 03b5c7135..716c8a135 100644
--- a/docs/testing/user/configguide/configguide.rst
+++ b/docs/testing/user/configguide/configguide.rst
@@ -1,24 +1,30 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. SPDX-License-Identifier: CC-BY-4.0
+Installation and configuration (Ubuntu)
+=======================================
+
+The historical Dockerfile is based on Ubuntu. It has been maintained for
+Euphrates.
Pulling the Docker image
------------------------
Pull the Functest Docker image ('opnfv/functest') from the public
-dockerhub registry under the OPNFV account: [dockerhub_], with the
+dockerhub registry under the OPNFV account: [`dockerhub`_], with the
following docker command::
docker pull opnfv/functest:<TagIdentifier>
where <TagIdentifier> identifies a release of the Functest docker
-container image in the public dockerhub registry. There are many tags
+container image in the public Dockerhub registry. There are many tags
created automatically by the CI mechanisms, and you must ensure you
pull an image with the **correct tag** to match the OPNFV software
release installed in your environment. All available tagged images can
be seen from location [FunctestDockerTags_]. For example, when running
on the first official release of the OPNFV Danube system platform,
-tag "danube.1.0" is needed. Pulling other tags might cause some
-problems while running the tests.
+tag "danube.1.0" is needed. For the second and third releases, the tag
+"danube.2.0" and "danube.3.0" can be used respectively.
+Pulling other tags might cause some problems while running the tests.
Docker images pulled without a tag specifier bear the implicitly
assigned label "latest". If you need to specifically pull the latest
Functest docker image, then omit the tag argument::
@@ -29,16 +35,19 @@ After pulling the Docker image, check that it is available with the
following docker command::
[functester@jumphost ~]$ docker images
- REPOSITORY TAG IMAGE ID CREATED SIZE
- opnfv/functest latest 8cd6683c32ae 2 weeks ago 1.321 GB
- opnfv/functest danube.1.0 13fa54a1b238 4 weeks ago 1.29 GB
- opnfv/functest colorado.1.0 94b78faa94f7 9 weeks ago 968 MB
+ REPOSITORY TAG IMAGE ID CREATED SIZE
+ opnfv/functest latest 8cd6683c32ae 2 weeks ago 1.321 GB
+ opnfv/functest danube.2.0 d2c174a91911 7 minutes ago 1.471 GB
+ opnfv/functest danube.1.0 13fa54a1b238 4 weeks ago 1.29 GB
The Functest docker container environment can -in principle- be also
used with non-OPNFV official installers (e.g. 'devstack'), with the
**disclaimer** that support for such environments is outside of the
scope and responsibility of the OPNFV project.
+Please note that Alpine containers have been introduced in Euphrates. See the
+Alpine section for details.
+
Accessing the Openstack credentials
-----------------------------------
OpenStack credentials are mandatory and must be provided to Functest.
@@ -184,8 +193,11 @@ when performing manual test scenarios::
text can be sent to the test results file / log files
and also to the standard console output.
+Installer Tips
+--------------
+
Apex Installer Tips
--------------------
+^^^^^^^^^^^^^^^^^^^
Some specific tips are useful for the Apex Installer case. If not using
Apex Installer; ignore this section.
@@ -231,7 +243,7 @@ illustration purposes::
opnfv/functest /bin/bash
Compass installer local development env usage Tips
---------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In the compass-functest local test case check and development environment,
in order to get openstack service inside the functest container, some
parameters should be configured during container creation, which are
@@ -253,15 +265,22 @@ Tag omitted means the latest docker image::
To make a file used for the environment, such as 'functest-docker-env'::
- OS_AUTH_URL=http://172.16.1.222:35357/v2.0
+ CINDER_ENDPOINT_TYPE=publicURL
+ NOVA_ENDPOINT_TYPE=publicURL
+ OS_ENDPOINT_TYPE=publicURL
+ OS_INTERFACE=publicURL
OS_USERNAME=admin
- OS_PASSWORD=console
- OS_TENANT_NAME=admin
- OS_VOLUME_API_VERSION=2
+ OS_PASSWORD='990232e0885da343ac805523322d'
OS_PROJECT_NAME=admin
- INSTALLER_TYPE=compass
- INSTALLER_IP=192.168.200.2
- EXTERNAL_NETWORK=ext-net
+ OS_TENANT_NAME=admin
+ OS_AUTH_URL=https://192.16.1.222:5000/v3
+ OS_NO_CACHE=1
+ OS_USER_DOMAIN_NAME=Default
+ OS_PROJECT_DOMAIN_NAME=Default
+ OS_REGION_NAME=RegionOne
+ OS_IDENTITY_API_VERSION=3
+ OS_AUTH_VERSION=3
+
Note: please adjust the content according to the environment, such as
'TENANT_ID' maybe used for some special cases.
@@ -293,31 +312,21 @@ Inside the Functest docker container, the following directory structure
should now be in place::
`-- home
- `-- opnfv
- |-- functest
- | |-- conf
- | |-- data
- | `-- results
- `-- repos
- |-- bgpvpn
- |-- copper
- |-- doctor
- |-- domino
- |-- functest
- |-- kingbird
- |-- odl_test
- |-- onos
- |-- parser
- |-- promise
- |-- rally
- |-- refstack-client
- |-- releng
- |-- sdnvpn
- |-- securityscanning
- |-- sfc
- |-- tempest
- |-- vims_test
- `-- vnfs
+ | `-- opnfv
+ | |-- functest
+ | | |-- conf
+ | | |-- data
+ | | |-- images
+ | | `-- results
+ | `-- repos
+ | |-- onos
+ | |-- doctor
+ | `-- vnfs
+ -- src
+ |-- tempest
+ |-- vims-test
+ |-- odl_test
+ `-- fds
Underneath the '/home/opnfv/' directory, the Functest docker container
includes two main directories:
@@ -334,83 +343,132 @@ includes two main directories:
The structure under the **functest** repository can be described as
follows::
- . |-- INFO
- |-- LICENSE
- |-- requirements.txt
- |-- run_unit_tests.sh
- |-- setup.py
- |-- test-requirements.txt
- |-- commons
- | |-- ims
- | |-- mobile
- | `--traffic-profile-guidelines.rst
- |-- docker
- | |-- Dockerfile
- | |-- config_install_env.sh
- | `-- docker_remote_api
- |-- docs
- | |-- com
- | |-- configguide
- | |-- devguide
- | |-- images
- | |-- internship
- | |-- release-notes
- | |-- results
- | `--userguide
- |-- functest
- |-- __init__.py
- |-- ci
- | |-- __init__.py
- | |-- check_deployment.py
- | |-- config_functest.yaml
- | |-- config_patch.yaml
- | |-- generate_report.py
- | |-- prepare_env.py
- | |-- run_tests.py
- | |-- testcases.yaml
- | |-- tier_builder.py
- | `-- tier_handler.py
- |-- cli
- | |-- __init__.py
- | |-- cli_base.py
- | |-- commands
- | |-- functest-complete.sh
- | `-- setup.py
- |-- core
- | |-- __init__.py
- | |-- feature.py
- | |-- pytest_suite_runner.py
- | |-- testcase.py
- | |-- vnf_base.py
- |-- opnfv_tests
- | |-- __init__.py
- | |-- features
- | |-- mano
- | |-- openstack
- | |-- sdn
- | |-- security_scan
- | `-- vnf
- |-- tests
- | |-- __init__.py
- | `-- unit
- `-- utils
- |-- __init__.py
- |-- config.py
- |-- constants.py
- |-- env.py
- |-- functest_logger.py
- |-- functest_utils.py
- |-- openstack
- |-- openstack_clean.py
- |-- openstack_snapshot.py
- |-- openstack_tacker.py
- `-- openstack_utils.py
-
-
- (Note: All *.pyc files removed from above list for brevity...)
-
-We may distinguish several directories, the first level has 4 directories:
-
+ |-- INFO
+ |-- LICENSE
+ |-- api
+ | `-- apidoc
+ |-- build.sh
+ |-- commons
+ | |-- docker
+ | |-- Dockerfile
+ | |-- Dockerfile.aarch64.patch
+ | |-- components
+ | |-- config_install_env.sh
+ | |-- core
+ | |-- docker_remote_api
+ | |-- features
+ | |-- healthcheck
+ | |-- smoke
+ | |-- vnf
+ | `-- thirdparty-requirements.txt
+ |-- docs
+ | |-- com
+ | |-- images
+ | |-- release
+ | | `-- release-notes
+ | |-- results
+ | | testing
+ | | |-- developer
+ | | `-- user
+ | | |-- configguide
+ | | `-- userguide
+ `-- functest
+ |-- api
+ | |-- base.py
+ | |-- server.py
+ | |-- urls.py
+ | |-- common
+ | | |-- api_utils.py
+ | | `-- error.py
+ | `-- resources
+ | `-- v1
+ | |-- creds.py
+ | |-- envs.py
+ | |-- testcases.py
+ | `-- tiers.py
+ |-- ci
+ │   |-- check_deployment.py
+ │   |-- config_aarch64_patch.yaml
+ │   |-- config_functest.yaml
+ │   |-- config_patch.yaml
+ │   |-- download_images.sh
+ │   |-- installer_params.yaml
+ │   |-- logging.ini
+ │   |-- prepare_env.py
+ │   |-- rally_aarch64_patch.conf
+ │   |-- run_tests.py
+ │   |-- testcases.yaml
+ │   |-- tier_builder.py
+ │   `-- tier_handler.py
+ |-- cli
+ │   |-- cli_base.py
+ │   |-- commands
+ │   │   |-- cli_env.py
+ │   │   |-- cli_os.py
+ │   │   |-- cli_testcase.py
+ │   │   `-- cli_tier.py
+ │   |-- functest-complete.sh
+ |-- core
+ │   |-- feature.py
+ │   |-- testcase.py
+ │   |-- unit.py
+ │   `-- vnf.py
+ |-- energy
+ │   |-- energy.py
+ │   `-- energy.pyc
+ |-- opnfv_tests
+ │   |-- mano
+ │   │   |-- orchestra.py
+ │   |-- openstack
+ │   │   |-- rally
+ │   │   |-- refstack_client
+ │   │   |-- snaps
+ │   │   |-- tempest
+ │   │   `-- vping
+ │   |-- sdn
+ │   │   |-- odl
+ │   │   `-- onos
+ │   `-- vnf
+ │   |-- aaa
+ │   |-- ims
+ │   `-- router
+ |-- tests
+ │   `-- unit
+ │   |-- ci
+ │   |-- cli
+ │   |-- core
+ │   |-- energy
+ │   |-- features
+ │   |-- odl
+ │   |-- openstack
+ │   |-- opnfv_tests
+ │   |-- test_utils.py
+ │   |-- utils
+ │   `-- vnf
+ |-- utils
+ | |-- config.py
+ | |-- constants.py
+ | |-- decorators.py
+ | |-- env.py
+ | |-- functest_utils.py
+ | |-- functest_vacation.py
+ | |-- openstack_clean.py
+ | |-- openstack_snapshot.py
+ | |-- openstack_tacker.py
+ | `-- openstack_utils.py
+ |-- requirements.txt
+ |-- setup.cfg
+ |-- setup.py
+ |-- test-requirements.txt
+ |-- tox.ini
+ |-- upper-constraints.txt
+
+ (Note: All *.pyc files removed from above list for brevity...)
+
+We may distinguish several directories, the first level has 5 directories:
+
+* **api**: This directory is dedicated to the internal Functest API and the API
+  (framework) documentation.
* **commons**: This directory is dedicated for storage of traffic
profile or any other test inputs that could be reused by any test
project.
@@ -480,7 +538,7 @@ destroy it::
docker rm -f <CONTAINER_ID>
-Check the Docker documentation dockerdocs_ for more information.
+Check the Docker documentation [`dockerdocs`_] for more information.
Preparing the Functest environment
----------------------------------
@@ -490,7 +548,7 @@ CLI utility is available to perform the needed environment preparation
action. Once the environment is prepared, the **functest** CLI utility
can be used to run different functional tests. The usage of the
**functest** CLI utility to run tests is described further in the
-Functest User Guide `OPNFV_FuncTestUserGuide`_
+`Functest User Guide`_
Prior to commencing the Functest environment preparation, we can check
the initial status of the environment. Issue the **functest env status**
@@ -782,9 +840,63 @@ and install the **docker-engine**. The instructions conclude with a
"test pull" of a sample "Hello World" docker container. This should now
work with the above pre-requisite actions.
-.. _dockerdocs: https://docs.docker.com/
-.. _dockerhub: https://hub.docker.com/r/opnfv/functest/
-.. _Proxy: https://docs.docker.com/engine/admin/systemd/#http-proxy
-.. _FunctestDockerTags: https://hub.docker.com/r/opnfv/functest/tags/
-.. _InstallDockerCentOS: https://docs.docker.com/engine/installation/linux/centos/
-.. _OPNFV_FuncTestUserGuide: http://artifacts.opnfv.org/functest/docs/userguide/index.html
+
+Installation and Configuration (Alpine)
+=======================================
+
+Introduction to Alpine
+----------------------
+Alpine containers have been introduced in Euphrates and released as experimental.
+Alpine allows Functest testing in several very light containers and, thanks to
+the refactoring of the dependency management, should allow the creation of light
+and fully customized Dockerfiles.
+
+Functest Alpine
+---------------
+Docker images are available on the Dockerhub:
+
+ * opnfv/functest-core
+ * opnfv/functest-healthcheck
+ * opnfv/functest-smoke
+ * opnfv/functest-features
+ * opnfv/functest-components
+ * opnfv/functest-vnf
+
+
+Preparing your environment
+--------------------------
+
+cat env::
+
+ INSTALLER_TYPE=XXX
+ INSTALLER_IP=XXX
+ EXTERNAL_NETWORK=XXX
+
+cat openstack.creds::
+
+ export OS_AUTH_URL=XXX
+ export OS_USER_DOMAIN_NAME=XXX
+ export OS_PROJECT_DOMAIN_NAME=XXX
+ export OS_USERNAME=XXX
+ export OS_TENANT_NAME=XXX
+ export OS_PROJECT_NAME=XXX
+ export OS_PASSWORD=XXX
+ export OS_VOLUME_API_VERSION=XXX
+ export OS_IDENTITY_API_VERSION=XXX
+ export OS_IMAGE_API_VERSION=XXX
+
+md5sum images/*::
+
+ c03e55c22b6fb2127e7de391b488d8d6 `images/CentOS-7-x86_64-GenericCloud.qcow2`_
+ f8ab98ff5e73ebab884d80c9dc9c7290 `images/cirros-0.3.5-x86_64-disk.img`_
+ 845c9b0221469f9e0f4d7ea0039ab5f2 `images/ubuntu-14.04-server-cloudimg-amd64-disk1.img`_
+
+.. _`dockerdocs`: https://docs.docker.com/
+.. _`dockerhub`: https://hub.docker.com/r/opnfv/functest/
+.. _`Proxy`: https://docs.docker.com/engine/admin/systemd/#http-proxy
+.. _`FunctestDockerTags`: https://hub.docker.com/r/opnfv/functest/tags/
+.. _`InstallDockerCentOS`: https://docs.docker.com/engine/installation/linux/centos/
+.. _`Functest User Guide`: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/userguide/index.html
+.. _`images/CentOS-7-x86_64-GenericCloud.qcow2`: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+.. _`images/cirros-0.3.5-x86_64-disk.img`: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
+.. _`images/ubuntu-14.04-server-cloudimg-amd64-disk1.img`: https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
diff --git a/docs/testing/user/configguide/index.rst b/docs/testing/user/configguide/index.rst
index 737f0ba7c..69c41660a 100644
--- a/docs/testing/user/configguide/index.rst
+++ b/docs/testing/user/configguide/index.rst
@@ -113,170 +113,23 @@ upstream communities.
The functional test cases are described in the Functest User Guide `[2]`_
-
-Prerequisites
-=============
-The OPNFV deployment is out of the scope of this document but it can be
-found in http://docs.opnfv.org.
-The OPNFV platform is considered as the SUT in this document.
-
-Several prerequisites are needed for Functest:
-
- #. A Jumphost to run Functest on
- #. A Docker daemon shall be installed on the Jumphost
- #. A public/external network created on the SUT
- #. An admin/management network created on the SUT
- #. Connectivity from the Jumphost to the SUT public/external network
- #. Connectivity from the Jumphost to the SUT admin/management network
-
-WARNING: Connectivity from Jumphost is essential and it is of paramount
-importance to make sure it is working before even considering to install
-and run Functest. Make also sure you understand how your networking is
-designed to work.
-
-NOTE: **Jumphost** refers to any server which meets the previous
-requirements. Normally it is the same server from where the OPNFV
-deployment has been triggered previously.
-
-NOTE: If your Jumphost is operating behind a company http proxy and/or
-firewall, please consult first the section `Proxy Support`_, towards
-the end of this document. The section details some tips/tricks which
-*may* be of help in a proxified environment.
-
-Docker installation
--------------------
-Docker installation and configuration is only needed to be done once
-through the life cycle of Jumphost.
-
-If your Jumphost is based on Ubuntu, SUSE, RHEL or CentOS linux, please
-consult the references below for more detailed instructions. The
-commands below are offered as a short reference.
-
-*Tip:* For running docker containers behind the proxy, you need first
-some extra configuration which is described in section
-`Docker Installation on CentOS behind http proxy`_. You should follow
-that section before installing the docker engine.
-
-Docker installation needs to be done as root user. You may use other
-userid's to create and run the actual containers later if so desired.
-Log on to your Jumphost as root user and install the Docker Engine
-(e.g. for CentOS family)::
-
- curl -sSL https://get.docker.com/ | sh
- systemctl start docker
-
- *Tip:* If you are working through proxy, please set the https_proxy
- environment variable first before executing the curl command.
-
-Add your user to docker group to be able to run commands without sudo::
-
- sudo usermod -aG docker <your_user>
-
-A reconnection is needed. There are 2 ways for this:
- #. Re-login to your account
- #. su - <username>
-
-References - Installing Docker Engine on different Linux Operating Systems:
- * Ubuntu_
- * RHEL_
- * CentOS_
- * SUSE_
-
-.. _Ubuntu: https://docs.docker.com/engine/installation/linux/ubuntulinux/
-.. _RHEL: https://docs.docker.com/engine/installation/linux/rhel/
-.. _CentOS: https://docs.docker.com/engine/installation/linux/centos/
-.. _SUSE: https://docs.docker.com/engine/installation/linux/suse/
-
-Public/External network on SUT
-------------------------------
-Some of the tests against the VIM (Virtual Infrastructure Manager) need
-connectivity through an existing public/external network in order to
-succeed. This is needed, for example, to create floating IPs to access
-VM instances through the public/external network (i.e. from the Docker
-container).
-
-By default, the four OPNFV installers provide a fresh installation with
-a public/external network created along with a router. Make sure that
-the public/external subnet is reachable from the Jumphost.
-
-*Hint:* For the given OPNFV Installer in use, the IP sub-net address
-used for the public/external network is usually a planning item and
-should thus be known. Consult the OPNFV Configuration guide `[4]`_, and
-ensure you can reach each node in the SUT, from the Jumphost using the
-'ping' command using the respective IP address on the public/external
-network for each node in the SUT. The details of how to determine the
-needed IP addresses for each node in the SUT may vary according to the
-used installer and are therefore ommitted here.
-
-Connectivity to Admin/Management network on SUT
------------------------------------------------
-Some of the Functest tools need to have access to the OpenStack
-admin/management network of the controllers `[1]`_.
-
-For this reason, check the connectivity from the Jumphost to all the
-controllers in cluster in the OpenStack admin/management network range.
-
-Installation and configuration
-==============================
+.. include:: ./prerequisites.rst
.. include:: ./configguide.rst
-Integration in CI
-=================
-In CI we use the Docker image and execute the appropriate commands within the
-container from Jenkins.
-
-Docker creation in set-functest-env builder `[3]`_::
-
- envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
- [...]
- docker pull opnfv/functest:$DOCKER_TAG >/dev/null
- cmd="sudo docker run -id ${envs} ${volumes} ${custom_params} ${TESTCASE_OPTIONS} opnfv/functest:${DOCKER_TAG} /bin/bash"
- echo "Functest: Running docker run command: ${cmd}"
- ${cmd} >${redirect}
- sleep 5
- container_id=$(docker ps | grep "opnfv/functest:${DOCKER_TAG}" | awk '{print $1}' | head -1)
- echo "Container ID=${container_id}"
- if [ -z ${container_id} ]; then
- echo "Cannot find opnfv/functest container ID ${container_id}. Please check if it is existing."
- docker ps -a
- exit 1
- fi
- echo "Starting the container: docker start ${container_id}"
- docker start ${container_id}
- sleep 5
- docker ps >${redirect}
- if [ $(docker ps | grep "opnfv/functest:${DOCKER_TAG}" | wc -l) == 0 ]; then
- echo "The container opnfv/functest with ID=${container_id} has not been properly started. Exiting..."
- exit 1
- fi
-
- cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
- echo "Executing command inside the docker: ${cmd}"
- docker exec ${container_id} ${cmd}
-
+.. include:: ./ci.rst
-Test execution in functest-all builder `[3]`_::
- branch=${GIT_BRANCH##*/}
- echo "Functest: run $FUNCTEST_SUITE_NAME on branch ${branch}"
- cmd="functest testcase run $FUNCTEST_SUITE_NAME"
- fi
- container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
- docker exec $container_id $cmd
- ret_value=$?
- exit $ret_value
+References
+==========
-Docker clean in functest-cleanup builder `[3]`_ calling docker rm and docker rmi
+`[1]`_ : Keystone public endpoint constraint
+`[2]`_ : Functest User Guide
-References
-==========
-.. _`[1]`: https://ask.openstack.org/en/question/68144/keystone-unable-to-use-the-public-endpoint/
-.. _`[2]`: http://artifacts.opnfv.org/functest/docs/userguide/index.html
-.. _`[3]`: https://git.opnfv.org/cgit/releng/tree/jjb/functest/functest-ci-jobs.yml
-.. _`[4]`: http://artifacts.opnfv.org/functest/danube/docs/configguide/index.html
+`[3]`_ : Functest Jenkins jobs
+`[4]`_ : Functest Configuration guide
`OPNFV main site`_
@@ -284,5 +137,9 @@ References
IRC support channel: #opnfv-functest
+.. _`[1]`: https://ask.openstack.org/en/question/68144/keystone-unable-to-use-the-public-endpoint/
+.. _`[2]`: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/userguide/index.html
+.. _`[3]`: https://git.opnfv.org/cgit/releng/tree/jjb/functest/functest-ci-jobs.yml
+.. _`[4]`: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/configguide/index.html
.. _`OPNFV main site`: http://www.opnfv.org
.. _`Functest page`: https://wiki.opnfv.org/functest
diff --git a/docs/testing/user/configguide/prerequisites.rst b/docs/testing/user/configguide/prerequisites.rst
new file mode 100644
index 000000000..94f676008
--- /dev/null
+++ b/docs/testing/user/configguide/prerequisites.rst
@@ -0,0 +1,104 @@
+Prerequisites
+=============
+The OPNFV deployment is out of the scope of this document, but it is
+documented at http://docs.opnfv.org.
+The OPNFV platform is considered the SUT in this document.
+
+Several prerequisites are needed for Functest:
+
+ #. A Jumphost to run Functest on
+ #. A Docker daemon installed on the Jumphost
+ #. A public/external network created on the SUT
+ #. An admin/management network created on the SUT
+ #. Connectivity from the Jumphost to the SUT public/external network
+ #. Connectivity from the Jumphost to the SUT admin/management network
+
+WARNING: Connectivity from the Jumphost is essential, and it is of paramount
+importance to make sure it is working before even considering installing
+and running Functest. Also make sure you understand how your networking is
+designed to work.
+
+NOTE: **Jumphost** refers to any server which meets the previous
+requirements. Normally it is the same server from which the OPNFV
+deployment was previously triggered.
+
+NOTE: If your Jumphost is operating behind a company http proxy and/or
+firewall, please first consult the section `Proxy Support`_ towards the
+end of this document. That section details some tips/tricks which *may*
+be of help in a proxied environment.
+
+Docker installation
+-------------------
+Docker installation and configuration only needs to be done once during
+the life cycle of the Jumphost.
+
+If your Jumphost is based on Ubuntu, SUSE, RHEL or CentOS Linux, please
+consult the references below for more detailed instructions. The
+commands below are offered as a short reference.
+
+*Tip:* To run Docker containers behind a proxy, you first need some extra
+configuration, which is described in the section
+`Docker Installation on CentOS behind http proxy`_. You should follow
+that section before installing the Docker Engine.
+
+Docker installation needs to be done as the root user. You may use other
+user IDs to create and run the actual containers later if so desired.
+Log on to your Jumphost as the root user and install the Docker Engine
+(e.g. for the CentOS family)::
+
+ curl -sSL https://get.docker.com/ | sh
+ systemctl start docker
+
+ *Tip:* If you are working through a proxy, please set the https_proxy
+ environment variable before executing the curl command, as sketched below.
+
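+A minimal sketch, assuming a purely hypothetical proxy endpoint (replace the
+host and port with your own proxy settings)::
+
+ # hypothetical corporate proxy; adapt to your environment
+ export http_proxy=http://proxy.example.com:8080
+ export https_proxy=http://proxy.example.com:8080
+ curl -sSL https://get.docker.com/ | sh
+ systemctl start docker
+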
+Add your user to the docker group to be able to run docker commands without
+sudo::
+
+ sudo usermod -aG docker <your_user>
+
+A reconnection is needed for the group membership to take effect. There are
+two ways to do this:
+
+ #. Re-login to your account
+ #. su - <username>
+
+References - Installing Docker Engine on different Linux Operating Systems:
+ * Ubuntu_
+ * RHEL_
+ * CentOS_
+ * SUSE_
+
+.. _Ubuntu: https://docs.docker.com/engine/installation/linux/ubuntulinux/
+.. _RHEL: https://docs.docker.com/engine/installation/linux/rhel/
+.. _CentOS: https://docs.docker.com/engine/installation/linux/centos/
+.. _SUSE: https://docs.docker.com/engine/installation/linux/suse/
+
+Public/External network on SUT
+------------------------------
+Some of the tests against the VIM (Virtual Infrastructure Manager) need
+connectivity through an existing public/external network in order to
+succeed. This is needed, for example, to create floating IPs to access
+VM instances through the public/external network (i.e. from the Docker
+container).
+
+By default, the four OPNFV installers provide a fresh installation with
+a public/external network created along with a router. Make sure that
+the public/external subnet is reachable from the Jumphost.
+
+*Hint:* For the OPNFV installer in use, the IP subnet used for the
+public/external network is usually a planning item and should thus be known.
+Consult the OPNFV Configuration guide `[4]`_, and ensure you can reach each
+node in the SUT from the Jumphost, using the 'ping' command with the
+respective IP address of each node on the public/external network.
+The details of how to determine the needed IP addresses for each node in the
+SUT may vary according to the installer used and are therefore omitted here.
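+
+A minimal sketch of such a reachability check from the Jumphost, assuming
+hypothetical public/external addresses for the SUT nodes (replace them with
+the addresses of your own deployment)::
+
+ # hypothetical public/external IP addresses of the SUT nodes
+ for node_ip in 172.30.10.11 172.30.10.12 172.30.10.13; do
+     ping -c 1 -W 2 "${node_ip}" > /dev/null \
+         && echo "${node_ip} reachable" \
+         || echo "${node_ip} NOT reachable"
+ done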
+
+Connectivity to Admin/Management network on SUT
+-----------------------------------------------
+Some of the Functest tools need to have access to the OpenStack
+admin/management network of the controllers `[1]`_.
+
+For this reason, check the connectivity from the Jumphost to all the
+controllers in the cluster within the OpenStack admin/management network
+range, for example as sketched below.
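+
+For example, assuming a hypothetical admin/management addressing plan (adapt
+the addresses to your own deployment)::
+
+ # hypothetical admin/management IP addresses of the controllers
+ for ctl_ip in 192.168.10.2 192.168.10.3 192.168.10.4; do
+     ping -c 1 -W 2 "${ctl_ip}" > /dev/null || echo "controller ${ctl_ip} unreachable"
+ done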
+
+.. _`[1]`: https://ask.openstack.org/en/question/68144/keystone-unable-to-use-the-public-endpoint/
+.. _`[4]`: http://artifacts.opnfv.org/functest/danube/docs/configguide/index.html
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index c877be7b5..4b66eacd2 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -20,8 +20,8 @@ Version history
| | | Column Gaynor | |
+------------+----------+------------------+----------------------------------+
| 2017-01-23 | 1.0.1 | Morgan Richomme | Adaptations for Danube |
-| | | | |
-| | | | |
++------------+----------+------------------+----------------------------------+
+| 2017-08-16 | 1.0.2 | Morgan Richomme | Adaptations for Euphrates |
+------------+----------+------------------+----------------------------------+
@@ -36,594 +36,63 @@ a Functest CLI utility is introduced for an easier execution of test procedures.
properly deployed and that all instructions described in this guide are to be
performed from *inside* the deployed Functest Docker container.
-.. include:: ./introduction.rst
-
-The different test cases are described in the remaining sections of this document.
-
-VIM (Virtualized Infrastructure Manager)
-----------------------------------------
-
-Healthcheck tests
-^^^^^^^^^^^^^^^^^
-In Danube, healthcheck tests have been refactored and rely on SNAPS, a
-OPNFV middleware project.
-
-SNAPS stands for "SDN/NFV Application development Platform and Stack".
-SNAPS is an object-oriented OpenStack library packaged with tests that exercise
-OpenStack.
-More information on SNAPS can be found in  `[13]`_
-
-Three tests are declared as healthcheck tests and can be used for gating by the
-installer, they cover functionally the tests previously done by healthcheck
-test case.
-
-The tests are:
-
-
- * *connection_check*
- * *api_check*
- * *snaps_health_check*
-
-Connection_check consists in 9 test cases (test duration < 5s) checking the
-connectivity with Glance, Keystone, Neutron, Nova and the external network.
-
-Api_check verifies the retrieval of OpenStack clients: Keystone, Glance,
-Neutron and Nova and may perform some simple queries. When the config value of
-snaps.use_keystone is True, functest must have access to the cloud's private
-network. This suite consists in 49 tests (test duration < 2 minutes).
-
-snaps_health_check creates instance, allocate floating IP, connect to the VM.
-This test replaced the previous Colorado healthcheck test.
-
-Self-obviously, successful completion of the 'healthcheck' testcase is a
-necessary pre-requisite for the execution of all other test Tiers.
-
-
-vPing_ssh
-^^^^^^^^^
-
-Given the script **ping.sh**::
-
- #!/bin/sh
- while true; do
- ping -c 1 $1 2>&1 >/dev/null
- RES=$?
- if [ "Z$RES" = "Z0" ] ; then
- echo 'vPing OK'
- break
- else
- echo 'vPing KO'
- fi
- sleep 1
- done
-
-
-The goal of this test is to establish an SSH connection using a floating IP
-on the Public/External network and verify that 2 instances can talk over a Private
-Tenant network::
-
- vPing_ssh test case
- +-------------+ +-------------+
- | | | |
- | | Boot VM1 with IP1 | |
- | +------------------->| |
- | Tester | | System |
- | | Boot VM2 | Under |
- | +------------------->| Test |
- | | | |
- | | Create floating IP | |
- | +------------------->| |
- | | | |
- | | Assign floating IP | |
- | | to VM2 | |
- | +------------------->| |
- | | | |
- | | Establish SSH | |
- | | connection to VM2 | |
- | | through floating IP| |
- | +------------------->| |
- | | | |
- | | SCP ping.sh to VM2 | |
- | +------------------->| |
- | | | |
- | | VM2 executes | |
- | | ping.sh to VM1 | |
- | +------------------->| |
- | | | |
- | | If ping: | |
- | | exit OK | |
- | | else (timeout): | |
- | | exit Failed | |
- | | | |
- +-------------+ +-------------+
-
-This test can be considered as an "Hello World" example.
-It is the first basic use case which **must** work on any deployment.
-
-vPing_userdata
-^^^^^^^^^^^^^^
-
-This test case is similar to vPing_ssh but without the use of Floating IPs
-and the Public/External network to transfer the ping script.
-Instead, it uses Nova metadata service to pass it to the instance at booting time.
-As vPing_ssh, it checks that 2 instances can talk to
-each other on a Private Tenant network::
-
- vPing_userdata test case
- +-------------+ +-------------+
- | | | |
- | | Boot VM1 with IP1 | |
- | +------------------->| |
- | | | |
- | | Boot VM2 with | |
- | | ping.sh as userdata| |
- | | with IP1 as $1. | |
- | +------------------->| |
- | Tester | | System |
- | | VM2 exeutes ping.sh| Under |
- | | (ping IP1) | Test |
- | +------------------->| |
- | | | |
- | | Monitor nova | |
- | | console-log VM 2 | |
- | | If ping: | |
- | | exit OK | |
- | | else (timeout) | |
- | | exit Failed | |
- | | | |
- +-------------+ +-------------+
-
-When the second VM boots it will execute the script passed as userdata
-automatically. The ping will be detected by periodically capturing the output
-in the console-log of the second VM.
-
-
-Tempest
-^^^^^^^
-
-Tempest `[2]`_ is the reference OpenStack Integration test suite.
-It is a set of integration tests to be run against a live OpenStack cluster.
-Tempest has suites of tests for:
-
- * OpenStack API validation
- * Scenarios
- * Other specific tests useful in validating an OpenStack deployment
-
-Functest uses Rally `[3]`_ to run the Tempest suite.
-Rally generates automatically the Tempest configuration file **tempest.conf**.
-Before running the actual test cases,
-Functest creates the needed resources (user, tenant) and
-updates the appropriate parameters into the configuration file.
-
-When the Tempest suite is executed, each test duration is measured and the full
-console output is stored to a *log* file for further analysis.
-
-The Tempest testcases are distributed across two
-Tiers:
-
- * Smoke Tier - Test Case 'tempest_smoke_serial'
- * Components Tier - Test case 'tempest_full_parallel'
-
-NOTE: Test case 'tempest_smoke_serial' executes a defined set of tempest smoke
-tests with a single thread (i.e. serial mode). Test case 'tempest_full_parallel'
-executes all defined Tempest tests using several concurrent threads
-(i.e. parallel mode). The number of threads activated corresponds to the number
-of available logical CPUs.
-
-The goal of the Tempest test suite is to check the basic functionalities of the
-different OpenStack components on an OPNFV fresh installation, using the
-corresponding REST API interfaces.
-
-
-Rally bench test suites
-^^^^^^^^^^^^^^^^^^^^^^^
-
-Rally `[3]`_ is a benchmarking tool that answers the question:
-
-*How does OpenStack work at scale?*
-
-The goal of this test suite is to benchmark all the different OpenStack modules and
-get significant figures that could help to define Telco Cloud KPIs.
-
-The OPNFV Rally scenarios are based on the collection of the actual Rally scenarios:
-
- * authenticate
- * cinder
- * glance
- * heat
- * keystone
- * neutron
- * nova
- * quotas
- * requests
-
-A basic SLA (stop test on errors) has been implemented.
-
-The Rally testcases are distributed across two Tiers:
-
- * Smoke Tier - Test Case 'rally_sanity'
- * Components Tier - Test case 'rally_full'
-
-NOTE: Test case 'rally_sanity' executes a limited number of Rally smoke test
-cases. Test case 'rally_full' executes the full defined set of Rally tests.
-
-
-Refstack-client to run Defcore testcases
------------------------------------------
-
-Refstack-client `[8]`_ is a command line utility that allows you to
-execute Tempest test runs based on configurations you specify.
-It is the official tool to run Defcore `[9]`_ testcases,
-which focuses on testing interoperability between OpenStack clouds.
-
-Refstack-client is integrated in Functest, consumed by Dovetail, which
-intends to define and provide a set of OPNFV related validation criteria
-that will provide input for the evaluation of the use of OPNFV trademarks.
-This progress is under the guideline of Compliance Verification Program(CVP).
-
-Defcore testcases
-^^^^^^^^^^^^^^^^^^
-
-*Danube Release*
-
-Set of DefCore tempest test cases not flagged and required.
-According to `[10]`_, some tests are still flagged due to outstanding bugs
-in the Tempest library, particularly tests that require SSH. Refstack developers
-are working on correcting these bugs upstream. Please note that although some tests
-are flagged because of bugs, there is still an expectation that the capabilities
-covered by the tests are available. It only contains Openstack core compute
-(no object storage). The approved guidelines (2016.08) are valid for Kilo,
-Liberty, Mitaka and Newton releases of OpenStack.
-The list can be generated using the Rest API from RefStack project:
-https://refstack.openstack.org/api/v1/guidelines/2016.08/tests?target=compute&type=required&alias=true&flag=false
-
-Running methods
-^^^^^^^^^^^^^^^
-
-Two running methods are provided after refstack-client integrated into
-Functest, Functest command line and manually, respectively.
-
-By default, for Defcore test cases run by Functest command line,
-are run followed with automatically generated
-configuration file, i.e., refstack_tempest.conf. In some circumstances,
-the automatic configuration file may not quite satisfied with the SUT,
-Functest also inherits the refstack-client command line and provides a way
-for users to set its configuration file according to its own SUT manually.
-
-*command line*
-
-Inside the Functest container, first to prepare Functest environment:
-
-::
-
- functest env prepare
-
-then to run default defcore testcases by using refstack-client:
-
-::
-
- functest testcase run refstack_defcore
-
-In OPNFV Continuous Integration(CI) system, the command line method is used.
-
-*manually*
-
-Inside the Functest container, first to prepare the refstack virtualenv:
-
-::
-
- cd /home/opnfv/repos/refstack-client
- source .venv/bin/activate
-
-then prepare the tempest configuration file and the testcases want to run with the SUT,
-run the testcases with:
-
-::
-
- ./refstack-client test -c <Path of the tempest configuration file to use> -v --test-list <Path or URL of test list>
-
-using help for more information:
-
-::
-
- ./refstack-client --help
- ./refstack-client test --help
-
-Reference tempest configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-*command line method*
-
-When command line method is used, the default tempest configuration file
-is generated by Rally.
-
-*manually*
-
-When running manually is used, recommended way to generate tempest configuration
-file is:
-
-::
-
- cd /usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/refstack_client
- python tempest_conf.py
-
-a file called tempest.conf is stored in the current path by default, users can do
-some adjustment according to the SUT:
-
-::
-
- vim refstack_tempest.conf
+.. include:: ./test_overview.rst
-a reference article can be used `[15]`_.
+.. include:: ./test_details.rst
+.. include:: ./runfunctest.rst
-snaps_smoke
-------------
-
-This test case contains tests that setup and destroy environments with VMs with
-and without Floating IPs with a newly created user and project. Set the config
-value snaps.use_floating_ips (True|False) to toggle this functionality. When
-the config value of snaps.use_keystone is True, Functest must have access
-the cloud's private network.
-This suite consists in 38 tests (test duration < 10 minutes)
-
-
-SDN Controllers
----------------
-
-There are currently 3 available controllers:
-
- * OpenDaylight (ODL)
- * ONOS
- * OpenContrail (OCL)
-
-OpenDaylight
-^^^^^^^^^^^^
-
-The OpenDaylight (ODL) test suite consists of a set of basic tests inherited
-from the ODL project using the Robot `[11]`_ framework.
-The suite verifies creation and deletion of networks, subnets and ports with
-OpenDaylight and Neutron.
-
-The list of tests can be described as follows:
-
- * Basic Restconf test cases
- * Connect to Restconf URL
- * Check the HTTP code status
-
- * Neutron Reachability test cases
- * Get the complete list of neutron resources (networks, subnets, ports)
-
- * Neutron Network test cases
- * Check OpenStack networks
- * Check OpenDaylight networks
- * Create a new network via OpenStack and check the HTTP status code returned by Neutron
- * Check that the network has also been successfully created in OpenDaylight
-
- * Neutron Subnet test cases
- * Check OpenStack subnets
- * Check OpenDaylight subnets
- * Create a new subnet via OpenStack and check the HTTP status code returned by Neutron
- * Check that the subnet has also been successfully created in OpenDaylight
-
- * Neutron Port test cases
- * Check OpenStack Neutron for known ports
- * Check OpenDaylight ports
- * Create a new port via OpenStack and check the HTTP status code returned by Neutron
- * Check that the new port has also been successfully created in OpenDaylight
-
- * Delete operations
- * Delete the port previously created via OpenStack
- * Check that the port has been also successfully deleted in OpenDaylight
- * Delete previously subnet created via OpenStack
- * Check that the subnet has also been successfully deleted in OpenDaylight
- * Delete the network created via OpenStack
- * Check that the network has also been successfully deleted in OpenDaylight
-
-Note: the checks in OpenDaylight are based on the returned HTTP status
-code returned by OpenDaylight.
-
-
-ONOS
-^^^^
-
-TestON Framework is used to test the ONOS SDN controller functions.
-The test cases deal with L2 and L3 functions.
-The ONOS test suite can be run on any ONOS compliant scenario.
-
-The test cases are described as follows:
-
- * onosfunctest: The main executable file contains the initialization of
- the docker environment and functions called by FUNCvirNetNB and
- FUNCvirNetNBL3
-
- * FUNCvirNetNB
-
- * Create Network: Post Network data and check it in ONOS
- * Update Network: Update the Network and compare it in ONOS
- * Delete Network: Delete the Network and check if it's NULL in ONOS or
- not
- * Create Subnet: Post Subnet data and check it in ONOS
- * Update Subnet: Update the Subnet and compare it in ONOS
- * Delete Subnet: Delete the Subnet and check if it's NULL in ONOS or not
- * Create Port: Post Port data and check it in ONOS
- * Update Port: Update the Port and compare it in ONOS
- * Delete Port: Delete the Port and check if it's NULL in ONOS or not
-
- * FUNCvirNetNBL3
-
- * Create Router: Post data for create Router and check it in ONOS
- * Update Router: Update the Router and compare it in ONOS
- * Delete Router: Delete the Router data and check it in ONOS
- * Create RouterInterface: Post Router Interface data to an existing Router
- and check it in ONOS
- * Delete RouterInterface: Delete the RouterInterface and check the Router
- * Create FloatingIp: Post data for create FloatingIp and check it in ONOS
- * Update FloatingIp: Update the FloatingIp and compare it in ONOS
- * Delete FloatingIp: Delete the FloatingIp and check that it is 'NULL' in
- ONOS
- * Create External Gateway: Post data to create an External Gateway for an
- existing Router and check it in ONOS
- * Update External Gateway: Update the External Gateway and compare the change
- * Delete External Gateway: Delete the External Gateway and check that it is
- 'NULL' in ONOS
-
-
-Features
---------
-
-In Danube, Functest supports the integration of:
-
- * barometer
- * bgpvpn
- * doctor
- * domino
- * fds
- * multisite
- * netready
- * odl-sfc
- * promise
- * security_scan
+.. include:: ./test_results.rst
-Note: copper is not supported in Danube.
-
-Please refer to the dedicated feature user guides for details.
-
-
-VNF
----
-
-
-cloudify_ims
-^^^^^^^^^^^^
-The IP Multimedia Subsystem or IP Multimedia Core Network Subsystem (IMS) is an
-architectural framework for delivering IP multimedia services.
-
-vIMS has been integrated in Functest to demonstrate the capability to deploy a
-relatively complex NFV scenario on the OPNFV platform. The deployment of a complete
-functional VNF allows the test of most of the essential functions needed for a
-NFV platform.
+.. include:: ./reporting.rst
-The goal of this test suite consists of:
+.. figure:: ../../../images/functest-reporting-status.png
+ :align: center
+ :alt: Functest reporting portal Fuel status page
- * deploy a VNF orchestrator (Cloudify)
- * deploy a Clearwater vIMS (IP Multimedia Subsystem) VNF from this
- orchestrator based on a TOSCA blueprint defined in `[5]`_
- * run suite of signaling tests on top of this VNF
+.. include:: ./troubleshooting.rst
-The Clearwater architecture is described as follows:
-.. figure:: ../../../images/clearwater-architecture.png
- :align: center
- :alt: vIMS architecture
+References
+==========
-orchestra_ims
-^^^^^^^^^^^^^
-Orchestra test case deals with the deployment of OpenIMS with OpenBaton
-orchestrator.
+`[1]`_: Functest configuration guide
-parser
-^^^^^^
+`[2]`_: OpenStack Tempest documentation
-See parser user guide for details: `[12]`_
+`[3]`_: Rally documentation
+`[4]`_: Functest in depth (Danube)
-vyos-vrouter
-^^^^^^^^^^^^
+`[5]`_: Clearwater vIMS blueprint
-This test case deals with the deployment and the test of vyos vrouter with
-Cloudify orchestrator. The test case can do testing for interchangeability of
-BGP Protocol using vyos.
+`[6]`_: NIST web site
-The Workflow is as follows:
- * Deploy
- Deploy VNF Testing topology by Cloudify using blueprint.
- * Configuration
- Setting configuration to Target VNF and reference VNF using ssh
- * Run
- Execution of test command for test item written YAML format file.
- Check VNF status and behavior.
- * Reporting
- Output of report based on result using JSON format.
+`[7]`_: OpenSCAP web site
-The vyos-vrouter architecture is described in `[14]`_
+`[8]`_: Refstack client
+`[9]`_: Defcore
+`[10]`_: OpenStack interoperability procedure
-.. include:: ./runfunctest.rst
+`[11]`_: Robot Framework web site
+`[12]`_: Functest User guide
-Test results
-============
+`[13]`_: SNAPS wiki
-Manual testing
---------------
-
-In manual mode test results are displayed in the console and result files
-are put in /home/opnfv/functest/results.
-
-Automated testing
---------------
-
-In automated mode, test results are displayed in jenkins logs, a summary is provided
-at the end of the job and can be described as follow::
-
- +==================================================================================================================================================+
- | FUNCTEST REPORT |
- +==================================================================================================================================================+
- | |
- | Deployment description: |
- | INSTALLER: fuel |
- | SCENARIO: os-odl_l2-nofeature-ha |
- | BUILD TAG: jenkins-functest-fuel-baremetal-daily-master-324 |
- | CI LOOP: daily |
- | |
- +=========================+===============+============+===============+===========================================================================+
- | TEST CASE | TIER | DURATION | RESULT | URL |
- +=========================+===============+============+===============+===========================================================================+
- | connection_check | healthcheck | 00:02 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb62b34079ac000a42e3fe |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | api_check | healthcheck | 01:15 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb62fe4079ac000a42e3ff |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | snaps_health_check | healthcheck | 00:50 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb63314079ac000a42e400 |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | vping_ssh | smoke | 01:10 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb63654079ac000a42e401 |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | vping_userdata | smoke | 00:59 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb63a14079ac000a42e403 |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | tempest_smoke_serial | smoke | 12:57 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb66bd4079ac000a42e408 |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | rally_sanity | smoke | 10:22 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb692b4079ac000a42e40a |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | refstack_defcore | smoke | 12:28 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb6c184079ac000a42e40c |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | snaps_smoke | smoke | 12:04 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb6eec4079ac000a42e40e |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
- | domino | features | 00:29 | PASS | http://testresults.opnfv.org/test/api/v1/results/58cb6f044079ac000a42e40f |
- +-------------------------+---------------+------------+---------------+---------------------------------------------------------------------------+
-
-
-Results are automatically pushed to the test results database, some additional
-result files are pushed to OPNFV artifact web sites.
-
-Based on the results stored in the result database, a `Functest reporting`_
-portal is also automatically updated. This portal provides information on:
-
- * The overall status per scenario and per installer
- * Tempest: Tempest test case including reported errors per scenario and installer
- * vIMS: vIMS details per scenario and installer
+`[14]`_: vRouter
-.. figure:: ../../../images/functest-reporting-status.png
- :align: center
- :alt: Functest reporting portal Fuel status page
+`[15]`_: Testing OpenStack Tempest part 1
-.. include:: ./troubleshooting.rst
+`OPNFV main site`_: OPNFV official web site
+`Functest page`_: Functest wiki page
-References
-==========
+IRC support channel: #opnfv-functest
-.. _`[1]`: http://artifacts.opnfv.org/functest/colorado/docs/configguide/#
+.. _`[1]`: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/configguide/index.html
.. _`[2]`: http://docs.openstack.org/developer/tempest/overview.html
.. _`[3]`: https://rally.readthedocs.org/en/latest/index.html
.. _`[4]`: http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf
@@ -634,17 +103,10 @@ References
.. _`[9]`: https://github.com/openstack/defcore
.. _`[10]`: https://github.com/openstack/interop/blob/master/2016.08/procedure.rst
.. _`[11]`: http://robotframework.org/
-.. _`[12]`: http://artifacts.opnfv.org/parser/colorado/docs/userguide/index.html
+.. _`[12]`: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/userguide/index.html
.. _`[13]`: https://wiki.opnfv.org/display/PROJ/SNAPS-OO
.. _`[14]`: https://github.com/oolorg/opnfv-functest-vrouter
.. _`[15]`: https://aptira.com/testing-openstack-tempest-part-1/
-
-`OPNFV main site`_
-
-`Functest page`_
-
-IRC support chan: #opnfv-functest
-
.. _`OPNFV main site`: http://www.opnfv.org
.. _`Functest page`: https://wiki.opnfv.org/functest
.. _`OpenRC`: http://docs.openstack.org/user-guide/common/cli_set_environment_variables_using_openstack_rc.html
diff --git a/docs/testing/user/userguide/reporting.rst b/docs/testing/user/userguide/reporting.rst
new file mode 100644
index 000000000..14d22f230
--- /dev/null
+++ b/docs/testing/user/userguide/reporting.rst
@@ -0,0 +1,90 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+Test reporting
+==============
+
+An automatic reporting page has been created in order to provide a consistent
+view of the Functest tests on the different scenarios.
+
+On this page, each scenario is evaluated according to test criteria.
+
+The results are collected from the centralized database every day and, per
+scenario, a score is calculated based on the results from the last 10 days.
+This score is the sum of the individual test scores. Each test case has
+success criteria reflected in the criteria field of the results.
+
+As an illustration, let's consider the os-odl_l2-nofeature-ha scenario: the
+scenario score is the sum of the scores of all the runnable tests from the
+tiers (healthcheck, smoke and features) corresponding to this scenario.
+
+ +---------------------+---------+---------+---------+---------+
+ | Test | Apex | Compass | Fuel | Joid |
+ +=====================+=========+=========+=========+=========+
+ | vPing_ssh | X | X | X | X |
+ +---------------------+---------+---------+---------+---------+
+ | vPing_userdata | X | X | X | X |
+ +---------------------+---------+---------+---------+---------+
+ | tempest_smoke_serial| X | X | X | X |
+ +---------------------+---------+---------+---------+---------+
+ | rally_sanity | X | X | X | X |
+ +---------------------+---------+---------+---------+---------+
+ | odl | X | X | X | X |
+ +---------------------+---------+---------+---------+---------+
+ | promise | | | X | X |
+ +---------------------+---------+---------+---------+---------+
+ | doctor | X | | X | |
+ +---------------------+---------+---------+---------+---------+
+ | security_scan | X | | | |
+ +---------------------+---------+---------+---------+---------+
+ | parser | | | X | |
+ +---------------------+---------+---------+---------+---------+
+ | copper | X | | | X |
+ +---------------------+---------+---------+---------+---------+
+ src: os-odl_l2-nofeature-ha Colorado (see release note for the last matrix version)
+
+All the test cases (X) listed in the table are runnable on
+os-odl_l2-nofeature scenarios.
+Please note that other test cases (e.g. sfc_odl, bgpvpn) need ODL
+configuration add-ons and, as a consequence, a specific scenario.
+They are not considered runnable on the generic odl_l2 scenario.
+
+
+If no result is available, or if all the results are failed, the test case
+gets 0 points.
+If it was successful at least once but not during the last 4 runs, the case
+gets 1 point (it worked once).
+If at least 3 of the last 4 runs were successful, the case gets 2 points.
+If the last 4 runs of the test were successful, the test gets 3 points.
+
+In the example above, the target score for fuel/os-odl_l2-nofeature-ha is
+3 x 8 = 24 points and for compass it is 3 x 5 = 15 points.
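+
+As an illustration only (this is not the code of the reporting portal; the
+run counters below are hypothetical inputs), the scoring rules for a single
+test case can be sketched as follows::
+
+ # successful runs among the last 4 CI iterations (hypothetical input)
+ last4_ok=3
+ # whether the test case passed at least once over the window (hypothetical)
+ ever_ok=true
+ if [ "${last4_ok}" -eq 4 ]; then score=3
+ elif [ "${last4_ok}" -ge 3 ]; then score=2
+ elif [ "${ever_ok}" = "true" ]; then score=1
+ else score=0
+ fi
+ echo "test case score: ${score}/3"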
+
+The scenario is validated per installer when we get 3 points for all the
+individual test cases (e.g. 24/24 for fuel, 15/15 for compass).
+
+Please note that complex or long-duration tests are not yet considered for
+the scoring. In fact, the success criteria are not always easy to define and
+may require a specific hardware configuration.
+
+Please also note that all the test cases have the same "weight" in the score
+calculation, whatever the complexity of the test case. Concretely, a vPing
+has the same weight as the 200 Tempest tests.
+Moreover, some installers support more features than others. The more cases
+your scenario deals with, the more difficult it is to reach a good score.
+
+Therefore the scoring provides 3 types of indicators:
+
+ * the richness of the scenario: if the target score is high, it means that the scenario includes lots of features
+ * the maturity: if the percentage (score / target score * 100) is high, it means that all the tests PASS
+ * the stability: as the number of iterations is included in the calculation, the percentage can be high only if the scenario is run regularly (at least more than 4 iterations over the last 10 days in CI)
+
+In any case, the scoring is used to give feedback to the other projects and
+does not represent an absolute value of the scenario.
+
+See the `reporting page`_ for details. For the status, click on the version,
+then Functest, then the Status menu.
+
+
+.. _`reporting page`: http://testresults.opnfv.org/reporting/
diff --git a/docs/testing/user/userguide/runfunctest.rst b/docs/testing/user/userguide/runfunctest.rst
index 079baddf9..c8db6ff19 100644
--- a/docs/testing/user/userguide/runfunctest.rst
+++ b/docs/testing/user/userguide/runfunctest.rst
@@ -1,8 +1,8 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-Executing the functest suites
-=============================
+Executing the functest suites (Ubuntu)
+======================================
Manual testing
--------------
@@ -118,410 +118,57 @@ More specific details on specific Tiers or Test Cases can be seen wih the
To execute a Test Tier or Test Case, the 'run' command is used::
root@22e436918db0:~/repos/functest/ci# functest tier run healthcheck
- 2017-03-30 05:36:19,752 - run_tests - INFO - ############################################
- 2017-03-30 05:36:19,752 - run_tests - INFO - Running tier 'healthcheck'
- 2017-03-30 05:36:19,753 - run_tests - INFO - ############################################
- 2017-03-30 05:36:19,753 - run_tests - INFO -
-
- 2017-03-30 05:36:19,753 - run_tests - INFO - ============================================
- 2017-03-30 05:36:19,753 - run_tests - INFO - Running test case 'connection_check'...
- 2017-03-30 05:36:19,753 - run_tests - INFO - ============================================
- 2017-03-30 05:36:20,046 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,046 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,775 - functest - INFO - Using flavor metatdata '{'hw:mem_page_size': 'any'}'
- 2017-03-30 05:36:20,777 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,777 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,777 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,778 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,778 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,779 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,779 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,779 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,780 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,780 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,781 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,781 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,781 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,782 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,782 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,783 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:20,783 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:20,784 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- test_glance_connect_fail (snaps.openstack.utils.tests.glance_utils_tests.GlanceSmokeTests) ... ok
- test_glance_connect_success (snaps.openstack.utils.tests.glance_utils_tests.GlanceSmokeTests) ... ok
- test_keystone_connect_fail (snaps.openstack.utils.tests.keystone_utils_tests.KeystoneSmokeTests) ... ok
- test_keystone_connect_success (snaps.openstack.utils.tests.keystone_utils_tests.KeystoneSmokeTests) ... ok
- test_neutron_connect_fail (snaps.openstack.utils.tests.neutron_utils_tests.NeutronSmokeTests) ... ok
- test_neutron_connect_success (snaps.openstack.utils.tests.neutron_utils_tests.NeutronSmokeTests) ... ok
- test_retrieve_ext_network_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronSmokeTests) ... ok
- test_nova_connect_fail (snaps.openstack.utils.tests.nova_utils_tests.NovaSmokeTests) ... ok
- test_nova_connect_success (snaps.openstack.utils.tests.nova_utils_tests.NovaSmokeTests) ... ok
-
- ----------------------------------------------------------------------
- Ran 9 tests in 1.332s
-
- OK
- 2017-03-30 05:36:22,116 - functest - INFO - connection_check OK
- 2017-03-30 05:36:22,483 - functest - INFO - The results were successfully pushed to DB
- 2017-03-30 05:36:22,483 - run_tests - INFO - Test execution time: 00:02
- 2017-03-30 05:36:22,484 - run_tests - INFO -
-
- 2017-03-30 05:36:22,484 - run_tests - INFO - ============================================
- 2017-03-30 05:36:22,484 - run_tests - INFO - Running test case 'api_check'...
- 2017-03-30 05:36:22,484 - run_tests - INFO - ============================================
- 2017-03-30 05:36:22,590 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:22,591 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,361 - functest - INFO - Using flavor metatdata '{'hw:mem_page_size': 'any'}'
- 2017-03-30 05:36:23,362 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,362 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,363 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,364 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,364 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,365 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,365 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,365 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,366 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,366 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,367 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,367 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,368 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,368 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,368 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,369 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,370 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,370 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,370 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,371 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,372 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,372 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,372 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,373 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,373 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,374 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,374 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,374 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,375 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,376 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,376 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,376 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,376 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,377 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,377 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,377 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,377 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,377 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,378 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,378 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,378 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,378 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,379 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,379 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,379 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,380 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,380 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,380 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,380 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,380 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,381 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,381 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,381 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,381 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,381 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,382 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,382 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,382 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,382 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,382 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,383 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,383 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,383 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,384 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,384 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,384 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,384 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,384 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,385 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,385 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,385 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,385 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,386 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,386 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,386 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,386 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,387 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,387 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,387 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,388 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,388 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,388 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,388 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,388 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,420 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,420 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,420 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,420 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,421 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,421 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,421 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,421 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,422 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,422 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:36:23,422 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:36:23,422 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- test_create_project_minimal (snaps.openstack.utils.tests.keystone_utils_tests.KeystoneUtilsTests) ... ok
- test_create_user_minimal (snaps.openstack.utils.tests.keystone_utils_tests.KeystoneUtilsTests) ... ok
- test_create_delete_user (snaps.openstack.tests.create_user_tests.CreateUserSuccessTests) ... ok
- test_create_user (snaps.openstack.tests.create_user_tests.CreateUserSuccessTests) ... ok
- test_create_user_2x (snaps.openstack.tests.create_user_tests.CreateUserSuccessTests) ... 2017-03-30 05:36:27,257 - create_user - INFO - Found user with name - CreateUserSuccessTests-5ff765c1-56bd-4c4-name
- ok
- test_create_delete_project (snaps.openstack.tests.create_project_tests.CreateProjectSuccessTests) ... ok
- test_create_project (snaps.openstack.tests.create_project_tests.CreateProjectSuccessTests) ... ok
- test_create_project_2x (snaps.openstack.tests.create_project_tests.CreateProjectSuccessTests) ... 2017-03-30 05:36:29,798 - create_image - INFO - Found project with name - CreateProjectSuccessTests-1b2fce89-dd5e-471-name
- ok
- test_create_project_sec_grp_one_user (snaps.openstack.tests.create_project_tests.CreateProjectUserTests) ... 2017-03-30 05:36:31,327 - OpenStackSecurityGroup - INFO - Creating security group CreateProjectUserTests-34aa7d96-f19c-4db-name...
- 2017-03-30 05:36:31,327 - neutron_utils - INFO - Retrieving security group with name - CreateProjectUserTests-34aa7d96-f19c-4db-name
- 2017-03-30 05:36:31,705 - neutron_utils - INFO - Creating security group with name - CreateProjectUserTests-34aa7d96-f19c-4db-name
- 2017-03-30 05:36:31,878 - neutron_utils - INFO - Retrieving security group rules associate with the security group - CreateProjectUserTests-34aa7d96-f19c-4db-name
- 2017-03-30 05:36:31,915 - neutron_utils - INFO - Retrieving security group with ID - 4dc3e8e4-3dc8-4dda-9c7e-03d08171e17a
- 2017-03-30 05:36:31,980 - neutron_utils - INFO - Retrieving security group with ID - 4dc3e8e4-3dc8-4dda-9c7e-03d08171e17a
- 2017-03-30 05:36:32,048 - neutron_utils - INFO - Retrieving security group with name - CreateProjectUserTests-34aa7d96-f19c-4db-name
- 2017-03-30 05:36:32,108 - neutron_utils - INFO - Deleting security group rule with ID - 81cca252-45fe-4052-adb9-819191693618
- 2017-03-30 05:36:32,257 - neutron_utils - INFO - Deleting security group rule with ID - 8900647f-1587-4068-bd2e-7b77677d12ed
- 2017-03-30 05:36:32,367 - neutron_utils - INFO - Deleting security group with name - CreateProjectUserTests-34aa7d96-f19c-4db-name
- ok
- test_create_project_sec_grp_two_users (snaps.openstack.tests.create_project_tests.CreateProjectUserTests) ... 2017-03-30 05:36:34,950 - OpenStackSecurityGroup - INFO - Creating security group CreateProjectUserTests-6664b595-4657-4f9-name...
- 2017-03-30 05:36:34,950 - neutron_utils - INFO - Retrieving security group with name - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:35,337 - neutron_utils - INFO - Creating security group with name - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:35,528 - neutron_utils - INFO - Retrieving security group rules associate with the security group - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:35,566 - neutron_utils - INFO - Retrieving security group with ID - ea8b1da3-0e3d-45aa-b63f-68d2e7b57e48
- 2017-03-30 05:36:35,831 - neutron_utils - INFO - Retrieving security group with ID - ea8b1da3-0e3d-45aa-b63f-68d2e7b57e48
- 2017-03-30 05:36:36,118 - neutron_utils - INFO - Retrieving security group with name - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:36,474 - OpenStackSecurityGroup - INFO - Creating security group CreateProjectUserTests-6664b595-4657-4f9-name...
- 2017-03-30 05:36:36,475 - neutron_utils - INFO - Retrieving security group with name - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:36,717 - neutron_utils - INFO - Retrieving security group rules associate with the security group - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:36,768 - neutron_utils - INFO - Retrieving security group with ID - ea8b1da3-0e3d-45aa-b63f-68d2e7b57e48
- 2017-03-30 05:36:36,831 - neutron_utils - INFO - Retrieving security group with ID - ea8b1da3-0e3d-45aa-b63f-68d2e7b57e48
- 2017-03-30 05:36:36,902 - neutron_utils - INFO - Deleting security group rule with ID - f6e50aea-e6d2-4ba9-ab78-0674cdcd5415
- 2017-03-30 05:36:37,054 - neutron_utils - INFO - Deleting security group rule with ID - a32d9c1d-7ae0-4fe9-b4c9-8b039008f836
- 2017-03-30 05:36:37,204 - neutron_utils - INFO - Deleting security group with name - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:37,350 - neutron_utils - INFO - Deleting security group rule with ID - f6e50aea-e6d2-4ba9-ab78-0674cdcd5415
- 2017-03-30 05:36:37,387 - OpenStackSecurityGroup - WARNING - Rule not found, cannot delete - Security group rule f6e50aea-e6d2-4ba9-ab78-0674cdcd5415 does not exist
- Neutron server returns request_ids: ['req-e740871d-34c4-4b95-a76c-6b84028954e6']
- 2017-03-30 05:36:37,387 - neutron_utils - INFO - Deleting security group rule with ID - a32d9c1d-7ae0-4fe9-b4c9-8b039008f836
- 2017-03-30 05:36:37,426 - OpenStackSecurityGroup - WARNING - Rule not found, cannot delete - Security group rule a32d9c1d-7ae0-4fe9-b4c9-8b039008f836 does not exist
- Neutron server returns request_ids: ['req-8121308e-d7d3-4ccc-961f-5fa794fccc83']
- 2017-03-30 05:36:37,427 - neutron_utils - INFO - Deleting security group with name - CreateProjectUserTests-6664b595-4657-4f9-name
- 2017-03-30 05:36:37,470 - OpenStackSecurityGroup - WARNING - Security Group not found, cannot delete - Security group ea8b1da3-0e3d-45aa-b63f-68d2e7b57e48 does not exist
- Neutron server returns request_ids: ['req-09424914-a32d-4bcb-9d90-0ad307ec4c56']
- ok
- test_create_image_minimal_file (snaps.openstack.utils.tests.glance_utils_tests.GlanceUtilsTests) ... ok
- test_create_image_minimal_url (snaps.openstack.utils.tests.glance_utils_tests.GlanceUtilsTests) ... ok
- test_create_network (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsNetworkTests) ... 2017-03-30 05:37:02,330 - neutron_utils - INFO - Creating network with name NeutronUtilsNetworkTests-9dc31d5e-be87-480d-af6e-d89f0608e459-pub-net
- 2017-03-30 05:37:04,307 - neutron_utils - INFO - Deleting network with name NeutronUtilsNetworkTests-9dc31d5e-be87-480d-af6e-d89f0608e459-pub-net
- ok
- test_create_network_empty_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsNetworkTests) ... ok
- test_create_network_null_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsNetworkTests) ... ok
- test_create_subnet (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSubnetTests) ... 2017-03-30 05:37:04,953 - neutron_utils - INFO - Creating network with name NeutronUtilsSubnetTests-127e135b-f050-4e85-8c9f-b4f72fb1b028-pub-net
- 2017-03-30 05:37:06,108 - neutron_utils - INFO - Creating subnet with name NeutronUtilsSubnetTests-127e135b-f050-4e85-8c9f-b4f72fb1b028-pub-subnet
- 2017-03-30 05:37:07,544 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsSubnetTests-127e135b-f050-4e85-8c9f-b4f72fb1b028-pub-subnet
- 2017-03-30 05:37:07,944 - neutron_utils - INFO - Deleting network with name NeutronUtilsSubnetTests-127e135b-f050-4e85-8c9f-b4f72fb1b028-pub-net
- ok
- test_create_subnet_empty_cidr (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSubnetTests) ... 2017-03-30 05:37:08,594 - neutron_utils - INFO - Creating network with name NeutronUtilsSubnetTests-4edb48fe-2532-409b-8dc7-dcb344068a20-pub-net
- 2017-03-30 05:37:09,862 - neutron_utils - INFO - Deleting network with name NeutronUtilsSubnetTests-4edb48fe-2532-409b-8dc7-dcb344068a20-pub-net
- ok
- test_create_subnet_empty_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSubnetTests) ... 2017-03-30 05:37:10,962 - neutron_utils - INFO - Creating network with name NeutronUtilsSubnetTests-bb15908a-a475-45e9-9777-8b5d3faaaea8-pub-net
- 2017-03-30 05:37:11,973 - neutron_utils - INFO - Creating subnet with name NeutronUtilsSubnetTests-bb15908a-a475-45e9-9777-8b5d3faaaea8-pub-subnet
- 2017-03-30 05:37:13,088 - neutron_utils - INFO - Deleting network with name NeutronUtilsSubnetTests-bb15908a-a475-45e9-9777-8b5d3faaaea8-pub-net
- ok
- test_create_subnet_null_cidr (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSubnetTests) ... 2017-03-30 05:37:14,032 - neutron_utils - INFO - Creating network with name NeutronUtilsSubnetTests-701278ff-f4b6-478f-b16f-1d3fdfb43ed7-pub-net
- 2017-03-30 05:37:15,100 - neutron_utils - INFO - Deleting network with name NeutronUtilsSubnetTests-701278ff-f4b6-478f-b16f-1d3fdfb43ed7-pub-net
- ok
- test_create_subnet_null_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSubnetTests) ... 2017-03-30 05:37:15,658 - neutron_utils - INFO - Creating network with name NeutronUtilsSubnetTests-2dad9c37-c892-494b-a8dc-51963ce11cd8-pub-net
- 2017-03-30 05:37:16,184 - neutron_utils - INFO - Deleting network with name NeutronUtilsSubnetTests-2dad9c37-c892-494b-a8dc-51963ce11cd8-pub-net
- ok
- test_add_interface_router (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:17,438 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-net
- 2017-03-30 05:37:18,624 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-subnet
- 2017-03-30 05:37:20,041 - neutron_utils - INFO - Creating router with name - NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-router
- 2017-03-30 05:37:22,518 - neutron_utils - INFO - Adding interface to router with name NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-router
- 2017-03-30 05:37:23,883 - neutron_utils - INFO - Removing router interface from router named NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-router
- 2017-03-30 05:37:25,345 - neutron_utils - INFO - Deleting router with name - NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-router
- 2017-03-30 05:37:27,019 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-subnet
- 2017-03-30 05:37:28,570 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-5f1f0b29-1148-4628-8626-f2aa63b17914-pub-net
- ok
- test_add_interface_router_null_router (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:29,583 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-021a41c7-e2b2-45df-bb6d-630ddba2b776-pub-net
- 2017-03-30 05:37:30,234 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-021a41c7-e2b2-45df-bb6d-630ddba2b776-pub-subnet
- 2017-03-30 05:37:30,724 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-021a41c7-e2b2-45df-bb6d-630ddba2b776-pub-subnet
- 2017-03-30 05:37:31,128 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-021a41c7-e2b2-45df-bb6d-630ddba2b776-pub-net
- ok
- test_add_interface_router_null_subnet (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:32,229 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-e52f96b6-92c9-49a3-ac7f-6a4a61a82c7e-pub-net
- 2017-03-30 05:37:32,833 - neutron_utils - INFO - Creating router with name - NeutronUtilsRouterTests-e52f96b6-92c9-49a3-ac7f-6a4a61a82c7e-pub-router
- 2017-03-30 05:37:34,002 - neutron_utils - INFO - Adding interface to router with name NeutronUtilsRouterTests-e52f96b6-92c9-49a3-ac7f-6a4a61a82c7e-pub-router
- 2017-03-30 05:37:34,003 - neutron_utils - INFO - Deleting router with name - NeutronUtilsRouterTests-e52f96b6-92c9-49a3-ac7f-6a4a61a82c7e-pub-router
- 2017-03-30 05:37:35,238 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-e52f96b6-92c9-49a3-ac7f-6a4a61a82c7e-pub-net
- ok
- test_create_port (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:35,898 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-842cf533-4886-4539-86e4-15bcd8c77b63-pub-net
- 2017-03-30 05:37:36,426 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-842cf533-4886-4539-86e4-15bcd8c77b63-pub-subnet
- 2017-03-30 05:37:37,725 - neutron_utils - INFO - Creating port for network with name - NeutronUtilsRouterTests-842cf533-4886-4539-86e4-15bcd8c77b63-pub-net
- 2017-03-30 05:37:38,511 - neutron_utils - INFO - Deleting port with name NeutronUtilsRouterTests-842cf533-4886-4539-86e4-15bcd8c77b63-port
- 2017-03-30 05:37:39,036 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-842cf533-4886-4539-86e4-15bcd8c77b63-pub-subnet
- 2017-03-30 05:37:41,326 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-842cf533-4886-4539-86e4-15bcd8c77b63-pub-net
- ok
- test_create_port_empty_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:42,725 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-303b5eae-374a-4da9-a905-aa39a7d5f026-pub-net
- 2017-03-30 05:37:43,246 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-303b5eae-374a-4da9-a905-aa39a7d5f026-pub-subnet
- 2017-03-30 05:37:43,945 - neutron_utils - INFO - Creating port for network with name - NeutronUtilsRouterTests-303b5eae-374a-4da9-a905-aa39a7d5f026-pub-net
- 2017-03-30 05:37:45,674 - neutron_utils - INFO - Deleting port with name NeutronUtilsRouterTests-303b5eae-374a-4da9-a905-aa39a7d5f026-port
- 2017-03-30 05:37:46,197 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-303b5eae-374a-4da9-a905-aa39a7d5f026-pub-subnet
- 2017-03-30 05:37:48,252 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-303b5eae-374a-4da9-a905-aa39a7d5f026-pub-net
- ok
- test_create_port_invalid_ip (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:49,864 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-bd7a4489-79e8-4328-8519-5ad1951c0b5d-pub-net
- 2017-03-30 05:37:50,322 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-bd7a4489-79e8-4328-8519-5ad1951c0b5d-pub-subnet
- 2017-03-30 05:37:50,803 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-bd7a4489-79e8-4328-8519-5ad1951c0b5d-pub-subnet
- 2017-03-30 05:37:51,240 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-bd7a4489-79e8-4328-8519-5ad1951c0b5d-pub-net
- ok
- test_create_port_invalid_ip_to_subnet (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:51,767 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-ef2c3474-7b51-483b-b269-05fc4532f294-pub-net
- 2017-03-30 05:37:52,246 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-ef2c3474-7b51-483b-b269-05fc4532f294-pub-subnet
- 2017-03-30 05:37:52,795 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-ef2c3474-7b51-483b-b269-05fc4532f294-pub-subnet
- 2017-03-30 05:37:53,199 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-ef2c3474-7b51-483b-b269-05fc4532f294-pub-net
- ok
- test_create_port_null_ip (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:53,806 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-f5829764-e1ff-4a43-b24e-52c0107f12b2-pub-net
- 2017-03-30 05:37:54,326 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-f5829764-e1ff-4a43-b24e-52c0107f12b2-pub-subnet
- 2017-03-30 05:37:55,475 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-f5829764-e1ff-4a43-b24e-52c0107f12b2-pub-subnet
- 2017-03-30 05:37:55,932 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-f5829764-e1ff-4a43-b24e-52c0107f12b2-pub-net
- ok
- test_create_port_null_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:37:57,121 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-194aff34-e0e6-4218-8c17-33a9d9b34816-pub-net
- 2017-03-30 05:37:57,611 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-194aff34-e0e6-4218-8c17-33a9d9b34816-pub-subnet
- 2017-03-30 05:37:58,880 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-194aff34-e0e6-4218-8c17-33a9d9b34816-pub-subnet
- 2017-03-30 05:37:59,638 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-194aff34-e0e6-4218-8c17-33a9d9b34816-pub-net
- ok
- test_create_port_null_network_object (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:38:01,037 - neutron_utils - INFO - Creating network with name NeutronUtilsRouterTests-3a49f710-5292-411b-83df-42bb176a5020-pub-net
- 2017-03-30 05:38:01,609 - neutron_utils - INFO - Creating subnet with name NeutronUtilsRouterTests-3a49f710-5292-411b-83df-42bb176a5020-pub-subnet
- 2017-03-30 05:38:02,095 - neutron_utils - INFO - Deleting subnet with name NeutronUtilsRouterTests-3a49f710-5292-411b-83df-42bb176a5020-pub-subnet
- 2017-03-30 05:38:03,034 - neutron_utils - INFO - Deleting network with name NeutronUtilsRouterTests-3a49f710-5292-411b-83df-42bb176a5020-pub-net
- ok
- test_create_router_empty_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... ok
- test_create_router_null_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... ok
- test_create_router_simple (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:38:03,973 - neutron_utils - INFO - Creating router with name - NeutronUtilsRouterTests-54c3eaf0-00c8-4726-a248-b57f98a37999-pub-router
- 2017-03-30 05:38:05,749 - neutron_utils - INFO - Deleting router with name - NeutronUtilsRouterTests-54c3eaf0-00c8-4726-a248-b57f98a37999-pub-router
- ok
- test_create_router_with_public_interface (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsRouterTests) ... 2017-03-30 05:38:07,392 - neutron_utils - INFO - Creating router with name - NeutronUtilsRouterTests-a4e93ee5-781e-4e9d-9b55-b4d8fb3f0e7b-pub-router
- 2017-03-30 05:38:09,164 - neutron_utils - INFO - Deleting router with name - NeutronUtilsRouterTests-a4e93ee5-781e-4e9d-9b55-b4d8fb3f0e7b-pub-router
- ok
- test_create_delete_simple_sec_grp (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSecurityGroupTests) ... 2017-03-30 05:38:10,643 - neutron_utils - INFO - Creating security group with name - NeutronUtilsSecurityGroupTests-0b62acfe-fc43-4c9f-bd30-2ce350c73c57name
- 2017-03-30 05:38:11,020 - neutron_utils - INFO - Retrieving security group with name - NeutronUtilsSecurityGroupTests-0b62acfe-fc43-4c9f-bd30-2ce350c73c57name
- 2017-03-30 05:38:11,084 - neutron_utils - INFO - Deleting security group with name - NeutronUtilsSecurityGroupTests-0b62acfe-fc43-4c9f-bd30-2ce350c73c57name
- 2017-03-30 05:38:11,230 - neutron_utils - INFO - Retrieving security group with name - NeutronUtilsSecurityGroupTests-0b62acfe-fc43-4c9f-bd30-2ce350c73c57name
- ok
- test_create_sec_grp_no_name (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSecurityGroupTests) ... ok
- test_create_sec_grp_no_rules (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSecurityGroupTests) ... 2017-03-30 05:38:11,653 - neutron_utils - INFO - Creating security group with name - NeutronUtilsSecurityGroupTests-19dc66ba-630d-4ef5-87e1-b0461971ef8ename
- 2017-03-30 05:38:11,960 - neutron_utils - INFO - Retrieving security group with name - NeutronUtilsSecurityGroupTests-19dc66ba-630d-4ef5-87e1-b0461971ef8ename
- 2017-03-30 05:38:12,047 - neutron_utils - INFO - Deleting security group with name - NeutronUtilsSecurityGroupTests-19dc66ba-630d-4ef5-87e1-b0461971ef8ename
- ok
- test_create_sec_grp_one_rule (snaps.openstack.utils.tests.neutron_utils_tests.NeutronUtilsSecurityGroupTests) ... 2017-03-30 05:38:12,321 - neutron_utils - INFO - Creating security group with name - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:12,676 - neutron_utils - INFO - Retrieving security group rules associate with the security group - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:12,735 - neutron_utils - INFO - Creating security group to security group - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:12,736 - neutron_utils - INFO - Retrieving security group with name - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:12,948 - neutron_utils - INFO - Retrieving security group with name - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:13,024 - neutron_utils - INFO - Retrieving security group rules associate with the security group - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:13,054 - neutron_utils - INFO - Retrieving security group with name - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- 2017-03-30 05:38:13,121 - neutron_utils - INFO - Deleting security group rule with ID - 07b4bfbe-c632-496b-95f7-b42de9293519
- 2017-03-30 05:38:13,238 - neutron_utils - INFO - Deleting security group rule with ID - c5e58c9f-6cc8-4543-ae39-aa1960b9a3e1
- 2017-03-30 05:38:13,387 - neutron_utils - INFO - Deleting security group rule with ID - 184d29e8-b460-4f80-858f-7915b9bafe9b
- 2017-03-30 05:38:13,492 - neutron_utils - INFO - Deleting security group with name - NeutronUtilsSecurityGroupTests-cf3022e5-dc6e-4cc1-8fe0-41c8c1c56defname
- ok
- test_create_delete_keypair (snaps.openstack.utils.tests.nova_utils_tests.NovaUtilsKeypairTests) ... 2017-03-30 05:38:13,664 - nova_utils - INFO - Creating keypair with name - NovaUtilsKeypairTests-cb36f8f9-ceca-4802-8735-a1dc846ad2a8
- ok
- test_create_key_from_file (snaps.openstack.utils.tests.nova_utils_tests.NovaUtilsKeypairTests) ... 2017-03-30 05:38:18,337 - nova_utils - INFO - Saved public key to - tmp/NovaUtilsKeypairTests-a7eba01b-9615-4271-b5f9-8fe915972f16.pub
- 2017-03-30 05:38:18,338 - nova_utils - INFO - Saved private key to - tmp/NovaUtilsKeypairTests-a7eba01b-9615-4271-b5f9-8fe915972f16
- 2017-03-30 05:38:18,338 - nova_utils - INFO - Saving keypair to - tmp/NovaUtilsKeypairTests-a7eba01b-9615-4271-b5f9-8fe915972f16.pub
- 2017-03-30 05:38:18,338 - nova_utils - INFO - Creating keypair with name - NovaUtilsKeypairTests-a7eba01b-9615-4271-b5f9-8fe915972f16
- ok
- test_create_keypair (snaps.openstack.utils.tests.nova_utils_tests.NovaUtilsKeypairTests) ... 2017-03-30 05:38:21,492 - nova_utils - INFO - Creating keypair with name - NovaUtilsKeypairTests-74535dab-d450-47b1-8814-c0b3f48b7643
- ok
- test_floating_ips (snaps.openstack.utils.tests.nova_utils_tests.NovaUtilsKeypairTests) ... 2017-03-30 05:38:23,509 - nova_utils - INFO - Creating floating ip to external network - admin_floating_net
- ok
- test_create_delete_flavor (snaps.openstack.utils.tests.nova_utils_tests.NovaUtilsFlavorTests) ... ok
- test_create_flavor (snaps.openstack.utils.tests.nova_utils_tests.NovaUtilsFlavorTests) ... ok
- test_create_clean_flavor (snaps.openstack.tests.create_flavor_tests.CreateFlavorTests) ... ok
- test_create_delete_flavor (snaps.openstack.tests.create_flavor_tests.CreateFlavorTests) ... ok
- test_create_flavor (snaps.openstack.tests.create_flavor_tests.CreateFlavorTests) ... ok
- test_create_flavor_existing (snaps.openstack.tests.create_flavor_tests.CreateFlavorTests) ... 2017-03-30 05:38:34,855 - create_image - INFO - Found flavor with name - CreateFlavorTests-3e389b6c-ee95-4f2d-bf74-78d324722ef2name
- ok
-
- ----------------------------------------------------------------------
- Ran 48 tests in 131.483s
-
- OK
- 2017-03-30 05:38:34,905 - functest - INFO - api_check OK
- 2017-03-30 05:38:35,259 - functest - INFO - The results were successfully pushed to DB
- 2017-03-30 05:38:35,259 - run_tests - INFO - Test execution time: 02:12
- 2017-03-30 05:38:35,267 - run_tests - INFO -
-
- 2017-03-30 05:38:35,268 - run_tests - INFO - ============================================
- 2017-03-30 05:38:35,268 - run_tests - INFO - Running test case 'snaps_health_check'...
- 2017-03-30 05:38:35,268 - run_tests - INFO - ============================================
- 2017-03-30 05:38:35,383 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:38:35,384 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- 2017-03-30 05:38:35,855 - functest - INFO - Using flavor metatdata '{'hw:mem_page_size': 'any'}'
- 2017-03-30 05:38:35,856 - file_utils - INFO - Attempting to read OS environment file - /home/opnfv/functest/conf/openstack.creds
- 2017-03-30 05:38:35,856 - openstack_tests - INFO - OS Credentials = OSCreds - username=admin, password=admin, auth_url=http://192.168.10.2:5000/v3, project_name=admin, identity_api_version=3, image_api_version=1, network_api_version=2, compute_api_version=2, user_domain_id=default, proxy_settings=None
- test_check_vm_ip_dhcp (snaps.openstack.tests.create_instance_tests.SimpleHealthCheck) ... 2017-03-30 05:38:39,896 - create_image - INFO - Creating image
- 2017-03-30 05:38:41,843 - create_image - INFO - Image is active with name - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-image
- 2017-03-30 05:38:41,843 - create_image - INFO - Image is now active with name - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-image
- 2017-03-30 05:38:41,845 - OpenStackNetwork - INFO - Creating neutron network SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-priv-net...
- 2017-03-30 05:38:42,140 - neutron_utils - INFO - Creating network with name SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-priv-net
- 2017-03-30 05:38:42,480 - neutron_utils - INFO - Creating subnet with name SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-priv-subnet
- 2017-03-30 05:38:44,166 - neutron_utils - INFO - Creating port for network with name - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-priv-net
- 2017-03-30 05:38:45,173 - create_instance - INFO - Creating VM with name - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-inst
- 2017-03-30 05:38:48,419 - create_instance - INFO - Created instance with name - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-inst
- 2017-03-30 05:39:05,164 - create_instance - INFO - VM is - ACTIVE
- 2017-03-30 05:39:05,164 - create_instance_tests - INFO - Looking for expression Lease of.*obtained in the console log
- 2017-03-30 05:39:06,547 - create_instance_tests - INFO - DHCP lease obtained logged in console
- 2017-03-30 05:39:06,548 - create_instance_tests - INFO - With correct IP address
- 2017-03-30 05:39:06,548 - create_instance - INFO - Deleting Port - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181port-1
- 2017-03-30 05:39:06,548 - neutron_utils - INFO - Deleting port with name SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181port-1
- 2017-03-30 05:39:07,178 - create_instance - INFO - Deleting VM instance - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-inst
- 2017-03-30 05:39:07,693 - create_instance - INFO - Checking deletion status
- 2017-03-30 05:39:11,088 - create_instance - INFO - VM has been properly deleted VM with name - SimpleHealthCheck-85a41a34-a9b3-463d-a12c-4bd057d70181-inst
- ok
-
- ----------------------------------------------------------------------
- Ran 1 test in 36.376s
-
- OK
- 2017-03-30 05:39:12,233 - functest - INFO - snaps_health_check OK
- 2017-03-30 05:39:12,598 - functest - INFO - The results were successfully pushed to DB
- 2017-03-30 05:39:12,598 - run_tests - INFO - Test execution time: 00:37
- 2017-03-30 05:39:12,599 - run_tests - INFO -
+
+ 2017-08-16 12:35:51,799 - functest.ci.run_tests - INFO - ############################################
+ 2017-08-16 12:35:51,799 - functest.ci.run_tests - INFO - Running tier 'healthcheck'
+ 2017-08-16 12:35:51,800 - functest.ci.run_tests - INFO - ############################################
+ 2017-08-16 12:35:51,800 - functest.ci.run_tests - INFO -
+ 2017-08-16 12:35:51,800 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:35:51,800 - functest.ci.run_tests - INFO - Running test case 'connection_check'...
+ 2017-08-16 12:35:51,800 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:36:00,278 - functest.core.testcase - INFO - The results were successfully pushed to DB
+ 2017-08-16 12:36:00,279 - functest.ci.run_tests - INFO - Test result:
+ +--------------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | DURATION | RESULT |
+ +--------------------------+------------------+------------------+----------------+
+ | connection_check | functest | 00:06 | PASS |
+ +--------------------------+------------------+------------------+----------------+
+ 2017-08-16 12:36:00,281 - functest.ci.run_tests - INFO -
+ 2017-08-16 12:36:00,281 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:36:00,281 - functest.ci.run_tests - INFO - Running test case 'api_check'...
+ 2017-08-16 12:36:00,281 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:41:04,088 - functest.core.testcase - INFO - The results were successfully pushed to DB
+ 2017-08-16 12:41:04,088 - functest.ci.run_tests - INFO - Test result:
+ +-------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | DURATION | RESULT |
+ +-------------------+------------------+------------------+----------------+
+ | api_check | functest | 05:03 | PASS |
+ +-------------------+------------------+------------------+----------------+
+ 2017-08-16 12:41:04,092 - functest.ci.run_tests - INFO -
+ 2017-08-16 12:41:04,092 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:41:04,092 - functest.ci.run_tests - INFO - Running test case 'snaps_health_check'...
+ 2017-08-16 12:41:04,092 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:41:39,817 - functest.core.testcase - INFO - The results were successfully pushed to DB
+ 2017-08-16 12:41:39,818 - functest.ci.run_tests - INFO - Test result:
+ +----------------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | DURATION | RESULT |
+ +----------------------------+------------------+------------------+----------------+
+ | snaps_health_check | functest | 00:35 | PASS |
+ +----------------------------+------------------+------------------+----------------+
and
root@22e436918db0:~/repos/functest/ci# functest testcase run vping_ssh
- 2016-06-30 11:50:31,865 - run_tests - INFO - ============================================
- 2016-06-30 11:50:31,865 - run_tests - INFO - Running test case 'vping_ssh'...
- 2016-06-30 11:50:31,865 - run_tests - INFO - ============================================
- 2016-06-30 11:50:32,977 - vping_ssh - INFO - Creating image 'functest-vping' from '/home/opnfv/functest/data/cirros-0.3.5-x86_64-disk.img'...
- 2016-06-30 11:50:45,470 - vping_ssh - INFO - Creating neutron network vping-net...
- 2016-06-30 11:50:47,645 - vping_ssh - INFO - Creating security group 'vPing-sg'...
- 2016-06-30 11:50:48,843 - vping_ssh - INFO - Using existing Flavor 'm1.small'...
- 2016-06-30 11:50:48,927 - vping_ssh - INFO - vPing Start Time:'2016-06-30 11:50:48'
- 2016-06-30 11:50:48,927 - vping_ssh - INFO - Creating instance 'opnfv-vping-1'...
- 2016-06-30 11:51:34,664 - vping_ssh - INFO - Instance 'opnfv-vping-1' is ACTIVE.
- 2016-06-30 11:51:34,818 - vping_ssh - INFO - Adding 'opnfv-vping-1' to security group 'vPing-sg'...
- 2016-06-30 11:51:35,209 - vping_ssh - INFO - Creating instance 'opnfv-vping-2'...
- 2016-06-30 11:52:01,439 - vping_ssh - INFO - Instance 'opnfv-vping-2' is ACTIVE.
- 2016-06-30 11:52:01,439 - vping_ssh - INFO - Adding 'opnfv-vping-2' to security group 'vPing-sg'...
- 2016-06-30 11:52:01,754 - vping_ssh - INFO - Creating floating IP for VM 'opnfv-vping-2'...
- 2016-06-30 11:52:01,969 - vping_ssh - INFO - Floating IP created: '10.17.94.140'
- 2016-06-30 11:52:01,969 - vping_ssh - INFO - Associating floating ip: '10.17.94.140' to VM 'opnfv-vping-2'
- 2016-06-30 11:52:02,792 - vping_ssh - INFO - Trying to establish SSH connection to 10.17.94.140...
- 2016-06-30 11:52:19,915 - vping_ssh - INFO - Waiting for ping...
- 2016-06-30 11:52:21,108 - vping_ssh - INFO - vPing detected!
- 2016-06-30 11:52:21,108 - vping_ssh - INFO - vPing duration:'92.2' s.
- 2016-06-30 11:52:21,109 - vping_ssh - INFO - vPing OK
- 2016-06-30 11:52:21,153 - clean_openstack - INFO - +++++++++++++++++++++++++++++++
- 2016-06-30 11:52:21,153 - clean_openstack - INFO - Cleaning OpenStack resources...
- 2016-06-30 11:52:21,153 - clean_openstack - INFO - +++++++++++++++++++++++++++++++
- Version 1 is deprecated, use alternative version 2 instead.
+ 2017-08-16 12:41:39,821 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:41:39,821 - functest.ci.run_tests - INFO - Running test case 'vping_ssh'...
+ 2017-08-16 12:41:39,821 - functest.ci.run_tests - INFO - ============================================
+ 2017-08-16 12:42:49,861 - functest.core.testcase - INFO - The results were successfully pushed to DB
+ 2017-08-16 12:42:49,861 - functest.ci.run_tests - INFO - Test result:
+ +-------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | DURATION | RESULT |
+ +-------------------+------------------+------------------+----------------+
+ | vping_ssh | functest | 00:47 | PASS |
+ +-------------------+------------------+------------------+----------------+
:
:
etc.
@@ -610,7 +257,7 @@ variables:
* The scenario [controller]-[feature]-[mode], stored in DEPLOY_SCENARIO with
* controller = (odl|ocl|nosdn|onos)
- * feature = (ovs(dpdk)|kvm|sfc|bgpvpn|multisites|netready|ovs_dpdk_bar)
+ * feature = (ovs(dpdk)|kvm|sfc|bgpvpn|ovs_dpdk_bar)
* mode = (ha|noha)
The constraints per test case are defined in the Functest configuration file
@@ -686,3 +333,62 @@ might be eventually removed.
Please note that a system snapshot is taken before any test case execution.
This testcase.yaml file is used for CI, for the CLI and for the automatic reporting.
+
+
+Executing Functest suites (Alpine)
+==================================
+
+As mentioned in the configuration guide `[1]`_, Alpine docker containers have
+been introduced in Euphrates and one container has been created per tier.
+Assuming that you have pulled the container and that your environment is ready,
+you can simply run a tier by typing (e.g. with functest-healthcheck)::
+
+ sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/openstack.creds \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-healthcheck
+
+You should get::
+
+ +----------------------------+------------------+---------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +----------------------------+------------------+---------------------+------------------+----------------+
+ | connection_check | functest | healthcheck | 00:02 | PASS |
+ | api_check | functest | healthcheck | 03:19 | PASS |
+ | snaps_health_check | functest | healthcheck | 00:46 | PASS |
+ +----------------------------+------------------+---------------------+------------------+----------------+
+
+You can run functest-healthcheck, functest-smoke, functest-features,
+functest-components and functest-vnf.
+
+Please note that you may also use the CLI for manual tests using Alpine
+containers.
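+
+For instance, a minimal sketch of such a manual run (assuming the functest CLI
+is installed in the tier container, and reusing the same env file and volumes
+as above) could be::
+
+ sudo docker run -ti --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/openstack.creds \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-healthcheck /bin/bash
+ # then, from the shell inside the container:
+ functest testcase run connection_check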
+
+
+Functest internal API
+=====================
+
+An internal API has been introduced in Euphrates. The goal is to trigger
+Functest operations through an API in addition to the CLI.
+This can be considered as a first step towards a pseudo micro-services
+approach where the different test projects expose APIs to, and consume APIs
+from, the other test projects.
+
+In Euphrates the main methods of the API are:
+
+ * Show environment
+ * Prepare Environment
+ * Show credentials
+ * List all testcases
+ * Show a testcase
+ * List all tiers
+ * Show a tier
+ * List all testcases within given tier
+
+The API can be invoked as follows::
+
+ http://<functest_url>:5000/api/v1/functest/envs
+
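+For example, assuming the container exposes port 5000 and that the other
+resources follow the same URL pattern (an assumption rather than a confirmed
+endpoint list), the API could be queried with curl::
+
+ curl -s http://<functest_url>:5000/api/v1/functest/envs
+ curl -s http://<functest_url>:5000/api/v1/functest/testcases
+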
+TODO
+
+.. _`[1]`: http://artifacts.opnfv.org/functest/colorado/docs/configguide/#
diff --git a/docs/testing/user/userguide/test_details.rst b/docs/testing/user/userguide/test_details.rst
new file mode 100644
index 000000000..5f5be4173
--- /dev/null
+++ b/docs/testing/user/userguide/test_details.rst
@@ -0,0 +1,539 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+The different test cases are described in the remaining sections of this document.
+
+VIM (Virtualized Infrastructure Manager)
+----------------------------------------
+
+Healthcheck tests
+^^^^^^^^^^^^^^^^^
+In Danube, healthcheck tests have been refactored and rely on SNAPS, an
+OPNFV middleware project.
+
+SNAPS stands for "SDN/NFV Application development Platform and Stack".
+SNAPS is an object-oriented OpenStack library packaged with tests that exercise
+OpenStack.
+More information on SNAPS can be found in `[13]`_.
+
+Three tests are declared as healthcheck tests and can be used for gating by the
+installer; functionally they cover the tests previously performed by the
+healthcheck test case.
+
+The tests are:
+
+
+ * *connection_check*
+ * *api_check*
+ * *snaps_health_check*
+
+Connection_check consists of 9 test cases (test duration < 5s) checking the
+connectivity with Glance, Keystone, Neutron, Nova and the external network.
+
+Api_check verifies the retrieval of the OpenStack clients: Keystone, Glance,
+Neutron and Nova, and may perform some simple queries. When the config value of
+snaps.use_keystone is True, Functest must have access to the cloud's private
+network. This suite consists of 49 tests (test duration < 2 minutes).
+
+snaps_health_check creates an instance, allocates a floating IP and connects to
+the VM. This test replaces the previous Colorado healthcheck test.
+
+Obviously, successful completion of the 'healthcheck' test case is a
+necessary prerequisite for the execution of all other test Tiers.
+
+
+vPing_ssh
+^^^^^^^^^
+
+Given the script **ping.sh**::
+
+ #!/bin/sh
+ ping -c 1 $1 2>&1 >/dev/null
+ RES=$?
+ if [ "Z$RES" = "Z0" ] ; then
+ echo 'vPing OK'
+ else
+ echo 'vPing KO'
+ fi
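+
+Run on its own (with any IP address as its single argument), the script simply
+prints one of the two markers depending on whether the target replies::
+
+ sh ping.sh 192.168.0.1
+ # prints 'vPing OK' if the target replied, 'vPing KO' otherwise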
+
+
+The goal of this test is to establish an SSH connection using a floating IP
+on the Public/External network and verify that 2 instances can talk over a Private
+Tenant network::
+
+ vPing_ssh test case
+ +-------------+ +-------------+
+ | | | |
+ | | Boot VM1 with IP1 | |
+ | +------------------->| |
+ | Tester | | System |
+ | | Boot VM2 | Under |
+ | +------------------->| Test |
+ | | | |
+ | | Create floating IP | |
+ | +------------------->| |
+ | | | |
+ | | Assign floating IP | |
+ | | to VM2 | |
+ | +------------------->| |
+ | | | |
+ | | Establish SSH | |
+ | | connection to VM2 | |
+ | | through floating IP| |
+ | +------------------->| |
+ | | | |
+ | | SCP ping.sh to VM2 | |
+ | +------------------->| |
+ | | | |
+ | | VM2 executes | |
+ | | ping.sh to VM1 | |
+ | +------------------->| |
+ | | | |
+ | | If ping: | |
+ | | exit OK | |
+ | | else (timeout): | |
+ | | exit Failed | |
+ | | | |
+ +-------------+ +-------------+
+
+This test can be considered as a "Hello World" example.
+It is the first basic use case which **must** work on any deployment.
+
+vPing_userdata
+^^^^^^^^^^^^^^
+
+This test case is similar to vPing_ssh but does not use Floating IPs
+and the Public/External network to transfer the ping script.
+Instead, it uses the Nova metadata service to pass the script to the instance
+at boot time.
+Like vPing_ssh, it checks that 2 instances can talk to
+each other on a Private Tenant network::
+
+ vPing_userdata test case
+ +-------------+ +-------------+
+ | | | |
+ | | Boot VM1 with IP1 | |
+ | +------------------->| |
+ | | | |
+ | | Boot VM2 with | |
+ | | ping.sh as userdata| |
+ | | with IP1 as $1. | |
+ | +------------------->| |
+ | Tester | | System |
+ | | VM2 exeutes ping.sh| Under |
+ | | (ping IP1) | Test |
+ | +------------------->| |
+ | | | |
+ | | Monitor nova | |
+ | | console-log VM 2 | |
+ | | If ping: | |
+ | | exit OK | |
+ | | else (timeout) | |
+ | | exit Failed | |
+ | | | |
+ +-------------+ +-------------+
+
+When the second VM boots, it will automatically execute the script passed as
+userdata. The ping is detected by periodically capturing the output of the
+console log of the second VM.
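+
+As an illustration only (the instance name below comes from an older sample run
+and is not guaranteed), the same check could be performed manually by grepping
+the console log::
+
+ openstack console log show opnfv-vping-2 | grep "vPing OK"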
+
+
+Tempest
+^^^^^^^
+
+Tempest `[2]`_ is the reference OpenStack Integration test suite.
+It is a set of integration tests to be run against a live OpenStack cluster.
+Tempest has suites of tests for:
+
+ * OpenStack API validation
+ * Scenarios
+ * Other specific tests useful in validating an OpenStack deployment
+
+Functest uses Rally `[3]`_ to run the Tempest suite.
+Rally automatically generates the Tempest configuration file **tempest.conf**.
+Before running the actual test cases,
+Functest creates the needed resources (user, tenant) and
+updates the appropriate parameters in the configuration file.
+
+When the Tempest suite is executed, each test duration is measured and the full
+console output is stored in a *log* file for further analysis.
+
+The Tempest testcases are distributed across two
+Tiers:
+
+ * Smoke Tier - Test Case 'tempest_smoke_serial'
+ * Components Tier - Test case 'tempest_full_parallel'
+
+NOTE: Test case 'tempest_smoke_serial' executes a defined set of tempest smoke
+tests with a single thread (i.e. serial mode). Test case 'tempest_full_parallel'
+executes all defined Tempest tests using several concurrent threads
+(i.e. parallel mode). The number of threads activated corresponds to the number
+of available logical CPUs.
+
+The goal of the Tempest test suite is to check the basic functionalities of the
+different OpenStack components on an OPNFV fresh installation, using the
+corresponding REST API interfaces.
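+
+Both Tempest test cases can be launched individually through the Functest CLI
+inside the container, for example::
+
+ functest testcase run tempest_smoke_serial
+ functest testcase run tempest_full_parallel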
+
+
+Rally bench test suites
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Rally `[3]`_ is a benchmarking tool that answers the question:
+
+*How does OpenStack work at scale?*
+
+The goal of this test suite is to benchmark the different OpenStack modules and
+get significant figures that could help define Telco Cloud KPIs.
+
+The OPNFV Rally scenarios are based on the collection of the upstream Rally scenarios:
+
+ * authenticate
+ * cinder
+ * glance
+ * heat
+ * keystone
+ * neutron
+ * nova
+ * quotas
+
+A basic SLA (stop test on errors) has been implemented.
+
+The Rally testcases are distributed across two Tiers:
+
+ * Smoke Tier - Test Case 'rally_sanity'
+ * Components Tier - Test case 'rally_full'
+
+NOTE: Test case 'rally_sanity' executes a limited number of Rally smoke test
+cases. Test case 'rally_full' executes the full defined set of Rally tests.
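+
+As with Tempest, both Rally test cases can be launched through the Functest
+CLI, for example::
+
+ functest testcase run rally_sanity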
+
+
+Refstack-client to run Defcore testcases
+-----------------------------------------
+
+Refstack-client `[8]`_ is a command line utility that allows you to
+execute Tempest test runs based on configurations you specify.
+It is the official tool to run the Defcore `[9]`_ testcases, which focus on
+testing interoperability between OpenStack clouds.
+
+Refstack-client is integrated in Functest and consumed by Dovetail, which
+intends to define and provide a set of OPNFV related validation criteria
+that will provide input for the evaluation of the use of OPNFV trademarks.
+This effort is governed by the Compliance Verification Program (CVP).
+
+Defcore testcases
+^^^^^^^^^^^^^^^^^^
+
+*Danube Release*
+
+The set of DefCore Tempest test cases that are required and not flagged.
+According to `[10]`_, some tests are still flagged due to outstanding bugs
+in the Tempest library, particularly tests that require SSH. Refstack developers
+are working on correcting these bugs upstream. Please note that although some tests
+are flagged because of bugs, there is still an expectation that the capabilities
+covered by the tests are available. The set only contains OpenStack core compute
+(no object storage). The approved guidelines (2016.08) are valid for the Kilo,
+Liberty, Mitaka and Newton releases of OpenStack.
+The list can be generated using the REST API of the RefStack project:
+https://refstack.openstack.org/api/v1/guidelines/2016.08/tests?target=compute&type=required&alias=true&flag=false
+
+Running methods
+^^^^^^^^^^^^^^^
+
+Two running methods are provided now that refstack-client is integrated into
+Functest: the Functest command line and the manual way.
+
+By default, the Defcore test cases run via the Functest command line use an
+automatically generated configuration file, i.e. refstack_tempest.conf.
+In some circumstances this automatically generated configuration file may not
+fully match the SUT; Functest therefore also inherits the refstack-client
+command line and provides a way for users to set the configuration file
+manually, according to their own SUT.
+
+*command line*
+
+Inside the Functest container, first prepare the Functest environment:
+
+::
+
+ functest env prepare
+
+then run the default Defcore testcases using refstack-client:
+
+::
+
+ functest testcase run refstack_defcore
+
+In the OPNFV Continuous Integration (CI) system, the command line method is used.
+
+*manually*
+
+Prepare the Tempest configuration file and the list of testcases to run against
+the SUT, then run the testcases with:
+
+::
+
+ ./refstack-client test -c <Path of the tempest configuration file to use> -v --test-list <Path or URL of test list>
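+
+For instance, a possible invocation combining a locally adjusted configuration
+file with the 2016.08 required-test list mentioned above (file names are
+illustrative) could be::
+
+ ./refstack-client test -c ~/refstack_tempest.conf -v \
+ --test-list "https://refstack.openstack.org/api/v1/guidelines/2016.08/tests?target=compute&type=required&alias=true&flag=false"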
+
+Use the help option for more information:
+
+::
+
+ ./refstack-client --help
+ ./refstack-client test --help
+
+Reference tempest configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+*command line method*
+
+When the command line method is used, the default Tempest configuration file
+is generated by Rally.
+
+*manually*
+
+When running manually, the recommended way to generate the Tempest configuration
+file is:
+
+::
+
+ cd /usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/refstack_client
+ python tempest_conf.py
+
+A file called tempest.conf is stored in the current path by default; users can
+adjust it according to the SUT:
+
+::
+
+ vim refstack_tempest.conf
+
+A reference article is available in `[15]`_.
+
+
+snaps_smoke
+------------
+
+This test case contains tests that set up and destroy environments with VMs,
+with and without Floating IPs, using a newly created user and project. Set the
+config value snaps.use_floating_ips (True|False) to toggle this functionality.
+When the config value of snaps.use_keystone is True, Functest must have access
+to the cloud's private network.
+This suite consists of 38 tests (test duration < 10 minutes).
+
+
+SDN Controllers
+---------------
+
+There are currently 3 available controllers:
+
+ * OpenDaylight (ODL)
+ * ONOS
+ * OpenContrail (OCL)
+
+OpenDaylight
+^^^^^^^^^^^^
+
+The OpenDaylight (ODL) test suite consists of a set of basic tests inherited
+from the ODL project using the Robot `[11]`_ framework.
+The suite verifies creation and deletion of networks, subnets and ports with
+OpenDaylight and Neutron.
+
+The list of tests can be described as follows:
+
+ * Basic Restconf test cases
+ * Connect to Restconf URL
+ * Check the HTTP code status
+
+ * Neutron Reachability test cases
+ * Get the complete list of neutron resources (networks, subnets, ports)
+
+ * Neutron Network test cases
+ * Check OpenStack networks
+ * Check OpenDaylight networks
+ * Create a new network via OpenStack and check the HTTP status code returned by Neutron
+ * Check that the network has also been successfully created in OpenDaylight
+
+ * Neutron Subnet test cases
+ * Check OpenStack subnets
+ * Check OpenDaylight subnets
+ * Create a new subnet via OpenStack and check the HTTP status code returned by Neutron
+ * Check that the subnet has also been successfully created in OpenDaylight
+
+ * Neutron Port test cases
+ * Check OpenStack Neutron for known ports
+ * Check OpenDaylight ports
+ * Create a new port via OpenStack and check the HTTP status code returned by Neutron
+ * Check that the new port has also been successfully created in OpenDaylight
+
+ * Delete operations
+ * Delete the port previously created via OpenStack
+ * Check that the port has been also successfully deleted in OpenDaylight
+ * Delete the subnet previously created via OpenStack
+ * Check that the subnet has also been successfully deleted in OpenDaylight
+ * Delete the network created via OpenStack
+ * Check that the network has also been successfully deleted in OpenDaylight
+
+Note: the checks in OpenDaylight are based on the HTTP status codes
+returned by OpenDaylight.
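+
+As an illustration only, a similar Restconf reachability check can be
+reproduced with a few lines of Python; the Restconf port (8181), the
+credentials (admin/admin) and the controller IP below are assumptions that
+depend on the deployment:
+
+::
+
+ import base64
+ import urllib2
+
+ odl_ip = "192.168.0.10"  # SDN controller IP, deployment specific
+ request = urllib2.Request("http://%s:8181/restconf/modules" % odl_ip)
+ # Basic authentication with the assumed default ODL credentials
+ request.add_header(
+     "Authorization", "Basic " + base64.b64encode("admin:admin"))
+ # Like the Robot suites, only the returned HTTP status code is checked
+ assert urllib2.urlopen(request).getcode() == 200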
+
+
+ONOS
+^^^^
+
+TestON Framework is used to test the ONOS SDN controller functions.
+The test cases deal with L2 and L3 functions.
+The ONOS test suite can be run on any ONOS compliant scenario.
+
+The test cases are described as follows:
+
+ * onosfunctest: The main executable file contains the initialization of
+ the docker environment and functions called by FUNCvirNetNB and
+ FUNCvirNetNBL3
+
+ * FUNCvirNetNB
+
+ * Create Network: Post Network data and check it in ONOS
+ * Update Network: Update the Network and compare it in ONOS
+ * Delete Network: Delete the Network and check if it's NULL in ONOS or
+ not
+ * Create Subnet: Post Subnet data and check it in ONOS
+ * Update Subnet: Update the Subnet and compare it in ONOS
+ * Delete Subnet: Delete the Subnet and check if it's NULL in ONOS or not
+ * Create Port: Post Port data and check it in ONOS
+ * Update Port: Update the Port and compare it in ONOS
+ * Delete Port: Delete the Port and check if it's NULL in ONOS or not
+
+ * FUNCvirNetNBL3
+
+ * Create Router: Post Router data and check it in ONOS
+ * Update Router: Update the Router and compare it in ONOS
+ * Delete Router: Delete the Router data and check it in ONOS
+ * Create RouterInterface: Post Router Interface data to an existing Router
+ and check it in ONOS
+ * Delete RouterInterface: Delete the RouterInterface and check the Router
+ * Create FloatingIp: Post FloatingIp data and check it in ONOS
+ * Update FloatingIp: Update the FloatingIp and compare it in ONOS
+ * Delete FloatingIp: Delete the FloatingIp and check that it is 'NULL' in
+ ONOS
+ * Create External Gateway: Post data to create an External Gateway for an
+ existing Router and check it in ONOS
+ * Update External Gateway: Update the External Gateway and compare the change
+ * Delete External Gateway: Delete the External Gateway and check that it is
+ 'NULL' in ONOS
+
+
+Features
+--------
+
+Functest has been supporting several feature projects since Brahmaputra:
+
+
++-----------------+---------+----------+--------+-----------+
+| Test | Brahma | Colorado | Danube | Euphrates |
++=================+=========+==========+========+===========+
+| barometer | | | X | X |
++-----------------+---------+----------+--------+-----------+
+| bgpvpn | | X | X | X |
++-----------------+---------+----------+--------+-----------+
+| copper | | X | | |
++-----------------+---------+----------+--------+-----------+
+| doctor | X | X | X | X |
++-----------------+---------+----------+--------+-----------+
+| domino | | X | X | X |
++-----------------+---------+----------+--------+-----------+
+| fds | | | X | X |
++-----------------+---------+----------+--------+-----------+
+| moon | | X | | X |
++-----------------+---------+----------+--------+-----------+
+| multisite | | X | X | |
++-----------------+---------+----------+--------+-----------+
+| netready | | | X | |
++-----------------+---------+----------+--------+-----------+
+| odl_sfc | | X | X | X |
++-----------------+---------+----------+--------+-----------+
+| opera | | | X | |
++-----------------+---------+----------+--------+-----------+
+| orchestra | | | X | X |
++-----------------+---------+----------+--------+-----------+
+| parser | | | X | |
++-----------------+---------+----------+--------+-----------+
+| promise | X | X | X | X |
++-----------------+---------+----------+--------+-----------+
+| security_scan | | X | X | |
++-----------------+---------+----------+--------+-----------+
+
+Please refer to the dedicated feature user guides for details.
+
+
+VNF
+---
+
+
+cloudify_ims
+^^^^^^^^^^^^
+The IP Multimedia Subsystem or IP Multimedia Core Network Subsystem (IMS) is an
+architectural framework for delivering IP multimedia services.
+
+vIMS has been integrated in Functest to demonstrate the capability to deploy a
+relatively complex NFV scenario on the OPNFV platform. Deploying a complete,
+functional VNF allows testing most of the essential functions needed for an
+NFV platform.
+
+The goals of this test suite are to:
+
+ * deploy a VNF orchestrator (Cloudify)
+ * deploy a Clearwater vIMS (IP Multimedia Subsystem) VNF from this
+ orchestrator based on a TOSCA blueprint defined in `[5]`_
+ * run a suite of signaling tests on top of this VNF
+
+The Clearwater architecture is described as follows:
+
+.. figure:: ../../../images/clearwater-architecture.png
+ :align: center
+ :alt: vIMS architecture
+
+orchestra_openims
+^^^^^^^^^^^^^^^^^
+This Orchestra test case deals with the deployment of OpenIMS with the
+OpenBaton orchestrator.
+
+orchestra_clearwaterims
+^^^^^^^^^^^^^^^^^^^^^^^
+This Orchestra test case deals with the deployment of Clearwater vIMS with
+the OpenBaton orchestrator.
+
+parser
+^^^^^^
+
+See parser user guide for details: `[12]`_
+
+
+vyos-vrouter
+^^^^^^^^^^^^
+
+This test case deals with the deployment and testing of the VyOS vRouter with
+the Cloudify orchestrator. The test case can check interoperability of the
+BGP protocol using VyOS.
+
+The workflow is as follows:
+ * Deploy
+ Deploy the VNF testing topology with Cloudify, using a blueprint.
+ * Configuration
+ Configure the target VNF and the reference VNF using SSH.
+ * Run
+ Execute the test commands for the test items described in a YAML format
+ file. Check the VNF status and behavior.
+ * Reporting
+ Output a report based on the results, in JSON format.
+
+The vyos-vrouter architecture is described in `[14]`_.
+
+.. _`[2]`: http://docs.openstack.org/developer/tempest/overview.html
+.. _`[3]`: https://rally.readthedocs.org/en/latest/index.html
+.. _`[5]`: https://github.com/Orange-OpenSource/opnfv-cloudify-clearwater/blob/master/openstack-blueprint.yaml
+.. _`[8]`: https://github.com/openstack/refstack-client
+.. _`[10]`: https://github.com/openstack/interop/blob/master/2016.08/procedure.rst
+.. _`[11]`: http://robotframework.org/
+.. _`[12]`: http://artifacts.opnfv.org/parser/colorado/docs/userguide/index.html
+.. _`[13]`: https://wiki.opnfv.org/display/PROJ/SNAPS-OO
+.. _`[14]`: https://github.com/oolorg/opnfv-functest-vrouter
+.. _`[15]`: https://aptira.com/testing-openstack-tempest-part-1/
diff --git a/docs/testing/user/userguide/introduction.rst b/docs/testing/user/userguide/test_overview.rst
index d1b7d23e0..6aae2825d 100644
--- a/docs/testing/user/userguide/introduction.rst
+++ b/docs/testing/user/userguide/test_overview.rst
@@ -59,8 +59,8 @@ validate the scenario for the release.
| | | rally_sanity | Run a subset of the OpenStack |
| | | | Rally Test Suite in smoke mode |
| | +----------------+----------------------------------+
-| | | snaps_smoke | Run a subset of the OpenStack |
-| | | | Rally Test Suite in smoke mode |
+| | | snaps_smoke | Run the SNAPS-OO integration |
+| | | | tests |
| | +----------------+----------------------------------+
| | | refstack | Reference RefStack suite |
| | | \_defcore | tempest selection for NFV |
@@ -132,13 +132,6 @@ validate the scenario for the release.
| | | | See `Domino User Guide`_ for |
| | | | details |
| | +----------------+----------------------------------+
-| | | multisite | Multisite |
-| | | | See `Multisite User Guide`_ for |
-| | | | details |
-| | +----------------+----------------------------------+
-| | | netready | Testing from netready project |
-| | | | ping using gluon |
-| | +----------------+----------------------------------+
| | | odl-sfc | SFC testing for odl scenarios |
| | | | See `SFC User Guide`_ for details|
| | +----------------+----------------------------------+
@@ -162,11 +155,6 @@ validate the scenario for the release.
| | | | storage. |
| | | | See `Promise User Guide`_ for |
| | | | details. |
-| | +----------------+----------------------------------+
-| | | security_scan | Implementation of a simple |
-| | | | security scan. (Currently |
-| | | | available only for the Apex |
-| | | | installer environment) |
+-------------+---------------+----------------+----------------------------------+
| VNF | vnf | cloudify_ims | Example of a real VNF deployment |
| | | | to show the NFV capabilities of |
@@ -176,8 +164,11 @@ validate the scenario for the release.
| | | | It provides a fully functional |
| | | | VoIP System |
| | +----------------+----------------------------------+
-| | | orchestra_ims | OpenIMS deployment using |
-| | | | Openbaton orchestrator |
+| | | orchestra | OpenIMS deployment using |
+| | | openims | Openbaton orchestrator |
+| | +----------------+----------------------------------+
+| | | orchestra | Clearwater IMS deployment using |
+| | | clearwaterims | Openbaton orchestrator |
| | +----------------+----------------------------------+
| | | vyos_vrouter | vRouter testing |
+-------------+---------------+----------------+----------------------------------+
@@ -233,7 +224,7 @@ Functest considers OPNFV as a black box. As of Danube release the OPNFV
offers a lot of potential combinations:
* 3 controllers (OpenDaylight, ONOS, OpenContrail)
- * 4 installers (Apex, Compass, Fuel, Joid)
+ * 5 installers (Apex, Compass, Daisy, Fuel, Joid)
Most of the tests are runnable by any combination, but some tests might have
restrictions imposed by the utilized installers or due to the available
@@ -252,7 +243,6 @@ section `Executing the functest suites`_ of this document.
.. _`[2]`: http://docs.openstack.org/developer/tempest/overview.html
.. _`[3]`: https://rally.readthedocs.org/en/latest/index.html
-.. _`Copper User Guide`: http://artifacts.opnfv.org/copper/colorado/docs/userguide/index.html
.. _`Doctor User Guide`: http://artifacts.opnfv.org/doctor/colorado/userguide/index.html
.. _`Promise User Guide`: http://artifacts.opnfv.org/promise/colorado/docs/userguide/index.html
.. _`ONOSFW User Guide`: http://artifacts.opnfv.org/onosfw/colorado/userguide/index.html
@@ -261,4 +251,3 @@ section `Executing the functest suites`_ of this document.
.. _`Parser User Guide`: http://artifacts.opnfv.org/parser/colorado/docs/userguide/index.html
.. _`Functest Dashboard`: http://testresults.opnfv.org/kibana_dashboards/
.. _`SFC User Guide`: http://artifacts.opnfv.org/sfc/colorado/userguide/index.html
-.. _`Multisite User Guide`: http://artifacts.opnfv.org/multisite/docs/userguide/index.html
diff --git a/docs/testing/user/userguide/test_results.rst b/docs/testing/user/userguide/test_results.rst
new file mode 100644
index 000000000..53e4d3a86
--- /dev/null
+++ b/docs/testing/user/userguide/test_results.rst
@@ -0,0 +1,50 @@
+Test results
+============
+
+Manual testing
+--------------
+
+In manual mode, test results are displayed in the console and result files
+are stored in /home/opnfv/functest/results.
+
+If you want additional logs, you may configure logging.ini under <repo>/functest/ci.
+
+Automated testing
+-----------------
+
+In automated mode, test results are displayed in the Jenkins logs and a summary
+is provided at the end of the job. It can be described as follows::
+
+ +-------------------------+----------------------------------------------------------+
+ | ENV VAR | VALUE |
+ +-------------------------+----------------------------------------------------------+
+ | INSTALLER_TYPE | daisy |
+ | DEPLOY_SCENARIO | os-nosdn-nofeature-ha |
+ | BUILD_TAG | jenkins-functest-daisy-baremetal-daily-master-67 |
+ | CI_LOOP | daily |
+ +-------------------------+----------------------------------------------------------+
+
+ +------------------------------+------------------+---------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +------------------------------+------------------+---------------------+------------------+----------------+
+ | connection_check | functest | healthcheck | 00:08 | PASS |
+ | api_check | functest | healthcheck | 04:22 | PASS |
+ | snaps_health_check | functest | healthcheck | 00:35 | PASS |
+ | vping_ssh | functest | smoke | 00:54 | PASS |
+ | vping_userdata | functest | smoke | 00:27 | PASS |
+ | tempest_smoke_serial | functest | smoke | 19:39 | FAIL |
+ | rally_sanity | functest | smoke | 15:16 | PASS |
+ | refstack_defcore | functest | smoke | 15:55 | PASS |
+ | snaps_smoke | functest | smoke | 26:45 | FAIL |
+ | cloudify_ims | functest | vnf | 83:33 | FAIL |
+ | orchestra_ims | functest | vnf | 11:32 | FAIL |
+ +------------------------------+------------------+---------------------+------------------+----------------+
+
+Results are automatically pushed to the test results database; some additional
+result files are pushed to the OPNFV artifact web sites.
+
+Based on the results stored in the results database, a `Functest reporting`_
+portal is also automatically updated. This portal provides information on the
+overall status per scenario and per installer.
+
+.. _`Functest reporting`: http://testresults.opnfv.org/reporting/functest/release/danube/index-status-fuel.html
diff --git a/functest/opnfv_tests/vnf/aaa/__init__.py b/functest/api/__init__.py
index e69de29bb..e69de29bb 100644
--- a/functest/opnfv_tests/vnf/aaa/__init__.py
+++ b/functest/api/__init__.py
diff --git a/functest/api/base.py b/functest/api/base.py
new file mode 100644
index 000000000..ffc567860
--- /dev/null
+++ b/functest/api/base.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+The base class to dispatch request
+
+"""
+
+import logging
+
+from flask import request
+from flask_restful import Resource
+
+from functest.api.common import api_utils
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class ApiResource(Resource):
+ """ API Resource class"""
+
+ def __init__(self):
+ super(ApiResource, self).__init__()
+
+ def _post_args(self): # pylint: disable=no-self-use
+ # pylint: disable=maybe-no-member
+ """ Return action and args after parsing request """
+
+ data = request.json if request.json else {}
+ params = api_utils.change_to_str_in_dict(data)
+ action = params.get('action', request.form.get('action', ''))
+ args = params.get('args', {})
+ try:
+ args['file'] = request.files['file']
+ except KeyError:
+ pass
+ LOGGER.debug('Input args are: action: %s, args: %s', action, args)
+
+ return action, args
+
+ def _dispatch_post(self):
+ """ Dispatch request """
+ action, args = self._post_args()
+ return self._dispatch(args, action)
+
+ def _dispatch(self, args, action):
+ """
+ Dynamically load the classes with reflection and
+ obtain corresponding methods
+ """
+ try:
+ return getattr(self, action)(args)
+ except AttributeError:
+            return api_utils.result_handler(status=1, data='No such action')
+
+
+# Import modules from package "functest.api.resources"
+# and append them into sys.modules
+api_utils.import_modules_from_package("functest.api.resources")
diff --git a/functest/tests/unit/vnf/rnc/__init__.py b/functest/api/common/__init__.py
index e69de29bb..e69de29bb 100644
--- a/functest/tests/unit/vnf/rnc/__init__.py
+++ b/functest/api/common/__init__.py
diff --git a/functest/api/common/api_utils.py b/functest/api/common/api_utils.py
new file mode 100644
index 000000000..d85acf927
--- /dev/null
+++ b/functest/api/common/api_utils.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Utils for functest restapi
+
+"""
+
+import collections
+import logging
+import os
+import sys
+from oslo_utils import importutils
+
+from flask import jsonify
+import six
+
+import functest
+
+LOGGER = logging.getLogger(__name__)
+
+
+def change_to_str_in_dict(obj):
+ """
+ Return a dict with key and value both in string if they are in Unicode
+ """
+ if isinstance(obj, collections.Mapping):
+ return {str(k): change_to_str_in_dict(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [change_to_str_in_dict(ele) for ele in obj]
+ elif isinstance(obj, six.text_type):
+ return str(obj)
+ return obj
+
+
+def itersubclasses(cls, _seen=None):
+ """ Generator over all subclasses of a given class in depth first order """
+
+ if not isinstance(cls, type):
+ raise TypeError("itersubclasses must be called with "
+ "new-style classes, not %.100r" % cls)
+ _seen = _seen or set()
+ try:
+ subs = cls.__subclasses__()
+ except TypeError: # fails only when cls is type
+ subs = cls.__subclasses__(cls)
+ for sub in subs:
+ if sub not in _seen:
+ _seen.add(sub)
+ yield sub
+ for itersub in itersubclasses(sub, _seen):
+ yield itersub
+
+
+def import_modules_from_package(package):
+ """
+ Import modules from package and append into sys.modules
+ :param: package - Full package name. For example: functest.api.resources
+ """
+ path = [os.path.dirname(functest.__file__), ".."] + package.split(".")
+ path = os.path.join(*path)
+ for root, _, files in os.walk(path):
+ for filename in files:
+ if filename.startswith("__") or not filename.endswith(".py"):
+ continue
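+            # root looks like ".../functest/../functest/api/resources";
+            # joining its parts with dots turns the "/../" segment into
+            # "....", so splitting on it keeps only the dotted package
+            # path (e.g. functest.api.resources)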
+ new_package = ".".join(root.split(os.sep)).split("....")[1]
+ module_name = "%s.%s" % (new_package, filename[:-3])
+ try:
+ try_append_module(module_name, sys.modules)
+ except ImportError:
+ LOGGER.exception("unable to import %s", module_name)
+
+
+def try_append_module(name, modules):
+ """ Append the module into specified module system """
+
+ if name not in modules:
+ modules[name] = importutils.import_module(name)
+
+
+def change_obj_to_dict(obj):
+ """ Transfer the object into dict """
+ dic = {}
+ for key, value in vars(obj).items():
+ dic.update({key: value})
+ return dic
+
+
+def result_handler(status, data):
+ """ Return the json format of result in dict """
+ result = {
+ 'status': status,
+ 'result': data
+ }
+ return jsonify(result)
diff --git a/functest/api/common/thread.py b/functest/api/common/thread.py
new file mode 100644
index 000000000..fb60aaac7
--- /dev/null
+++ b/functest/api/common/thread.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to handle multi-thread tasks
+"""
+
+import logging
+import threading
+
+from oslo_serialization import jsonutils
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class TaskThread(threading.Thread):
+ """ Task Thread Class """
+
+ def __init__(self, target, args, handler):
+ super(TaskThread, self).__init__(target=target, args=args)
+ self.target = target
+ self.args = args
+ self.handler = handler
+
+ def run(self):
+ """ Override the function run: run testcase and update database """
+ update_data = {'task_id': self.args.get('task_id'),
+ 'status': 'IN PROGRESS'}
+ self.handler.insert(update_data)
+
+ LOGGER.info('Starting running test case')
+
+ try:
+ data = self.target(self.args)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.exception('Task Failed')
+ update_data = {'status': 'FAIL', 'error': str(err)}
+ self.handler.update_attr(self.args.get('task_id'), update_data)
+ else:
+ LOGGER.info('Task Finished')
+ LOGGER.debug('Result: %s', data)
+ new_data = {'status': 'FINISHED',
+ 'result': jsonutils.dumps(data.get('result', {}))}
+
+ self.handler.update_attr(self.args.get('task_id'), new_data)
diff --git a/functest/api/database/__init__.py b/functest/api/database/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/api/database/__init__.py
diff --git a/functest/api/database/db.py b/functest/api/database/db.py
new file mode 100644
index 000000000..ea861ddbd
--- /dev/null
+++ b/functest/api/database/db.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Create database to store task results using sqlalchemy
+"""
+
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import scoped_session, sessionmaker
+
+
+SQLITE = 'sqlite:////tmp/functest.db'
+
+ENGINE = create_engine(SQLITE, convert_unicode=True)
+DB_SESSION = scoped_session(sessionmaker(autocommit=False,
+ autoflush=False,
+ bind=ENGINE))
+BASE = declarative_base()
+BASE.query = DB_SESSION.query_property()
diff --git a/functest/api/database/v1/__init__.py b/functest/api/database/v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/api/database/v1/__init__.py
diff --git a/functest/api/database/v1/handlers.py b/functest/api/database/v1/handlers.py
new file mode 100644
index 000000000..7bd286ded
--- /dev/null
+++ b/functest/api/database/v1/handlers.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to handle tasks: insert the task info into database and update it
+"""
+
+from functest.api.database.db import DB_SESSION
+from functest.api.database.v1.models import Tasks
+
+
+class TasksHandler(object):
+ """ Tasks Handler Class """
+
+ def insert(self, kwargs): # pylint: disable=no-self-use
+ """ To insert the task info into database """
+ task = Tasks(**kwargs)
+ DB_SESSION.add(task) # pylint: disable=maybe-no-member
+ DB_SESSION.commit() # pylint: disable=maybe-no-member
+ return task
+
+ def get_task_by_taskid(self, task_id): # pylint: disable=no-self-use
+ """ Obtain the task by task id """
+ # pylint: disable=maybe-no-member
+ task = Tasks.query.filter_by(task_id=task_id).first()
+ if not task:
+ raise ValueError
+
+ return task
+
+ def update_attr(self, task_id, attr):
+ """ Update the required attributes of the task """
+ task = self.get_task_by_taskid(task_id)
+
+ for key, value in attr.items():
+ setattr(task, key, value)
+ DB_SESSION.commit() # pylint: disable=maybe-no-member
diff --git a/functest/api/database/v1/models.py b/functest/api/database/v1/models.py
new file mode 100644
index 000000000..c5de91bca
--- /dev/null
+++ b/functest/api/database/v1/models.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Define tables for tasks
+"""
+
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import String
+from sqlalchemy import Text
+
+from functest.api.database.db import BASE
+
+
+class Tasks(BASE): # pylint: disable=too-few-public-methods, no-init
+ """ Create a table for tasks"""
+
+ __tablename__ = 'tasks'
+ id = Column(Integer, primary_key=True) # pylint: disable=invalid-name
+ task_id = Column(String(50))
+ status = Column(Integer)
+ error = Column(String(120))
+ result = Column(Text)
+
+    def __repr__(self):
+        return '<Task %r>' % self.task_id
diff --git a/functest/api/resources/__init__.py b/functest/api/resources/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/api/resources/__init__.py
diff --git a/functest/api/resources/v1/__init__.py b/functest/api/resources/v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/api/resources/v1/__init__.py
diff --git a/functest/api/resources/v1/creds.py b/functest/api/resources/v1/creds.py
new file mode 100644
index 000000000..45e4559f4
--- /dev/null
+++ b/functest/api/resources/v1/creds.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle openstack related requests
+"""
+
+import collections
+import logging
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.cli.commands.cli_os import OpenStack
+from functest.utils import openstack_utils as os_utils
+from functest.utils.constants import CONST
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Creds(ApiResource):
+ """ V1Creds Resource class"""
+
+ def get(self): # pylint: disable=no-self-use
+ """ Get credentials """
+ os_utils.source_credentials(CONST.__getattribute__('openstack_creds'))
+ credentials_show = OpenStack.show_credentials()
+ return jsonify(credentials_show)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def update_openrc(self, args): # pylint: disable=no-self-use
+ """ Used to update the OpenStack RC file """
+ try:
+ openrc_vars = args['openrc']
+ except KeyError:
+ return api_utils.result_handler(
+ status=0, data='openrc must be provided')
+ else:
+ if not isinstance(openrc_vars, collections.Mapping):
+ return api_utils.result_handler(
+ status=0, data='args should be a dict')
+
+ lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+
+ rc_file = CONST.__getattribute__('openstack_creds')
+ with open(rc_file, 'w') as creds_file:
+ creds_file.writelines(lines)
+
+ LOGGER.info("Sourcing the OpenStack RC file...")
+ try:
+ os_utils.source_credentials(rc_file)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.exception('Failed to source the OpenStack RC file')
+ return api_utils.result_handler(status=0, data=str(err))
+
+ return api_utils.result_handler(
+ status=0, data='Update openrc successfully')
diff --git a/functest/api/resources/v1/envs.py b/functest/api/resources/v1/envs.py
new file mode 100644
index 000000000..9c455198d
--- /dev/null
+++ b/functest/api/resources/v1/envs.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle environment related requests
+"""
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.cli.commands.cli_env import Env
+from functest.api.common import api_utils
+import functest.utils.functest_utils as ft_utils
+
+
+class V1Envs(ApiResource):
+ """ V1Envs Resource class"""
+
+ def get(self): # pylint: disable=no-self-use
+ """ Get environment """
+ environment_show = Env().show()
+ return jsonify(environment_show)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def prepare(self, args): # pylint: disable=no-self-use, unused-argument
+ """ Prepare environment """
+ try:
+ ft_utils.execute_command("prepare_env start")
+ except Exception as err: # pylint: disable=broad-except
+ return api_utils.result_handler(status=1, data=str(err))
+ return api_utils.result_handler(
+ status=0, data="Prepare env successfully")
diff --git a/functest/api/resources/v1/tasks.py b/functest/api/resources/v1/tasks.py
new file mode 100644
index 000000000..7086e7075
--- /dev/null
+++ b/functest/api/resources/v1/tasks.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to retrieve the task results
+"""
+
+
+import json
+import logging
+import uuid
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.api.database.v1.handlers import TasksHandler
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Tasks(ApiResource):
+ """ V1Tasks Resource class"""
+
+ def get(self, task_id): # pylint: disable=no-self-use
+ """ GET the result of the task id """
+ try:
+ uuid.UUID(task_id)
+ except ValueError:
+ return api_utils.result_handler(status=1, data='Invalid task id')
+
+ task_handler = TasksHandler()
+ try:
+ task = task_handler.get_task_by_taskid(task_id)
+ except ValueError:
+ return api_utils.result_handler(status=1, data='No such task id')
+
+ status = task.status
+ LOGGER.debug('Task status is: %s', status)
+
+ if status not in ['IN PROGRESS', 'FAIL', 'FINISHED']:
+ return api_utils.result_handler(status=1,
+ data='internal server error')
+ if status == 'IN PROGRESS':
+ result = {'status': status, 'result': ''}
+ elif status == 'FAIL':
+ result = {'status': status, 'error': task.error}
+ else:
+ result = {'status': status, 'result': json.loads(task.result)}
+
+ return jsonify(result)
diff --git a/functest/api/resources/v1/testcases.py b/functest/api/resources/v1/testcases.py
new file mode 100644
index 000000000..f146c24ce
--- /dev/null
+++ b/functest/api/resources/v1/testcases.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle testcase related requests
+"""
+
+import os
+import logging
+import uuid
+
+from flask import abort, jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils, thread
+from functest.cli.commands.cli_testcase import Testcase
+from functest.api.database.v1.handlers import TasksHandler
+from functest.utils.constants import CONST
+import functest.utils.functest_utils as ft_utils
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Testcases(ApiResource):
+ """ V1Testcases Resource class"""
+
+ def get(self): # pylint: disable=no-self-use
+ """ GET all testcases """
+ testcases_list = Testcase().list()
+ result = {'testcases': testcases_list.split('\n')[:-1]}
+ return jsonify(result)
+
+
+class V1Testcase(ApiResource):
+ """ V1Testcase Resource class"""
+
+ def get(self, testcase_name): # pylint: disable=no-self-use
+ """ GET the info of one testcase"""
+ testcase = Testcase().show(testcase_name)
+ if not testcase:
+ abort(404, "The test case '%s' does not exist or is not supported"
+ % testcase_name)
+ testcase_info = api_utils.change_obj_to_dict(testcase)
+ dependency_dict = api_utils.change_obj_to_dict(
+ testcase_info.get('dependency'))
+ testcase_info.pop('name')
+ testcase_info.pop('dependency')
+ result = {'testcase': testcase_name}
+ result.update(testcase_info)
+ result.update({'dependency': dependency_dict})
+ return jsonify(result)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def run_test_case(self, args):
+ """ Run a testcase """
+ try:
+ case_name = args['testcase']
+ except KeyError:
+ return api_utils.result_handler(
+ status=1, data='testcase name must be provided')
+
+ task_id = str(uuid.uuid4())
+
+ task_args = {'testcase': case_name, 'task_id': task_id}
+
+ task_args.update(args.get('opts', {}))
+
+ task_thread = thread.TaskThread(self._run, task_args, TasksHandler())
+ task_thread.start()
+
+ results = {'testcase': case_name, 'task_id': task_id}
+ return jsonify(results)
+
+ def _run(self, args): # pylint: disable=no-self-use
+ """ The built_in function to run a test case """
+
+ case_name = args.get('testcase')
+
+ if not os.path.isfile(CONST.__getattribute__('env_active')):
+ raise Exception("Functest environment is not ready.")
+ else:
+ try:
+ cmd = "run_tests -t {}".format(case_name)
+ runner = ft_utils.execute_command(cmd)
+ except Exception: # pylint: disable=broad-except
+ result = 'FAIL'
+ LOGGER.exception("Running test case %s failed!", case_name)
+            else:
+                result = 'PASS' if runner == os.EX_OK else 'FAIL'
+
+ env_info = {
+ 'installer': CONST.__getattribute__('INSTALLER_TYPE'),
+ 'scenario': CONST.__getattribute__('DEPLOY_SCENARIO'),
+ 'build_tag': CONST.__getattribute__('BUILD_TAG'),
+ 'ci_loop': CONST.__getattribute__('CI_LOOP')
+ }
+ result = {
+ 'task_id': args.get('task_id'),
+ 'case_name': case_name,
+ 'env_info': env_info,
+ 'result': result
+ }
+
+ return {'result': result}
diff --git a/functest/api/resources/v1/tiers.py b/functest/api/resources/v1/tiers.py
new file mode 100644
index 000000000..71a98bea6
--- /dev/null
+++ b/functest/api/resources/v1/tiers.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle tier related requests
+"""
+
+import re
+
+from flask import abort, jsonify
+
+from functest.api.base import ApiResource
+from functest.cli.commands.cli_tier import Tier
+
+
+class V1Tiers(ApiResource):
+ """ V1Tiers Resource class """
+
+ def get(self):
+ # pylint: disable=no-self-use
+ """ GET all tiers """
+ tiers_list = Tier().list()
+ data = re.split("[\n\t]", tiers_list)
+ data = [i.strip() for i in data if i != '']
+ data_dict = dict()
+ for i in range(len(data) / 2):
+ one_data = {data[i * 2]: data[i * 2 + 1]}
+ if i == 0:
+ data_dict = one_data
+ else:
+ data_dict.update(one_data)
+ result = {'tiers': data_dict}
+ return jsonify(result)
+
+
+class V1Tier(ApiResource):
+ """ V1Tier Resource class """
+
+ def get(self, tier_name): # pylint: disable=no-self-use
+ """ GET the info of one tier """
+ testcases = Tier().gettests(tier_name)
+ if not testcases:
+ abort(404, "The tier with name '%s' does not exist." % tier_name)
+ tier_info = Tier().show(tier_name)
+ tier_info.__dict__.pop('name')
+ tier_info.__dict__.pop('tests_array')
+ result = {'tier': tier_name, 'testcases': testcases}
+ result.update(tier_info.__dict__)
+ return jsonify(result)
+
+
+class V1TestcasesinTier(ApiResource):
+ """ V1TestcasesinTier Resource class """
+
+ def get(self, tier_name): # pylint: disable=no-self-use
+ """ GET all testcases within given tier """
+ testcases = Tier().gettests(tier_name)
+ if not testcases:
+ abort(404, "The tier with name '%s' does not exist." % tier_name)
+ result = {'tier': tier_name, 'testcases': testcases}
+ return jsonify(result)
diff --git a/functest/api/server.py b/functest/api/server.py
new file mode 100644
index 000000000..1d47b0dcb
--- /dev/null
+++ b/functest/api/server.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to launch Functest RestApi
+
+"""
+
+import inspect
+import logging
+import logging.config
+import socket
+from urlparse import urljoin
+import pkg_resources
+
+from flask import Flask
+from flask_restful import Api
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.api.database.db import BASE
+from functest.api.database.db import DB_SESSION
+from functest.api.database.db import ENGINE
+from functest.api.database.v1 import models
+from functest.api.urls import URLPATTERNS
+
+
+LOGGER = logging.getLogger(__name__)
+
+APP = Flask(__name__)
+API = Api(APP)
+
+
+@APP.teardown_request
+def shutdown_session(exception=None): # pylint: disable=unused-argument
+ """
+ To be called at the end of each request whether it is successful
+ or an exception is raised
+ """
+ DB_SESSION.remove()
+
+
+def get_resource(resource_name):
+ """ Obtain the required resource according to resource name """
+ name = ''.join(resource_name.split('_'))
+ return next((r for r in api_utils.itersubclasses(ApiResource)
+ if r.__name__.lower() == name))
+
+
+def get_endpoint(url):
+ """ Obtain the endpoint of url """
+ address = socket.gethostbyname(socket.gethostname())
+ return urljoin('http://{}:5000'.format(address), url)
+
+
+def api_add_resource():
+ """
+ The resource has multiple URLs and you can pass multiple URLs to the
+ add_resource() method on the Api object. Each one will be routed to
+ your Resource
+ """
+ for url_pattern in URLPATTERNS:
+ try:
+ API.add_resource(
+ get_resource(url_pattern.target), url_pattern.url,
+ endpoint=get_endpoint(url_pattern.url))
+ except StopIteration:
+ LOGGER.error('url resource not found: %s', url_pattern.url)
+
+
+def init_db():
+ """
+ Import all modules here that might define models so that
+ they will be registered properly on the metadata, and then
+ create a database
+ """
+ def func(subcls):
+ """ To check the subclasses of BASE"""
+ try:
+ if issubclass(subcls[1], BASE):
+ return True
+ except TypeError:
+ pass
+ return False
+ # pylint: disable=bad-builtin
+ subclses = filter(func, inspect.getmembers(models, inspect.isclass))
+ LOGGER.debug('Import models: %s', [subcls[1] for subcls in subclses])
+ BASE.metadata.create_all(bind=ENGINE)
+
+
+def main():
+ """Entry point"""
+ logging.config.fileConfig(pkg_resources.resource_filename(
+ 'functest', 'ci/logging.ini'))
+ LOGGER.info('Starting Functest server')
+ api_add_resource()
+ init_db()
+ APP.run(host='0.0.0.0')
diff --git a/functest/api/urls.py b/functest/api/urls.py
new file mode 100644
index 000000000..f7bcae389
--- /dev/null
+++ b/functest/api/urls.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Define multiple URLs
+"""
+
+
+class Url(object): # pylint: disable=too-few-public-methods
+ """ Url Class """
+
+ def __init__(self, url, target):
+ super(Url, self).__init__()
+ self.url = url
+ self.target = target
+
+
+URLPATTERNS = [
+ # GET /api/v1/functest/envs => GET environment
+ Url('/api/v1/functest/envs', 'v1_envs'),
+
+ # POST /api/v1/functest/envs/action , {"action":"prepare"}
+ # => Prepare environment
+ Url('/api/v1/functest/envs/action', 'v1_envs'),
+
+ # GET /api/v1/functest/openstack/credentials => GET credentials
+ Url('/api/v1/functest/openstack/credentials', 'v1_creds'),
+
+ # POST /api/v1/functest/openstack/action
+ # {"action":"update_openrc", "args": {"openrc": {}}} => Update openrc
+ Url('/api/v1/functest/openstack/action', 'v1_creds'),
+
+ # GET /api/v1/functest/testcases => GET all testcases
+ Url('/api/v1/functest/testcases', 'v1_test_cases'),
+
+ # GET /api/v1/functest/testcases/<testcase_name>
+ # => GET the info of one testcase
+ Url('/api/v1/functest/testcases/<testcase_name>', 'v1_testcase'),
+
+ # POST /api/v1/functest/testcases/action
+ # {"action":"run_test_case", "args": {"opts": {}, "testcase": "vping_ssh"}}
+ # => Run a testcase
+ Url('/api/v1/functest/testcases/action', 'v1_testcase'),
+
+    # GET /api/v1/functest/tiers => GET all tiers
+ Url('/api/v1/functest/tiers', 'v1_tiers'),
+
+ # GET /api/v1/functest/tiers/<tier_name>
+ # => GET the info of one tier
+ Url('/api/v1/functest/tiers/<tier_name>', 'v1_tier'),
+
+ # GET /api/v1/functest/tiers/<tier_name>/testcases
+ # => GET all testcases within given tier
+ Url('/api/v1/functest/tiers/<tier_name>/testcases',
+ 'v1_testcases_in_tier'),
+
+ # GET /api/v1/functest/tasks/<task_id>
+ # => GET the result of the task id
+ Url('/api/v1/functest/tasks/<task_id>', 'v1_tasks')
+]
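+
+# Illustrative usage only (not part of the URL mapping): assuming the server
+# started by functest/api/server.py listens on the default port 5000, a test
+# case can be triggered and its task polled with the standard library:
+#
+#   import json
+#   import urllib2
+#
+#   body = json.dumps({"action": "run_test_case",
+#                      "args": {"opts": {}, "testcase": "vping_ssh"}})
+#   req = urllib2.Request(
+#       "http://localhost:5000/api/v1/functest/testcases/action",
+#       body, {"Content-Type": "application/json"})
+#   task_id = json.load(urllib2.urlopen(req))["task_id"]
+#   status = json.load(urllib2.urlopen(
+#       "http://localhost:5000/api/v1/functest/tasks/" + task_id))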
diff --git a/functest/ci/config_aarch64_patch.yaml b/functest/ci/config_aarch64_patch.yaml
index 45af8d743..6b3699b4d 100644
--- a/functest/ci/config_aarch64_patch.yaml
+++ b/functest/ci/config_aarch64_patch.yaml
@@ -4,6 +4,27 @@ os:
image_name: TestVM
image_file_name: cirros-d161201-aarch64-disk.img
image_password: gocubsgo
+ snaps:
+ images:
+ glance_tests:
+ disk_file: /home/opnfv/functest/images/cirros-d161201-aarch64-disk.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ short_id: 'ubuntu16.04'
+ hw_video_model: 'vga'
+ cirros:
+ disk_file: /home/opnfv/functest/images/cirros-d161201-aarch64-disk.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ short_id: 'ubuntu16.04'
+ hw_video_model: 'vga'
+ ubuntu:
+ disk_file: /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ hw_video_model: 'vga'
+ centos:
+ disk_file: /home/opnfv/functest/images/CentOS-7-aarch64-GenericCloud.qcow2
vping:
image_name: TestVM
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index e26b31398..cf63e1edd 100644
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -5,14 +5,12 @@ general:
dir_repo_rally: /home/opnfv/repos/rally
repo_tempest: /src/tempest
dir_repo_releng: /home/opnfv/repos/releng
- repo_vims_test: /home/opnfv/repos/vnfs/vims-test
+ repo_vims_test: /src/vims-test
repo_onos: /home/opnfv/repos/onos
- repo_netready: /home/opnfv/repos/netready
repo_barometer: /home/opnfv/repos/barometer
repo_doctor: /home/opnfv/repos/doctor
- repo_copper: /home/opnfv/repos/copper
- repo_domino: /home/opnfv/repos/domino
- repo_fds: /home/opnfv/repos/fds
+ repo_odl_test: /src/odl_test
+ repo_fds: /src/fds
repo_securityscan: /home/opnfv/repos/securityscanning
repo_vrouter: /home/opnfv/repos/vnfs/vrouter
functest: /home/opnfv/functest
@@ -63,6 +61,25 @@ snaps:
disk_file: /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-amd64-disk1.img
centos:
disk_file: /home/opnfv/functest/images/CentOS-7-x86_64-GenericCloud.qcow2
+ # All of these values are optional and will override the values retrieved
+ # by the RC file
+# os_creds_override:
+# username: {user}
+# password: {password}
+# auth_url: {auth_url}
+# project_name: {project_name}
+# identity_api_version: {2|3}
+# network_api_version: {2}
+# compute_api_version: {2}
+# image_api_version: {1|2}
+# user_domain_id: {user_domain_id}
+# project_domain_id: {projects_domain_id}
+# interface: {interface}
+# cacert: {True|False}
+# proxy_settings:
+# host: {proxy_host}
+# port: {proxy_port}
+# ssh_proxy_cmd: {OpenSSH -o ProxyCommand value}
vping:
ping_timeout: 200
@@ -71,6 +88,9 @@ vping:
vm_name_2: opnfv-vping-2
image_name: functest-vping
private_net_name: vping-net
+ # network_type: vlan
+ # physical_network: physnet2
+ # segmentation_id: 2366
private_subnet_name: vping-subnet
private_subnet_cidr: 192.168.130.0/24
router_name: vping-router
@@ -123,10 +143,6 @@ rally:
router_name: rally-router
vnf:
- aaa:
- tenant_name: aaa
- tenant_description: Freeradius server
- tenant_images: {}
juju_epc:
tenant_name: epc
tenant_description: OAI EPC deployed with Juju
@@ -135,13 +151,14 @@ vnf:
tenant_name: cloudify_ims
tenant_description: vIMS
config: cloudify_ims.yaml
- orchestra_ims:
- tenant_name: orchestra_ims
- tenant_description: ims deployed with openbaton
- config: orchestra_ims.yaml
- opera_ims:
- tenant_name: opera_ims
- tenant_description: ims deployed with open-o
+ orchestra_openims:
+ tenant_name: orchestra_openims
+ tenant_description: OpenIMS deployed with Open Baton
+ config: orchestra.yaml
+ orchestra_clearwaterims:
+ tenant_name: orchestra_clearwaterims
+ tenant_description: Clearwater IMS deployed with Open Baton
+ config: orchestra.yaml
ONOS:
general:
@@ -160,14 +177,6 @@ ONOS:
installer_master: '10.20.0.2'
installer_master_username: 'root'
installer_master_password: 'r00tme'
-multisite:
- fuel:
- installer_username: 'root'
- installer_password: 'r00tme'
- compass:
- installer_username: 'root'
- installer_password: 'root'
- multisite_controller_ip: '10.1.0.50'
promise:
tenant_name: promise
tenant_description: promise Functionality Testing
@@ -203,3 +212,4 @@ energy_recorder:
api_url: http://energy.opnfv.fr/resources
api_user: ""
api_password: ""
+
diff --git a/functest/ci/config_patch.yaml b/functest/ci/config_patch.yaml
index ad8b0889f..865a564e6 100644
--- a/functest/ci/config_patch.yaml
+++ b/functest/ci/config_patch.yaml
@@ -20,6 +20,3 @@ ovs:
image_properties: {'hw_mem_page_size':'large'}
tempest:
use_custom_flavors: True
-multisite:
- tempest:
- use_custom_flavors: True
diff --git a/functest/ci/download_images.sh b/functest/ci/download_images.sh
index 23e09c10b..86f37a3f5 100644
--- a/functest/ci/download_images.sh
+++ b/functest/ci/download_images.sh
@@ -1,38 +1,25 @@
#!/bin/bash
-CIRROS_REPO_URL=http://download.cirros-cloud.net
-CIRROS_AARCH64_TAG=161201
-CIRROS_X86_64_TAG=0.3.5
-
-RED='\033[1;31m'
-NC='\033[0m' # No Color
-
-function usage(){
- echo -e "${RED}USAGE: $script <destination_folder>${NC}"
- exit 0
-}
-
-script=`basename "$0"`
-IMAGES_FOLDER_DIR=$1
-
-if [[ -z $IMAGES_FOLDER_DIR ]]; then usage; fi;
-
set -ex
-mkdir -p ${IMAGES_FOLDER_DIR}
-
-wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${IMAGES_FOLDER_DIR}
-wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
-
-# Add 3rd-party images for aarch64, since Functest can be run on an x86 machine to test an aarch64 POD
-wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${IMAGES_FOLDER_DIR}
-
-# Add Ubuntu 14 qcow2 image
-wget -nc http://uec-images.ubuntu.com/releases/trusty/14.04/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
-
-# Add Centos 7 qcow2 image
-wget -nc http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 -P ${IMAGES_FOLDER_DIR}
-set +ex
\ No newline at end of file
+wget_opts="-N --tries=1 --connect-timeout=30"
+
+cat << EOF | wget ${wget_opts} -i - -P ${1:-/home/opnfv/functest/images}
+http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
+https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
+http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
+http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-lxc.tar.gz
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-initramfs
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-kernel
+https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+http://cloud.centos.org/altarch/7/images/aarch64/CentOS-7-aarch64-GenericCloud.qcow2.xz
+EOF
+
+xz --decompress ${1:-/home/opnfv/functest/images}/CentOS-7-aarch64-GenericCloud.qcow2.xz
+
+exit $?
diff --git a/functest/ci/logging.ini b/functest/ci/logging.ini
index 210c8f5f4..f1ab72414 100644
--- a/functest/ci/logging.ini
+++ b/functest/ci/logging.ini
@@ -1,5 +1,5 @@
[loggers]
-keys=root,functest,ci,cli,core,energy,opnfv_tests,utils
+keys=root,functest,api,ci,cli,core,energy,opnfv_tests,utils
[handlers]
keys=console,wconsole,file,null
@@ -16,6 +16,11 @@ level=NOTSET
handlers=file
qualname=functest
+[logger_api]
+level=NOTSET
+handlers=wconsole
+qualname=functest.api
+
[logger_ci]
level=NOTSET
handlers=console
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index c40e32660..9ed585f3d 100644
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -33,7 +33,7 @@ actions = ['start', 'check']
logger = logging.getLogger('functest.ci.prepare_env')
handler = None
# set the architecture to default
-pod_arch = None
+pod_arch = os.getenv("HOST_ARCH", None)
arch_filter = ['aarch64']
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index b95e1008b..63a50dea2 100644
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python
-#
-# Author: Jose Lausuch (jose.lausuch@ericsson.com)
+
+# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import argparse
import enum
@@ -17,6 +16,7 @@ import os
import pkg_resources
import re
import sys
+import textwrap
import prettytable
@@ -66,17 +66,14 @@ class RunTestsParser(object):
class Runner(object):
def __init__(self):
- self.executed_test_cases = []
+ self.executed_test_cases = {}
self.overall_result = Result.EX_OK
self.clean_flag = True
self.report_flag = False
-
- @staticmethod
- def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
+ self._tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
@staticmethod
def source_rc_file():
@@ -95,6 +92,8 @@ class Runner(object):
CONST.__setattr__('OS_TENANT_NAME', value)
elif key == 'OS_PASSWORD':
CONST.__setattr__('OS_PASSWORD', value)
+ elif key == "OS_PROJECT_DOMAIN_NAME":
+ CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value)
@staticmethod
def get_run_dict(testname):
@@ -109,21 +108,11 @@ class Runner(object):
logger.exception("Cannot get {}'s config options".format(testname))
return None
- def run_test(self, test, tier_name, testcases=None):
+ def run_test(self, test):
if not test.is_enabled():
raise TestNotEnabled(
"The test case {} is not enabled".format(test.get_name()))
- logger.info("\n") # blank line
- self.print_separator("=")
logger.info("Running test case '%s'...", test.get_name())
- self.print_separator("=")
- logger.debug("\n%s" % test)
- self.source_rc_file()
-
- flags = " -t %s" % test.get_name()
- if self.report_flag:
- flags += " -r"
-
result = testcase.TestCase.EX_RUN_ERROR
run_dict = self.get_run_dict(test.get_name())
if run_dict:
@@ -132,7 +121,7 @@ class Runner(object):
cls = getattr(module, run_dict['class'])
test_dict = ft_utils.get_dict_by_test(test.get_name())
test_case = cls(**test_dict)
- self.executed_test_cases.append(test_case)
+ self.executed_test_cases[test.get_name()] = test_case
if self.clean_flag:
if test_case.create_snapshot() != test_case.EX_OK:
return result
@@ -156,7 +145,6 @@ class Runner(object):
run_dict['class']))
else:
raise Exception("Cannot import the class for the test case.")
-
return result
def run_tier(self, tier):
@@ -165,68 +153,60 @@ class Runner(object):
if tests is None or len(tests) == 0:
logger.info("There are no supported test cases in this tier "
"for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- self.print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- self.print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- result = self.run_test(test, tier_name)
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed.", test.get_name())
- self.overall_result = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed(
- "The test case {} failed and is blocking".format(
- test.get_name()))
+ self.overall_result = Result.EX_ERROR
+ else:
+ logger.info("Running tier '%s'" % tier_name)
+ for test in tests:
+ result = self.run_test(test)
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed.", test.get_name())
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+ return self.overall_result
- def run_all(self, tiers):
- summary = ""
+ def run_all(self):
tiers_to_run = []
-
- for tier in tiers.get_tiers():
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['tiers', 'order', 'CI Loop', 'description',
+ 'testcases'])
+ for tier in self._tiers.get_tiers():
if (len(tier.get_tests()) != 0 and
re.search(CONST.__getattribute__('CI_LOOP'),
tier.get_ci_loop()) is not None):
tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
+ msg.add_row([tier.get_name(), tier.get_order(),
+ tier.get_ci_loop(),
+ textwrap.fill(tier.description, width=40),
+ textwrap.fill(' '.join([str(x.get_name(
+ )) for x in tier.get_tests()]), width=40)])
+ logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
for tier in tiers_to_run:
self.run_tier(tier)
def main(self, **kwargs):
- _tiers = tb.TierBuilder(
- CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
-
if kwargs['noclean']:
self.clean_flag = False
-
if kwargs['report']:
self.report_flag = True
-
try:
if kwargs['test']:
self.source_rc_file()
logger.debug("Test args: %s", kwargs['test'])
- if _tiers.get_tier(kwargs['test']):
- self.run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
+ if self._tiers.get_tier(kwargs['test']):
+ self.run_tier(self._tiers.get_tier(kwargs['test']))
+ elif self._tiers.get_test(kwargs['test']):
result = self.run_test(
- _tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
+ self._tiers.get_test(kwargs['test']))
if result != testcase.TestCase.EX_OK:
logger.error("The test case '%s' failed.",
kwargs['test'])
self.overall_result = Result.EX_ERROR
elif kwargs['test'] == "all":
- self.run_all(_tiers)
+ self.run_all()
else:
logger.error("Unknown test case or tier '%s', "
"or not supported by "
@@ -234,39 +214,51 @@ class Runner(object):
% (kwargs['test'],
CONST.__getattribute__('DEPLOY_SCENARIO')))
logger.debug("Available tiers are:\n\n%s",
- _tiers)
+ self._tiers)
return Result.EX_ERROR
else:
- self.run_all(_tiers)
+ self.run_all()
except BlockingTestFailed:
pass
except Exception:
logger.exception("Failures when running testcase(s)")
self.overall_result = Result.EX_ERROR
+ if not self._tiers.get_test(kwargs['test']):
+ self.summary(self._tiers.get_tier(kwargs['test']))
+ logger.info("Execution exit value: %s" % self.overall_result)
+ return self.overall_result
+ def summary(self, tier=None):
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
field_names=['env var', 'value'])
for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
'CI_LOOP']:
msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
- if len(self.executed_test_cases) > 1:
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier',
- 'duration', 'result'])
- for test_case in self.executed_test_cases:
- result = 'PASS' if(test_case.is_successful(
+ logger.info("Deployment description:\n\n%s\n", msg)
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier',
+ 'duration', 'result'])
+ tiers = [tier] if tier else self._tiers.get_tiers()
+ for tier in tiers:
+ for test in tier.get_tests():
+ try:
+ test_case = self.executed_test_cases[test.get_name()]
+ except KeyError:
+ msg.add_row([test.get_name(), test.get_project(),
+ tier.get_name(), "00:00", "SKIP"])
+ else:
+ result = 'PASS' if(test_case.is_successful(
) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
- test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % self.overall_result)
- return self.overall_result
+ msg.add_row(
+ [test_case.case_name, test_case.project_name,
+ self._tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ for test in tier.get_skipped_test():
+ msg.add_row([test.get_name(), test.get_project(),
+ tier.get_name(), "00:00", "SKIP"])
+ logger.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
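Note: the Runner summary above now relies on prettytable instead of hand-built ASCII boxes. A minimal sketch of that pattern (the rows below are made-up examples, not output from this patch):

    import logging

    import prettytable

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("sketch")

    # Build the same kind of report table as Runner.summary() above.
    report = prettytable.PrettyTable(
        header_style='upper', padding_width=5,
        field_names=['test case', 'project', 'tier', 'duration', 'result'])
    report.add_row(['vping_ssh', 'functest', 'smoke', '00:57', 'PASS'])
    report.add_row(['odl', 'functest', 'smoke', '00:00', 'SKIP'])
    logger.info("FUNCTEST REPORT:\n\n%s\n", report)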
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index d0b2785cb..fac81267f 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -149,7 +149,7 @@ tiers:
case_name: odl
project_name: functest
criteria: 100
- blocking: true
+ blocking: false
description: >-
Test Suite for the OpenDaylight SDN Controller. It
integrates some test suites from upstream using
@@ -162,8 +162,8 @@ tiers:
class: 'ODLTests'
args:
suites:
- - /home/opnfv/repos/odl_test/csit/suites/integration/basic
- - /home/opnfv/repos/odl_test/csit/suites/openstack/neutron
+ - /src/odl_test/csit/suites/integration/basic
+ - /src/odl_test/csit/suites/openstack/neutron
-
case_name: odl_netvirt
@@ -183,9 +183,9 @@ tiers:
class: 'ODLTests'
args:
suites:
- - /home/opnfv/repos/odl_test/csit/suites/integration/basic
- - /home/opnfv/repos/odl_test/csit/suites/openstack/neutron
- - /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity
+ - /src/odl_test/csit/suites/integration/basic
+ - /src/odl_test/csit/suites/openstack/neutron
+ - /src/odl_test/csit/suites/openstack/connectivity
-
case_name: fds
@@ -204,7 +204,7 @@ tiers:
class: 'ODLTests'
args:
suites:
- - /home/opnfv/repos/fds/testing/robot
+ - /src/fds/testing/robot
-
case_name: onos
@@ -265,10 +265,11 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /home/opnfv/repos/promise/promise/test/functest && python ./run_tests.py'
+ cmd: 'run_promise_tests.py'
-
case_name: doctor-notification
+ enabled: false
project_name: doctor
criteria: 100
blocking: false
@@ -297,7 +298,7 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest && python ./run_tests.py'
+ cmd: 'run_sdnvpn_tests.py'
-
case_name: security_scan
@@ -317,38 +318,6 @@ tiers:
cmd: '. /home/opnfv/functest/conf/stackrc && security_scan --config /usr/local/etc/securityscanning/config.ini'
-
- case_name: copper
- enabled: false
- project_name: copper
- criteria: 100
- blocking: false
- description: >-
- Test suite for policy management based on OpenStack Congress
- dependencies:
- installer: 'apex'
- scenario: '^((?!fdio).)*$'
- run:
- module: 'functest.core.feature'
- class: 'BashFeature'
- args:
- cmd: 'cd /home/opnfv/repos/copper/tests && bash run.sh && cd -'
-
- -
- case_name: multisite
- enabled: false
- project_name: multisite
- criteria: 100
- blocking: false
- description: >-
- Test suite from kingbird
- dependencies:
- installer: '(fuel)|(compass)'
- scenario: 'multisite'
- run:
- module: 'functest.opnfv_tests.openstack.tempest.tempest'
- class: 'TempestMultisite'
-
- -
case_name: functest-odl-sfc
enabled: false
project_name: sfc
@@ -363,7 +332,7 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /usr/local/lib/python2.7/dist-packages/sfc/tests/functest && python ./run_tests.py'
+ cmd: 'run_sfc_tests.py'
-
case_name: onos_sfc
@@ -412,39 +381,21 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /home/opnfv/repos/domino && ./tests/run_multinode.sh'
-
- -
- case_name: gluon_vping
- enabled: false
- project_name: netready
- criteria: 100
- blocking: false
- description: >-
- Test suite from Netready project.
- dependencies:
- installer: 'apex'
- scenario: 'gluon'
- run:
- module: 'functest.core.feature'
- class: 'BashFeature'
- args:
- cmd: 'cd /home/opnfv/repos/netready/test/functest && python ./gluon-test-suite.py'
+ cmd: 'cd /src/domino && ./tests/run_multinode.sh'
-
case_name: barometercollectd
- enabled: false
+ enabled: true
project_name: barometer
criteria: 100
blocking: false
description: >-
- Test suite for the Barometer project. Separate tests verify the
- proper configuration and functionality of the following
- collectd plugins Ceilometer, Hugepages, Memory RAS (mcelog),
- and OVS Events
+ Test suite for the Barometer project. Separate tests verify
+ the proper configuration and basic functionality of all the
+ collectd plugins as described in the Project Release Plan
dependencies:
- installer: 'fuel'
- scenario: 'kvm_ovs_dpdk_bar'
+ installer: 'apex'
+ scenario: 'bar'
run:
module: 'baro_tests.barometer'
class: 'BarometerCollectd'
@@ -508,7 +459,7 @@ tiers:
-
name: vnf
order: 4
- ci_loop: 'daily'
+ ci_loop: '(daily)|(weekly)'
description : >-
Collection of VNF test cases.
testcases:
@@ -526,51 +477,33 @@ tiers:
run:
module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
class: 'CloudifyIms'
-
-
- case_name: aaa
- enabled: false
+ case_name: orchestra_openims
project_name: functest
criteria: 100
blocking: false
description: >-
- Test suite from Parser project.
+ OpenIMS VNF deployment with Open Baton (Orchestra)
dependencies:
installer: ''
- scenario: ''
+ scenario: 'os-nosdn-nofeature-ha'
run:
- module: 'functest.opnfv_tests.vnf.aaa.aaa'
- class: 'AaaVnf'
+ module: 'functest.opnfv_tests.vnf.ims.orchestra_openims'
+ class: 'OpenImsVnf'
-
- case_name: orchestra_ims
- enabled: false
+ case_name: orchestra_clearwaterims
project_name: functest
criteria: 100
blocking: false
description: >-
- VNF deployment with OpenBaton (Orchestra)
+ ClearwaterIMS VNF deployment with Open Baton (Orchestra)
dependencies:
installer: ''
- scenario: ''
- run:
- module: 'functest.opnfv_tests.vnf.ims.orchestra_ims'
- class: 'ImsVnf'
-
- -
- case_name: opera_vims
- enabled: false
- project_name: opera
- criteria: 100
- blocking: false
- description: >-
- VNF deployment with OPEN-O
- dependencies:
- installer: 'compass'
- scenario: 'os-nosdn-openo-ha'
+ scenario: 'os-nosdn-nofeature-ha'
run:
- module: 'functest.opnfv_tests.vnf.ims.opera_ims'
- class: 'OperaIms'
+ module: 'functest.opnfv_tests.vnf.ims.orchestra_clearwaterims'
+ class: 'ClearwaterImsVnf'
-
case_name: vyos_vrouter
diff --git a/functest/ci/tier_builder.py b/functest/ci/tier_builder.py
index f8038468f..d2722dc22 100644
--- a/functest/ci/tier_builder.py
+++ b/functest/ci/tier_builder.py
@@ -1,11 +1,11 @@
#!/usr/bin/env python
+
+# Copyright (c) 2016 Ericsson AB and others.
#
-# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import tier_handler as th
import yaml
@@ -52,11 +52,14 @@ class TierBuilder(object):
dependency=dep,
criteria=dic_testcase['criteria'],
blocking=dic_testcase['blocking'],
- description=dic_testcase['description'])
+ description=dic_testcase['description'],
+ project=dic_testcase['project_name'])
if (testcase.is_compatible(self.ci_installer,
self.ci_scenario) and
testcase.is_enabled()):
tier.add_test(testcase)
+ else:
+ tier.skip_test(testcase)
self.tier_objects.append(tier)
diff --git a/functest/ci/tier_handler.py b/functest/ci/tier_handler.py
index 4f2f14ecd..dd3e77ce3 100644
--- a/functest/ci/tier_handler.py
+++ b/functest/ci/tier_handler.py
@@ -1,14 +1,18 @@
#!/usr/bin/env python
+
+# Copyright (c) 2016 Ericsson AB and others.
#
-# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import re
+import textwrap
+
+import prettytable
+
LINE_LENGTH = 72
@@ -32,6 +36,7 @@ class Tier(object):
def __init__(self, name, order, ci_loop, description=""):
self.tests_array = []
+ self.skipped_tests_array = []
self.name = name
self.order = order
self.ci_loop = ci_loop
@@ -40,12 +45,18 @@ class Tier(object):
def add_test(self, testcase):
self.tests_array.append(testcase)
+ def skip_test(self, testcase):
+ self.skipped_tests_array.append(testcase)
+
def get_tests(self):
array_tests = []
for test in self.tests_array:
array_tests.append(test)
return array_tests
+ def get_skipped_test(self):
+ return self.skipped_tests_array
+
def get_test_names(self):
array_tests = []
for test in self.tests_array:
@@ -75,31 +86,16 @@ class Tier(object):
return self.ci_loop
def __str__(self):
- lines = split_text(self.description, LINE_LENGTH - 6)
-
- out = ""
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Tier: " + self.name.ljust(LINE_LENGTH - 10) + "|\n")
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Order: " + str(self.order).ljust(LINE_LENGTH - 10) + "|\n")
- out += ("| CI Loop: " + str(self.ci_loop).ljust(LINE_LENGTH - 12) +
- "|\n")
- out += ("| Description:".ljust(LINE_LENGTH - 1) + "|\n")
- for line in lines:
- out += ("| " + line.ljust(LINE_LENGTH - 7) + " |\n")
- out += ("| Test cases:".ljust(LINE_LENGTH - 1) + "|\n")
- tests = self.get_test_names()
- if len(tests) > 0:
- for i in range(len(tests)):
- out += ("| - %s |\n" % tests[i].ljust(LINE_LENGTH - 9))
- else:
- out += ("| (There are no supported test cases "
- .ljust(LINE_LENGTH - 1) + "|\n")
- out += ("| in this tier for the given scenario) "
- .ljust(LINE_LENGTH - 1) + "|\n")
- out += ("|".ljust(LINE_LENGTH - 1) + "|\n")
- out += ("+%s+\n" % ("-" * (LINE_LENGTH - 2)))
- return out
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['tiers', 'order', 'CI Loop', 'description',
+ 'testcases'])
+ msg.add_row(
+ [self.name, self.order, self.ci_loop,
+ textwrap.fill(self.description, width=40),
+ textwrap.fill(' '.join([str(x.get_name(
+ )) for x in self.get_tests()]), width=40)])
+ return msg.get_string()
class TestCase(object):
@@ -109,13 +105,15 @@ class TestCase(object):
dependency,
criteria,
blocking,
- description=""):
+ description="",
+ project=""):
self.name = name
self.enabled = enabled
self.dependency = dependency
self.criteria = criteria
self.blocking = blocking
self.description = description
+ self.project = project
@staticmethod
def is_none(item):
@@ -147,26 +145,16 @@ class TestCase(object):
def is_blocking(self):
return self.blocking
+ def get_project(self):
+ return self.project
+
def __str__(self):
- lines = split_text(self.description, LINE_LENGTH - 6)
-
- out = ""
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Testcase: " + self.name.ljust(LINE_LENGTH - 14) + "|\n")
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Description:".ljust(LINE_LENGTH - 1) + "|\n")
- for line in lines:
- out += ("| " + line.ljust(LINE_LENGTH - 7) + " |\n")
- out += ("| Criteria: " +
- str(self.criteria).ljust(LINE_LENGTH - 14) + "|\n")
- out += ("| Dependencies:".ljust(LINE_LENGTH - 1) + "|\n")
- installer = self.dependency.get_installer()
- scenario = self.dependency.get_scenario()
- out += ("| - Installer:" + installer.ljust(LINE_LENGTH - 17) + "|\n")
- out += ("| - Scenario :" + scenario.ljust(LINE_LENGTH - 17) + "|\n")
- out += ("|".ljust(LINE_LENGTH - 1) + "|\n")
- out += ("+%s+\n" % ("-" * (LINE_LENGTH - 2)))
- return out
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'description', 'criteria', 'dependency'])
+ msg.add_row([self.name, textwrap.fill(self.description, width=40),
+ self.criteria, self.dependency])
+ return msg.get_string()
class Dependency(object):
@@ -182,6 +170,7 @@ class Dependency(object):
return self.scenario
def __str__(self):
- return ("Dependency info:\n"
- " installer: " + self.installer + "\n"
- " scenario: " + self.scenario + "\n")
+ delimitator = "\n" if self.get_installer(
+ ) and self.get_scenario() else ""
+ return "{}{}{}".format(self.get_installer(), delimitator,
+ self.get_scenario())
diff --git a/functest/cli/commands/cli_env.py b/functest/cli/commands/cli_env.py
index 99d36996d..72a870b59 100644
--- a/functest/cli/commands/cli_env.py
+++ b/functest/cli/commands/cli_env.py
@@ -16,7 +16,7 @@ from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
-class CliEnv(object):
+class Env(object):
def __init__(self):
pass
@@ -56,17 +56,14 @@ class CliEnv(object):
if self.status(verbose=False) == 0:
STATUS = "ready"
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['Functest Environment', 'value'])
- msg.add_row(['INSTALLER', installer_info])
- msg.add_row(['SCENARIO', scenario])
- msg.add_row(['POD', node])
- if build_tag:
- msg.add_row(['BUILD TAG', build_tag])
- msg.add_row(['DEBUG FLAG', is_debug])
- msg.add_row(['STATUS', STATUS])
- click.echo(msg.get_string())
+ env_info = {'INSTALLER': installer_info,
+ 'SCENARIO': scenario,
+ 'POD': node,
+ 'DEBUG FLAG': is_debug,
+ 'BUILD_TAG': build_tag,
+ 'STATUS': STATUS}
+
+ return env_info
def status(self, verbose=True):
ret_val = 0
@@ -78,3 +75,19 @@ class CliEnv(object):
click.echo("Functest environment ready to run tests.\n")
return ret_val
+
+
+class CliEnv(Env):
+
+ def __init__(self):
+ super(CliEnv, self).__init__()
+
+ def show(self):
+ env_info = super(CliEnv, self).show()
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['Functest Environment', 'value'])
+ for key, value in env_info.iteritems():
+ if key is not None:
+ msg.add_row([key, value])
+ click.echo(msg.get_string())
diff --git a/functest/cli/commands/cli_os.py b/functest/cli/commands/cli_os.py
index f4ec16615..e97ab0809 100644
--- a/functest/cli/commands/cli_os.py
+++ b/functest/cli/commands/cli_os.py
@@ -18,7 +18,7 @@ import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
-class CliOpenStack(object):
+class OpenStack(object):
def __init__(self):
self.os_auth_url = CONST.__getattribute__('OS_AUTH_URL')
@@ -43,9 +43,11 @@ class CliOpenStack(object):
@staticmethod
def show_credentials():
+ dic_credentials = {}
for key, value in os.environ.items():
if key.startswith('OS_'):
- click.echo("{}={}".format(key, value))
+ dic_credentials.update({key: value})
+ return dic_credentials
def check(self):
self.ping_endpoint()
@@ -88,3 +90,16 @@ class CliOpenStack(object):
"'functest openstack snapshot-create'")
return
os_clean.main()
+
+
+class CliOpenStack(OpenStack):
+
+ def __init__(self):
+ super(CliOpenStack, self).__init__()
+
+ @staticmethod
+ def show_credentials():
+ dic_credentials = OpenStack.show_credentials()
+ for key, value in dic_credentials.items():
+ if key.startswith('OS_'):
+ click.echo("{}={}".format(key, value))
diff --git a/functest/cli/commands/cli_testcase.py b/functest/cli/commands/cli_testcase.py
index cb3d47391..65dd9ab75 100644
--- a/functest/cli/commands/cli_testcase.py
+++ b/functest/cli/commands/cli_testcase.py
@@ -20,7 +20,7 @@ import functest.utils.functest_utils as ft_utils
import functest.utils.functest_vacation as vacation
-class CliTestcase(object):
+class Testcase(object):
def __init__(self):
self.tiers = tb.TierBuilder(
@@ -33,15 +33,11 @@ class CliTestcase(object):
for tier in self.tiers.get_tiers():
for test in tier.get_tests():
summary += (" %s\n" % test.get_name())
- click.echo(summary)
+ return summary
def show(self, testname):
description = self.tiers.get_test(testname)
- if description is None:
- click.echo("The test case '%s' does not exist or is not supported."
- % testname)
-
- click.echo(description)
+ return description
@staticmethod
def run(testname, noclean=False, report=False):
@@ -62,3 +58,20 @@ class CliTestcase(object):
for test in tests:
cmd = "run_tests {}-t {}".format(flags, test)
ft_utils.execute_command(cmd)
+
+
+class CliTestcase(Testcase):
+
+ def __init__(self):
+ super(CliTestcase, self).__init__()
+
+ def list(self):
+ click.echo(super(CliTestcase, self).list())
+
+ def show(self, testname):
+ testcase_show = super(CliTestcase, self).show(testname)
+ if testcase_show:
+ click.echo(testcase_show)
+ else:
+ click.echo("The test case '%s' does not exist or is not supported."
+ % testname)
diff --git a/functest/cli/commands/cli_tier.py b/functest/cli/commands/cli_tier.py
index 9b2e60baa..995354bbd 100644
--- a/functest/cli/commands/cli_tier.py
+++ b/functest/cli/commands/cli_tier.py
@@ -19,7 +19,7 @@ from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
-class CliTier(object):
+class Tier(object):
def __init__(self):
self.tiers = tb.TierBuilder(
@@ -34,26 +34,23 @@ class CliTier(object):
% (tier.get_order(),
tier.get_name(),
tier.get_test_names()))
- click.echo(summary)
+ return summary
def show(self, tiername):
tier = self.tiers.get_tier(tiername)
if tier is None:
- tier_names = self.tiers.get_tier_names()
- click.echo("The tier with name '%s' does not exist. "
- "Available tiers are:\n %s\n" % (tiername, tier_names))
+ return None
else:
- click.echo(self.tiers.get_tier(tiername))
+ tier_info = self.tiers.get_tier(tiername)
+ return tier_info
def gettests(self, tiername):
tier = self.tiers.get_tier(tiername)
if tier is None:
- tier_names = self.tiers.get_tier_names()
- click.echo("The tier with name '%s' does not exist. "
- "Available tiers are:\n %s\n" % (tiername, tier_names))
+ return None
else:
tests = tier.get_test_names()
- click.echo("Test cases in tier '%s':\n %s\n" % (tiername, tests))
+ return tests
@staticmethod
def run(tiername, noclean=False, report=False):
@@ -70,3 +67,30 @@ class CliTier(object):
else:
cmd = "run_tests {}-t {}".format(flags, tiername)
ft_utils.execute_command(cmd)
+
+
+class CliTier(Tier):
+
+ def __init__(self):
+ super(CliTier, self).__init__()
+
+ def list(self):
+ click.echo(super(CliTier, self).list())
+
+ def show(self, tiername):
+ tier_info = super(CliTier, self).show(tiername)
+ if tier_info:
+ click.echo(tier_info)
+ else:
+ tier_names = self.tiers.get_tier_names()
+ click.echo("The tier with name '%s' does not exist. "
+ "Available tiers are:\n %s\n" % (tiername, tier_names))
+
+ def gettests(self, tiername):
+ tests = super(CliTier, self).gettests(tiername)
+ if tests:
+ click.echo("Test cases in tier '%s':\n %s\n" % (tiername, tests))
+ else:
+ tier_names = self.tiers.get_tier_names()
+ click.echo("The tier with name '%s' does not exist. "
+ "Available tiers are:\n %s\n" % (tiername, tier_names))
diff --git a/functest/energy/energy.py b/functest/energy/energy.py
index 372c1d32f..c410e84f0 100644
--- a/functest/energy/energy.py
+++ b/functest/energy/energy.py
@@ -16,6 +16,7 @@ import urllib
from functools import wraps
import requests
+import urllib3
import functest.utils.functest_utils as ft_utils
@@ -55,9 +56,10 @@ def enable_recording(method):
try:
return_value = method(*args)
finish_session(current_scenario)
- except Exception: # pylint: disable=broad-except
+ except Exception as exc: # pylint: disable=broad-except
+ EnergyRecorder.logger.exception(exc)
finish_session(current_scenario)
- raise
+ raise exc
return return_value
return wrapper
@@ -74,6 +76,9 @@ class EnergyRecorder(object):
# Default initial step
INITIAL_STEP = "running"
+ # Default connection timeout
+ CONNECTION_TIMOUT = urllib3.Timeout(connect=1, read=3)
+
@staticmethod
def load_config():
"""
@@ -94,27 +99,41 @@ class EnergyRecorder(object):
assert energy_recorder_uri
assert environment
- energy_recorder_uri += "/recorders/environment/"
- energy_recorder_uri += urllib.quote_plus(environment)
+ uri_comp = "/recorders/environment/"
+ uri_comp += urllib.quote_plus(environment)
EnergyRecorder.logger.debug(
- "API recorder at: " + energy_recorder_uri)
+ "API recorder at: " + energy_recorder_uri + uri_comp)
# Creds
- user = ft_utils.get_functest_config(
+ creds_usr = ft_utils.get_functest_config(
"energy_recorder.api_user")
- password = ft_utils.get_functest_config(
+ creds_pass = ft_utils.get_functest_config(
"energy_recorder.api_password")
- if user != "" and password != "":
- energy_recorder_api_auth = (user, password)
+ if creds_usr != "" and creds_pass != "":
+ energy_recorder_api_auth = (creds_usr, creds_pass)
else:
energy_recorder_api_auth = None
+ try:
+ resp = requests.get(energy_recorder_uri + "/monitoring/ping",
+ auth=energy_recorder_api_auth,
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT)
+ api_available = json.loads(resp.text)["status"] == "OK"
+ except Exception: # pylint: disable=broad-except
+ EnergyRecorder.logger.error(
+ "Energy recorder API is not available")
+ api_available = False
# Final config
EnergyRecorder.energy_recorder_api = {
- "uri": energy_recorder_uri,
- "auth": energy_recorder_api_auth
+ "uri": energy_recorder_uri + uri_comp,
+ "auth": energy_recorder_api_auth,
+ "available": api_available
}
+ return EnergyRecorder.energy_recorder_api["available"]
@staticmethod
def submit_scenario(scenario, step):
@@ -126,31 +145,36 @@ class EnergyRecorder(object):
param step: Step name
:type step: string
"""
- return_status = True
try:
- EnergyRecorder.logger.debug("Submitting scenario")
+ return_status = True
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Submitting scenario")
- # Create API payload
- payload = {
- "step": step,
- "scenario": scenario
- }
- # Call API to start energy recording
- response = requests.post(
- EnergyRecorder.energy_recorder_api["uri"],
- data=json.dumps(payload),
- auth=EnergyRecorder.energy_recorder_api["auth"],
- headers={
- 'content-type': 'application/json'
+ # Create API payload
+ payload = {
+ "step": step,
+ "scenario": scenario
}
- )
- if response.status_code != 200:
- log_msg = "Error while submitting scenario\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.info(log_msg)
- return_status = False
+ # Call API to start energy recording
+ response = requests.post(
+ EnergyRecorder.energy_recorder_api["uri"],
+ data=json.dumps(payload),
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code != 200:
+ EnergyRecorder.logger.error(
+ "Error while submitting scenario\n%s",
+ response.text)
+ return_status = False
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+ "submit_scenario: Unable to connect energy recorder API")
+ return_status = False
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
@@ -170,11 +194,12 @@ class EnergyRecorder(object):
"""
return_status = True
try:
- EnergyRecorder.logger.debug("Starting recording")
- return_status = EnergyRecorder.submit_scenario(
- scenario,
- EnergyRecorder.INITIAL_STEP
- )
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Starting recording")
+ return_status = EnergyRecorder.submit_scenario(
+ scenario,
+ EnergyRecorder.INITIAL_STEP
+ )
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
@@ -188,25 +213,30 @@ class EnergyRecorder(object):
@staticmethod
def stop():
"""Stop current recording session."""
- EnergyRecorder.logger.debug("Stopping recording")
return_status = True
try:
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
-
- # Call API to stop energy recording
- response = requests.delete(
- EnergyRecorder.energy_recorder_api["uri"],
- auth=EnergyRecorder.energy_recorder_api["auth"],
- headers={
- 'content-type': 'application/json'
- }
- )
- if response.status_code != 200:
- log_msg = "Error while stating energy recording session\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.error(log_msg)
- return_status = False
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Stopping recording")
+
+ # Call API to stop energy recording
+ response = requests.delete(
+ EnergyRecorder.energy_recorder_api["uri"],
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code != 200:
+ EnergyRecorder.logger.error(
+                        "Error while stopping energy recording session\n%s",
+ response.text)
+ return_status = False
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+ "stop: Unable to connect energy recorder API")
+ return_status = False
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
@@ -219,31 +249,36 @@ class EnergyRecorder(object):
@staticmethod
def set_step(step):
"""Notify energy recording service of current step of the testcase."""
- EnergyRecorder.logger.debug("Setting step")
return_status = True
try:
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Setting step")
- # Create API payload
- payload = {
- "step": step,
- }
-
- # Call API to define step
- response = requests.post(
- EnergyRecorder.energy_recorder_api["uri"] + "/step",
- data=json.dumps(payload),
- auth=EnergyRecorder.energy_recorder_api["auth"],
- headers={
- 'content-type': 'application/json'
+ # Create API payload
+ payload = {
+ "step": step,
}
- )
- if response.status_code != 200:
- log_msg = "Error while setting current step of testcase\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.error(log_msg)
- return_status = False
+
+ # Call API to define step
+ response = requests.post(
+ EnergyRecorder.energy_recorder_api["uri"] + "/step",
+ data=json.dumps(payload),
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code != 200:
+ EnergyRecorder.logger.error(
+ "Error while setting current step of testcase\n%s",
+ response.text)
+ return_status = False
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+ "set_step: Unable to connect energy recorder API")
+ return_status = False
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
@@ -256,30 +291,34 @@ class EnergyRecorder(object):
@staticmethod
def get_current_scenario():
"""Get current running scenario (if any, None else)."""
- EnergyRecorder.logger.debug("Getting current scenario")
return_value = None
try:
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
-
- # Call API get running scenario
- response = requests.get(
- EnergyRecorder.energy_recorder_api["uri"],
- auth=EnergyRecorder.energy_recorder_api["auth"]
- )
- if response.status_code == 200:
- return_value = json.loads(response.text)
- elif response.status_code == 404:
- log_msg = "No current running scenario at {}"
- log_msg = log_msg.format(
- EnergyRecorder.energy_recorder_api["uri"])
- EnergyRecorder.logger.error(log_msg)
- return_value = None
- else:
- log_msg = "Error while getting current scenario\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.error(log_msg)
- return_value = None
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Getting current scenario")
+
+ # Call API get running scenario
+ response = requests.get(
+ EnergyRecorder.energy_recorder_api["uri"],
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code == 200:
+ return_value = json.loads(response.text)
+ elif response.status_code == 404:
+ EnergyRecorder.logger.info(
+ "No current running scenario at %s",
+ EnergyRecorder.energy_recorder_api["uri"])
+ return_value = None
+ else:
+ EnergyRecorder.logger.error(
+ "Error while getting current scenario\n%s",
+ response.text)
+ return_value = None
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+                "get_current_scenario: Unable to connect energy recorder API")
+ return_value = None
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
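Note: load_config() now pings the recorder API with a short connection timeout before any recording call. A minimal standalone sketch of that check, assuming a placeholder URI and optional credentials:

    import json

    import requests
    import urllib3

    TIMEOUT = urllib3.Timeout(connect=1, read=3)

    def api_available(uri, auth=None):
        """Ping the energy recorder API and report whether it answers OK."""
        try:
            resp = requests.get(uri + "/monitoring/ping", auth=auth,
                                headers={'content-type': 'application/json'},
                                timeout=TIMEOUT)
            return json.loads(resp.text)["status"] == "OK"
        except Exception:  # broad on purpose: the check must never raise
            return False

    # Example (placeholder endpoint):
    # api_available("http://energy.recorder.example/resources/recorders")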
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt
index 95bea2b7a..099d6864d 100644
--- a/functest/opnfv_tests/openstack/rally/blacklist.txt
+++ b/functest/opnfv_tests/openstack/rally/blacklist.txt
@@ -6,6 +6,38 @@ scenario:
- joid
tests:
- NovaServers.boot_server_from_volume_and_delete
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+      # The following tests currently fail because Gnocchi is required:
+ # HTTP 410: "This telemetry installation is configured to use
+ # Gnocchi. Please use the Gnocchi API available on the
+ # metric endpoint to retrieve data."
+ # Issue: https://bugs.launchpad.net/rally/+bug/1704322
+ - CeilometerMeters.list_matched_meters
+ - CeilometerMeters.list_meters
+ - CeilometerQueries.create_and_query_samples
+ - CeilometerResource.get_tenant_resources
+ - CeilometerResource.list_matched_resources
+ - CeilometerResource.list_resources
+ - CeilometerSamples.list_matched_samples
+ - CeilometerSamples.list_samples
+ - CeilometerStats.create_meter_and_get_stats
+ - CeilometerStats.get_stats
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+      # The following test currently fails due to a bug in
+ # python-ceilometerclient during fetching of event_types
+ # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138
+ # Fix: https://review.openstack.org/#/c/483402/
+ - CeilometerEvents.create_user_and_list_event_types
functionality:
-
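Note: the new blacklist entries use regular expressions ('^os-' matches any scenario, '.+' any installer). An illustrative sketch, not the actual rally.py matching code, of how such entries could be applied:

    import re

    import yaml

    def is_blacklisted(test_name, deploy_scenario, installer_type,
                       blacklist_file='blacklist.txt'):
        """Return True if test_name matches a scenario blacklist entry."""
        with open(blacklist_file) as yfile:
            entries = yaml.safe_load(yfile).get('scenario', [])
        for item in entries:
            if (any(re.search(pat, deploy_scenario)
                    for pat in item.get('scenarios', [])) and
                    any(re.search(pat, installer_type)
                        for pat in item.get('installers', []))):
                if test_name in item.get('tests', []):
                    return True
        return False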
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 6b7c49ca7..fdef8bed0 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__)
class RallyBase(testcase.OSGCTestCase):
"""Base class form Rally testcases implementation."""
- TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'vm', 'all']
+ TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+ 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
GLANCE_IMAGE_PATH = os.path.join(
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
new file mode 100644
index 000000000..7efb5a83b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
@@ -0,0 +1,458 @@
+ CeilometerMeters.list_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.create_meter_and_get_stats:
+ -
+ args:
+ user_id: "user-id"
+ resource_id: "resource-id"
+ counter_volume: 1.0
+ counter_unit: ""
+ counter_type: "cumulative"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_matched_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerMeters.list_matched_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_matched_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ batch_size: 5
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
new file mode 100644
index 000000000..bb070cd3a
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
@@ -0,0 +1,247 @@
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml
index 033edb831..65f101fbe 100644
--- a/functest/opnfv_tests/openstack/rally/task.yaml
+++ b/functest/opnfv_tests/openstack/rally/task.yaml
@@ -31,6 +31,10 @@
{%- include "var/opnfv-neutron.yaml"-%}
{% endif %}
+{% if "ceilometer" in service_list %}
+{%- include "var/opnfv-ceilometer.yaml"-%}
+{% endif %}
+
{% if "quotas" in service_list %}
{%- include "var/opnfv-quotas.yaml"-%}
{% endif %}
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index 6ac721762..4f71b5f5d 100644
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
# logging configuration """
LOGGER = logging.getLogger(__name__)
-class RefstackClient(testcase.OSGCTestCase):
+class RefstackClient(testcase.TestCase):
"""RefstackClient testcase implementation class."""
def __init__(self, **kwargs):
@@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase):
if "case_name" not in kwargs:
kwargs["case_name"] = "refstack_defcore"
super(RefstackClient, self).__init__(**kwargs)
+ self.tempestconf = None
self.conf_path = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
@@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase):
CONST.__getattribute__('OS_INSECURE').lower() == 'true'):
self.insecure = '-k'
+ def generate_conf(self):
+ if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
+ os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
+
+ self.tempestconf = TempestConf()
+ self.tempestconf.generate_tempestconf()
+
def run_defcore(self, conf, testlist):
"""Run defcore sys command."""
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
@@ -65,42 +74,29 @@ class RefstackClient(testcase.OSGCTestCase):
ft_utils.execute_command(cmd)
def run_defcore_default(self):
- """Run default defcare sys command."""
- cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
- .format(self.insecure, self.confpath, self.defcorelist))
+ """Run default defcore sys command."""
+ options = ["-v"] if not self.insecure else ["-v", self.insecure]
+ cmd = (["refstack-client", "test", "-c", self.confpath] +
+ options + ["--test-list", self.defcorelist])
LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd)
- header = ("Refstack environment:\n"
- " SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- CONST.__getattribute__('NODE_NAME'),
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- f_stdout = open(
- os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "refstack.log"), 'w+')
- f_env = open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "environment.log"), 'w+')
- f_env.write(header)
-
- process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, bufsize=1)
-
- with process.stdout:
- for line in iter(process.stdout.readline, b''):
- if 'Tests' in line:
- break
- if re.search(r"\} tempest\.", line):
- LOGGER.info(line.replace('\n', ''))
- f_stdout.write(line)
- process.wait()
-
- f_stdout.close()
- f_env.close()
+ with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "environment.log"), 'w+') as f_env:
+ f_env.write(
+ ("Refstack environment:\n"
+ " SUT: {}\n Scenario: {}\n Node: {}\n Date: {}\n").format(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('NODE_NAME'),
+ time.strftime("%a %b %d %H:%M:%S %Z %Y")))
+
+ with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "refstack.log"), 'w+') as f_stdout:
+ subprocess.call(cmd, shell=False, stdout=f_stdout,
+ stderr=subprocess.STDOUT)
def parse_refstack_result(self):
- """Parse Refstact results."""
+ """Parse Refstack results."""
try:
with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
"refstack.log"), 'r') as logfile:
@@ -119,15 +115,15 @@ class RefstackClient(testcase.OSGCTestCase):
for match in re.findall(r"(- Failed: )(\d+)", output):
num_failures = match[1]
LOGGER.info("".join(match))
- success_testcases = ""
- for match in re.findall(r"\{0\}(.*?)[. ]*ok", output):
- success_testcases += match + ", "
- failed_testcases = ""
- for match in re.findall(r"\{0\}(.*?)[. ]*FAILED", output):
- failed_testcases += match + ", "
- skipped_testcases = ""
- for match in re.findall(r"\{0\}(.*?)[. ]*SKIPPED:", output):
- skipped_testcases += match + ", "
+ success_testcases = []
+ for match in re.findall(r"\{0\} (.*?)[. ]*ok", output):
+ success_testcases.append(match)
+ failed_testcases = []
+ for match in re.findall(r"\{0\} (.*?)[. ]*FAILED", output):
+ failed_testcases.append(match)
+ skipped_testcases = []
+ for match in re.findall(r"\{0\} (.*?)[. ]*SKIPPED:", output):
+ skipped_testcases.append(match)
num_executed = int(num_tests) - int(num_skipped)
@@ -157,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase):
"""
self.start_time = time.time()
- if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
- os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
-
try:
- tempestconf = TempestConf()
- tempestconf.generate_tempestconf()
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
self.run_defcore_default()
self.parse_refstack_result()
res = testcase.TestCase.EX_OK
except Exception:
LOGGER.exception("Error with run")
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ if self.tempestconf:
+ self.tempestconf.clean()
self.stop_time = time.time()
return res
@@ -207,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase):
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+
+ :return: TestCase.EX_OK
+ """
+ LOGGER.info("Initializing the saved state of the OpenStack deployment")
+
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
+
+ os_utils.init_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(RefstackClient, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+ """
+ LOGGER.info("Destroying the resources created for tempest")
+
+ os_utils.perform_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(RefstackClient, self).clean()
+
class RefstackClientParser(object): # pylint: disable=too-few-public-methods
"""Command line argument parser helper."""
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
index 30590b9eb..db7452271 100644
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
@@ -11,13 +11,15 @@ import pkg_resources
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils import openstack_utils
from functest.utils.constants import CONST
+from functest.opnfv_tests.openstack.tempest.tempest \
+ import TempestResourcesManager
""" logging configuration """
logger = logging.getLogger(__name__)
class TempestConf(object):
- def __init__(self):
+ def __init__(self, **kwargs):
self.VERIFIER_ID = conf_utils.get_verifier_id()
self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
self.VERIFIER_ID)
@@ -27,15 +29,22 @@ class TempestConf(object):
self.confpath = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
+ self.resources = TempestResourcesManager(**kwargs)
def generate_tempestconf(self):
try:
openstack_utils.source_credentials(
CONST.__getattribute__('openstack_creds'))
- img_flavor_dict = conf_utils.create_tempest_resources(
- use_custom_images=True, use_custom_flavors=True)
+ resources = self.resources.create(create_project=True,
+ use_custom_images=True,
+ use_custom_flavors=True)
conf_utils.configure_tempest_defcore(
- self.DEPLOYMENT_DIR, img_flavor_dict)
+ self.DEPLOYMENT_DIR,
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ image_id_alt=resources.get("image_id_alt"),
+ flavor_id_alt=resources.get("flavor_id_alt"),
+ tenant_id=resources.get("project_id"))
except Exception as e:
logger.error("error with generating refstack client "
"reference tempest conf file: %s", e)
@@ -48,6 +57,9 @@ class TempestConf(object):
except Exception as e:
logger.error('Error with run: %s', e)
+ def clean(self):
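+ """Clean up the OpenStack resources created for the tempest.conf generation."""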
+ self.resources.cleanup()
+
def main():
logging.basicConfig()
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
index 0b87440b8..19c6a87f2 100644
--- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
+++ b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
@@ -28,9 +28,14 @@ class SnapsTestRunner(unit.Suite):
if 'os_creds' in kwargs:
self.os_creds = kwargs['os_creds']
else:
+ creds_override = None
+ if hasattr(CONST, 'snaps_os_creds_override'):
+ creds_override = CONST.__getattribute__(
+ 'snaps_os_creds_override')
self.os_creds = openstack_tests.get_credentials(
os_env_file=CONST.__getattribute__('openstack_creds'),
- proxy_settings_str=None, ssh_proxy_cmd=None)
+ proxy_settings_str=None, ssh_proxy_cmd=None,
+ overrides=creds_override)
if 'ext_net_name' in kwargs:
self.ext_net_name = kwargs['ext_net_name']
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
index 327ba073d..309f9db16 100644
--- a/functest/opnfv_tests/openstack/snaps/snaps_utils.py
+++ b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
@@ -16,4 +16,4 @@ def get_ext_net_name(os_creds):
"""
neutron = neutron_utils.neutron_client(os_creds)
ext_nets = neutron_utils.get_external_networks(neutron)
- return ext_nets[0]['network']['name']
+ return ext_nets[0].name
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index fa8f00fc8..52fa60032 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -11,10 +11,11 @@ import ConfigParser
import logging
import os
import pkg_resources
-import re
import shutil
import subprocess
+import yaml
+
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
@@ -28,16 +29,21 @@ GLANCE_IMAGE_PATH = os.path.join(
TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'tempest')
TEMPEST_CUSTOM = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
+ 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt')
+ 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt')
TEMPEST_DEFCORE = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt')
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt')
TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'refstack')
+TEMPEST_CONF_YAML = pkg_resources.resource_filename(
+ 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
+TEST_ACCOUNTS_FILE = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml')
CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
@@ -46,96 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
logger = logging.getLogger(__name__)
-def create_tempest_resources(use_custom_images=False,
- use_custom_flavors=False):
- keystone_client = os_utils.get_keystone_client()
-
- logger.debug("Creating tenant and user for Tempest suite")
- tenant_id = os_utils.create_tenant(
- keystone_client,
- CONST.__getattribute__('tempest_identity_tenant_name'),
- CONST.__getattribute__('tempest_identity_tenant_description'))
- if not tenant_id:
- logger.error("Failed to create %s tenant"
- % CONST.__getattribute__('tempest_identity_tenant_name'))
-
- user_id = os_utils.create_user(
- keystone_client,
- CONST.__getattribute__('tempest_identity_user_name'),
- CONST.__getattribute__('tempest_identity_user_password'),
- None, tenant_id)
- if not user_id:
- logger.error("Failed to create %s user" %
- CONST.__getattribute__('tempest_identity_user_name'))
-
- logger.debug("Creating private network for Tempest suite")
- network_dic = os_utils.create_shared_network_full(
- CONST.__getattribute__('tempest_private_net_name'),
- CONST.__getattribute__('tempest_private_subnet_name'),
- CONST.__getattribute__('tempest_router_name'),
- CONST.__getattribute__('tempest_private_subnet_cidr'))
- if network_dic is None:
- raise Exception('Failed to create private network')
-
- image_id = ""
- image_id_alt = ""
- flavor_id = ""
- flavor_id_alt = ""
-
- if (CONST.__getattribute__('tempest_use_custom_images') or
- use_custom_images):
- # adding alternative image should be trivial should we need it
- logger.debug("Creating image for Tempest suite")
- _, image_id = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id is None:
- raise Exception('Failed to create image')
-
- if use_custom_images:
- logger.debug("Creating 2nd image for Tempest suite")
- _, image_id_alt = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name_alt'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id_alt is None:
- raise Exception('Failed to create image')
-
- if (CONST.__getattribute__('tempest_use_custom_flavors') or
- use_custom_flavors):
- # adding alternative flavor should be trivial should we need it
- logger.debug("Creating flavor for Tempest suite")
- _, flavor_id = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id is None:
- raise Exception('Failed to create flavor')
-
- if use_custom_flavors:
- logger.debug("Creating 2nd flavor for tempest_defcore")
- _, flavor_id_alt = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name_alt'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id_alt is None:
- raise Exception('Failed to create flavor')
-
- img_flavor_dict = {}
- img_flavor_dict['image_id'] = image_id
- img_flavor_dict['image_id_alt'] = image_id_alt
- img_flavor_dict['flavor_id'] = flavor_id
- img_flavor_dict['flavor_id_alt'] = flavor_id_alt
-
- return img_flavor_dict
-
-
def get_verifier_id():
"""
- Returns verifer id for current Tempest
+ Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
CONST.__getattribute__('tempest_deployment_name') +
@@ -169,7 +88,7 @@ def get_verifier_deployment_id():
def get_verifier_repo_dir(verifier_id):
"""
- Returns installed verfier repo directory for Tempest
+ Returns installed verifier repo directory for Tempest
"""
if not verifier_id:
verifier_id = get_verifier_id()
@@ -211,44 +130,42 @@ def backup_tempest_config(conf_file):
"""
Copy config file to tempest results directory
"""
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
shutil.copyfile(conf_file,
os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf'))
-def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None,
- MODE=None):
+def configure_tempest(deployment_dir, image_id=None, flavor_id=None,
+ mode=None):
"""
Calls rally verify and updates the generated tempest.conf with
given parameters
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- IMAGE_ID, FLAVOR_ID)
- if MODE == 'feature_multisite':
- configure_tempest_multisite_params(conf_file)
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
-def configure_tempest_defcore(deployment_dir, img_flavor_dict):
+def configure_tempest_defcore(deployment_dir, image_id, flavor_id,
+ image_id_alt, flavor_id_alt, tenant_id):
"""
Add/update needed parameters into tempest.conf file
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- img_flavor_dict.get("image_id"),
- img_flavor_dict.get("flavor_id"))
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
logger.debug("Updating selected tempest.conf parameters for defcore...")
config = ConfigParser.RawConfigParser()
config.read(conf_file)
- config.set('compute', 'image_ref', img_flavor_dict.get("image_id"))
- config.set('compute', 'image_ref_alt',
- img_flavor_dict['image_id_alt'])
- config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id"))
- config.set('compute', 'flavor_ref_alt',
- img_flavor_dict['flavor_id_alt'])
+ config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
+ config.set('oslo_concurrency', 'lock_path',
+ '{}/lock_files'.format(deployment_dir))
+ generate_test_accounts_file(tenant_id=tenant_id)
+ config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE)
+ config.set('scenario', 'img_dir', '{}'.format(deployment_dir))
+ config.set('scenario', 'img_file', 'tempest-image')
+ config.set('compute', 'image_ref', image_id)
+ config.set('compute', 'image_ref_alt', image_id_alt)
+ config.set('compute', 'flavor_ref', flavor_id)
+ config.set('compute', 'flavor_ref_alt', flavor_id_alt)
with open(conf_file, 'wb') as config_file:
config.write(config_file)
@@ -259,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
shutil.copyfile(conf_file, confpath)
+def generate_test_accounts_file(tenant_id):
+ """
+ Add needed tenant and user params into test_accounts.yaml
+ """
+
+ logger.debug("Add needed params into test_accounts.yaml...")
+ accounts_list = [
+ {
+ 'tenant_name':
+ CONST.__getattribute__('tempest_identity_tenant_name'),
+ 'tenant_id': str(tenant_id),
+ 'username': CONST.__getattribute__('tempest_identity_user_name'),
+ 'password':
+ CONST.__getattribute__('tempest_identity_user_password')
+ }
+ ]
+
+ with open(TEST_ACCOUNTS_FILE, "w") as f:
+ yaml.dump(accounts_list, f, default_flow_style=False)
+
+
def configure_tempest_update_params(tempest_conf_file,
- IMAGE_ID=None, FLAVOR_ID=None):
+ image_id=None, flavor_id=None):
"""
Add/update needed parameters into tempest.conf file
"""
@@ -274,21 +212,15 @@ def configure_tempest_update_params(tempest_conf_file,
config.set('compute', 'volume_device_name',
CONST.__getattribute__('tempest_volume_device_name'))
if CONST.__getattribute__('tempest_use_custom_images'):
- if IMAGE_ID is not None:
- config.set('compute', 'image_ref', IMAGE_ID)
+ if image_id is not None:
+ config.set('compute', 'image_ref', image_id)
if IMAGE_ID_ALT is not None:
config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
if CONST.__getattribute__('tempest_use_custom_flavors'):
- if FLAVOR_ID is not None:
- config.set('compute', 'flavor_ref', FLAVOR_ID)
+ if flavor_id is not None:
+ config.set('compute', 'flavor_ref', flavor_id)
if FLAVOR_ID_ALT is not None:
config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- config.set('identity', 'tenant_name',
- CONST.__getattribute__('tempest_identity_tenant_name'))
- config.set('identity', 'username',
- CONST.__getattribute__('tempest_identity_user_name'))
- config.set('identity', 'password',
- CONST.__getattribute__('tempest_identity_user_password'))
config.set('identity', 'region', 'RegionOne')
if os_utils.is_keystone_v3():
auth_version = 'v3'
@@ -323,6 +255,19 @@ def configure_tempest_update_params(tempest_conf_file,
config.set(service, 'endpoint_type',
CONST.__getattribute__('OS_ENDPOINT_TYPE'))
+ logger.debug('Add/Update required params defined in tempest_conf.yaml '
+ 'into tempest.conf file')
+ with open(TEMPEST_CONF_YAML) as f:
+ conf_yaml = yaml.safe_load(f)
+ if conf_yaml:
+ sections = config.sections()
+ for section in conf_yaml:
+ if section not in sections:
+ config.add_section(section)
+ sub_conf = conf_yaml.get(section)
+ for key, value in sub_conf.items():
+ config.set(section, key, value)
+
with open(tempest_conf_file, 'wb') as config_file:
config.write(config_file)
@@ -351,93 +296,3 @@ def configure_verifier(deployment_dir):
% tempest_conf_file)
else:
return tempest_conf_file
-
-
-def configure_tempest_multisite_params(tempest_conf_file):
- """
- Add/update multisite parameters into tempest.conf file generated by Rally
- """
- logger.debug("Updating multisite tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
-
- config.set('service_available', 'kingbird', 'true')
- # cmd = ("openstack endpoint show kingbird | grep publicurl |"
- # "awk '{print $4}' | awk -F '/' '{print $4}'")
- # kingbird_api_version = os.popen(cmd).read()
- # kingbird_api_version = os_utils.get_endpoint(service_type='multisite')
-
- if CI_INSTALLER_TYPE == 'fuel':
- # For MOS based setup, the service is accessible
- # via bind host
- kingbird_conf_path = "/etc/kingbird/kingbird.conf"
- installer_type = CI_INSTALLER_TYPE
- installer_ip = CI_INSTALLER_IP
- installer_username = CONST.__getattribute__(
- 'multisite_{}_installer_username'.format(installer_type))
- installer_password = CONST.__getattribute__(
- 'multisite_{}_installer_password'.format(installer_type))
-
- ssh_options = ("-o UserKnownHostsFile=/dev/null -o "
- "StrictHostKeyChecking=no")
-
- # Get the controller IP from the fuel node
- cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s '
- '\'fuel node --env 1| grep controller | grep "True\| 1" '
- '| awk -F\| "{print \$5}"\'' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip))
- multisite_controller_ip = "".join(os.popen(cmd).read().split())
-
- # Login to controller and get bind host details
- cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" '
- 'grep -e "^bind_" %s \\""' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip,
- multisite_controller_ip,
- kingbird_conf_path))
- bind_details = os.popen(cmd).read()
- bind_details = "".join(bind_details.split())
- # Extract port number from the bind details
- bind_port = re.findall(r"\D(\d{4})", bind_details)[0]
- # Extract ip address from the bind details
- bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
- bind_details)[0]
- kingbird_endpoint_url = "http://%s:%s/" % (bind_host, bind_port)
- else:
- # cmd = "openstack endpoint show kingbird | grep publicurl |\
- # awk '{print $4}' | awk -F '/' '{print $3}'"
- # kingbird_endpoint_url = os.popen(cmd).read()
- kingbird_endpoint_url = os_utils.get_endpoint(service_type='kingbird')
-
- try:
- config.add_section("kingbird")
- except Exception:
- logger.info('kingbird section exist')
-
- # set the domain id
- config.set('auth', 'admin_domain_name', 'default')
-
- config.set('kingbird', 'endpoint_type', 'publicURL')
- config.set('kingbird', 'TIME_TO_SYNC', '120')
- config.set('kingbird', 'endpoint_url', kingbird_endpoint_url)
- config.set('kingbird', 'api_version', 'v1.0')
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- backup_tempest_config(tempest_conf_file)
-
-
-def install_verifier_ext(path):
- """
- Install extension to active verifier
- """
- logger.info("Installing verifier from existing repo...")
- tag = get_repo_tag(path)
- cmd = ("rally verify add-verifier-ext --source {0} "
- "--version {1}"
- .format(path, tag))
- error_msg = ("Problem while adding verifier extension from %s" % path)
- ft_utils.execute_command_raise(cmd, error_msg=error_msg)
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
new file mode 100644
index 000000000..b47a9736a
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
@@ -0,0 +1,13 @@
+# This is an empty configuration file to be filled up with the desired options
+# to generate a custom tempest.conf
+# Examples:
+# network-feature-enabled:
+# port_security: True
+
+# volume-feature-enabled:
+# api_v1: False
+
+# validation:
+# image_ssh_user: root
+# ssh_timeout: 300
+
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
index ac4e37289..df2c31260 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
@@ -1,4 +1,4 @@
# This is an empty file to be filled up with the desired tempest test cases
# Examples:
-#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
-#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops \ No newline at end of file
+#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke]
+#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index e565f5f9d..c7ad4df2f 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -12,7 +12,6 @@ from __future__ import division
import logging
import os
-import pkg_resources
import re
import shutil
import subprocess
@@ -24,15 +23,26 @@ from functest.core import testcase
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+from snaps.openstack import create_flavor
+from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
+from snaps.openstack.create_project import ProjectSettings
+from snaps.openstack.create_network import NetworkSettings, SubnetSettings
+from snaps.openstack.create_user import UserSettings
+from snaps.openstack.tests import openstack_tests
+from snaps.openstack.utils import deploy_utils
+
""" logging configuration """
logger = logging.getLogger(__name__)
-class TempestCommon(testcase.OSGCTestCase):
+class TempestCommon(testcase.TestCase):
def __init__(self, **kwargs):
super(TempestCommon, self).__init__(**kwargs)
+ self.resources = TempestResourcesManager(**kwargs)
self.MODE = ""
self.OPTION = ""
self.VERIFIER_ID = conf_utils.get_verifier_id()
@@ -63,8 +73,6 @@ class TempestCommon(testcase.OSGCTestCase):
else:
if self.MODE == 'smoke':
testr_mode = "smoke"
- elif self.MODE == 'feature_multisite':
- testr_mode = "'[Kk]ingbird'"
elif self.MODE == 'full':
testr_mode = ""
else:
@@ -187,25 +195,32 @@ class TempestCommon(testcase.OSGCTestCase):
try:
self.result = 100 * int(num_success) / int(num_executed)
except ZeroDivisionError:
- logger.error("No test has been executed")
self.result = 0
- return
+ if int(num_tests) > 0:
+ logger.info("All tests have been skipped")
+ else:
+ logger.error("No test has been executed")
+ return
with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
"tempest.log"), 'r') as logfile:
output = logfile.read()
- error_logs = ""
- for match in re.findall('(.*?)[. ]*fail ', output):
- error_logs += match
- skipped_testcase = ""
- for match in re.findall('(.*?)[. ]*skip:', output):
- skipped_testcase += match
+ success_testcases = []
+ for match in re.findall('.*\{0\} (.*?)[. ]*success ', output):
+ success_testcases.append(match)
+ failed_testcases = []
+ for match in re.findall('.*\{0\} (.*?)[. ]*fail ', output):
+ failed_testcases.append(match)
+ skipped_testcases = []
+ for match in re.findall('.*\{0\} (.*?)[. ]*skip:', output):
+ skipped_testcases.append(match)
self.details = {"tests": int(num_tests),
"failures": int(num_failures),
- "errors": error_logs,
- "skipped": skipped_testcase}
+ "success": success_testcases,
+ "errors": failed_testcases,
+ "skipped": skipped_testcases}
except Exception:
self.result = 0
@@ -218,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase):
try:
if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
- image_and_flavor = conf_utils.create_tempest_resources()
+ resources = self.resources.create()
conf_utils.configure_tempest(
self.DEPLOYMENT_DIR,
- IMAGE_ID=image_and_flavor.get("image_id"),
- FLAVOR_ID=image_and_flavor.get("flavor_id"),
- MODE=self.MODE)
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ mode=self.MODE)
self.generate_test_list(self.VERIFIER_REPO_DIR)
self.apply_tempest_blacklist()
self.run_verifier_tests()
@@ -232,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase):
except Exception as e:
logger.error('Error with run: %s' % e)
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ self.resources.cleanup()
self.stop_time = time.time()
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+
+ :return: TestCase.EX_OK
+ """
+ logger.info("Initializing the saved state of the OpenStack deployment")
+
+ if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
+ os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
+
+ # Make sure that the verifier is configured
+ conf_utils.configure_verifier(self.DEPLOYMENT_DIR)
+
+ os_utils.init_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(TempestCommon, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources
+ created by Tempest.
+ """
+ logger.info("Destroying the resources created for refstack")
+
+ os_utils.perform_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(TempestCommon, self).clean()
+
class TempestSmokeSerial(TempestCommon):
@@ -266,18 +320,6 @@ class TempestFullParallel(TempestCommon):
self.MODE = "full"
-class TempestMultisite(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'multisite'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "feature_multisite"
- self.OPTION = "--concurrency 1"
- conf_utils.install_verifier_ext(
- pkg_resources.resource_filename('kingbird', '..'))
-
-
class TempestCustom(TempestCommon):
def __init__(self, **kwargs):
@@ -296,3 +338,170 @@ class TempestDefcore(TempestCommon):
TempestCommon.__init__(self, **kwargs)
self.MODE = "defcore"
self.OPTION = "--concurrency 1"
+
+
+class TempestResourcesManager(object):
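+ """Creates and cleans up the OpenStack resources (project, user,
+ network, images and flavors) required by a Tempest run."""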
+
+ def __init__(self, **kwargs):
+ self.os_creds = None
+ if 'os_creds' in kwargs:
+ self.os_creds = kwargs['os_creds']
+ else:
+ self.os_creds = openstack_tests.get_credentials(
+ os_env_file=CONST.__getattribute__('openstack_creds'))
+
+ self.creators = list()
+
+ if hasattr(CONST, 'snaps_images_cirros'):
+ self.cirros_image_config = CONST.__getattribute__(
+ 'snaps_images_cirros')
+ else:
+ self.cirros_image_config = None
+
+ def create(self, use_custom_images=False, use_custom_flavors=False,
+ create_project=False):
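+ """Create the resources needed by Tempest and return their ids."""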
+ if create_project:
+ logger.debug("Creating project (tenant) for Tempest suite")
+ project_name = CONST.__getattribute__(
+ 'tempest_identity_tenant_name')
+ project_creator = deploy_utils.create_project(
+ self.os_creds, ProjectSettings(
+ name=project_name,
+ description=CONST.__getattribute__(
+ 'tempest_identity_tenant_description')))
+ if (project_creator is None or
+ project_creator.get_project() is None):
+ raise Exception("Failed to create tenant")
+ project_id = project_creator.get_project().id
+ self.creators.append(project_creator)
+
+ logger.debug("Creating user for Tempest suite")
+ user_creator = deploy_utils.create_user(
+ self.os_creds, UserSettings(
+ name=CONST.__getattribute__('tempest_identity_user_name'),
+ password=CONST.__getattribute__(
+ 'tempest_identity_user_password'),
+ project_name=project_name))
+ if user_creator is None or user_creator.get_user() is None:
+ raise Exception("Failed to create user")
+ user_id = user_creator.get_user().id
+ self.creators.append(user_creator)
+ else:
+ project_name = None
+ project_id = None
+ user_id = None
+
+ logger.debug("Creating private network for Tempest suite")
+ network_creator = deploy_utils.create_network(
+ self.os_creds, NetworkSettings(
+ name=CONST.__getattribute__('tempest_private_net_name'),
+ project_name=project_name,
+ subnet_settings=[SubnetSettings(
+ name=CONST.__getattribute__('tempest_private_subnet_name'),
+ cidr=CONST.__getattribute__('tempest_private_subnet_cidr'))
+ ]))
+ if network_creator is None or network_creator.get_network() is None:
+ raise Exception("Failed to create private network")
+ self.creators.append(network_creator)
+
+ image_id = None
+ image_id_alt = None
+ flavor_id = None
+ flavor_id_alt = None
+
+ if (CONST.__getattribute__('tempest_use_custom_images') or
+ use_custom_images):
+ logger.debug("Creating image for Tempest suite")
+ image_base_name = CONST.__getattribute__('openstack_image_name')
+ os_image_settings = openstack_tests.cirros_image_settings(
+ image_base_name, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating image for Tempest suite")
+ image_creator = deploy_utils.create_image(
+ self.os_creds, os_image_settings)
+ if image_creator is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator)
+ image_id = image_creator.get_image().id
+
+ if use_custom_images:
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_base_name_alt = CONST.__getattribute__(
+ 'openstack_image_name_alt')
+ os_image_settings_alt = openstack_tests.cirros_image_settings(
+ image_base_name_alt, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_creator_alt = deploy_utils.create_image(
+ self.os_creds, os_image_settings_alt)
+ if image_creator_alt is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator_alt)
+ image_id_alt = image_creator_alt.get_image().id
+
+ if (CONST.__getattribute__('tempest_use_custom_flavors') or
+ use_custom_flavors):
+ logger.info("Creating flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata))
+ flavor = flavor_creator.create()
+ if flavor is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator)
+ flavor_id = flavor.id
+
+ if use_custom_flavors:
+ logger.info("Creating 2nd flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata_alt = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator_alt = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name_alt'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata_alt))
+ flavor_alt = flavor_creator_alt.create()
+ if flavor_alt is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator_alt)
+ flavor_id_alt = flavor_alt.id
+
+ print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, "
+ "flavor_id: %s, flavor_id_alt: %s" % (
+ image_id, image_id_alt, flavor_id, flavor_id_alt,))
+
+ result = {
+ 'image_id': image_id,
+ 'image_id_alt': image_id_alt,
+ 'flavor_id': flavor_id,
+ 'flavor_id_alt': flavor_id_alt
+ }
+
+ if create_project:
+ result['project_id'] = project_id
+ result['tenant_id'] = project_id # for compatibility
+ result['user_id'] = user_id
+
+ return result
+
+ def cleanup(self):
+ """
+ Cleanup all OpenStack objects. Should be called on completion.
+ """
+ for creator in reversed(self.creators):
+ try:
+ creator.clean()
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
diff --git a/functest/opnfv_tests/openstack/vping/ping.sh b/functest/opnfv_tests/openstack/vping/ping.sh
index 693b86825..15f5e84e1 100644
--- a/functest/opnfv_tests/openstack/vping/ping.sh
+++ b/functest/opnfv_tests/openstack/vping/ping.sh
@@ -1,13 +1,10 @@
#!/bin/sh
-while true; do
- ping -c 1 $1 2>&1 >/dev/null
- RES=$?
- if [ "Z$RES" = "Z0" ] ; then
- echo 'vPing OK'
- break
- else
- echo 'vPing KO'
- fi
- sleep 1
-done \ No newline at end of file
+
+ping -c 1 $1 2>&1 >/dev/null
+RES=$?
+if [ "Z$RES" = "Z0" ] ; then
+ echo 'vPing OK'
+else
+ echo 'vPing KO'
+fi
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 74fbce1b0..40fcb07f0 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -43,8 +43,14 @@ class VPingBase(testcase.TestCase):
if 'os_creds' in kwargs:
self.os_creds = kwargs['os_creds']
else:
+ creds_override = None
+ if hasattr(CONST, 'snaps_os_creds_override'):
+ creds_override = CONST.__getattribute__(
+ 'snaps_os_creds_override')
+
self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'))
+ os_env_file=CONST.__getattribute__('openstack_creds'),
+ overrides=creds_override)
self.creators = list()
self.image_creator = None
@@ -102,14 +108,33 @@ class VPingBase(testcase.TestCase):
'vping_private_subnet_name') + self.guid
private_subnet_cidr = CONST.__getattribute__(
'vping_private_subnet_cidr')
+
+ vping_network_type = None
+ vping_physical_network = None
+ vping_segmentation_id = None
+
+ if hasattr(CONST, 'vping_network_type'):
+ vping_network_type = CONST.__getattribute__(
+ 'vping_network_type')
+ if hasattr(CONST, 'vping_physical_network'):
+ vping_physical_network = CONST.__getattribute__(
+ 'vping_physical_network')
+ if hasattr(CONST, 'vping_segmentation_id'):
+ vping_segmentation_id = CONST.__getattribute__(
+ 'vping_segmentation_id')
+
self.logger.info(
"Creating network with name: '%s'" % private_net_name)
self.network_creator = deploy_utils.create_network(
self.os_creds,
- NetworkSettings(name=private_net_name,
- subnet_settings=[SubnetSettings(
- name=private_subnet_name,
- cidr=private_subnet_cidr)]))
+ NetworkSettings(
+ name=private_net_name,
+ network_type=vping_network_type,
+ physical_network=vping_physical_network,
+ segmentation_id=vping_segmentation_id,
+ subnet_settings=[SubnetSettings(
+ name=private_subnet_name,
+ cidr=private_subnet_cidr)]))
self.creators.append(self.network_creator)
self.logger.info(
diff --git a/functest/opnfv_tests/openstack/vping/vping_userdata.py b/functest/opnfv_tests/openstack/vping/vping_userdata.py
index 9aed4c10a..8088a4db5 100644
--- a/functest/opnfv_tests/openstack/vping/vping_userdata.py
+++ b/functest/opnfv_tests/openstack/vping/vping_userdata.py
@@ -94,7 +94,7 @@ class VPingUserdata(vping_base.VPingBase):
while True:
time.sleep(1)
- p_console = vm_creator.get_os_vm_server_obj().get_console_output()
+ p_console = vm_creator.get_console_output()
if "vPing OK" in p_console:
self.logger.info("vPing detected!")
exit_code = TestCase.EX_OK
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index ede0fc500..841da834b 100644
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -66,8 +66,7 @@ class ODLResultVisitor(robot.api.ResultVisitor):
class ODLTests(testcase.TestCase):
"""ODL test runner."""
- odl_test_repo = os.path.join(
- constants.CONST.__getattribute__('dir_repos'), 'odl_test')
+ odl_test_repo = constants.CONST.__getattribute__('dir_repo_odl_test')
neutron_suite_dir = os.path.join(odl_test_repo,
"csit/suites/openstack/neutron")
basic_suite_dir = os.path.join(odl_test_repo,
@@ -234,7 +233,7 @@ class ODLTests(testcase.TestCase):
elif installer_type == 'joid':
kwargs['odlip'] = os.environ['SDN_CONTROLLER']
elif installer_type == 'compass':
- kwargs['odlwebport'] = '8181'
+ kwargs['odlrestconfport'] = '8080'
elif installer_type == 'daisy':
kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
kwargs['odlwebport'] = '8181'
diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py b/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py
index dfaa5cc14..a6d192ee5 100644
--- a/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py
+++ b/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py
@@ -64,7 +64,7 @@ class Connection(Foundation):
"""
os.getenv only returns current user value
GetEnvValue returns a environment value of
- current handle
+ current handle
eg: GetEnvValue(handle,'HOME')
"""
envhandle = handle
diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py b/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py
index cb75b5c3c..875a2dc9b 100644
--- a/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py
+++ b/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py
@@ -1,11 +1,11 @@
"""
Description:
- This file is used to setup the running environment
- Include Download code,setup environment variable
- Set onos running config
- Set user name/password
- Onos-push-keys and so on
- lanqinglong@huawei.com
+This file is used to setup the running environment
+Include Download code,setup environment variable
+Set onos running config
+Set user name/password
+Onos-push-keys and so on
+lanqinglong@huawei.com
#
# All rights reserved. This program and the accompanying materials
@@ -17,7 +17,7 @@ Description:
import logging
import pexpect
-import pxssh
+from pexpect import pxssh
import re
import os
import sys
@@ -196,10 +196,10 @@ class Environment(Connection):
def ChangeTestCasePara(self, testcase, user, password):
"""
- When running test script, there's something need
- to change in every test folder's *.param & *.topo files
- user: onos&compute node user
- password: onos&compute node password
+ When running test script, there\'s something need
+ to change in every test folder\'s \*.param & \*.topo files
+ user: onos\&compute node user
+ password: onos\&compute node password
"""
self.logger.info("Now Changing " + testcase + " name&password")
if self.masterusername == 'root':
diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py
deleted file mode 100644
index 71e3c972a..000000000
--- a/functest/opnfv_tests/vnf/aaa/aaa.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-
-import functest.core.vnf as vnf
-
-
-class AaaVnf(vnf.VnfOnBoarding):
- """AAA VNF sample"""
-
- logger = logging.getLogger(__name__)
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "aaa"
- super(AaaVnf, self).__init__(**kwargs)
-
- def deploy_orchestrator(self):
- self.logger.info("No VNFM needed to deploy a free radius here")
- return True
-
- def deploy_vnf(self):
- self.logger.info("Freeradius VNF deployment")
- # find a way to deploy freeradius and tester (heat,manual, ..)
- deploy_vnf = {'status': 'PASS', 'version': 'xxxx'}
- self.details['deploy_vnf'] = deploy_vnf
- return True
-
- def test_vnf(self):
- self.logger.info("Run test towards freeradius")
- # once the freeradius is deployed..make some tests
- test_vnf = {'status': 'PASS', 'version': 'xxxx'}
- self.details['test_vnf'] = test_vnf
- return True
diff --git a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
index 25ddca216..8851f7a48 100644
--- a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
+++ b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
@@ -10,7 +10,9 @@ import json
import logging
import os
import pkg_resources
+import shlex
import shutil
+import subprocess
import time
import requests
@@ -43,7 +45,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
def config_ellis(self, ellis_ip, signup_code='secret', two_numbers=False):
output_dict = {}
- self.logger.info('Configure Ellis: %s', ellis_ip)
+ self.logger.debug('Configure Ellis: %s', ellis_ip)
output_dict['ellis_ip'] = ellis_ip
account_url = 'http://{0}/accounts'.format(ellis_ip)
params = {"password": "functest",
@@ -54,7 +56,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
output_dict['login'] = params
if rq.status_code != 201 and rq.status_code != 409:
raise Exception("Unable to create an account for number provision")
- self.logger.info('Account is created on Ellis: %s', params)
+ self.logger.debug('Account is created on Ellis: %s', params)
session_url = 'http://{0}/session'.format(ellis_ip)
session_data = {
@@ -66,13 +68,13 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
if rq.status_code != 201:
raise Exception('Failed to get cookie for Ellis')
cookies = rq.cookies
- self.logger.info('Cookies: %s', cookies)
+ self.logger.debug('Cookies: %s', cookies)
number_url = 'http://{0}/accounts/{1}/numbers'.format(
ellis_ip,
params['email'])
- self.logger.info('Create 1st calling number on Ellis')
- i = 24
+ self.logger.debug('Create 1st calling number on Ellis')
+ i = 30
while rq.status_code != 200 and i > 0:
try:
number_res = self.create_ellis_number(number_url, cookies)
@@ -86,7 +88,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
output_dict['number'] = number_res
if two_numbers:
- self.logger.info('Create 2nd calling number on Ellis')
+ self.logger.debug('Create 2nd calling number on Ellis')
number_res = self.create_ellis_number(number_url, cookies)
output_dict['number2'] = number_res
@@ -109,19 +111,17 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
bono_ip=None, ellis_ip=None,
signup_code='secret'):
self.logger.info('Run Clearwater live test')
- nameservers = ft_utils.get_resolvconf_ns()
- resolvconf = ['{0}{1}{2}'.format(os.linesep, 'nameserver ', ns)
- for ns in nameservers]
- self.logger.debug('resolvconf: %s', resolvconf)
dns_file = '/etc/resolv.conf'
dns_file_bak = '/etc/resolv.conf.bak'
+ self.logger.debug('Backup %s -> %s', dns_file, dns_file_bak)
shutil.copy(dns_file, dns_file_bak)
- script = ('echo -e "nameserver {0}{1}" > {2};'
- 'source /etc/profile.d/rvm.sh;'
- 'cd {3};'
- 'rake test[{4}] SIGNUP_CODE={5}'
- .format(dns_ip,
- ''.join(resolvconf),
+ cmd = ("dnsmasq -d -u root --server=/clearwater.opnfv/{0} "
+ "-r /etc/resolv.conf.bak".format(dns_ip))
+ dnsmasq_process = subprocess.Popen(shlex.split(cmd))
+ script = ('echo -e "nameserver {0}" > {1};'
+ 'cd {2};'
+ 'rake test[{3}] SIGNUP_CODE={4}'
+ .format('127.0.0.1',
dns_file,
self.test_dir,
public_domain,
@@ -131,12 +131,12 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
script = '{0}{1}'.format(script, subscript)
script = ('{0}{1}'.format(script, ' --trace'))
cmd = "/bin/bash -c '{0}'".format(script)
- self.logger.info('Live test cmd: %s', cmd)
+ self.logger.debug('Live test cmd: %s', cmd)
output_file = os.path.join(self.result_dir, "ims_test_output.txt")
ft_utils.execute_command(cmd,
error_msg='Clearwater live test failed',
output_file=output_file)
-
+ dnsmasq_process.kill()
with open(dns_file_bak, 'r') as bak_file:
result = bak_file.read()
with open(dns_file, 'w') as f:
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index fafc77e13..b07eaee23 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-# Copyright (c) 2016 Orange and others.
+# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -25,16 +25,16 @@ from functest.utils.constants import CONST
import functest.utils.openstack_utils as os_utils
from snaps.openstack.os_credentials import OSCreds
-from snaps.openstack.create_network import NetworkSettings, SubnetSettings, \
- OpenStackNetwork
-from snaps.openstack.create_security_group import SecurityGroupSettings, \
- SecurityGroupRuleSettings,\
- Direction, Protocol, \
- OpenStackSecurityGroup
+from snaps.openstack.create_network import (NetworkSettings, SubnetSettings,
+ OpenStackNetwork)
+from snaps.openstack.create_security_group import (SecurityGroupSettings,
+ SecurityGroupRuleSettings,
+ Direction, Protocol,
+ OpenStackSecurityGroup)
from snaps.openstack.create_router import RouterSettings, OpenStackRouter
-from snaps.openstack.create_instance import VmInstanceSettings, \
- FloatingIpSettings, \
- OpenStackVmInstance
+from snaps.openstack.create_instance import (VmInstanceSettings,
+ FloatingIpSettings,
+ OpenStackVmInstance)
from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
from snaps.openstack.create_image import ImageSettings, OpenStackImage
from snaps.openstack.create_keypairs import KeypairSettings, OpenStackKeypair
@@ -110,15 +110,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
# needs some images
self.__logger.info("Upload some OS images if it doesn't exist")
- for image_name, image_url in self.images.iteritems():
- self.__logger.info("image: %s, url: %s", image_name, image_url)
- if image_url and image_name:
+ for image_name, image_file in self.images.iteritems():
+ self.__logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
image_creator = OpenStackImage(
self.snaps_creds,
ImageSettings(name=image_name,
image_user='cloud',
img_format='qcow2',
- url=image_url))
+ image_file=image_file))
image_creator.create()
# self.created_object.append(image_creator)
@@ -239,6 +239,8 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
while str(cfy_status) != 'running' and retry:
try:
cfy_status = cfy_client.manager.get_status()['status']
+ self.__logger.debug("The current manager status is %s",
+ cfy_status)
except Exception: # pylint: disable=broad-except
self.__logger.warning("Cloudify Manager isn't " +
"up and running. Retrying ...")
@@ -263,14 +265,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
self.__logger.info("Put private keypair in manager")
if manager_creator.vm_ssh_active(block=True):
ssh = manager_creator.ssh_client()
- scp = SCPClient(ssh.get_transport())
+ scp = SCPClient(ssh.get_transport(), socket_timeout=15.0)
scp.put(kp_file, '~/')
cmd = "sudo cp ~/cloudify_ims.pem /etc/cloudify/"
- ssh.exec_command(cmd)
+ run_blocking_ssh_command(ssh, cmd)
cmd = "sudo chmod 444 /etc/cloudify/cloudify_ims.pem"
- ssh.exec_command(cmd)
+ run_blocking_ssh_command(ssh, cmd)
cmd = "sudo yum install -y gcc python-devel"
- ssh.exec_command(cmd)
+ run_blocking_ssh_command(ssh, cmd, "Unable to install packages \
+ on manager")
self.details['orchestrator'].update(status='PASS', duration=duration)
@@ -292,15 +295,17 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
descriptor.get('file_name'))
self.__logger.info("Get or create flavor for all clearwater vm")
- self.exist_obj['flavor2'], flavor_id = os_utils.get_or_create_flavor(
- self.vnf['requirements']['flavor']['name'],
- self.vnf['requirements']['flavor']['ram_min'],
- '30',
- '1',
- public=True)
+ flavor_settings = FlavorSettings(
+ name=self.vnf['requirements']['flavor']['name'],
+ ram=self.vnf['requirements']['flavor']['ram_min'],
+ disk=25,
+ vcpus=1)
+ flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_creator.create()
+ self.created_object.append(flavor_creator)
self.vnf['inputs'].update(dict(
- flavor_id=flavor_id,
+ flavor_id=self.vnf['requirements']['flavor']['name'],
))
self.__logger.info("Create VNF Instance")
@@ -371,7 +376,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
try:
cfy_client.executions.cancel(execution['id'],
force=True)
- except:
+ except: # pylint: disable=broad-except
self.__logger.warn("Can't cancel the current exec")
execution = cfy_client.executions.start(
@@ -383,7 +388,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
wait_for_execution(cfy_client, execution, self.__logger)
cfy_client.deployments.delete(self.vnf['descriptor'].get('name'))
cfy_client.blueprints.delete(self.vnf['descriptor'].get('name'))
- except:
+ except: # pylint: disable=broad-except
self.__logger.warn("Some issue during the undeployment ..")
self.__logger.warn("Tenant clean continue ..")
@@ -507,3 +512,10 @@ def sig_test_format(sig_test):
total_sig_test_result['failures'] = nb_failures
total_sig_test_result['skipped'] = nb_skipped
return total_sig_test_result
+
+
+def run_blocking_ssh_command(ssh, cmd, error_msg="Unable to run this command"):
+ """Command to run ssh command with the exit status."""
+ stdin, stdout, stderr = ssh.exec_command(cmd)
+ if stdout.channel.recv_exit_status() != 0:
+ raise Exception(error_msg)
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
index f1028ce73..280e0a6b8 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
@@ -1,6 +1,6 @@
tenant_images:
- ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- cloudify_manager_4.0: http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
+ ubuntu_14.04: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+ cloudify_manager_4.0: /home/opnfv/functest/images/cloudify-manager-premium-4.0.1.qcow2
orchestrator:
name: cloudify
version: '4.0'
@@ -19,7 +19,7 @@ vnf:
version: '122'
requirements:
flavor:
- name: m1.medium
+ name: m1.small
ram_min: 2048
inputs:
image_id: 'ubuntu_14.04'
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
deleted file mode 100644
index d420705aa..000000000
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import json
-import logging
-import os
-import time
-
-from opera import openo_connect
-import requests
-
-import functest.opnfv_tests.vnf.ims.clearwater_ims_base as clearwater_ims_base
-from functest.utils.constants import CONST
-
-
-class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "opera_ims"
- super(OperaIms, self).__init__(**kwargs)
- self.logger = logging.getLogger(__name__)
- self.ellis_file = os.path.join(
- CONST.__getattribute__('dir_results'), 'ellis.info')
- self.live_test_file = os.path.join(
- CONST.__getattribute__('dir_results'), 'live_test_report.json')
- try:
- self.openo_msb_endpoint = os.environ['OPENO_MSB_ENDPOINT']
- except KeyError:
- raise Exception('OPENO_MSB_ENDPOINT is not specified,'
- ' put it as <OPEN-O ip>:<port>')
- else:
- self.logger.info('OPEN-O endpoint is: %s', self.openo_msb_endpoint)
-
- def prepare(self):
- pass
-
- def clean(self):
- pass
-
- def deploy_vnf(self):
- try:
- openo_connect.create_service(self.openo_msb_endpoint,
- 'functest_opera',
- 'VNF for functest testing')
- except Exception as e:
- self.logger.error(e)
- return {'status': 'FAIL', 'result': e}
- else:
- self.logger.info('vIMS deployment is kicked off')
- return {'status': 'PASS', 'result': ''}
-
- def dump_info(self, info_file, result):
- with open(info_file, 'w') as f:
- self.logger.debug('Save information to file: %s', info_file)
- json.dump(result, f)
-
- def test_vnf(self):
- vnfm_ip = openo_connect.get_vnfm_ip(self.openo_msb_endpoint)
- self.logger.info('VNFM IP: %s', vnfm_ip)
- vnf_status_url = 'http://{0}:5000/api/v1/model/status'.format(vnfm_ip)
- vnf_alive = False
- retry = 40
-
- self.logger.info('Check the VNF status')
- while retry > 0:
- rq = requests.get(vnf_status_url, timeout=90)
- response = rq.json()
- vnf_alive = response['vnf_alive']
- msg = response['msg']
- self.logger.info(msg)
- if vnf_alive:
- break
- self.logger.info('check again in one and half a minute...')
- retry = retry - 1
- time.sleep(90)
-
- if not vnf_alive:
- raise Exception('VNF failed to start: {0}'.format(msg))
-
- ellis_config_url = ('http://{0}:5000/api/v1/model/ellis/configure'
- .format(vnfm_ip))
- rq = requests.get(ellis_config_url, timeout=90)
- if rq.json() and not rq.json()['ellis_ok']:
- self.logger.error(rq.json()['data'])
- raise Exception('Failed to configure Ellis')
-
- self.logger.info('Get Clearwater deployment detail')
- vnf_info_url = ('http://{0}:5000/api/v1/model/output'
- .format(vnfm_ip))
- rq = requests.get(vnf_info_url, timeout=90)
- data = rq.json()['data']
- self.logger.info(data)
- bono_ip = data['bono_ip']
- ellis_ip = data['ellis_ip']
- dns_ip = data['dns_ip']
- result = self.config_ellis(ellis_ip, 'signup', True)
- self.logger.debug('Ellis Result: %s', result)
- self.dump_info(self.ellis_file, result)
-
- if dns_ip:
- vims_test_result = self.run_clearwater_live_test(
- dns_ip,
- 'clearwater.local',
- bono_ip,
- ellis_ip,
- 'signup')
- if vims_test_result != '':
- self.dump_info(self.live_test_file, vims_test_result)
- return {'status': 'PASS', 'result': vims_test_result}
- else:
- return {'status': 'FAIL', 'result': ''}
-
- def main(self, **kwargs):
- self.logger.info("Start to run Opera vIMS VNF onboarding test")
- self.execute()
- self.logger.info("Opera vIMS VNF onboarding test finished")
- if self.result is "PASS":
- return self.EX_OK
- else:
- return self.EX_RUN_ERROR
-
- def run(self):
- kwargs = {}
- return self.main(**kwargs)
diff --git a/functest/opnfv_tests/vnf/ims/orchestra.yaml b/functest/opnfv_tests/vnf/ims/orchestra.yaml
new file mode 100644
index 000000000..4cd18e72b
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra.yaml
@@ -0,0 +1,61 @@
+tenant_images:
+ orchestrator:
+ ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+ orchestra_openims:
+ openims: /home/opnfv/functest/images/img
+ orchestra_clearwaterims:
+ ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+mano:
+ name: OpenBaton
+ version: '3.2.0'
+ requirements:
+ flavor:
+ name: openbaton
+ ram_min: 4096
+ disk: 5
+ vcpus: 2
+ image: 'ubuntu-14.04-server-cloudimg-amd64-disk1'
+ bootstrap:
+ url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap
+ config:
+ url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file
+ gvnfm:
+ userdata:
+ url: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh
+ credentials:
+ username: admin
+ password: openbaton
+
+orchestra_openims:
+ name: OpenIMS
+ descriptor:
+ url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json
+ requirements:
+ flavor:
+ name: m1.small
+ ram_min: 2048
+ disk: 5
+ vcpus: 2
+ test:
+ scscf:
+ ports: [3870, 6060]
+ pcscf:
+ ports: [4060]
+ icscf:
+ ports: [3869, 5060]
+ fhoss:
+ ports: [3868]
+ bind9:
+ ports: []
+
+orchestra_clearwaterims:
+ name: Clearwater IMS
+ descriptor:
+ url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/ClearwaterIMS/3.2.0/json
+ requirements:
+ flavor:
+ name: m1.small
+ ram_min: 2048
+ disk: 5
+ vcpus: 2
+ test:
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
new file mode 100644
index 000000000..a54059966
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
@@ -0,0 +1,682 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Orchestra Clearwater IMS testcase implementation."""
+
+import json
+import logging
+import os
+import socket
+import time
+import pkg_resources
+import yaml
+
+from snaps.openstack.create_image import OpenStackImage, ImageSettings
+from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings
+from snaps.openstack.create_security_group import (
+ OpenStackSecurityGroup,
+ SecurityGroupSettings,
+ SecurityGroupRuleSettings,
+ Direction,
+ Protocol)
+from snaps.openstack.create_network import (
+ OpenStackNetwork,
+ NetworkSettings,
+ SubnetSettings,
+ PortSettings)
+from snaps.openstack.create_router import OpenStackRouter, RouterSettings
+from snaps.openstack.os_credentials import OSCreds
+from snaps.openstack.create_instance import (
+ VmInstanceSettings,
+ OpenStackVmInstance)
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+
+import functest.core.vnf as vnf
+import functest.utils.openstack_utils as os_utils
+from functest.utils.constants import CONST
+
+from org.openbaton.cli.errors.errors import NfvoException
+from org.openbaton.cli.agents.agents import MainAgent
+
+
+__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>"
+# ----------------------------------------------------------
+#
+# UTILS
+#
+# -----------------------------------------------------------
+
+
+def get_config(parameter, file_path):
+ """
+ Get config parameter.
+
+ Returns the value of a given parameter in file.yaml
+ parameter must be given in string format with dots
+ Example: general.openstack.image_name
+ """
+ with open(file_path) as config_file:
+ file_yaml = yaml.safe_load(config_file)
+ config_file.close()
+ value = file_yaml
+ for element in parameter.split("."):
+ value = value.get(element)
+ if value is None:
+ raise ValueError("The parameter %s is not defined in"
+ " reporting.yaml", parameter)
+ return value
+
+
+def servertest(host, port):
+ """Method to test that a server is reachable at IP:port"""
+ args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in args:
+ sock = socket.socket(family, socktype, proto)
+ try:
+ sock.connect(sockaddr)
+ except socket.error:
+ return False
+ else:
+ sock.close()
+ return True
+
+
+def get_userdata(orchestrator):
+ """Build userdata for Open Baton machine"""
+ userdata = "#!/bin/bash\n"
+ userdata += "echo \"Executing userdata...\"\n"
+ userdata += "set -x\n"
+ userdata += "set -e\n"
+ userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
+ userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
+ userdata += "echo \"Install curl...\"\n"
+ userdata += "apt-get install curl\n"
+ userdata += "echo \"Inject public key...\"\n"
+ userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
+ "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
+ "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
+ "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
+ "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
+ "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
+ "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
+ "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
+ "horized_keys\n")
+ userdata += "echo \"Download bootstrap...\"\n"
+ userdata += ("curl -s %s "
+ "> ./bootstrap\n" % orchestrator['bootstrap']['url'])
+ userdata += ("curl -s %s" "> ./config_file\n" %
+ orchestrator['bootstrap']['config']['url'])
+ userdata += ("echo \"Disable usage of mysql...\"\n")
+ userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
+ userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
+ % orchestrator['details']['fip'].ip)
+ userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
+ "=%s/g /config_file\n" % orchestrator['details']['fip'].ip)
+ userdata += "echo \"Set autostart of components to 'false'\"\n"
+ userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
+ userdata += "echo \"Execute bootstrap...\"\n"
+ bootstrap = "sh ./bootstrap release -configFile=./config_file"
+ userdata += bootstrap + "\n"
+ userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
+ userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
+ "/etc/openbaton/openbaton-nfvo.properties\n")
+ userdata += (
+ "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
+ orchestrator['gvnfm']['userdata']['url'])
+ userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \
+ "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
+ userdata += "echo \"Starting NFVO\"\n"
+ userdata += "service openbaton-nfvo restart\n"
+ userdata += "echo \"Starting Generic VNFM\"\n"
+ userdata += "service openbaton-vnfm-generic restart\n"
+ userdata += "echo \"...end of userdata...\"\n"
+ return userdata
+
+
+class ClearwaterImsVnf(vnf.VnfOnBoarding):
+ """Clearwater IMS VNF deployed with openBaton orchestrator"""
+
+ logger = logging.getLogger(__name__)
+
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "orchestra_clearwaterims"
+ super(ClearwaterImsVnf, self).__init__(**kwargs)
+ # self.logger = logging.getLogger("functest.ci.run_tests.orchestra")
+ self.logger.info("kwargs %s", (kwargs))
+
+ self.case_dir = pkg_resources.resource_filename(
+ 'functest', 'opnfv_tests/vnf/ims/')
+ self.data_dir = CONST.__getattribute__('dir_ims_data')
+ self.test_dir = CONST.__getattribute__('dir_repo_vims_test')
+ self.created_resources = []
+ self.logger.info("%s VNF onboarding test starting", self.case_name)
+
+ try:
+ self.config = CONST.__getattribute__(
+ 'vnf_{}_config'.format(self.case_name))
+ except BaseException:
+ raise Exception("Orchestra VNF config file not found")
+ config_file = self.case_dir + self.config
+
+ self.mano = dict(
+ get_config("mano", config_file),
+ details={}
+ )
+ self.logger.debug("Orchestrator configuration %s", self.mano)
+
+ self.details['orchestrator'] = dict(
+ name=self.mano['name'],
+ version=self.mano['version'],
+ status='ERROR',
+ result=''
+ )
+
+ self.vnf = dict(
+ get_config(self.case_name, config_file),
+ )
+ self.logger.debug("VNF configuration: %s", self.vnf)
+
+ self.details['vnf'] = dict(
+ name=self.vnf['name'],
+ )
+
+ self.details['test_vnf'] = dict(
+ name=self.case_name,
+ )
+
+ # Orchestra base Data directory creation
+ if not os.path.exists(self.data_dir):
+ os.makedirs(self.data_dir)
+
+ self.images = get_config("tenant_images.orchestrator", config_file)
+ self.images.update(
+ get_config(
+ "tenant_images.%s" %
+ self.case_name,
+ config_file))
+ self.snaps_creds = None
+
+ def prepare(self):
+ """Prepare testscase (Additional pre-configuration steps)."""
+ super(ClearwaterImsVnf, self).prepare()
+
+ self.logger.info("Additional pre-configuration steps")
+ self.logger.info("creds %s", (self.creds))
+
+ self.snaps_creds = OSCreds(
+ username=self.creds['username'],
+ password=self.creds['password'],
+ auth_url=self.creds['auth_url'],
+ project_name=self.creds['tenant'],
+ identity_api_version=int(os_utils.get_keystone_client_version()))
+
+ self.prepare_images()
+ self.prepare_flavor()
+ self.prepare_security_groups()
+ self.prepare_network()
+ self.prepare_floating_ip()
+
+ def prepare_images(self):
+ """Upload images if they doen't exist yet"""
+ self.logger.info("Upload images if they doen't exist yet")
+ for image_name, image_file in self.images.iteritems():
+ self.logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
+ image = OpenStackImage(
+ self.snaps_creds,
+ ImageSettings(name=image_name,
+ image_user='cloud',
+ img_format='qcow2',
+ image_file=image_file))
+ image.create()
+ # self.created_resources.append(image);
+
+ def prepare_security_groups(self):
+ """Create Open Baton security group if it doesn't exist yet"""
+ self.logger.info(
+ "Creating security group for Open Baton if not yet existing...")
+ sg_rules = list()
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.icmp))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.icmp))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.ingress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.egress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+
+ security_group = OpenStackSecurityGroup(
+ self.snaps_creds,
+ SecurityGroupSettings(
+ name="orchestra-sec-group-allowall",
+ rule_settings=sg_rules))
+
+ security_group_info = security_group.create()
+ self.created_resources.append(security_group)
+ self.mano['details']['sec_group'] = security_group_info.name
+ self.logger.info(
+ "Security group orchestra-sec-group-allowall prepared")
+
+ def prepare_flavor(self):
+ """Create Open Baton flavor if it doesn't exist yet"""
+ self.logger.info(
+ "Create Flavor for Open Baton NFVO if not yet existing")
+
+ flavor_settings = FlavorSettings(
+ name=self.mano['requirements']['flavor']['name'],
+ ram=self.mano['requirements']['flavor']['ram_min'],
+ disk=self.mano['requirements']['flavor']['disk'],
+ vcpus=self.mano['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.created_resources.append(flavor)
+ self.mano['details']['flavor'] = {}
+ self.mano['details']['flavor']['name'] = flavor_settings.name
+ self.mano['details']['flavor']['id'] = flavor_info.id
+
+ def prepare_network(self):
+ """Create network/subnet/router if they doen't exist yet"""
+ self.logger.info(
+ "Creating network/subnet/router if they doen't exist yet...")
+ subnet_settings = SubnetSettings(
+ name='%s_subnet' %
+ self.case_name,
+ cidr="192.168.100.0/24")
+ network_settings = NetworkSettings(
+ name='%s_net' %
+ self.case_name,
+ subnet_settings=[subnet_settings])
+ orchestra_network = OpenStackNetwork(
+ self.snaps_creds, network_settings)
+ orchestra_network_info = orchestra_network.create()
+ self.mano['details']['network'] = {}
+ self.mano['details']['network']['id'] = orchestra_network_info.id
+ self.mano['details']['network']['name'] = orchestra_network_info.name
+ self.mano['details']['external_net_name'] = snaps_utils.\
+ get_ext_net_name(self.snaps_creds)
+ self.created_resources.append(orchestra_network)
+ orchestra_router = OpenStackRouter(
+ self.snaps_creds,
+ RouterSettings(
+ name='%s_router' %
+ self.case_name,
+ external_gateway=self.mano['details']['external_net_name'],
+ internal_subnets=[
+ subnet_settings.name]))
+ orchestra_router.create()
+ self.created_resources.append(orchestra_router)
+ self.logger.info("Created network and router for Open Baton NFVO...")
+
+ def prepare_floating_ip(self):
+ """Select/Create Floating IP if it doesn't exist yet"""
+ self.logger.info("Retrieving floating IP for Open Baton NFVO")
+ neutron_client = snaps_utils.neutron_utils.neutron_client(
+ self.snaps_creds)
+ # Finding Tenant ID to check to which tenant the Floating IP belongs
+ tenant_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(self.creds),
+ self.tenant_name)
+ # Use os_utils to retrieve complete information of Floating IPs
+ floating_ips = os_utils.get_floating_ips(neutron_client)
+ my_floating_ips = []
+ # Filter Floating IPs with tenant id
+ for floating_ip in floating_ips:
+ # self.logger.info("Floating IP: %s", floating_ip)
+ if floating_ip.get('tenant_id') == tenant_id:
+ my_floating_ips.append(floating_ip.get('floating_ip_address'))
+ # Select an existing floating IP if available, else create a new one
+ if len(my_floating_ips) >= 1:
+ # Get Floating IP object from snaps for clean up
+ snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
+ neutron_client)
+ for my_floating_ip in my_floating_ips:
+ for snaps_floating_ip in snaps_floating_ips:
+ if snaps_floating_ip.ip == my_floating_ip:
+ self.mano['details']['fip'] = snaps_floating_ip
+ self.logger.info(
+ "Selected floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+ break
+ if self.mano['details']['fip'] is not None:
+ break
+ else:
+ self.logger.info("Creating floating IP for Open Baton NFVO")
+ self.mano['details']['fip'] = snaps_utils.neutron_utils.\
+ create_floating_ip(
+ neutron_client,
+ self.mano['details']['external_net_name'])
+ self.logger.info(
+ "Created floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+
+ def get_vim_descriptor(self):
+ """"Create VIM descriptor to be used for onboarding"""
+ self.logger.info(
+ "Building VIM descriptor with PoP creds: %s",
+ self.creds)
+ # Depending on API version either tenant ID or project name must be
+ # used
+ if os_utils.is_keystone_v3():
+ self.logger.info(
+ "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
+ project_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(),
+ self.creds.get("project_name"))
+ else:
+ self.logger.info(
+ "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
+ project_id = self.creds.get("tenant_name")
+ self.logger.debug("VIM project/tenant id: %s", project_id)
+ vim_json = {
+ "name": "vim-instance",
+ "authUrl": self.creds.get("auth_url"),
+ "tenant": project_id,
+ "username": self.creds.get("username"),
+ "password": self.creds.get("password"),
+ "securityGroups": [
+ self.mano['details']['sec_group']
+ ],
+ "type": "openstack",
+ "location": {
+ "name": "opnfv",
+ "latitude": "52.525876",
+ "longitude": "13.314400"
+ }
+ }
+ self.logger.info("Built VIM descriptor: %s", vim_json)
+ return vim_json
+
+ def deploy_orchestrator(self):
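+ """Deploy Open Baton NFVO on a dedicated VM and wait until it is reachable."""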
+ self.logger.info("Deploying Open Baton...")
+ self.logger.info("Details: %s", self.mano['details'])
+ start_time = time.time()
+
+ self.logger.info("Creating orchestra instance...")
+ userdata = get_userdata(self.mano)
+ self.logger.info("flavor: %s\n"
+ "image: %s\n"
+ "network_id: %s\n",
+ self.mano['details']['flavor']['name'],
+ self.mano['requirements']['image'],
+ self.mano['details']['network']['id'])
+ self.logger.debug("userdata: %s\n", userdata)
+ # setting up image
+ image_settings = ImageSettings(
+ name=self.mano['requirements']['image'],
+ image_user='ubuntu',
+ exists=True)
+ # setting up port
+ port_settings = PortSettings(
+ name='%s_port' % self.case_name,
+ network_name=self.mano['details']['network']['name'])
+ # build configuration of vm
+ orchestra_settings = VmInstanceSettings(
+ name=self.case_name,
+ flavor=self.mano['details']['flavor']['name'],
+ port_settings=[port_settings],
+ security_group_names=[self.mano['details']['sec_group']],
+ userdata=userdata)
+ orchestra_vm = OpenStackVmInstance(self.snaps_creds,
+ orchestra_settings,
+ image_settings)
+
+ orchestra_vm.create()
+ self.created_resources.append(orchestra_vm)
+ self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
+ self.logger.info(
+ "Created orchestra instance: %s",
+ self.mano['details']['id'])
+
+ self.logger.info("Associating floating ip: '%s' to VM '%s' ",
+ self.mano['details']['fip'].ip,
+ self.case_name)
+ nova_client = os_utils.get_nova_client()
+ if not os_utils.add_floating_ip(
+ nova_client,
+ self.mano['details']['id'],
+ self.mano['details']['fip'].ip):
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Cannot associate floating IP to VM.")
+ return False
+
+ self.logger.info("Waiting for Open Baton NFVO to be up and running...")
+ timeout = 0
+ while timeout < 200:
+ if servertest(
+ self.mano['details']['fip'].ip,
+ "8080"):
+ break
+ else:
+ self.logger.info(
+ "Open Baton NFVO is not started yet (%ss)",
+ (timeout * 5))
+ time.sleep(5)
+ timeout += 1
+
+ if timeout >= 200:
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Open Baton is not started correctly")
+ return False
+
+ self.logger.info("Waiting for all components to be up and running...")
+ time.sleep(60)
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(status='PASS', duration=duration)
+ self.logger.info("Deploy Open Baton NFVO: OK")
+ return True
+
+ def deploy_vnf(self):
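+ """Deploy the Clearwater IMS VNF through the Open Baton NFVO."""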
+ start_time = time.time()
+ self.logger.info("Deploying %s...", self.vnf['name'])
+
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+
+ self.logger.info(
+ "Create %s Flavor if not existing", self.vnf['name'])
+ flavor_settings = FlavorSettings(
+ name=self.vnf['requirements']['flavor']['name'],
+ ram=self.vnf['requirements']['flavor']['ram_min'],
+ disk=self.vnf['requirements']['flavor']['disk'],
+ vcpus=self.vnf['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.logger.debug("Flavor id: %s", flavor_info.id)
+
+ self.logger.info("Getting project 'default'...")
+ project_agent = main_agent.get_agent("project", "")
+ for project in json.loads(project_agent.find()):
+ if project.get("name") == "default":
+ self.mano['details']['project_id'] = project.get("id")
+ self.logger.info("Found project 'default': %s", project)
+ break
+
+ vim_json = self.get_vim_descriptor()
+ self.logger.info("Registering VIM: %s", vim_json)
+
+ main_agent.get_agent(
+ "vim", project_id=self.mano['details']['project_id']).create(
+ entity=json.dumps(vim_json))
+
+ market_agent = main_agent.get_agent(
+ "market", project_id=self.mano['details']['project_id'])
+
+ try:
+ self.logger.info("sending: %s", self.vnf['descriptor']['url'])
+ nsd = market_agent.create(entity=self.vnf['descriptor']['url'])
+ if nsd.get('id') is None:
+ self.logger.error("NSD not onboarded correctly")
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ self.mano['details']['nsd_id'] = nsd.get('id')
+ self.logger.info("Onboarded NSD: " + nsd.get("name"))
+
+ nsr_agent = main_agent.get_agent(
+ "nsr", project_id=self.mano['details']['project_id'])
+
+ self.mano['details']['nsr'] = nsr_agent.create(
+ self.mano['details']['nsd_id'])
+ except NfvoException as exc:
+ self.logger.error(exc.message)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ if self.mano['details']['nsr'].get('code') is not None:
+ self.logger.error(
+ "%s cannot be deployed: %s -> %s",
+ self.vnf['name'],
+ self.mano['details']['nsr'].get('code'),
+ self.mano['details']['nsr'].get('message'))
+ self.logger.error("%s cannot be deployed", self.vnf['name'])
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ timeout = 0
+ self.logger.info("Waiting for NSR to go to ACTIVE...")
+ while self.mano['details']['nsr'].get("status") != 'ACTIVE' \
+ and self.mano['details']['nsr'].get("status") != 'ERROR':
+ timeout += 1
+ self.logger.info("NSR is not yet ACTIVE... (%ss)", 5 * timeout)
+ if timeout == 300:
+ self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ time.sleep(5)
+ self.mano['details']['nsr'] = json.loads(
+ nsr_agent.find(self.mano['details']['nsr'].get('id')))
+
+ duration = time.time() - start_time
+ if self.mano['details']['nsr'].get("status") == 'ACTIVE':
+ self.details["vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Sleep for 60s to ensure that all "
+ "services are up and running...")
+ time.sleep(60)
+ result = True
+ else:
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ self.logger.error("NSR: %s", self.mano['details'].get('nsr'))
+ result = False
+ return result
+
+ def test_vnf(self):
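+ """Run tests against the deployed Clearwater IMS VNF (not yet implemented)."""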
+ self.logger.info(
+ "Testing VNF Clearwater IMS is not yet implemented...")
+ start_time = time.time()
+
+ duration = time.time() - start_time
+ self.details["test_vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Test VNF: OK")
+ return True
+
+ def clean(self):
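+ """Clean up the NSR and the OpenStack resources created by the test."""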
+ self.logger.info("Cleaning %s...", self.case_name)
+ try:
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+ self.logger.info("Terminating %s...", self.vnf['name'])
+ if (self.mano['details'].get('nsr')):
+ main_agent.get_agent(
+ "nsr",
+ project_id=self.mano['details']['project_id']).delete(
+ self.mano['details']['nsr'].get('id'))
+ self.logger.info("Sleeping 60 seconds...")
+ time.sleep(60)
+ else:
+ self.logger.info("No need to terminate the VNF...")
+ # os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+ # instance_id=self.mano_instance_id)
+ except (NfvoException, KeyError) as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ try:
+ neutron_client = os_utils.get_neutron_client(self.creds)
+ self.logger.info("Deleting Open Baton Port...")
+ port = snaps_utils.neutron_utils.get_port_by_name(
+ neutron_client, '%s_port' % self.case_name)
+ snaps_utils.neutron_utils.delete_port(neutron_client, port)
+ time.sleep(10)
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ try:
+ self.logger.info("Deleting Open Baton Floating IP...")
+ snaps_utils.neutron_utils.delete_floating_ip(
+ neutron_client, self.mano['details']['fip'])
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ for resource in reversed(self.created_resources):
+ try:
+ self.logger.info("Cleaning %s", str(resource))
+ resource.clean()
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ super(ClearwaterImsVnf, self).clean()
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
deleted file mode 100644
index 7b1ea9ad8..000000000
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py
+++ /dev/null
@@ -1,487 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import json
-import logging
-import os
-import pkg_resources
-import socket
-import sys
-import time
-import yaml
-
-import functest.core.vnf as vnf
-import functest.utils.openstack_utils as os_utils
-from functest.utils.constants import CONST
-
-from org.openbaton.cli.agents.agents import MainAgent
-from org.openbaton.cli.errors.errors import NfvoException
-
-
-__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>"
-# ----------------------------------------------------------
-#
-# UTILS
-#
-# -----------------------------------------------------------
-
-
-def get_config(parameter, my_file):
- """
- Returns the value of a given parameter in file.yaml
- parameter must be given in string format with dots
- Example: general.openstack.image_name
- """
- with open(file) as f:
- file_yaml = yaml.safe_load(f)
- f.close()
- value = file_yaml
- for element in parameter.split("."):
- value = value.get(element)
- if value is None:
- raise ValueError("The parameter %s is not defined in"
- " %s" % (parameter, my_file))
- return value
-
-
-def servertest(host, port):
- args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
- for family, socktype, proto, canonname, sockaddr in args:
- s = socket.socket(family, socktype, proto)
- try:
- s.connect(sockaddr)
- except socket.error:
- return False
- else:
- s.close()
- return True
-
-
-class ImsVnf(vnf.VnfOnBoarding):
- """OpenIMS VNF deployed with openBaton orchestrator"""
-
- def __init__(self, project='functest', case_name='orchestra_ims',
- repo='', cmd=''):
- super(ImsVnf, self).__init__(project, case_name, repo, cmd)
- self.logger = logging.getLogger(__name__)
- self.logger.info("Orchestra IMS VNF onboarding test starting")
- self.ob_password = "openbaton"
- self.ob_username = "admin"
- self.ob_https = False
- self.ob_port = "8080"
- self.ob_ip = "localhost"
- self.ob_instance_id = ""
- self.case_dir = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/vnf/ims/')
- self.data_dir = CONST.__getattribute__('dir_ims_data')
- self.test_dir = CONST.__getattribute__('dir_repo_vims_test')
- self.ob_projectid = ""
- self.keystone_client = os_utils.get_keystone_client()
- self.ob_nsr_id = ""
- self.nsr = None
- self.main_agent = None
- # vIMS Data directory creation
- if not os.path.exists(self.data_dir):
- os.makedirs(self.data_dir)
- # Retrieve the configuration
- try:
- self.config = CONST.__getattribute__(
- 'vnf_{}_config'.format(self.case_name))
- except BaseException:
- raise Exception("Orchestra VNF config file not found")
- config_file = self.case_dir + self.config
- self.imagename = get_config("openbaton.imagename", config_file)
- self.bootstrap_link = get_config("openbaton.bootstrap_link",
- config_file)
- self.bootstrap_config_link = get_config(
- "openbaton.bootstrap_config_link", config_file)
- self.market_link = get_config("openbaton.marketplace_link",
- config_file)
- self.images = get_config("tenant_images", config_file)
- self.ims_conf = get_config("vIMS", config_file)
- self.userdata_file = get_config("openbaton.userdata.file",
- config_file)
-
- def deploy_orchestrator(self):
- self.logger.info("Additional pre-configuration steps")
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
-
- # Import images if needed
- # needs some images
- self.logger.info("Upload some OS images if it doesn't exist")
- temp_dir = os.path.join(self.data_dir, "tmp/")
- for image_name, image_url in self.images.iteritems():
- self.logger.info("image: %s, url: %s", image_name, image_url)
- try:
- image_id = os_utils.get_image_id(glance_client,
- image_name)
- self.logger.info("image_id: %s", image_id)
- except BaseException:
- self.logger.error("Unexpected error: %s", sys.exc_info()[0])
-
- if image_id == '':
- self.logger.info("""%s image doesn't exist on glance
- repository. Try downloading this image
- and upload on glance !""" % image_name)
- image_id = os_utils.download_and_add_image_on_glance(
- glance_client,
- image_name,
- image_url,
- temp_dir)
- if image_id == '':
- self.logger.error("Failed to find or upload required OS "
- "image for this deployment")
- return False
-
- network_dic = os_utils.create_network_full(neutron_client,
- "openbaton_mgmt",
- "openbaton_mgmt_subnet",
- "openbaton_router",
- "192.168.100.0/24")
-
- # orchestrator VM flavor
- self.logger.info(
- "Check if orchestra Flavor is available, if not create one")
- flavor_exist, flavor_id = os_utils.get_or_create_flavor(
- "orchestra",
- "4096",
- '20',
- '2',
- public=True)
- self.logger.debug("Flavor id: %s" % flavor_id)
-
- if not network_dic:
- self.logger.error("There has been a problem when creating the "
- "neutron network")
-
- network_id = network_dic["net_id"]
-
- self.logger.info("Creating floating IP for VM in advance...")
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
-
- if floatip is None:
- self.logger.error("Cannot create floating IP.")
- return False
-
- userdata = "#!/bin/bash\n"
- userdata += "echo \"Executing userdata...\"\n"
- userdata += "set -x\n"
- userdata += "set -e\n"
- userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
- userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
- userdata += "echo \"Install curl...\"\n"
- userdata += "apt-get install curl\n"
- userdata += "echo \"Inject public key...\"\n"
- userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
- "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
- "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
- "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
- "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
- "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
- "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
- "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
- "horized_keys\n")
- userdata += "echo \"Download bootstrap...\"\n"
- userdata += ("curl -s %s "
- "> ./bootstrap\n" % self.bootstrap_link)
- userdata += ("curl -s %s"
- "> ./config_file\n" % self.bootstrap_config_link)
- userdata += ("echo \"Disable usage of mysql...\"\n")
- userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
- userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
- % floatip)
- userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
- "=%s/g /config_file\n" % floatip)
- userdata += "echo \"Set autostart of components to 'false'\"\n"
- userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
- userdata += "echo \"Execute bootstrap...\"\n"
- bootstrap = "sh ./bootstrap release -configFile=./config_file"
- userdata += bootstrap + "\n"
- userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
- userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
- "/etc/openbaton/openbaton-nfvo.properties\n")
- userdata += (
- "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
- self.userdata_file)
- userdata += "sed -i '113i\ \ \ \ sleep 60' " \
- "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
- userdata += "echo \"Starting NFVO\"\n"
- userdata += "service openbaton-nfvo restart\n"
- userdata += "echo \"Starting Generic VNFM\"\n"
- userdata += "service openbaton-vnfm-generic restart\n"
- userdata += "echo \"...end of userdata...\"\n"
-
- sg_id = os_utils.create_security_group_full(neutron_client,
- "orchestra-sec-group",
- "allowall")
-
- os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
- "icmp", 0, 255)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
- "icmp", 0, 255)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
- "tcp", 1, 65535)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
- "udp", 1, 65535)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
- "tcp", 1, 65535)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
- "udp", 1, 65535)
-
- self.logger.info("Security group set")
-
- self.logger.info("Create instance....")
- self.logger.info("flavor: m1.medium\n"
- "image: %s\n"
- "network_id: %s\n"
- "userdata: %s\n",
- self.imagename,
- network_id,
- userdata)
-
- instance = os_utils.create_instance_and_wait_for_active(
- "orchestra",
- os_utils.get_image_id(glance_client, self.imagename),
- network_id,
- "orchestra-openbaton",
- config_drive=False,
- userdata=userdata)
-
- self.ob_instance_id = instance.id
-
- self.logger.info("Adding sec group to orchestra instance")
- os_utils.add_secgroup_to_instance(nova_client,
- self.ob_instance_id, sg_id)
-
- self.logger.info("Associating floating ip: '%s' to VM '%s' ",
- floatip,
- "orchestra-openbaton")
- if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
- self.logger.error("Cannot associate floating IP to VM.")
- return False
-
- self.logger.info("Waiting for Open Baton NFVO to be up and running...")
- x = 0
- while x < 200:
- if servertest(floatip, "8080"):
- break
- else:
- self.logger.debug(
- "Open Baton NFVO is not started yet (%ss)" %
- (x * 5))
- time.sleep(5)
- x += 1
-
- if x == 200:
- self.logger.error("Open Baton is not started correctly")
-
- self.ob_ip = floatip
- self.ob_password = "openbaton"
- self.ob_username = "admin"
- self.ob_https = False
- self.ob_port = "8080"
- self.logger.info("Waiting for all components up and running...")
- time.sleep(60)
- self.details["orchestrator"] = {
- 'status': "PASS", 'result': "Deploy Open Baton NFVO: OK"}
- self.logger.info("Deploy Open Baton NFVO: OK")
- return True
-
- def deploy_vnf(self):
- self.logger.info("Starting vIMS Deployment...")
-
- self.main_agent = MainAgent(nfvo_ip=self.ob_ip,
- nfvo_port=self.ob_port,
- https=self.ob_https,
- version=1,
- username=self.ob_username,
- password=self.ob_password)
-
- self.logger.info(
- "Check if openims Flavor is available, if not create one")
- flavor_exist, flavor_id = os_utils.get_or_create_flavor(
- "m1.small",
- "2048",
- '20',
- '1',
- public=True)
- self.logger.debug("Flavor id: %s", flavor_id)
-
- self.logger.info("Getting project 'default'...")
- project_agent = self.main_agent.get_agent("project", self.ob_projectid)
- for p in json.loads(project_agent.find()):
- if p.get("name") == "default":
- self.ob_projectid = p.get("id")
- self.logger.info("Found project 'default': %s", p)
- break
-
- self.logger.debug("project id: %s", self.ob_projectid)
- if self.ob_projectid == "":
- self.logger.error("Default project id was not found!")
-
- creds = os_utils.get_credentials()
- self.logger.info("PoP creds: %s", creds)
-
- if os_utils.is_keystone_v3():
- self.logger.info(
- "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
- project_id = os_utils.get_tenant_id(
- os_utils.get_keystone_client(),
- creds.get("project_name"))
- else:
- self.logger.info(
- "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
- project_id = creds.get("tenant_name")
-
- self.logger.debug("project id: %s", project_id)
-
- vim_json = {
- "name": "vim-instance",
- "authUrl": creds.get("auth_url"),
- "tenant": project_id,
- "username": creds.get("username"),
- "password": creds.get("password"),
- "securityGroups": [
- "default",
- "orchestra-sec-group"
- ],
- "type": "openstack",
- "location": {
- "name": "opnfv",
- "latitude": "52.525876",
- "longitude": "13.314400"
- }
- }
-
- self.logger.debug("Registering VIM: %s", vim_json)
-
- self.main_agent.get_agent(
- "vim",
- project_id=self.ob_projectid).create(entity=json.dumps(vim_json))
-
- market_agent = self.main_agent.get_agent("market",
- project_id=self.ob_projectid)
-
- nsd = {}
- try:
- self.logger.info("sending: %s", self.market_link)
- nsd = market_agent.create(entity=self.market_link)
- self.logger.info("Onboarded NSD: " + nsd.get("name"))
- except NfvoException as e:
- self.logger.error(e.message)
-
- nsr_agent = self.main_agent.get_agent("nsr",
- project_id=self.ob_projectid)
- nsd_id = nsd.get('id')
- if nsd_id is None:
- self.logger.error("NSD not onboarded correctly")
-
- try:
- self.nsr = nsr_agent.create(nsd_id)
- except NfvoException as e:
- self.logger.error(e.message)
-
- if self.nsr.get('code') is not None:
- self.logger.error(
- "vIMS cannot be deployed: %s -> %s",
- self.nsr.get('code'),
- self.nsr.get('message'))
- self.logger.error("vIMS cannot be deployed")
-
- i = 0
- self.logger.info("Waiting for NSR to go to ACTIVE...")
- while self.nsr.get("status") != 'ACTIVE' and self.nsr.get(
- "status") != 'ERROR':
- i += 1
- if i == 150:
- self.logger.error("INACTIVE NSR after %s sec..", 5 * i)
-
- time.sleep(5)
- self.nsr = json.loads(nsr_agent.find(self.nsr.get('id')))
-
- if self.nsr.get("status") == 'ACTIVE':
- self.details["vnf"] = {'status': "PASS", 'result': self.nsr}
- self.logger.info("Deploy VNF: OK")
- else:
- self.details["vnf"] = {'status': "FAIL", 'result': self.nsr}
- self.logger.error(self.nsr)
- self.logger.error("Deploy VNF: ERROR")
- return False
-
- self.ob_nsr_id = self.nsr.get("id")
- self.logger.info(
- "Sleep for 60s to ensure that all services are up and running...")
- time.sleep(60)
- return True
-
- def test_vnf(self):
- # Adaptations probably needed
- # code used for cloudify_ims
- # ruby client on jumphost calling the vIMS on the SUT
- self.logger.info(
- "Testing if %s works properly...", self.nsr.get('name'))
- for vnfr in self.nsr.get('vnfr'):
- self.logger.info(
- "Checking ports %s of VNF %s",
- self.ims_conf.get(vnfr.get('name')).get('ports'),
- vnfr.get('name'))
- for vdu in vnfr.get('vdu'):
- for vnfci in vdu.get('vnfc_instance'):
- self.logger.debug(
- "Checking ports of VNFC instance %s",
- vnfci.get('hostname'))
- for floatingIp in vnfci.get('floatingIps'):
- self.logger.debug(
- "Testing %s:%s",
- vnfci.get('hostname'),
- floatingIp.get('ip'))
- for port in self.ims_conf.get(
- vnfr.get('name')).get('ports'):
- if servertest(floatingIp.get('ip'), port):
- self.logger.info(
- "VNFC instance %s is reachable at %s:%s",
- vnfci.get('hostname'),
- floatingIp.get('ip'),
- port)
- else:
- self.logger.error(
- "VNFC instance %s is not reachable "
- "at %s:%s",
- vnfci.get('hostname'),
- floatingIp.get('ip'),
- port)
- self.details["test_vnf"] = {
- 'status': "FAIL", 'result': (
- "Port %s of server %s -> %s is "
- "not reachable",
- port,
- vnfci.get('hostname'),
- floatingIp.get('ip'))}
- self.logger.error("Test VNF: ERROR")
- return False
-
- self.details["test_vnf"] = {
- 'status': "PASS",
- 'result': "All tests have been executed successfully"}
- self.logger.info("Test VNF: OK")
- return True
-
- def clean(self):
- self.main_agent.get_agent(
- "nsr",
- project_id=self.ob_projectid).delete(self.ob_nsr_id)
- time.sleep(5)
- os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
- instance_id=self.ob_instance_id)
- # question is the clean removing also the VM?
- # I think so since is goinf to remove the tenant...
- super(ImsVnf, self).clean()
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml b/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml
deleted file mode 100644
index 5b25d3c96..000000000
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-tenant_images:
- ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- openims: http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
-openbaton:
- bootstrap_link: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap
- bootstrap_config_link: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file
- userdata:
- file: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh
- marketplace_link: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json
- imagename: ubuntu_14.04
-vIMS:
- scscf:
- ports: [3870, 6060]
- pcscf:
- ports: [4060]
- icscf:
- ports: [3869, 5060]
- fhoss:
- ports: [3868]
- bind9:
- ports: [] \ No newline at end of file
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_openims.py b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
new file mode 100644
index 000000000..f8acada44
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
@@ -0,0 +1,718 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Orchestra OpenIMS testcase implementation."""
+
+import json
+import logging
+import os
+import socket
+import time
+import pkg_resources
+import yaml
+
+
+from snaps.openstack.create_image import OpenStackImage, ImageSettings
+from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings
+from snaps.openstack.create_security_group import (
+ OpenStackSecurityGroup,
+ SecurityGroupSettings,
+ SecurityGroupRuleSettings,
+ Direction,
+ Protocol)
+from snaps.openstack.create_network import (
+ OpenStackNetwork,
+ NetworkSettings,
+ SubnetSettings,
+ PortSettings)
+from snaps.openstack.create_router import OpenStackRouter, RouterSettings
+from snaps.openstack.os_credentials import OSCreds
+from snaps.openstack.create_instance import (
+ VmInstanceSettings, OpenStackVmInstance)
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+
+import functest.core.vnf as vnf
+import functest.utils.openstack_utils as os_utils
+from functest.utils.constants import CONST
+
+from org.openbaton.cli.errors.errors import NfvoException
+from org.openbaton.cli.agents.agents import MainAgent
+
+
+__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>"
+# ----------------------------------------------------------
+#
+# UTILS
+#
+# -----------------------------------------------------------
+
+
+def get_config(parameter, file_path):
+ """
+ Get config parameter.
+
+ Returns the value of a given parameter in file.yaml
+ parameter must be given in string format with dots
+ Example: general.openstack.image_name
+ """
+ with open(file_path) as config_file:
+ file_yaml = yaml.safe_load(config_file)
+ config_file.close()
+ value = file_yaml
+ for element in parameter.split("."):
+ value = value.get(element)
+ if value is None:
+ raise ValueError("The parameter %s is not defined in"
+ " reporting.yaml", parameter)
+ return value
+
+
+def servertest(host, port):
+ """Method to test that a server is reachable at IP:port"""
+ args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in args:
+ sock = socket.socket(family, socktype, proto)
+ try:
+ sock.connect(sockaddr)
+ except socket.error:
+ return False
+ else:
+ sock.close()
+ return True
+
+
+def get_userdata(orchestrator):
+ """Build userdata for Open Baton machine"""
+ userdata = "#!/bin/bash\n"
+ userdata += "echo \"Executing userdata...\"\n"
+ userdata += "set -x\n"
+ userdata += "set -e\n"
+ userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
+ userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
+ userdata += "echo \"Install curl...\"\n"
+ userdata += "apt-get install curl\n"
+ userdata += "echo \"Inject public key...\"\n"
+ userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
+ "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
+ "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
+ "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
+ "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
+ "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
+ "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
+ "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
+ "horized_keys\n")
+ userdata += "echo \"Download bootstrap...\"\n"
+ userdata += ("curl -s %s "
+ "> ./bootstrap\n" % orchestrator['bootstrap']['url'])
+ userdata += ("curl -s %s" "> ./config_file\n" %
+ orchestrator['bootstrap']['config']['url'])
+ userdata += ("echo \"Disable usage of mysql...\"\n")
+ userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
+ userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
+ % orchestrator['details']['fip'].ip)
+ userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
+ "=%s/g /config_file\n" % orchestrator['details']['fip'].ip)
+ userdata += "echo \"Set autostart of components to 'false'\"\n"
+ userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
+ userdata += "echo \"Execute bootstrap...\"\n"
+ bootstrap = "sh ./bootstrap release -configFile=./config_file"
+ userdata += bootstrap + "\n"
+ userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
+ userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
+ "/etc/openbaton/openbaton-nfvo.properties\n")
+ userdata += (
+ "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
+ orchestrator['gvnfm']['userdata']['url'])
+ userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \
+ "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
+ userdata += "echo \"Starting NFVO\"\n"
+ userdata += "service openbaton-nfvo restart\n"
+ userdata += "echo \"Starting Generic VNFM\"\n"
+ userdata += "service openbaton-vnfm-generic restart\n"
+ userdata += "echo \"...end of userdata...\"\n"
+ return userdata
+
+
+class OpenImsVnf(vnf.VnfOnBoarding):
+ """OpenIMS VNF deployed with openBaton orchestrator"""
+
+ logger = logging.getLogger(__name__)
+
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "orchestra_openims"
+ super(OpenImsVnf, self).__init__(**kwargs)
+ # self.logger = logging.getLogger("functest.ci.run_tests.orchestra")
+ self.logger.info("kwargs %s", (kwargs))
+
+ self.case_dir = pkg_resources.resource_filename(
+ 'functest', 'opnfv_tests/vnf/ims/')
+ self.data_dir = CONST.__getattribute__('dir_ims_data')
+ self.test_dir = CONST.__getattribute__('dir_repo_vims_test')
+ self.created_resources = []
+ self.logger.info("%s VNF onboarding test starting", self.case_name)
+
+ try:
+ self.config = CONST.__getattribute__(
+ 'vnf_{}_config'.format(self.case_name))
+ except BaseException:
+ raise Exception("Orchestra VNF config file not found")
+ config_file = self.case_dir + self.config
+
+ self.mano = dict(
+ get_config("mano", config_file),
+ details={}
+ )
+ self.logger.debug("Orchestrator configuration %s", self.mano)
+
+ self.details['orchestrator'] = dict(
+ name=self.mano['name'],
+ version=self.mano['version'],
+ status='ERROR',
+ result=''
+ )
+
+ self.vnf = dict(
+ get_config(self.case_name, config_file),
+ )
+ self.logger.debug("VNF configuration: %s", self.vnf)
+
+ self.details['vnf'] = dict(
+ name=self.vnf['name'],
+ )
+
+ self.details['test_vnf'] = dict(
+ name=self.case_name,
+ )
+
+ # Orchestra base Data directory creation
+ if not os.path.exists(self.data_dir):
+ os.makedirs(self.data_dir)
+
+ self.images = get_config("tenant_images.orchestrator", config_file)
+ self.images.update(get_config("tenant_images.%s" %
+ self.case_name, config_file))
+ self.snaps_creds = None
+
+ def prepare(self):
+ """Prepare testscase (Additional pre-configuration steps)."""
+ super(OpenImsVnf, self).prepare()
+
+ self.logger.info("Additional pre-configuration steps")
+ self.logger.info("creds %s", (self.creds))
+
+ self.snaps_creds = OSCreds(
+ username=self.creds['username'],
+ password=self.creds['password'],
+ auth_url=self.creds['auth_url'],
+ project_name=self.creds['tenant'],
+ identity_api_version=int(os_utils.get_keystone_client_version()))
+
+ self.prepare_images()
+ self.prepare_flavor()
+ self.prepare_security_groups()
+ self.prepare_network()
+ self.prepare_floating_ip()
+
+ def prepare_images(self):
+ """Upload images if they doen't exist yet"""
+ self.logger.info("Upload images if they doen't exist yet")
+ for image_name, image_file in self.images.iteritems():
+ self.logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
+ image = OpenStackImage(
+ self.snaps_creds,
+ ImageSettings(name=image_name,
+ image_user='cloud',
+ img_format='qcow2',
+ image_file=image_file))
+ image.create()
+ # self.created_resources.append(image);
+
+ def prepare_security_groups(self):
+ """Create Open Baton security group if it doesn't exist yet"""
+ self.logger.info(
+ "Creating security group for Open Baton if not yet existing...")
+ sg_rules = list()
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.icmp))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.icmp))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.ingress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.egress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+
+ security_group = OpenStackSecurityGroup(
+ self.snaps_creds,
+ SecurityGroupSettings(
+ name="orchestra-sec-group-allowall",
+ rule_settings=sg_rules))
+
+ security_group_info = security_group.create()
+ self.created_resources.append(security_group)
+ self.mano['details']['sec_group'] = security_group_info.name
+ self.logger.info(
+ "Security group orchestra-sec-group-allowall prepared")
+
+ def prepare_flavor(self):
+ """Create Open Baton flavor if it doesn't exist yet"""
+ self.logger.info(
+ "Create Flavor for Open Baton NFVO if not yet existing")
+
+ flavor_settings = FlavorSettings(
+ name=self.mano['requirements']['flavor']['name'],
+ ram=self.mano['requirements']['flavor']['ram_min'],
+ disk=self.mano['requirements']['flavor']['disk'],
+ vcpus=self.mano['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.created_resources.append(flavor)
+ self.mano['details']['flavor'] = {}
+ self.mano['details']['flavor']['name'] = flavor_settings.name
+ self.mano['details']['flavor']['id'] = flavor_info.id
+
+ def prepare_network(self):
+ """Create network/subnet/router if they doen't exist yet"""
+ self.logger.info(
+ "Creating network/subnet/router if they doen't exist yet...")
+ subnet_settings = SubnetSettings(
+ name='%s_subnet' %
+ self.case_name,
+ cidr="192.168.100.0/24")
+ network_settings = NetworkSettings(
+ name='%s_net' %
+ self.case_name,
+ subnet_settings=[subnet_settings])
+ orchestra_network = OpenStackNetwork(
+ self.snaps_creds, network_settings)
+ orchestra_network_info = orchestra_network.create()
+ self.mano['details']['network'] = {}
+ self.mano['details']['network']['id'] = orchestra_network_info.id
+ self.mano['details']['network']['name'] = orchestra_network_info.name
+ self.mano['details']['external_net_name'] = \
+ snaps_utils.get_ext_net_name(self.snaps_creds)
+ self.created_resources.append(orchestra_network)
+ orchestra_router = OpenStackRouter(
+ self.snaps_creds,
+ RouterSettings(
+ name='%s_router' %
+ self.case_name,
+ external_gateway=self.mano['details']['external_net_name'],
+ internal_subnets=[
+ subnet_settings.name]))
+ orchestra_router.create()
+ self.created_resources.append(orchestra_router)
+ self.logger.info("Created network and router for Open Baton NFVO...")
+
+ def prepare_floating_ip(self):
+ """Select/Create Floating IP if it doesn't exist yet"""
+ self.logger.info("Retrieving floating IP for Open Baton NFVO")
+ neutron_client = snaps_utils.neutron_utils.neutron_client(
+ self.snaps_creds)
+ # Finding Tenant ID to check to which tenant the Floating IP belongs
+ tenant_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(self.creds),
+ self.tenant_name)
+ # Use os_utils to retrieve complete information of Floating IPs
+ floating_ips = os_utils.get_floating_ips(neutron_client)
+ my_floating_ips = []
+ # Filter Floating IPs with tenant id
+ for floating_ip in floating_ips:
+ # self.logger.info("Floating IP: %s", floating_ip)
+ if floating_ip.get('tenant_id') == tenant_id:
+ my_floating_ips.append(floating_ip.get('floating_ip_address'))
+ # Select an existing floating IP if available, else create a new one
+ if len(my_floating_ips) >= 1:
+ # Get Floating IP object from snaps for clean up
+ snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
+ neutron_client)
+ for my_floating_ip in my_floating_ips:
+ for snaps_floating_ip in snaps_floating_ips:
+ if snaps_floating_ip.ip == my_floating_ip:
+ self.mano['details']['fip'] = snaps_floating_ip
+ self.logger.info(
+ "Selected floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+ break
+ if self.mano['details']['fip'] is not None:
+ break
+ else:
+ self.logger.info("Creating floating IP for Open Baton NFVO")
+ self.mano['details']['fip'] = (
+ snaps_utils.neutron_utils.create_floating_ip(
+ neutron_client, self.mano['details']['external_net_name']))
+ self.logger.info(
+ "Created floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+
+ def get_vim_descriptor(self):
+ """"Create VIM descriptor to be used for onboarding"""
+ self.logger.info(
+ "Building VIM descriptor with PoP creds: %s",
+ self.creds)
+ # Depending on API version either tenant ID or project name must be
+ # used
+ if os_utils.is_keystone_v3():
+ self.logger.info(
+ "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
+ project_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(),
+ self.creds.get("project_name"))
+ else:
+ self.logger.info(
+ "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
+ project_id = self.creds.get("tenant_name")
+ self.logger.debug("VIM project/tenant id: %s", project_id)
+ vim_json = {
+ "name": "vim-instance",
+ "authUrl": self.creds.get("auth_url"),
+ "tenant": project_id,
+ "username": self.creds.get("username"),
+ "password": self.creds.get("password"),
+ "securityGroups": [
+ self.mano['details']['sec_group']
+ ],
+ "type": "openstack",
+ "location": {
+ "name": "opnfv",
+ "latitude": "52.525876",
+ "longitude": "13.314400"
+ }
+ }
+ self.logger.info("Built VIM descriptor: %s", vim_json)
+ return vim_json
+
+ def deploy_orchestrator(self):
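+ """Deploy Open Baton NFVO on a dedicated VM and wait until it is reachable."""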
+ self.logger.info("Deploying Open Baton...")
+ self.logger.info("Details: %s", self.mano['details'])
+ start_time = time.time()
+
+ self.logger.info("Creating orchestra instance...")
+ userdata = get_userdata(self.mano)
+ self.logger.info("flavor: %s\n"
+ "image: %s\n"
+ "network_id: %s\n",
+ self.mano['details']['flavor']['name'],
+ self.mano['requirements']['image'],
+ self.mano['details']['network']['id'])
+ self.logger.debug("userdata: %s\n", userdata)
+ # setting up image
+ image_settings = ImageSettings(
+ name=self.mano['requirements']['image'],
+ image_user='ubuntu',
+ exists=True)
+ # setting up port
+ port_settings = PortSettings(
+ name='%s_port' % self.case_name,
+ network_name=self.mano['details']['network']['name'])
+ # build configuration of vm
+ orchestra_settings = VmInstanceSettings(
+ name=self.case_name,
+ flavor=self.mano['details']['flavor']['name'],
+ port_settings=[port_settings],
+ security_group_names=[self.mano['details']['sec_group']],
+ userdata=userdata)
+ orchestra_vm = OpenStackVmInstance(self.snaps_creds,
+ orchestra_settings,
+ image_settings)
+
+ orchestra_vm.create()
+ self.created_resources.append(orchestra_vm)
+ self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
+ self.logger.info(
+ "Created orchestra instance: %s",
+ self.mano['details']['id'])
+
+ self.logger.info("Associating floating ip: '%s' to VM '%s' ",
+ self.mano['details']['fip'].ip,
+ self.case_name)
+ nova_client = os_utils.get_nova_client()
+ if not os_utils.add_floating_ip(
+ nova_client,
+ self.mano['details']['id'],
+ self.mano['details']['fip'].ip):
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Cannot associate floating IP to VM.")
+ return False
+
+ self.logger.info("Waiting for Open Baton NFVO to be up and running...")
+ timeout = 0
+ while timeout < 200:
+ if servertest(
+ self.mano['details']['fip'].ip,
+ "8080"):
+ break
+ else:
+ self.logger.info("Open Baton NFVO is not started yet (%ss)",
+ (timeout * 5))
+ time.sleep(5)
+ timeout += 1
+
+ if timeout >= 200:
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Open Baton is not started correctly")
+ return False
+
+ self.logger.info("Waiting for all components to be up and running...")
+ time.sleep(60)
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(status='PASS', duration=duration)
+ self.logger.info("Deploy Open Baton NFVO: OK")
+ return True
+
+ def deploy_vnf(self):
+ start_time = time.time()
+ self.logger.info("Deploying %s...", self.vnf['name'])
+
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+
+ self.logger.info(
+ "Create %s Flavor if not existing", self.vnf['name'])
+ flavor_settings = FlavorSettings(
+ name=self.vnf['requirements']['flavor']['name'],
+ ram=self.vnf['requirements']['flavor']['ram_min'],
+ disk=self.vnf['requirements']['flavor']['disk'],
+ vcpus=self.vnf['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.logger.debug("Flavor id: %s", flavor_info.id)
+
+ self.logger.info("Getting project 'default'...")
+ project_agent = main_agent.get_agent("project", "")
+ for project in json.loads(project_agent.find()):
+ if project.get("name") == "default":
+ self.mano['details']['project_id'] = project.get("id")
+ self.logger.info("Found project 'default': %s", project)
+ break
+
+ vim_json = self.get_vim_descriptor()
+ self.logger.info("Registering VIM: %s", vim_json)
+
+ main_agent.get_agent(
+ "vim", project_id=self.mano['details']['project_id']).create(
+ entity=json.dumps(vim_json))
+
+ market_agent = main_agent.get_agent(
+ "market", project_id=self.mano['details']['project_id'])
+
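+        # Onboard the NSD from its marketplace URL, then instantiate it
+        # as an NSR in the 'default' project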
+ try:
+ self.logger.info("sending: %s", self.vnf['descriptor']['url'])
+ nsd = market_agent.create(entity=self.vnf['descriptor']['url'])
+ if nsd.get('id') is None:
+ self.logger.error("NSD not onboarded correctly")
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ self.mano['details']['nsd_id'] = nsd.get('id')
+            self.logger.info("Onboarded NSD: %s", nsd.get("name"))
+
+ nsr_agent = main_agent.get_agent(
+ "nsr", project_id=self.mano['details']['project_id'])
+
+ self.mano['details']['nsr'] = nsr_agent.create(
+ self.mano['details']['nsd_id'])
+ except NfvoException as exc:
+ self.logger.error(exc.message)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ if self.mano['details']['nsr'].get('code') is not None:
+ self.logger.error(
+ "%s cannot be deployed: %s -> %s",
+ self.vnf['name'],
+ self.mano['details']['nsr'].get('code'),
+ self.mano['details']['nsr'].get('message'))
+ self.logger.error("%s cannot be deployed", self.vnf['name'])
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ timeout = 0
+ self.logger.info("Waiting for NSR to go to ACTIVE...")
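+        # Re-read the NSR every 5s until it is ACTIVE or ERROR, giving up
+        # after 300 attempts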
+ while self.mano['details']['nsr'].get("status") != 'ACTIVE' \
+ and self.mano['details']['nsr'].get("status") != 'ERROR':
+ timeout += 1
+ self.logger.info("NSR is not yet ACTIVE... (%ss)", 5 * timeout)
+ if timeout == 300:
+ self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ time.sleep(5)
+ self.mano['details']['nsr'] = json.loads(
+ nsr_agent.find(self.mano['details']['nsr'].get('id')))
+
+ duration = time.time() - start_time
+ if self.mano['details']['nsr'].get("status") == 'ACTIVE':
+ self.details["vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Sleep for 60s to ensure that all "
+ "services are up and running...")
+ time.sleep(60)
+ result = True
+ else:
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ self.logger.error("NSR: %s", self.mano['details'].get('nsr'))
+ result = False
+ return result
+
+ def test_vnf(self):
+ self.logger.info("Testing VNF OpenIMS...")
+ start_time = time.time()
+ self.logger.info(
+ "Testing if %s works properly...",
+ self.mano['details']['nsr'].get('name'))
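+        # Walk the NSR hierarchy (VNFR -> VDU -> VNFC instance) and probe
+        # each configured port on every floating IP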
+ for vnfr in self.mano['details']['nsr'].get('vnfr'):
+ self.logger.info(
+ "Checking ports %s of VNF %s",
+ self.vnf['test'][vnfr.get('name')]['ports'],
+ vnfr.get('name'))
+ for vdu in vnfr.get('vdu'):
+ for vnfci in vdu.get('vnfc_instance'):
+ self.logger.debug(
+ "Checking ports of VNFC instance %s",
+ vnfci.get('hostname'))
+ for floating_ip in vnfci.get('floatingIps'):
+ self.logger.debug(
+ "Testing %s:%s",
+ vnfci.get('hostname'),
+ floating_ip.get('ip'))
+ for port in self.vnf['test'][vnfr.get(
+ 'name')]['ports']:
+ if servertest(floating_ip.get('ip'), port):
+ self.logger.info(
+ "VNFC instance %s is reachable at %s:%s",
+ vnfci.get('hostname'),
+ floating_ip.get('ip'),
+ port)
+ else:
+ self.logger.error(
+ "VNFC instance %s is not reachable "
+ "at %s:%s",
+ vnfci.get('hostname'),
+ floating_ip.get('ip'),
+ port)
+ duration = time.time() - start_time
+                                self.details["test_vnf"].update(
+                                    status='FAIL', duration=duration,
+                                    result="Port %s of server %s -> %s is "
+                                           "not reachable" % (
+                                               port,
+                                               vnfci.get('hostname'),
+                                               floating_ip.get('ip')))
+ self.logger.error("Test VNF: ERROR")
+ return False
+ duration = time.time() - start_time
+ self.details["test_vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Test VNF: OK")
+ return True
+
+ def clean(self):
+ self.logger.info("Cleaning %s...", self.case_name)
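+        # Terminate the NSR via the NFVO first, then remove the OpenStack
+        # port, floating IP and any resources tracked in created_resources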
+ try:
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+ self.logger.info("Terminating %s...", self.vnf['name'])
+            if self.mano['details'].get('nsr'):
+ main_agent.get_agent(
+ "nsr",
+ project_id=self.mano['details']['project_id']).\
+ delete(self.mano['details']['nsr'].get('id'))
+ self.logger.info("Sleeping 60 seconds...")
+ time.sleep(60)
+ else:
+ self.logger.info("No need to terminate the VNF...")
+ # os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+ # instance_id=self.mano_instance_id)
+ except (NfvoException, KeyError) as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ try:
+ neutron_client = os_utils.get_neutron_client(self.creds)
+ self.logger.info("Deleting Open Baton Port...")
+ port = snaps_utils.neutron_utils.get_port_by_name(
+ neutron_client, '%s_port' % self.case_name)
+ snaps_utils.neutron_utils.delete_port(neutron_client, port)
+ time.sleep(10)
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ try:
+ self.logger.info("Deleting Open Baton Floating IP...")
+ snaps_utils.neutron_utils.delete_floating_ip(
+ neutron_client, self.mano['details']['fip'])
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ for resource in reversed(self.created_resources):
+ try:
+ self.logger.info("Cleaning %s", str(resource))
+ resource.clean()
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ super(OpenImsVnf, self).clean()
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index fb8cb3915..7495c40e4 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -54,11 +54,6 @@ class RunTestsTesting(unittest.TestCase):
self.run_tests_parser = run_tests.RunTestsParser()
- @mock.patch('functest.ci.run_tests.logger.info')
- def test_print_separator(self, mock_logger_info):
- self.runner.print_separator(self.sep)
- mock_logger_info.assert_called_once_with(self.sep * 44)
-
@mock.patch('functest.ci.run_tests.logger.error')
def test_source_rc_file_missing_file(self, mock_logger_error):
with mock.patch('functest.ci.run_tests.os.path.isfile',
@@ -120,8 +115,7 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_name.return_value': 'test_name',
'needs_clean.return_value': False}
mock_test.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.Runner.print_separator'),\
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ with mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=None), \
self.assertRaises(Exception) as context:
@@ -129,7 +123,6 @@ class RunTestsTesting(unittest.TestCase):
msg = "Cannot import the class for the test case."
self.assertTrue(msg in context)
- @mock.patch('functest.ci.run_tests.Runner.print_separator')
@mock.patch('functest.ci.run_tests.Runner.source_rc_file')
@mock.patch('importlib.import_module', name="module",
return_value=mock.Mock(test_class=mock.Mock(
@@ -145,123 +138,107 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=test_run_dict):
self.runner.clean_flag = True
- self.runner.run_test(mock_test, 'tier_name')
+ self.runner.run_test(mock_test)
self.assertEqual(self.runner.overall_result,
run_tests.Result.EX_OK)
- @mock.patch('functest.ci.run_tests.logger.info')
- def test_run_tier_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.Runner.print_separator'), \
- mock.patch(
- 'functest.ci.run_tests.Runner.run_test',
- return_value=TestCase.EX_OK) as mock_method:
- self.runner.run_tier(self.tier)
- mock_method.assert_any_call(mock.ANY, 'test_tier')
- self.assertTrue(mock_logger_info.called)
+ @mock.patch('functest.ci.run_tests.Runner.run_test',
+ return_value=TestCase.EX_OK)
+ def test_run_tier_default(self, *mock_methods):
+ self.assertEqual(self.runner.run_tier(self.tier),
+ run_tests.Result.EX_OK)
+ mock_methods[0].assert_called_with(mock.ANY)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_missing_test(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.Runner.print_separator'):
- self.tier.get_tests.return_value = None
- self.assertEqual(self.runner.run_tier(self.tier), 0)
- self.assertTrue(mock_logger_info.called)
+ self.tier.get_tests.return_value = None
+ self.assertEqual(self.runner.run_tier(self.tier),
+ run_tests.Result.EX_ERROR)
+ self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
- def test_run_all_default(self, mock_logger_info):
- with mock.patch(
- 'functest.ci.run_tests.Runner.run_tier') as mock_method:
- CONST.__setattr__('CI_LOOP', 'test_ci_loop')
- self.runner.run_all(self.tiers)
- mock_method.assert_any_call(self.tier)
- self.assertTrue(mock_logger_info.called)
+ @mock.patch('functest.ci.run_tests.Runner.run_tier')
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_run_all_default(self, *mock_methods):
+ CONST.__setattr__('CI_LOOP', 'test_ci_loop')
+ self.runner.run_all()
+ mock_methods[1].assert_not_called()
+ self.assertTrue(mock_methods[2].called)
@mock.patch('functest.ci.run_tests.logger.info')
- def test_run_all_missing_tier(self, mock_logger_info):
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_run_all_missing_tier(self, *mock_methods):
CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
- self.runner.run_all(self.tiers)
- self.assertTrue(mock_logger_info.called)
+ self.runner.run_all()
+ self.assertTrue(mock_methods[1].called)
- def test_main_failed(self):
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file',
+ side_effect=Exception)
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_failed(self, *mock_methods):
kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': False,
'get_test.return_value': False}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder'), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file',
- side_effect=Exception):
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_ERROR)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file',
- side_effect=Exception):
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_ERROR)
-
- def test_main_tier(self, *args):
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_ERROR)
+ mock_methods[1].assert_called_once_with()
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.run_test',
+ return_value=TestCase.EX_OK)
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_tier(self, *mock_methods):
mock_tier = mock.Mock()
- args = {'get_name.return_value': 'tier_name'}
+ args = {'get_name.return_value': 'tier_name',
+ 'get_tests.return_value': ['test_name']}
mock_tier.configure_mock(**args)
kwargs = {'test': 'tier_name', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': mock_tier,
'get_test.return_value': None}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.Runner.run_tier') as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_OK)
- self.assertTrue(m.called)
-
- def test_main_test(self, *args):
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_OK)
+ mock_methods[1].assert_called_once_with('test_name')
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.run_test',
+ return_value=TestCase.EX_OK)
+ def test_main_test(self, *mock_methods):
kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
- mock_test = mock.Mock()
- args = {'get_name.return_value': 'test_name',
- 'needs_clean.return_value': True}
- mock_test.configure_mock(**args)
- mock_obj = mock.Mock()
args = {'get_tier.return_value': None,
- 'get_test.return_value': mock_test}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.Runner.run_test',
- return_value=TestCase.EX_OK) as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_OK)
- self.assertTrue(m.called)
-
- def test_main_all_tier(self, *args):
+ 'get_test.return_value': 'test_name'}
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_OK)
+ mock_methods[0].assert_called_once_with('test_name')
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.run_all')
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_all_tier(self, *mock_methods):
kwargs = {'test': 'all', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': None,
'get_test.return_value': None}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.Runner.run_all') as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_OK)
- self.assertTrue(m.called)
-
- def test_main_any_tier_test_ko(self, *args):
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_OK)
+ mock_methods[1].assert_called_once_with()
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_any_tier_test_ko(self, *mock_methods):
kwargs = {'test': 'any', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': None,
'get_test.return_value': None}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.logger.debug') as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_ERROR)
- self.assertTrue(m.called)
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_ERROR)
if __name__ == "__main__":
diff --git a/functest/tests/unit/ci/test_tier_builder.py b/functest/tests/unit/ci/test_tier_builder.py
index ab75e15b9..700c6e917 100644
--- a/functest/tests/unit/ci/test_tier_builder.py
+++ b/functest/tests/unit/ci/test_tier_builder.py
@@ -24,7 +24,8 @@ class TierBuilderTesting(unittest.TestCase):
'case_name': 'test_name',
'criteria': 'test_criteria',
'blocking': 'test_blocking',
- 'description': 'test_desc'}
+ 'description': 'test_desc',
+ 'project_name': 'project_name'}
self.dic_tier = {'name': 'test_tier',
'order': 'test_order',
diff --git a/functest/tests/unit/core/test_feature.py b/functest/tests/unit/core/test_feature.py
index 988981eff..553a5dfa4 100644
--- a/functest/tests/unit/core/test_feature.py
+++ b/functest/tests/unit/core/test_feature.py
@@ -22,8 +22,8 @@ class FeatureTestingBase(unittest.TestCase):
_case_name = "foo"
_project_name = "bar"
- _repo = "dir_repo_copper"
- _cmd = "cd /home/opnfv/repos/foo/tests && bash run.sh && cd -"
+ _repo = "dir_repo_bar"
+ _cmd = "cd /home/opnfv/repos/bar/tests && bash run.sh && cd -"
_output_file = '/home/opnfv/functest/results/foo.log'
feature = None
diff --git a/functest/tests/unit/energy/test_functest_energy.py b/functest/tests/unit/energy/test_functest_energy.py
index f8bb13c99..a576e2c3f 100644
--- a/functest/tests/unit/energy/test_functest_energy.py
+++ b/functest/tests/unit/energy/test_functest_energy.py
@@ -35,6 +35,15 @@ class MockHttpResponse(object): # pylint: disable=too-few-public-methods
self.status_code = status_code
+API_OK = MockHttpResponse(
+ '{"status": "OK"}',
+ 200
+)
+API_KO = MockHttpResponse(
+ '{"message": "API-KO"}',
+ 500
+)
+
RECORDER_OK = MockHttpResponse(
'{"environment": "UNIT_TEST",'
' "step": "string",'
@@ -81,7 +90,7 @@ class EnergyRecorderTest(unittest.TestCase):
@mock.patch('functest.energy.energy.requests.post',
return_value=RECORDER_OK)
- def test_start(self, post_mock=None):
+ def test_start(self, post_mock=None, get_mock=None):
"""EnergyRecorder.start method (regular case)."""
self.test_load_config()
self.assertTrue(EnergyRecorder.start(self.case_name))
@@ -89,7 +98,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -102,7 +112,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -115,7 +126,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -128,7 +140,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"] + "/step",
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -141,7 +154,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"] + "/step",
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -154,7 +168,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"] + "/step",
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.delete',
@@ -166,7 +181,8 @@ class EnergyRecorderTest(unittest.TestCase):
delete_mock.assert_called_once_with(
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.delete',
@@ -178,7 +194,8 @@ class EnergyRecorderTest(unittest.TestCase):
delete_mock.assert_called_once_with(
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.delete',
@@ -190,7 +207,8 @@ class EnergyRecorderTest(unittest.TestCase):
delete_mock.assert_called_once_with(
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@energy.enable_recording
@@ -206,13 +224,7 @@ class EnergyRecorderTest(unittest.TestCase):
@mock.patch("functest.energy.energy.EnergyRecorder.get_current_scenario",
return_value=None)
@mock.patch("functest.energy.energy.EnergyRecorder")
- @mock.patch("functest.utils.functest_utils.get_pod_name",
- return_value="MOCK_POD")
- @mock.patch("functest.utils.functest_utils.get_functest_config",
- side_effect=config_loader_mock)
def test_decorators(self,
- loader_mock=None,
- pod_mock=None,
recorder_mock=None,
cur_scenario_mock=None):
"""Test energy module decorators."""
@@ -264,10 +276,14 @@ class EnergyRecorderTest(unittest.TestCase):
side_effect=config_loader_mock)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
- def test_load_config(self, loader_mock=None, pod_mock=None):
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_OK)
+ def test_load_config(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
"""Test load config."""
EnergyRecorder.energy_recorder_api = None
EnergyRecorder.load_config()
+
self.assertEquals(
EnergyRecorder.energy_recorder_api["auth"],
("user", "password")
@@ -281,7 +297,10 @@ class EnergyRecorderTest(unittest.TestCase):
side_effect=config_loader_mock_no_creds)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
- def test_load_config_no_creds(self, loader_mock=None, pod_mock=None):
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_OK)
+ def test_load_config_no_creds(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
"""Test load config without creds."""
EnergyRecorder.energy_recorder_api = None
EnergyRecorder.load_config()
@@ -295,7 +314,10 @@ class EnergyRecorderTest(unittest.TestCase):
return_value=None)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
- def test_load_config_ex(self, loader_mock=None, pod_mock=None):
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_OK)
+ def test_load_config_ex(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
"""Test load config with exception."""
with self.assertRaises(AssertionError):
EnergyRecorder.energy_recorder_api = None
@@ -303,6 +325,20 @@ class EnergyRecorderTest(unittest.TestCase):
self.assertEquals(EnergyRecorder.energy_recorder_api, None)
@mock.patch("functest.utils.functest_utils.get_functest_config",
+ side_effect=config_loader_mock)
+ @mock.patch("functest.utils.functest_utils.get_pod_name",
+ return_value="MOCK_POD")
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_KO)
+ def test_load_config_api_ko(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
+ """Test load config with API unavailable."""
+ EnergyRecorder.energy_recorder_api = None
+ EnergyRecorder.load_config()
+ self.assertEquals(EnergyRecorder.energy_recorder_api["available"],
+ False)
+
+ @mock.patch("functest.utils.functest_utils.get_functest_config",
return_value=None)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index 8c8a6cec3..8aeea41de 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -511,7 +511,7 @@ class ODLRunTesting(ODLTesting):
def test_compass(self):
os.environ["INSTALLER_TYPE"] = "compass"
self._test_run(testcase.TestCase.EX_OK,
- odlip=self._neutron_ip, odlwebport='8181')
+ odlip=self._neutron_ip, odlrestconfport='8080')
def test_daisy_no_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 32cc15134..05311c3ff 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -18,75 +18,73 @@ from functest.utils.constants import CONST
class OSRallyTesting(unittest.TestCase):
-
- def setUp(self):
- self.nova_client = mock.Mock()
- self.neutron_client = mock.Mock()
- self.cinder_client = mock.Mock()
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_nova_client',
- return_value=self.nova_client), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_neutron_client',
- return_value=self.neutron_client), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_cinder_client',
- return_value=self.cinder_client):
- self.rally_base = rally.RallyBase()
- self.rally_base.network_dict['net_id'] = 'test_net_id'
- self.polling_iter = 2
-
- def test_build_task_args_missing_floating_network(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_nova_client', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_neutron_client', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_cinder_client', return_value=mock.Mock())
+ def setUp(self, mock_func1, mock_func2, mock_func3):
+ self.rally_base = rally.RallyBase()
+ self.rally_base.network_dict['net_id'] = 'test_net_id'
+ self.polling_iter = 2
+ mock_func1.assert_called()
+ mock_func2.assert_called()
+ mock_func3.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_external_net', return_value=None)
+ def test_build_task_args_missing_floating_network(self, mock_func):
CONST.__setattr__('OS_AUTH_URL', None)
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_external_net',
- return_value=None):
- task_args = self.rally_base._build_task_args('test_file_name')
- self.assertEqual(task_args['floating_network'], '')
+ task_args = self.rally_base._build_task_args('test_file_name')
+ self.assertEqual(task_args['floating_network'], '')
+ mock_func.assert_called()
- def test_build_task_args_missing_net_id(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_external_net', return_value='test_floating_network')
+ def test_build_task_args_missing_net_id(self, mock_func):
CONST.__setattr__('OS_AUTH_URL', None)
self.rally_base.network_dict['net_id'] = ''
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_external_net',
- return_value='test_floating_network'):
- task_args = self.rally_base._build_task_args('test_file_name')
- self.assertEqual(task_args['netid'], '')
+ task_args = self.rally_base._build_task_args('test_file_name')
+ self.assertEqual(task_args['netid'], '')
+ mock_func.assert_called()
- def check_scenario_file(self, value):
+ @staticmethod
+ def check_scenario_file(value):
yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
if yaml_file in value:
return False
return True
- def test_prepare_test_list_missing_scenario_file(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- side_effect=self.check_scenario_file), \
- self.assertRaises(Exception):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists')
+ def test_prepare_test_list_missing_scenario_file(self, mock_func):
+ mock_func.side_effect = self.check_scenario_file
+ with self.assertRaises(Exception):
self.rally_base._prepare_test_list('test_file_name')
+ mock_func.assert_called()
- def check_temp_dir(self, value):
+ @staticmethod
+ def check_temp_dir(value):
yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
if yaml_file in value:
return True
return False
- def test_prepare_test_list_missing_temp_dir(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- side_effect=self.check_temp_dir), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.makedirs') as mock_os_makedir, \
- mock.patch.object(self.rally_base, 'apply_blacklist',
- return_value=mock.Mock()) as mock_method:
- yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
- ret_val = os.path.join(self.rally_base.TEMP_DIR, yaml_file)
- self.assertEqual(self.rally_base.
- _prepare_test_list('test_file_name'),
- ret_val)
- self.assertTrue(mock_method.called)
- self.assertTrue(mock_os_makedir.called)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.makedirs')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'apply_blacklist', return_value=mock.Mock())
+ def test_prepare_test_list_missing_temp_dir(
+ self, mock_method, mock_os_makedirs, mock_path_exists):
+ mock_path_exists.side_effect = self.check_temp_dir
+
+ yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
+ ret_val = os.path.join(self.rally_base.TEMP_DIR, yaml_file)
+ self.assertEqual(self.rally_base._prepare_test_list('test_file_name'),
+ ret_val)
+ mock_path_exists.assert_called()
+ mock_method.assert_called()
+ mock_os_makedirs.assert_called()
def test_get_task_id_default(self):
cmd_raw = 'Task 1: started'
@@ -125,170 +123,163 @@ class OSRallyTesting(unittest.TestCase):
self.assertEqual(self.rally_base.get_cmd_output(proc),
'lineline')
- def test_excl_scenario_default(self):
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.yaml.safe_load',
+ return_value={'scenario': [
+ {'scenarios': ['test_scenario'],
+ 'installers': ['test_installer'],
+ 'tests': ['test']},
+ {'scenarios': ['other_scenario'],
+ 'installers': ['test_installer'],
+ 'tests': ['other_test']}]})
+ def test_excl_scenario_default(self, mock_func):
CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
- dic = {'scenario': [{'scenarios': ['test_scenario'],
- 'installers': ['test_installer'],
- 'tests': ['test']},
- {'scenarios': ['other_scenario'],
- 'installers': ['test_installer'],
- 'tests': ['other_test']}]}
- with mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'yaml.safe_load',
- return_value=dic):
- self.assertEqual(self.rally_base.excl_scenario(),
- ['test'])
-
- def test_excl_scenario_regex(self):
+ self.assertEqual(self.rally_base.excl_scenario(), ['test'])
+ mock_func.assert_called()
+
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.yaml.safe_load',
+ return_value={'scenario': [
+ {'scenarios': ['^os-[^-]+-featT-modeT$'],
+ 'installers': ['test_installer'],
+ 'tests': ['test1']},
+ {'scenarios': ['^os-ctrlT-[^-]+-modeT$'],
+ 'installers': ['test_installer'],
+ 'tests': ['test2']},
+ {'scenarios': ['^os-ctrlT-featT-[^-]+$'],
+ 'installers': ['test_installer'],
+ 'tests': ['test3']},
+ {'scenarios': ['^os-'],
+ 'installers': ['test_installer'],
+ 'tests': ['test4']},
+ {'scenarios': ['other_scenario'],
+ 'installers': ['test_installer'],
+ 'tests': ['test0a']},
+ {'scenarios': [''], # empty scenario
+ 'installers': ['test_installer'],
+ 'tests': ['test0b']}]})
+ def test_excl_scenario_regex(self, mock_func):
CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
CONST.__setattr__('DEPLOY_SCENARIO', 'os-ctrlT-featT-modeT')
- dic = {'scenario': [{'scenarios': ['^os-[^-]+-featT-modeT$'],
- 'installers': ['test_installer'],
- 'tests': ['test1']},
- {'scenarios': ['^os-ctrlT-[^-]+-modeT$'],
- 'installers': ['test_installer'],
- 'tests': ['test2']},
- {'scenarios': ['^os-ctrlT-featT-[^-]+$'],
- 'installers': ['test_installer'],
- 'tests': ['test3']},
- {'scenarios': ['^os-'],
- 'installers': ['test_installer'],
- 'tests': ['test4']},
- {'scenarios': ['other_scenario'],
- 'installers': ['test_installer'],
- 'tests': ['test0a']},
- {'scenarios': [''], # empty scenario
- 'installers': ['test_installer'],
- 'tests': ['test0b']}]}
- with mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'yaml.safe_load',
- return_value=dic):
- self.assertEqual(self.rally_base.excl_scenario(),
- ['test1', 'test2', 'test3', 'test4'])
-
- def test_excl_scenario_exception(self):
- with mock.patch('__builtin__.open', side_effect=Exception):
- self.assertEqual(self.rally_base.excl_scenario(),
- [])
-
- def test_excl_func_default(self):
+ self.assertEqual(self.rally_base.excl_scenario(),
+ ['test1', 'test2', 'test3', 'test4'])
+ mock_func.assert_called()
+
+ @mock.patch('__builtin__.open', side_effect=Exception)
+ def test_excl_scenario_exception(self, mock_open):
+ self.assertEqual(self.rally_base.excl_scenario(), [])
+ mock_open.assert_called()
+
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.yaml.safe_load',
+ return_value={'functionality': [
+ {'functions': ['no_live_migration'], 'tests': ['test']}]})
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'live_migration_supported', return_value=False)
+ def test_excl_func_default(self, mock_func, mock_yaml_load):
CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
- dic = {'functionality': [{'functions': ['no_live_migration'],
- 'tests': ['test']}]}
- with mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'yaml.safe_load',
- return_value=dic), \
- mock.patch.object(self.rally_base, 'live_migration_supported',
- return_value=False):
- self.assertEqual(self.rally_base.excl_func(),
- ['test'])
-
- def test_excl_func_exception(self):
- with mock.patch('__builtin__.open', side_effect=Exception):
- self.assertEqual(self.rally_base.excl_func(),
- [])
-
- def test_file_is_empty_default(self):
- mock_obj = mock.Mock()
+ self.assertEqual(self.rally_base.excl_func(), ['test'])
+ mock_func.assert_called()
+ mock_yaml_load.assert_called()
+
+ @mock.patch('__builtin__.open', side_effect=Exception)
+ def test_excl_func_exception(self, mock_open):
+ self.assertEqual(self.rally_base.excl_func(), [])
+ mock_open.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.stat',
+ return_value=mock.Mock())
+ def test_file_is_empty_default(self, mock_os_stat):
attrs = {'st_size': 10}
- mock_obj.configure_mock(**attrs)
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.stat',
- return_value=mock_obj):
- self.assertEqual(self.rally_base.file_is_empty('test_file_name'),
- False)
-
- def test_file_is_empty_exception(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.stat',
- side_effect=Exception):
- self.assertEqual(self.rally_base.file_is_empty('test_file_name'),
- True)
-
- def test_run_task_missing_task_file(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=False), \
- self.assertRaises(Exception):
- self.rally_base._run_task('test_name')
+ mock_os_stat.return_value.configure_mock(**attrs)
+ self.assertEqual(self.rally_base.file_is_empty('test_file_name'),
+ False)
+ mock_os_stat.assert_called()
- @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
- def test_run_task_no_tests_for_scenario(self, mock_logger_info):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=True), \
- mock.patch.object(self.rally_base, '_prepare_test_list',
- return_value='test_file_name'), \
- mock.patch.object(self.rally_base, 'file_is_empty',
- return_value=True):
- self.rally_base._run_task('test_name')
- mock_logger_info.assert_any_call('No tests for scenario \"%s\"',
- 'test_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.stat',
+ side_effect=Exception)
+ def test_file_is_empty_exception(self, mock_os_stat):
+ self.assertEqual(self.rally_base.file_is_empty('test_file_name'), True)
+ mock_os_stat.assert_called()
- @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
- def test_run_task_taskid_missing(self, mock_logger_error):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=True), \
- mock.patch.object(self.rally_base, '_prepare_test_list',
- return_value='test_file_name'), \
- mock.patch.object(self.rally_base, 'file_is_empty',
- return_value=False), \
- mock.patch.object(self.rally_base, '_build_task_args',
- return_value={}), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'subprocess.Popen'), \
- mock.patch.object(self.rally_base, '_get_output',
- return_value=mock.Mock()), \
- mock.patch.object(self.rally_base, 'get_task_id',
- return_value=None), \
- mock.patch.object(self.rally_base, 'get_cmd_output',
- return_value=''):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=False)
+ def test_run_task_missing_task_file(self, mock_path_exists):
+ with self.assertRaises(Exception):
self.rally_base._run_task('test_name')
- str = 'Failed to retrieve task_id, validating task...'
- mock_logger_error.assert_any_call(str)
-
+ mock_path_exists.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_test_list', return_value='test_file_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'file_is_empty', return_value=True)
@mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
+ def test_run_task_no_tests_for_scenario(self, mock_logger_info,
+ mock_file_empty, mock_prep_list,
+ mock_path_exists):
+ self.rally_base._run_task('test_name')
+ mock_logger_info.assert_any_call('No tests for scenario \"%s\"',
+ 'test_name')
+ mock_file_empty.assert_called()
+ mock_prep_list.assert_called()
+ mock_path_exists.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_test_list', return_value='test_file_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'file_is_empty', return_value=False)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_build_task_args', return_value={})
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_get_output', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_task_id', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_cmd_output', return_value='')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.subprocess.Popen')
@mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
- def test_run_task_default(self, mock_logger_error,
- mock_logger_info):
- popen = mock.Mock()
+ def test_run_task_taskid_missing(self, mock_logger_error, *args):
+ self.rally_base._run_task('test_name')
+ text = 'Failed to retrieve task_id, validating task...'
+ mock_logger_error.assert_any_call(text)
+
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_test_list', return_value='test_file_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'file_is_empty', return_value=False)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_build_task_args', return_value={})
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_get_output', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_task_id', return_value='1')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_cmd_output', return_value='')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'task_succeed', return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.subprocess.Popen')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.makedirs')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.popen',
+ return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
+ def test_run_task_default(self, mock_logger_error, mock_logger_info,
+ mock_popen, *args):
attrs = {'read.return_value': 'json_result'}
- popen.configure_mock(**attrs)
-
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=True), \
- mock.patch.object(self.rally_base, '_prepare_test_list',
- return_value='test_file_name'), \
- mock.patch.object(self.rally_base, 'file_is_empty',
- return_value=False), \
- mock.patch.object(self.rally_base, '_build_task_args',
- return_value={}), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'subprocess.Popen'), \
- mock.patch.object(self.rally_base, '_get_output',
- return_value=mock.Mock()), \
- mock.patch.object(self.rally_base, 'get_task_id',
- return_value='1'), \
- mock.patch.object(self.rally_base, 'get_cmd_output',
- return_value=''), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.makedirs'), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.popen',
- return_value=popen), \
- mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch.object(self.rally_base, 'task_succeed',
- return_value=True):
- self.rally_base._run_task('test_name')
- str = 'Test scenario: "test_name" OK.\n'
- mock_logger_info.assert_any_call(str)
+ mock_popen.return_value.configure_mock(**attrs)
+ self.rally_base._run_task('test_name')
+ text = 'Test scenario: "test_name" OK.\n'
+ mock_logger_info.assert_any_call(text)
+ mock_logger_error.assert_not_called()
def test_prepare_env_testname_invalid(self):
self.rally_base.TESTS = ['test1', 'test2']
@@ -296,103 +287,103 @@ class OSRallyTesting(unittest.TestCase):
with self.assertRaises(Exception):
self.rally_base._prepare_env()
- def test_prepare_env_volume_creation_failed(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'list_volume_types', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_volume_type', return_value=None)
+ def test_prepare_env_volume_creation_failed(self, mock_list, mock_create):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- volume_type = None
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.list_volume_types',
- return_value=None), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_volume_type',
- return_value=volume_type), \
- self.assertRaises(Exception):
+ with self.assertRaises(Exception):
self.rally_base._prepare_env()
-
- def test_prepare_env_image_missing(self):
+ mock_list.assert_called()
+ mock_create.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'list_volume_types', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_volume_type', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_or_create_image', return_value=(True, None))
+ def test_prepare_env_image_missing(self, mock_get_img, mock_create_vt,
+ mock_list_vt):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- volume_type = mock.Mock()
- image_id = None
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.list_volume_types',
- return_value=None), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_volume_type',
- return_value=volume_type), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_or_create_image',
- return_value=(True, image_id)), \
- self.assertRaises(Exception):
+ with self.assertRaises(Exception):
self.rally_base._prepare_env()
-
- def test_prepare_env_image_shared_network_creation_failed(self):
+ mock_get_img.assert_called()
+ mock_create_vt.assert_called()
+ mock_list_vt.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'list_volume_types', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_volume_type', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_or_create_image', return_value=(True, 'image_id'))
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_shared_network_full', return_value=None)
+ def test_prepare_env_image_shared_network_creation_failed(
+ self, mock_create_net, mock_get_img, mock_create_vt, mock_list_vt):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- volume_type = mock.Mock()
- image_id = 'image_id'
- network_dict = None
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.list_volume_types',
- return_value=None), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_volume_type',
- return_value=volume_type), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_or_create_image',
- return_value=(True, image_id)), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_shared_network_full',
- return_value=network_dict), \
- self.assertRaises(Exception):
+ with self.assertRaises(Exception):
self.rally_base._prepare_env()
-
- def test_run_tests_all(self):
+ mock_create_net.assert_called()
+ mock_get_img.assert_called()
+ mock_create_vt.assert_called()
+ mock_list_vt.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_run_task', return_value=mock.Mock())
+ def test_run_tests_all(self, mock_run_task):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'all'
- with mock.patch.object(self.rally_base, '_run_task',
- return_value=mock.Mock()):
- self.rally_base._run_tests()
- self.rally_base._run_task.assert_any_call('test1')
- self.rally_base._run_task.assert_any_call('test2')
+ self.rally_base._run_tests()
+ mock_run_task.assert_any_call('test1')
+ mock_run_task.assert_any_call('test2')
- def test_run_tests_default(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_run_task', return_value=mock.Mock())
+ def test_run_tests_default(self, mock_run_task):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- with mock.patch.object(self.rally_base, '_run_task',
- return_value=mock.Mock()):
- self.rally_base._run_tests()
- self.rally_base._run_task.assert_any_call('test1')
-
- def test_clean_up_default(self):
+ self.rally_base._run_tests()
+ mock_run_task.assert_any_call('test1')
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'delete_volume_type')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'delete_glance_image')
+ def test_clean_up_default(self, mock_glance_method, mock_vol_method):
self.rally_base.volume_type = mock.Mock()
self.rally_base.cinder_client = mock.Mock()
self.rally_base.image_exists = False
self.rally_base.image_id = 1
self.rally_base.nova_client = mock.Mock()
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.delete_volume_type') as mock_vol_method, \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.delete_glance_image') as mock_glance_method:
- self.rally_base._clean_up()
- mock_vol_method.assert_any_call(self.rally_base.cinder_client,
- self.rally_base.volume_type)
- mock_glance_method.assert_any_call(self.rally_base.nova_client,
- 1)
-
- def test_run_default(self):
- with mock.patch.object(self.rally_base, '_prepare_env'), \
- mock.patch.object(self.rally_base, '_run_tests'), \
- mock.patch.object(self.rally_base, '_generate_report'), \
- mock.patch.object(self.rally_base, '_clean_up'):
- self.assertEqual(self.rally_base.run(),
- testcase.TestCase.EX_OK)
-
- def test_run_exception(self):
- with mock.patch.object(self.rally_base, '_prepare_env',
- side_effect=Exception):
- self.assertEqual(self.rally_base.run(),
- testcase.TestCase.EX_RUN_ERROR)
+ self.rally_base._clean_up()
+ mock_vol_method.assert_any_call(self.rally_base.cinder_client,
+ self.rally_base.volume_type)
+ mock_glance_method.assert_any_call(self.rally_base.nova_client,
+ 1)
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_env')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_run_tests')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_generate_report')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_clean_up')
+ def test_run_default(self, *args):
+ self.assertEqual(self.rally_base.run(), testcase.TestCase.EX_OK)
+ map(lambda m: m.assert_called(), args)
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_env', side_effect=Exception)
+ def test_run_exception(self, mock_prep_env):
+ self.assertEqual(self.rally_base.run(), testcase.TestCase.EX_RUN_ERROR)
+ mock_prep_env.assert_called()
if __name__ == "__main__":
diff --git a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
index 51dbb6408..ca0974832 100644
--- a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
+++ b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
@@ -12,9 +12,12 @@ import pkg_resources
import unittest
from functest.core import testcase
-from functest.opnfv_tests.openstack.refstack_client import refstack_client
+from functest.opnfv_tests.openstack.refstack_client.refstack_client import \
+ RefstackClient, RefstackClientParser
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
+
class OSRefstackClientTesting(unittest.TestCase):
@@ -25,36 +28,79 @@ class OSRefstackClientTesting(unittest.TestCase):
'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
def setUp(self):
- self.defaultargs = {'config': self._config,
- 'testlist': self._testlist}
+ self.default_args = {'config': self._config,
+ 'testlist': self._testlist}
CONST.__setattr__('OS_AUTH_URL', 'https://ip:5000/v3')
CONST.__setattr__('OS_INSECURE', 'true')
- self.refstackclient = refstack_client.RefstackClient()
+ self.os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
+ @mock.patch('functest.opnfv_tests.openstack.refstack_client.tempest_conf.'
+ 'TempestConf', return_value=mock.Mock())
+ def _create_client(self, mock_conf):
+ with mock.patch('snaps.openstack.tests.openstack_tests.'
+ 'get_credentials', return_value=self.os_creds):
+ return RefstackClient()
def test_run_defcore_insecure(self):
insecure = '-k'
config = 'tempest.conf'
testlist = 'testlist'
+ client = self._create_client()
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
.format(insecure, config, testlist))
- self.refstackclient.run_defcore(config, testlist)
+ client.run_defcore(config, testlist)
m.assert_any_call(cmd)
def test_run_defcore(self):
CONST.__setattr__('OS_AUTH_URL', 'http://ip:5000/v3')
- refstackclient = refstack_client.RefstackClient()
insecure = ''
config = 'tempest.conf'
testlist = 'testlist'
+ client = self._create_client()
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
.format(insecure, config, testlist))
- refstackclient.run_defcore(config, testlist)
+ client.run_defcore(config, testlist)
m.assert_any_call(cmd)
+ @mock.patch('functest.opnfv_tests.openstack.refstack_client.'
+ 'refstack_client.LOGGER.info')
+ @mock.patch('__builtin__.open', side_effect=Exception)
+ def test_parse_refstack_result_missing_log_file(self, mock_open,
+ mock_logger_info):
+ self.case_name = 'refstack_defcore'
+ self.result = 0
+ self._create_client().parse_refstack_result()
+ mock_logger_info.assert_called_once_with(
+ "Testcase %s success_rate is %s%%",
+ self.case_name, self.result)
+
+ def test_parse_refstack_result_default(self):
+ log_file = ('''
+ {0} tempest.api.compute [18.464988s] ... ok
+ {0} tempest.api.volume [0.230334s] ... FAILED
+ {0} tempest.api.network [1.265828s] ... SKIPPED:
+ Ran: 3 tests in 1259.0000 sec.
+ - Passed: 1
+ - Skipped: 1
+ - Failed: 1
+ ''')
+ self.details = {"tests": 3,
+ "failures": 1,
+ "success": ['tempest.api.compute [18.464988s]'],
+ "errors": ['tempest.api.volume [0.230334s]'],
+ "skipped": ['tempest.api.network [1.265828s]']}
+ client = self._create_client()
+ with mock.patch('__builtin__.open',
+ mock.mock_open(read_data=log_file)):
+ client.parse_refstack_result()
+ self.assertEqual(client.details, self.details)
+
def _get_main_kwargs(self, key=None):
kwargs = {'config': self._config,
'testlist': self._testlist}
@@ -64,16 +110,18 @@ class OSRefstackClientTesting(unittest.TestCase):
def _test_main(self, status, *args):
kwargs = self._get_main_kwargs()
- self.assertEqual(self.refstackclient.main(**kwargs), status)
+ client = self._create_client()
+ self.assertEqual(client.main(**kwargs), status)
if len(args) > 0:
args[0].assert_called_once_with(
- refstack_client.RefstackClient.result_dir)
+ RefstackClient.result_dir)
if len(args) > 1:
args
def _test_main_missing_keyword(self, key):
kwargs = self._get_main_kwargs(key)
- self.assertEqual(self.refstackclient.main(**kwargs),
+ client = self._create_client()
+ self.assertEqual(client.main(**kwargs),
testcase.TestCase.EX_RUN_ERROR)
def test_main_missing_conf(self):
@@ -83,10 +131,10 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_main_missing_keyword('testlist')
def _test_argparser(self, arg, value):
- self.defaultargs[arg] = value
- parser = refstack_client.RefstackClientParser()
+ self.default_args[arg] = value
+ parser = RefstackClientParser()
self.assertEqual(parser.parse_args(["--{}={}".format(arg, value)]),
- self.defaultargs)
+ self.default_args)
def test_argparser_conf(self):
self._test_argparser('config', self._config)
@@ -95,13 +143,13 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_argparser('testlist', self._testlist)
def test_argparser_multiple_args(self):
- self.defaultargs['config'] = self._config
- self.defaultargs['testlist'] = self._testlist
- parser = refstack_client.RefstackClientParser()
+ self.default_args['config'] = self._config
+ self.default_args['testlist'] = self._testlist
+ parser = RefstackClientParser()
self.assertEqual(parser.parse_args(
["--config={}".format(self._config),
"--testlist={}".format(self._testlist)
- ]), self.defaultargs)
+ ]), self.default_args)
if __name__ == "__main__":
diff --git a/functest/tests/unit/openstack/tempest/test_conf_utils.py b/functest/tests/unit/openstack/tempest/test_conf_utils.py
index 379049653..77558086b 100644
--- a/functest/tests/unit/openstack/tempest/test_conf_utils.py
+++ b/functest/tests/unit/openstack/tempest/test_conf_utils.py
@@ -10,89 +10,83 @@ import unittest
import mock
-from functest.opnfv_tests.openstack.tempest import conf_utils
+from functest.opnfv_tests.openstack.tempest import tempest, conf_utils
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
class OSTempestConfUtilsTesting(unittest.TestCase):
- def test_create_tempest_resources_missing_network_dic(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_tenant',
- return_value='test_tenant_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_user',
- return_value='test_user_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=None), \
- self.assertRaises(Exception) as context:
- conf_utils.create_tempest_resources()
- msg = 'Failed to create private network'
- self.assertTrue(msg in context)
-
- def test_create_tempest_resources_missing_image(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_tenant',
- return_value='test_tenant_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_user',
- return_value='test_user_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_image',
- return_value=(mock.Mock(), None)), \
- self.assertRaises(Exception) as context:
-
- CONST.__setattr__('tempest_use_custom_images', True)
- conf_utils.create_tempest_resources()
- msg = 'Failed to create image'
- self.assertTrue(msg in context)
-
- CONST.__setattr__('tempest_use_custom_images', False)
- conf_utils.create_tempest_resources(use_custom_images=True)
- msg = 'Failed to create image'
- self.assertTrue(msg in context)
-
- def test_create_tempest_resources_missing_flavor(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_tenant',
- return_value='test_tenant_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_user',
- return_value='test_user_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_image',
- return_value=(mock.Mock(), 'image_id')), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_flavor',
- return_value=(mock.Mock(), None)), \
- self.assertRaises(Exception) as context:
- CONST.__setattr__('tempest_use_custom_images', True)
- CONST.__setattr__('tempest_use_custom_flavors', True)
- conf_utils.create_tempest_resources()
- msg = 'Failed to create flavor'
- self.assertTrue(msg in context)
-
- CONST.__setattr__('tempest_use_custom_images', True)
- CONST.__setattr__('tempest_use_custom_flavors', False)
- conf_utils.create_tempest_resources(use_custom_flavors=False)
- msg = 'Failed to create flavor'
- self.assertTrue(msg in context)
+ def setUp(self):
+ self.os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=None)
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=mock.Mock())
+ def test_create_tempest_resources_missing_network_dic(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(os_creds={})
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create private network'
+ self.assertTrue(msg in context.exception)
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=None)
+ def test_create_tempest_resources_missing_image(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(os_creds={})
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create image'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ CONST.__setattr__('tempest_use_custom_images', False)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create(use_custom_images=True)
+ msg = 'Failed to create image'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create',
+ return_value=None)
+ def test_create_tempest_resources_missing_flavor(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(
+ os_creds=self.os_creds)
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', True)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create flavor'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', False)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create(use_custom_flavors=True)
+ msg = 'Failed to create flavor'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
def test_get_verifier_id_missing_verifier(self):
CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
@@ -176,51 +170,20 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
def test_backup_tempest_config_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.path.exists',
- return_value=False), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.makedirs') as m1, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile') as m2:
+ 'conf_utils.shutil.copyfile') as m1:
conf_utils.backup_tempest_config('test_conf_file')
self.assertTrue(m1.called)
- self.assertTrue(m2.called)
-
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.path.exists',
- return_value=True), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile') as m2:
- conf_utils.backup_tempest_config('test_conf_file')
- self.assertTrue(m2.called)
def test_configure_tempest_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_verifier',
return_value='test_conf_file'), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_tempest_update_params') as m1, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_tempest_multisite_params') as m2:
- conf_utils.configure_tempest('test_dep_dir',
- MODE='feature_multisite')
- self.assertTrue(m1.called)
- self.assertTrue(m2.called)
-
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_verifier',
- return_value='test_conf_file'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_tempest_update_params') as m1:
conf_utils.configure_tempest('test_dep_dir')
self.assertTrue(m1.called)
- self.assertTrue(m2.called)
def test_configure_tempest_defcore_default(self):
- img_flavor_dict = {'image_id': 'test_image_id',
- 'flavor_id': 'test_flavor_id',
- 'image_id_alt': 'test_image_alt_id',
- 'flavor_id_alt': 'test_flavor_alt_id'}
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_verifier',
return_value='test_conf_file'), \
@@ -237,9 +200,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'write') as mwrite, \
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
+ 'conf_utils.generate_test_accounts_file'), \
+ mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.shutil.copyfile'):
- conf_utils.configure_tempest_defcore('test_dep_dir',
- img_flavor_dict)
+ conf_utils.configure_tempest_defcore(
+ 'test_dep_dir', 'test_image_id', 'test_flavor_id',
+ 'test_image_alt_id', 'test_flavor_alt_id', 'test_tenant_id')
mset.assert_any_call('compute', 'image_ref', 'test_image_id')
mset.assert_any_call('compute', 'image_ref_alt',
'test_image_alt_id')
@@ -249,6 +215,13 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
self.assertTrue(mread.called)
self.assertTrue(mwrite.called)
+ def test_generate_test_accounts_file_default(self):
+ with mock.patch("__builtin__.open", mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
+ 'yaml.dump') as mock_dump:
+ conf_utils.generate_test_accounts_file('test_tenant_id')
+ self.assertTrue(mock_dump.called)
+
def _test_missing_param(self, params, image_id, flavor_id):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.ConfigParser.RawConfigParser.'
@@ -261,12 +234,14 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'write') as mwrite, \
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.backup_tempest_config'):
+ 'conf_utils.backup_tempest_config'), \
+ mock.patch('functest.utils.functest_utils.yaml.safe_load',
+ return_value={'validation': {'ssh_timeout': 300}}):
CONST.__setattr__('OS_ENDPOINT_TYPE', None)
conf_utils.\
configure_tempest_update_params('test_conf_file',
- IMAGE_ID=image_id,
- FLAVOR_ID=flavor_id)
+ image_id=image_id,
+ flavor_id=flavor_id)
mset.assert_any_call(params[0], params[1], params[2])
self.assertTrue(mread.called)
self.assertTrue(mwrite.called)
@@ -320,50 +295,6 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
mexe.assert_any_call("rally verify configure-verifier "
"--reconfigure")
- def test_configure_tempest_multisite_params_without_fuel(self):
- conf_utils.CI_INSTALLER_TYPE = 'not_fuel'
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os_utils.get_endpoint',
- return_value='kingbird_endpoint_url'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'set') as mset, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'read') as mread, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'add_section') as msection, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'write') as mwrite, \
- mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.backup_tempest_config'):
-
- conf_utils.configure_tempest_multisite_params('test_conf_file')
- msection.assert_any_call("kingbird")
- mset.assert_any_call('service_available', 'kingbird', 'true')
- mset.assert_any_call('kingbird', 'endpoint_type', 'publicURL')
- mset.assert_any_call('kingbird', 'TIME_TO_SYNC', '120')
- mset.assert_any_call('kingbird', 'endpoint_url',
- 'kingbird_endpoint_url')
- self.assertTrue(mread.called)
- self.assertTrue(mwrite.called)
-
- def test_install_verifier_ext_default(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.get_repo_tag',
- return_value='test_tag'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ft_utils.'
- 'execute_command_raise') as mexe:
- conf_utils.install_verifier_ext('test_path')
- cmd = ("rally verify add-verifier-ext --source test_path "
- "--version test_tag")
- error_msg = ("Problem while adding verifier extension from"
- " test_path")
- mexe.assert_called_once_with(cmd, error_msg=error_msg)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index b8b258b36..54d7d49bb 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -15,10 +15,16 @@ from functest.opnfv_tests.openstack.tempest import tempest
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
+
class OSTempestTesting(unittest.TestCase):
def setUp(self):
+ os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.get_verifier_id',
return_value='test_deploy_id'), \
@@ -30,14 +36,13 @@ class OSTempestTesting(unittest.TestCase):
return_value='test_verifier_repo_dir'), \
mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.get_verifier_deployment_dir',
- return_value='test_verifier_deploy_dir'):
+ return_value='test_verifier_deploy_dir'), \
+ mock.patch('snaps.openstack.tests.openstack_tests.get_credentials',
+ return_value=os_creds):
self.tempestcommon = tempest.TempestCommon()
self.tempestsmoke_serial = tempest.TempestSmokeSerial()
self.tempestsmoke_parallel = tempest.TempestSmokeParallel()
self.tempestfull_parallel = tempest.TempestFullParallel()
- with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.install_verifier_ext'):
- self.tempestmultisite = tempest.TempestMultisite()
self.tempestcustom = tempest.TempestCustom()
self.tempestdefcore = tempest.TempestDefcore()
@@ -75,8 +80,6 @@ class OSTempestTesting(unittest.TestCase):
self.tempestcommon.MODE = mode
if self.tempestcommon.MODE == 'smoke':
testr_mode = "smoke"
- elif self.tempestcommon.MODE == 'feature_multisite':
- testr_mode = "'[Kk]ingbird'"
elif self.tempestcommon.MODE == 'full':
testr_mode = ""
else:
@@ -96,9 +99,6 @@ class OSTempestTesting(unittest.TestCase):
def test_generate_test_list_smoke_mode(self):
self._test_generate_test_list_mode_default('smoke')
- def test_generate_test_list_feature_multisite_mode(self):
- self._test_generate_test_list_mode_default('feature_multisite')
-
def test_generate_test_list_full_mode(self):
self._test_generate_test_list_mode_default('full')
@@ -161,8 +161,8 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', side_effect=Exception)
- def test_run_create_tempest_resources_ko(self, *args):
+ 'TempestResourcesManager.create', side_effect=Exception)
+ def test_run_tempest_create_resources_ko(self, *args):
self.assertEqual(self.tempestcommon.run(),
testcase.TestCase.EX_RUN_ERROR)
@@ -170,7 +170,7 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', return_value={})
+ 'TempestResourcesManager.create', return_value={})
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.configure_tempest', side_effect=Exception)
def test_run_configure_tempest_ko(self, *args):
@@ -181,7 +181,7 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', return_value={})
+ 'TempestResourcesManager.create', return_value={})
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.configure_tempest')
def _test_run(self, status, *args):
diff --git a/functest/tests/unit/openstack/vping/test_vping.py b/functest/tests/unit/openstack/vping/test_vping.py
index b229c3519..a28c61aeb 100644
--- a/functest/tests/unit/openstack/vping/test_vping.py
+++ b/functest/tests/unit/openstack/vping/test_vping.py
@@ -50,8 +50,6 @@ class VPingUserdataTesting(unittest.TestCase):
'vm_active', return_value=True)
def test_vping_userdata(self, deploy_vm, path_exists, create_flavor,
get_port_ip, vm_active):
- os_vm_inst = mock.MagicMock(name='get_console_output')
- os_vm_inst.get_console_output.return_value = 'vPing OK'
with mock.patch('snaps.openstack.utils.deploy_utils.create_image',
return_value=OpenStackImage(self.os_creds, None)), \
mock.patch('snaps.openstack.utils.deploy_utils.create_network',
@@ -67,8 +65,8 @@ class VPingUserdataTesting(unittest.TestCase):
name='foo', network_name='bar')]),
None)), \
mock.patch('snaps.openstack.create_instance.'
- 'OpenStackVmInstance.get_os_vm_server_obj',
- return_value=os_vm_inst):
+ 'OpenStackVmInstance.get_console_output',
+ return_value='vPing OK'):
self.assertEquals(TestCase.EX_OK, self.vping_userdata.run())
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index 98c7d6e9c..b4cc5b73b 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -133,24 +133,20 @@ class FunctestUtilsTesting(unittest.TestCase):
self.assertEqual(functest_utils.get_scenario(),
self.scenario)
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_daily_job(self, mock_get_build_tag):
- mock_get_build_tag.return_value = self.build_tag
+ def test_get_version_daily_job(self):
+ CONST.__setattr__('BUILD_TAG', self.build_tag)
self.assertEqual(functest_utils.get_version(), self.version)
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_weekly_job(self, mock_get_build_tag):
- mock_get_build_tag.return_value = self.build_tag_week
+ def test_get_version_weekly_job(self):
+ CONST.__setattr__('BUILD_TAG', self.build_tag_week)
self.assertEqual(functest_utils.get_version(), self.version)
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_with_dummy_build_tag(self, mock_get_build_tag):
- mock_get_build_tag.return_value = 'whatever'
+ def test_get_version_with_dummy_build_tag(self):
+ CONST.__setattr__('BUILD_TAG', 'whatever')
self.assertEqual(functest_utils.get_version(), 'unknown')
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_unknown(self, mock_get_build_tag):
- mock_get_build_tag.return_value = "unknown_build_tag"
+ def test_get_version_unknown(self):
+ CONST.__setattr__('BUILD_TAG', 'unknown_build_tag')
self.assertEqual(functest_utils.get_version(), "unknown")
@mock.patch('functest.utils.functest_utils.logger.info')
@@ -173,33 +169,15 @@ class FunctestUtilsTesting(unittest.TestCase):
self.node_name)
@mock.patch('functest.utils.functest_utils.logger.info')
- def test_get_build_tag_failed(self, mock_logger_info):
- with mock.patch.dict(os.environ,
- {},
- clear=True):
- self.assertEqual(functest_utils.get_build_tag(),
- "none")
- mock_logger_info.assert_called_once_with("Impossible to retrieve"
- " the build tag")
-
- def test_get_build_tag_default(self):
- with mock.patch.dict(os.environ,
- {'BUILD_TAG': self.build_tag},
- clear=True):
- self.assertEqual(functest_utils.get_build_tag(),
- self.build_tag)
-
- @mock.patch('functest.utils.functest_utils.logger.info')
def test_logger_test_results(self, mock_logger_info):
CONST.__setattr__('results_test_db_url', self.db_url)
+ CONST.__setattr__('BUILD_TAG', self.build_tag)
with mock.patch('functest.utils.functest_utils.get_pod_name',
return_value=self.node_name), \
mock.patch('functest.utils.functest_utils.get_scenario',
return_value=self.scenario), \
mock.patch('functest.utils.functest_utils.get_version',
- return_value=self.version), \
- mock.patch('functest.utils.functest_utils.get_build_tag',
- return_value=self.build_tag):
+ return_value=self.version):
functest_utils.logger_test_results(self.project, self.case_name,
self.status, self.details)
mock_logger_info.assert_called_once_with(
diff --git a/functest/tests/unit/utils/test_openstack_utils.py b/functest/tests/unit/utils/test_openstack_utils.py
index 828fb3d45..3bd7e3dd6 100644
--- a/functest/tests/unit/utils/test_openstack_utils.py
+++ b/functest/tests/unit/utils/test_openstack_utils.py
@@ -13,6 +13,7 @@ import unittest
import mock
from functest.utils import openstack_utils
+from functest.utils.constants import CONST
class OSUtilsTesting(unittest.TestCase):
@@ -187,11 +188,18 @@ class OSUtilsTesting(unittest.TestCase):
mock_obj.configure_mock(**attrs)
self.role = mock_obj
+ mock_obj = mock.Mock()
+ attrs = {'id': 'domain_id',
+ 'name': 'test_domain'}
+ mock_obj.configure_mock(**attrs)
+ self.domain = mock_obj
+
self.keystone_client = mock.Mock()
attrs = {'projects.list.return_value': [self.tenant],
'tenants.list.return_value': [self.tenant],
'users.list.return_value': [self.user],
'roles.list.return_value': [self.role],
+ 'domains.list.return_value': [self.domain],
'projects.create.return_value': self.tenant,
'tenants.create.return_value': self.tenant,
'users.create.return_value': self.user,
@@ -390,8 +398,6 @@ class OSUtilsTesting(unittest.TestCase):
self._test_source_credentials('export OS_TENANT_NAME =admin')
self._test_source_credentials('export OS_TENANT_NAME = admin')
self._test_source_credentials('export OS_TENANT_NAME = "admin"')
- self._test_source_credentials('OS_TENANT_NAME', value='')
- self._test_source_credentials('export OS_TENANT_NAME', value='')
# This test will fail as soon as rc_file is fixed
self._test_source_credentials(
'export "\'OS_TENANT_NAME\'" = "\'admin\'"')
@@ -1652,9 +1658,16 @@ class OSUtilsTesting(unittest.TestCase):
'test_role'),
'role_id')
+ def test_get_domain_id_default(self):
+ self.assertEqual(openstack_utils.
+ get_domain_id(self.keystone_client,
+ 'test_domain'),
+ 'domain_id')
+
def test_create_tenant_default(self):
with mock.patch('functest.utils.openstack_utils.'
'is_keystone_v3', return_value=True):
+ CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', 'Default')
self.assertEqual(openstack_utils.
create_tenant(self.keystone_client,
'test_tenant',
diff --git a/functest/tests/unit/vnf/ims/test_cloudify_ims.py b/functest/tests/unit/vnf/ims/test_cloudify_ims.py
index 537c5146b..f0483c69f 100644
--- a/functest/tests/unit/vnf/ims/test_cloudify_ims.py
+++ b/functest/tests/unit/vnf/ims/test_cloudify_ims.py
@@ -13,6 +13,8 @@ import mock
from functest.core import vnf
from functest.opnfv_tests.vnf.ims import cloudify_ims
+from snaps.openstack.os_credentials import OSCreds
+
class CloudifyImsTesting(unittest.TestCase):
@@ -79,8 +81,11 @@ class CloudifyImsTesting(unittest.TestCase):
@mock.patch('snaps.openstack.create_image.OpenStackImage.create')
def test_prepare_bad_auth_url(self, *args):
with self.assertRaises(Exception):
- self.ims_vnf.prepare()
- args[0].assert_not_called()
+ self.ims_vnf.image_creator(
+ OSCreds(username='user', password='pass', auth_url='url',
+ project_name='project', identity_api_version=3),
+ mock.Mock())
+ args[0].assert_not_called()
def test_prepare_missing_param(self):
with self.assertRaises(vnf.VnfPreparationException):
diff --git a/functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py b/functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py
new file mode 100644
index 000000000..ef227ca43
--- /dev/null
+++ b/functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Test module for orchestra_clearwaterims"""
+
+import logging
+import unittest
+
+import mock
+from snaps.openstack.os_credentials import OSCreds
+
+from functest.core import vnf
+from functest.opnfv_tests.vnf.ims import orchestra_clearwaterims
+
+
+class OrchestraClearwaterImsTesting(unittest.TestCase):
+ """Test class for orchestra_clearwaterims"""
+ def setUp(self):
+
+ self.tenant = 'orchestra_clearwaterims'
+ self.creds = {'username': 'mocked_username',
+ 'password': 'mocked_password'}
+ self.tenant_images = {
+ 'image1': 'mocked_image_url_1',
+ 'image2': 'mocked_image_url_2'
+ }
+ self.mano = {
+ 'name': 'openbaton',
+ 'version': '3.2.0',
+ 'object': 'foo',
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 4096,
+ 'disk': 5,
+ 'vcpus': 2
+ },
+ 'os_image': 'mocked_image'
+ },
+ 'bootstrap': {
+ 'url': 'mocked_bootstrap_url',
+ 'config': {
+ 'url': 'mocked_config_url'}
+ },
+ 'gvnfm': {
+ 'userdata': {
+ 'url': 'mocked_userdata_url'
+ }
+ },
+ 'credentials': {
+ 'username': 'mocked_username',
+ 'password': 'mocked_password'
+ }
+ }
+ self.vnf = {
+ 'name': 'openims',
+ 'descriptor': {
+ 'url': 'mocked_descriptor_url'
+ },
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 2048,
+ 'disk': 5,
+ 'vcpus': 2}
+ }
+ }
+ self.clearwaterims = {
+ 'scscf': {
+ 'ports': [3870, 6060]
+ },
+ 'pcscf': {
+ 'ports': [4060]
+ },
+ 'icscf': {
+ 'ports': [3869, 5060]
+ },
+ 'fhoss': {
+ 'ports': [3868]
+ },
+ 'bind9': {
+ 'ports': []
+ }
+ }
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestra_clearwaterims.'
+ 'os.makedirs'),\
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestra_clearwaterims.'
+ 'get_config', return_value={
+ 'orchestrator': self.mano,
+ 'name': self.mano['name'],
+ 'version': self.mano['version'],
+ 'requirements': self.mano['requirements'],
+ 'credentials': self.mano['credentials'],
+ 'bootstrap': self.mano['bootstrap'],
+ 'gvnfm': self.mano['gvnfm'],
+ 'os_image': self.mano['requirements']['os_image'],
+ 'flavor': self.mano['requirements']['flavor'],
+ 'url': self.mano['bootstrap']['url'],
+ 'config': self.mano['bootstrap']['config'],
+ 'tenant_images': self.tenant_images,
+ 'vnf': self.vnf,
+ 'orchestra_clearwaterims': self.clearwaterims}):
+ self.ims_vnf = orchestra_clearwaterims.ClearwaterImsVnf()
+
+ self.details = {'orchestrator': {'status': 'PASS', 'duration': 120},
+ 'vnf': {},
+ 'test_vnf': {}}
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/v1'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_tenant_id',
+ return_value={'mocked_tenant_id'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_floating_ips',
+ return_value=[])
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create')
+ @mock.patch(
+ 'snaps.openstack.create_security_group.OpenStackSecurityGroup.create')
+ @mock.patch('snaps.openstack.create_network.OpenStackNetwork.create')
+ @mock.patch('snaps.openstack.create_router.OpenStackRouter.create')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.snaps_utils.get_ext_net_name')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.'
+ 'snaps_utils.neutron_utils.create_floating_ip')
+ def test_prepare_default(self, *args):
+ """Testing prepare function without any exceptions expected"""
+ self.assertIsNone(self.ims_vnf.prepare())
+ args[4].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/no_v'})
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ def test_prepare_bad_auth_url(self, *args):
+ """Testing prepare function with bad auth url"""
+ with self.assertRaises(Exception):
+ self.ims_vnf.image_creator(
+ OSCreds(username='user', password='pass', auth_url='url',
+ project_name='project', identity_api_version=3),
+ mock.Mock())
+ args[0].assert_not_called()
+
+ def test_prepare_missing_param(self):
+ """Testing prepare function with missing param"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ side_effect=Exception)
+ def test_prepare_keystone_exception(self, *args):
+ """Testing prepare function with keystone exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ side_effect=Exception)
+ def test_prepare_tenant_exception(self, *args):
+ """Testing prepare function with tenant exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[1].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ side_effect=Exception)
+ def test_prepare_user_exception(self, *args):
+ """Testing prepare function with user exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[2].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ side_effect=Exception)
+ def test_prepare_credentials_exception(self, *args):
+ """Testing prepare function with credentials exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ # # @mock.patch('functest.opnfv_tests.vnf.
+ # ims.orchestra_clearwaterims.get_userdata')
+ # def test_deploy_orchestrator(self, *args):
+ # floating_ip = FloatingIp
+ # floating_ip.ip = 'mocked_ip'
+ # details = {'fip':floating_ip,'flavor':{'name':'mocked_name'}}
+ # self.mano['details'] = details
+ # with mock.patch.dict(self.mano, {'details':
+ # {'fip':floating_ip,'flavor':{'name':'mocked_name'}}}):
+ # # with mock.patch.dict(self.mano, details):
+ # orchestra_clearwaterims.get_userdata(self.mano)
+ # self.assertIsNone(self.ims_vnf.deploy_orchestrator())
+ # args[4].assert_called_once_with()
+
+
+if __name__ == "__main__":
+ logging.disable(logging.CRITICAL)
+ unittest.main(verbosity=2)
diff --git a/functest/tests/unit/vnf/ims/test_orchestra_openims.py b/functest/tests/unit/vnf/ims/test_orchestra_openims.py
new file mode 100644
index 000000000..5911cf77b
--- /dev/null
+++ b/functest/tests/unit/vnf/ims/test_orchestra_openims.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Test module for orchestra_openims"""
+
+import logging
+import unittest
+
+import mock
+from snaps.openstack.os_credentials import OSCreds
+
+from functest.core import vnf
+from functest.opnfv_tests.vnf.ims import orchestra_openims
+
+
+class OrchestraOpenImsTesting(unittest.TestCase):
+ """Test class for orchestra_openims"""
+ def setUp(self):
+
+ self.tenant = 'orchestra_openims'
+ self.creds = {'username': 'mocked_username',
+ 'password': 'mocked_password'}
+ self.tenant_images = {
+ 'image1': 'mocked_image_url_1',
+ 'image2': 'mocked_image_url_2'
+ }
+ self.mano = {
+ 'name': 'openbaton',
+ 'version': '3.2.0',
+ 'object': 'foo',
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 4096,
+ 'disk': 5,
+ 'vcpus': 2
+ },
+ 'os_image': 'mocked_image'
+ },
+ 'bootstrap': {
+ 'url': 'mocked_bootstrap_url',
+ 'config': {
+ 'url': 'mocked_config_url'}
+ },
+ 'gvnfm': {
+ 'userdata': {
+ 'url': 'mocked_userdata_url'
+ }
+ },
+ 'credentials': {
+ 'username': 'mocked_username',
+ 'password': 'mocked_password'
+ }
+ }
+ self.vnf = {
+ 'name': 'openims',
+ 'descriptor': {
+ 'url': 'mocked_descriptor_url'
+ },
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 2048,
+ 'disk': 5,
+ 'vcpus': 2}
+ }
+ }
+ self.openims = {
+ 'scscf': {
+ 'ports': [3870, 6060]
+ },
+ 'pcscf': {
+ 'ports': [4060]
+ },
+ 'icscf': {
+ 'ports': [3869, 5060]
+ },
+ 'fhoss': {
+ 'ports': [3868]
+ },
+ 'bind9': {
+ 'ports': []
+ }
+ }
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestra_openims.'
+ 'os.makedirs'),\
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestra_openims.'
+ 'get_config', return_value={
+ 'orchestrator': self.mano,
+ 'name': self.mano['name'],
+ 'version': self.mano['version'],
+ 'requirements': self.mano['requirements'],
+ 'credentials': self.mano['credentials'],
+ 'bootstrap': self.mano['bootstrap'],
+ 'gvnfm': self.mano['gvnfm'],
+ 'os_image':
+ self.mano['requirements']['os_image'],
+ 'flavor':
+ self.mano['requirements']['flavor'],
+ 'url': self.mano['bootstrap']['url'],
+ 'config': self.mano['bootstrap']['config'],
+ 'tenant_images': self.tenant_images,
+ 'vnf': self.vnf,
+ 'orchestra_openims': self.openims}):
+ self.ims_vnf = orchestra_openims.OpenImsVnf()
+
+ self.details = {'orchestrator': {'status': 'PASS', 'duration': 120},
+ 'vnf': {},
+ 'test_vnf': {}}
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/v1'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_tenant_id',
+ return_value={'mocked_tenant_id'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_floating_ips',
+ return_value=[])
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create')
+ @mock.patch(
+ 'snaps.openstack.create_security_group.OpenStackSecurityGroup.create')
+ @mock.patch('snaps.openstack.create_network.OpenStackNetwork.create')
+ @mock.patch('snaps.openstack.create_router.OpenStackRouter.create')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.snaps_utils.get_ext_net_name')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.snaps_utils.'
+ 'neutron_utils.create_floating_ip')
+ def test_prepare_default(self, *args):
+ """Testing prepare function without any exceptions expected"""
+ self.assertIsNone(self.ims_vnf.prepare())
+ args[4].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/no_v'})
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ def test_prepare_bad_auth_url(self, *args):
+ """Testing prepare function with bad auth url"""
+ with self.assertRaises(Exception):
+ self.ims_vnf.image_creator(
+ OSCreds(username='user', password='pass', auth_url='url',
+ project_name='project', identity_api_version=3),
+ mock.Mock())
+ args[0].assert_not_called()
+
+ def test_prepare_missing_param(self):
+ """Testing prepare function with missing param"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ side_effect=Exception)
+ def test_prepare_keystone_exception(self, *args):
+ """Testing prepare function with keystone exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ side_effect=Exception)
+ def test_prepare_tenant_exception(self, *args):
+ """Testing prepare function with tenant exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[1].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ side_effect=Exception)
+ def test_prepare_user_exception(self, *args):
+ """Testing prepare function with user exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[2].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ side_effect=Exception)
+ def test_prepare_credentials_exception(self, *args):
+ """Testing prepare function with credentials exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ # # @mock.patch('functest.opnfv_tests.
+ # vnf.ims.orchestra_openims.get_userdata')
+ # def test_deploy_orchestrator(self, *args):
+ # floating_ip = FloatingIp
+ # floating_ip.ip = 'mocked_ip'
+ # details = {'fip':floating_ip,'flavor':{'name':'mocked_name'}}
+ # self.mano['details'] = details
+ # with mock.patch.dict(self.mano, {'details':
+ # {'fip':floating_ip,'flavor':{'name':'mocked_name'}}}):
+ # # with mock.patch.dict(self.mano, details):
+ # orchestra_openims.get_userdata(self.mano)
+ # self.assertIsNone(self.ims_vnf.deploy_orchestrator())
+ # args[4].assert_called_once_with()
+
+
+if __name__ == "__main__":
+ logging.disable(logging.CRITICAL)
+ unittest.main(verbosity=2)
diff --git a/functest/utils/env.py b/functest/utils/env.py
index 2fb766d32..d7b396eaa 100644
--- a/functest/utils/env.py
+++ b/functest/utils/env.py
@@ -32,7 +32,8 @@ class Environment(object):
if k not in os.environ:
self.__setattr__(k, v)
self._set_ci_run()
- self._set_ci_loop()
+ if 'CI_LOOP' not in os.environ:
+ self._set_ci_loop()
def _set_ci_run(self):
if self.BUILD_TAG:
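A minimal sketch of the guard introduced above, assuming only standard os.environ semantics (resolve_ci_loop is illustrative, not the Environment implementation): a CI_LOOP value exported by the operator is kept, and the default is derived only when the variable is absent.

    import os

    def resolve_ci_loop(derive_default):
        # Mirror of the new check: skip the derived value when CI_LOOP is preset.
        if 'CI_LOOP' not in os.environ:
            os.environ['CI_LOOP'] = derive_default()
        return os.environ['CI_LOOP']

    os.environ.pop('CI_LOOP', None)
    assert resolve_ci_loop(lambda: 'daily') == 'daily'    # derived default wins
    os.environ['CI_LOOP'] = 'weekly'
    assert resolve_ci_loop(lambda: 'daily') == 'weekly'   # operator value wins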
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 91781bd27..a766ef953 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -107,7 +107,9 @@ def get_version():
# jenkins-functest-fuel-baremetal-weekly-master-8
# use regex to match branch info
rule = "(dai|week)ly-(.+?)-[0-9]*"
- build_tag = get_build_tag()
+ build_tag = CONST.__getattribute__('BUILD_TAG')
+ if not build_tag:
+ build_tag = 'none'
m = re.search(rule, build_tag)
if m:
return m.group(2)
@@ -128,19 +130,6 @@ def get_pod_name():
return "unknown-pod"
-def get_build_tag():
- """
- Get build tag of jenkins jobs
- """
- try:
- build_tag = os.environ['BUILD_TAG']
- except KeyError:
- logger.info("Impossible to retrieve the build tag")
- build_tag = "none"
-
- return build_tag
-
-
def logger_test_results(project, case_name, status, details):
"""
Format test case results for the logger
@@ -148,7 +137,7 @@ def logger_test_results(project, case_name, status, details):
pod_name = get_pod_name()
scenario = get_scenario()
version = get_version()
- build_tag = get_build_tag()
+ build_tag = CONST.__getattribute__('BUILD_TAG')
db_url = CONST.__getattribute__("results_test_db_url")
logger.info(
@@ -278,14 +267,14 @@ def get_ci_envvars():
def execute_command_raise(cmd, info=False, error_msg="",
- verbose=True, output_file=None):
- ret = execute_command(cmd, info, error_msg, verbose, output_file)
+ verbose=True, output_file=None, env=None):
+ ret = execute_command(cmd, info, error_msg, verbose, output_file, env)
if ret != 0:
raise Exception(error_msg)
def execute_command(cmd, info=False, error_msg="",
- verbose=True, output_file=None):
+ verbose=True, output_file=None, env=None):
if not error_msg:
error_msg = ("The command '%s' failed." % cmd)
msg_exec = ("Executing command: '%s'" % cmd)
@@ -294,7 +283,7 @@ def execute_command(cmd, info=False, error_msg="",
logger.info(msg_exec)
else:
logger.debug(msg_exec)
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ p = subprocess.Popen(cmd, env=env, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if output_file:
f = open(output_file, "w")
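A short usage sketch of the new env keyword (the command and variable values are illustrative): callers can run a command under a modified copy of the environment without touching os.environ, which is what the Tempest cleanup helpers added below rely on.

    import os

    from functest.utils import functest_utils as ft_utils

    env = os.environ.copy()
    env['TEMPEST_CONFIG_DIR'] = '/home/opnfv/functest/results/tempest'
    # The command only sees the copied environment; a non-zero return still
    # signals failure, as checked by execute_command_raise above.
    ret = ft_utils.execute_command("env | grep TEMPEST_CONFIG_DIR", env=env)
    assert ret == 0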
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
index 4f8d6c357..73d1cde49 100644
--- a/functest/utils/openstack_utils.py
+++ b/functest/utils/openstack_utils.py
@@ -23,6 +23,7 @@ from novaclient import client as novaclient
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
+from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
logger = logging.getLogger(__name__)
@@ -117,13 +118,15 @@ def get_credentials(other_creds={}):
def source_credentials(rc_file):
with open(rc_file, "r") as f:
for line in f:
- var = line.rstrip('"\n').replace('export ', '').split("=")
+ var = (line.rstrip('"\n').replace('export ', '').split("=")
+ if re.search(r'(.*)=(.*)', line) else None)
# The two next lines should be modified as soon as rc_file
# conforms with common rules. Be aware that it could induce
# issues if value starts with '
- key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
- value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
- os.environ[key] = value
+ if var:
+ key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
+ value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
+ os.environ[key] = value
def get_credentials_for_rally():
@@ -710,6 +713,8 @@ def get_private_net(neutron_client):
def get_external_net(neutron_client):
+ if hasattr(CONST, 'EXTERNAL_NETWORK'):
+ return CONST.__getattribute__('EXTERNAL_NETWORK')
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
return network['name']
@@ -717,6 +722,11 @@ def get_external_net(neutron_client):
def get_external_net_id(neutron_client):
+ if hasattr(CONST, 'EXTERNAL_NETWORK'):
+ networks = neutron_client.list_networks(
+ name=CONST.__getattribute__('EXTERNAL_NETWORK'))
+ net_id = networks['networks'][0]['id']
+ return net_id
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
return network['id']
@@ -1374,13 +1384,25 @@ def get_role_id(keystone_client, role_name):
return id
+def get_domain_id(keystone_client, domain_name):
+ domains = keystone_client.domains.list()
+ id = ''
+ for d in domains:
+ if d.name == domain_name:
+ id = d.id
+ break
+ return id
+
+
def create_tenant(keystone_client, tenant_name, tenant_description):
try:
if is_keystone_v3():
+ domain_name = CONST.__getattribute__('OS_PROJECT_DOMAIN_NAME')
+ domain_id = get_domain_id(keystone_client, domain_name)
tenant = keystone_client.projects.create(
name=tenant_name,
description=tenant_description,
- domain="default",
+ domain=domain_id,
enabled=True)
else:
tenant = keystone_client.tenants.create(tenant_name,
@@ -1539,3 +1561,62 @@ def get_resource(heat_client, stack_id, resource):
except Exception as e:
logger.error("Error [get_resource]: %s" % e)
return None
+
+
+# *********************************************
+# TEMPEST
+# *********************************************
+def init_tempest_cleanup(tempest_config_dir=None,
+ tempest_config_filename='tempest.conf',
+ output_file=None):
+ """
+ Initialize the Tempest Cleanup utility.
+ See https://docs.openstack.org/tempest/latest/cleanup.html for docs.
+
+ :param tempest_config_dir: The directory where the Tempest config file is
+ located. If not specified, Tempest picks both the directory and the
+ filename (i.e. the second parameter is ignored)
+ :param tempest_config_filename: The filename of the Tempest config file
+ :param output_file: Optional file where to save output
+ """
+ # The Tempest cleanup utility currently offers no cmd argument to specify
+ # the config file, therefore it has to be configured with env variables
+ env = None
+ if tempest_config_dir:
+ env = os.environ.copy()
+ env['TEMPEST_CONFIG_DIR'] = tempest_config_dir
+ env['TEMPEST_CONFIG'] = tempest_config_filename
+
+ # If this command fails, an exception must be raised to stop the script
+ # otherwise the later cleanup would destroy also other resources
+ cmd_line = "tempest cleanup --init-saved-state"
+ ft_utils.execute_command_raise(cmd_line, env=env, output_file=output_file,
+ error_msg="Tempest cleanup init failed")
+
+
+def perform_tempest_cleanup(tempest_config_dir=None,
+ tempest_config_filename='tempest.conf',
+ output_file=None):
+ """
+ Perform cleanup using the Tempest Cleanup utility.
+ See https://docs.openstack.org/tempest/latest/cleanup.html for docs.
+
+ :param tempest_config_dir: The directory where the Tempest config file is
+ located. If not specified, Tempest picks both the directory and the
+ filename (i.e. the second parameter is ignored)
+ :param tempest_config_filename: The filename of the Tempest config file
+ :param output_file: Optional file where to save output
+ """
+ # The Tempest cleanup utility currently offers no cmd argument to specify
+ # the config file, therefore it has to be configured with env variables
+ env = None
+ if tempest_config_dir:
+ env = os.environ.copy()
+ env['TEMPEST_CONFIG_DIR'] = tempest_config_dir
+ env['TEMPEST_CONFIG'] = tempest_config_filename
+
+ # Unlike the init step above, a failure here is only logged by
+ # execute_command; it does not abort the remaining teardown
+ cmd_line = "tempest cleanup"
+ ft_utils.execute_command(cmd_line, env=env, output_file=output_file,
+ error_msg="Tempest cleanup failed")
diff --git a/requirements.txt b/requirements.txt
index 5344d0c30..e1d34a36a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,11 +21,12 @@ dnspython3!=1.13.0,!=1.14.0,>=1.12.0;python_version>='3.0' # http://www.dnspytho
click
openbaton-cli
cloudify_rest_client
+Flask!=0.11,<1.0,>=0.10 # BSD
+Flask-RESTful>=0.3.5 # BSD
mock>=2.0 # BSD
iniparse==0.4
PrettyTable<0.8,>=0.7.1 # BSD
six>=1.9.0 # MIT
-kingbird
opnfv
snaps
rally
diff --git a/setup.cfg b/setup.cfg
index e29259f34..89aa033e7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,7 +7,6 @@ home-page = https://wiki.opnfv.org/display/functest
packages = functest
scripts =
docker/docker_remote_api/enable_remote_api.sh
- docker/add_images.sh
docker/config_install_env.sh
functest/ci/download_images.sh
@@ -23,3 +22,4 @@ console_scripts =
prepare_env = functest.ci.prepare_env:main
run_tests = functest.ci.run_tests:main
check_deployment = functest.ci.check_deployment:main
+ functest_restapi = functest.api.server:main
diff --git a/tox.ini b/tox.ini
index 208091ac3..9fc18b39f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = docs,pep8,pylint,py35,py27,perm
+envlist = docs,pep8,pylint,py35,py27,perm,aarch64
[testenv]
usedevelop = True
@@ -30,6 +30,7 @@ commands = flake8
basepython = python2.7
whitelist_externals = bash
modules =
+ functest.api
functest.core
functest.opnfv_tests.sdn.odl
functest.tests.unit.core
@@ -59,3 +60,12 @@ commands =
-exec ls -l \{\} + | grep '.' && exit 1 || exit 0"
bash -c "\
find {[testenv:perm]path} -exec file \{\} + | grep CRLF && exit 1 || exit 0"
+
+[testenv:aarch64]
+basepython = python2.7
+whitelist_externals =
+ bash
+ git
+commands =
+ bash -c "patch -f -p1 < docker/Dockerfile.aarch64.patch"
+ git checkout docker/Dockerfile
diff --git a/upper-constraints.txt b/upper-constraints.txt
index 73ec7f41b..74d363c50 100644
--- a/upper-constraints.txt
+++ b/upper-constraints.txt
@@ -2,9 +2,9 @@ git+https://gerrit.opnfv.org/gerrit/releng#egg=opnfv&subdirectory=modules
git+https://gerrit.opnfv.org/gerrit/snaps#egg=snaps
git+https://gerrit.opnfv.org/gerrit/barometer#egg=baro_tests
git+https://gerrit.opnfv.org/gerrit/sdnvpn#egg=sdnvpn
-git+https://gerrit.opnfv.org/gerrit/opera#egg=opera
git+https://gerrit.opnfv.org/gerrit/securityscanning#egg=securityscanning
git+https://gerrit.opnfv.org/gerrit/sfc#egg=sfc
+-e git+https://gerrit.opnfv.org/gerrit/promise#egg=promise
-e git+https://github.com/openstack/refstack-client#egg=refstack-client
cloudify_rest_client===4.0
iniparse===0.4
@@ -13,5 +13,4 @@ robotframework===3.0.2
robotframework-httplibrary===0.4.2
robotframework-requests===0.4.7
robotframework-sshlibrary===2.1.3;python_version=='2.7'
-kingbird===1.1.0
rally===0.9.1