-rw-r--r--  docker/Dockerfile | 1
-rw-r--r--  docker/Dockerfile.aarch64.patch | 59
-rw-r--r--  docker/features/testcases.yaml | 13
-rw-r--r--  docker/vnf/Dockerfile | 2
-rw-r--r--  docker/vnf/testcases.yaml | 16
-rw-r--r--  functest/api/base.py | 4
-rw-r--r--  functest/api/common/api_utils.py | 10
-rw-r--r--  functest/api/common/error.py | 24
-rw-r--r--  functest/api/common/thread.py | 52
-rw-r--r--  functest/api/database/__init__.py (renamed from functest/opnfv_tests/vnf/aaa/__init__.py) | 0
-rw-r--r--  functest/api/database/db.py | 26
-rw-r--r--  functest/api/database/v1/__init__.py | 0
-rw-r--r--  functest/api/database/v1/handlers.py | 43
-rw-r--r--  functest/api/database/v1/models.py | 33
-rw-r--r--  functest/api/resources/v1/creds.py | 38
-rw-r--r--  functest/api/resources/v1/envs.py | 8
-rw-r--r--  functest/api/resources/v1/tasks.py | 58
-rw-r--r--  functest/api/resources/v1/testcases.py | 69
-rw-r--r--  functest/api/server.py | 50
-rw-r--r--  functest/api/urls.py | 16
-rw-r--r--  functest/ci/config_aarch64_patch.yaml | 8
-rw-r--r--  functest/ci/config_functest.yaml | 12
-rw-r--r--  functest/ci/download_images.sh | 79
-rw-r--r--  functest/ci/prepare_env.py | 2
-rw-r--r--  functest/ci/testcases.yaml | 29
-rw-r--r--  functest/opnfv_tests/openstack/rally/blacklist.txt | 32
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 4
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml | 458
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml | 247
-rw-r--r--  functest/opnfv_tests/openstack/rally/task.yaml | 4
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/refstack_client.py | 59
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/tempest_conf.py | 20
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py | 135
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 234
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py | 27
-rw-r--r--  functest/opnfv_tests/vnf/aaa/aaa.py | 41
-rw-r--r--  functest/opnfv_tests/vnf/ims/clearwater_ims_base.py | 21
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.py | 8
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.yaml | 4
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra.yaml | 8
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py | 12
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_openims.py | 11
-rw-r--r--  functest/tests/unit/openstack/refstack_client/test_refstack_client.py | 56
-rw-r--r--  functest/tests/unit/openstack/tempest/test_conf_utils.py | 170
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py | 18
-rw-r--r--  functest/utils/functest_utils.py | 8
-rw-r--r--  functest/utils/openstack_utils.py | 68
-rw-r--r--  tox.ini | 11
48 files changed, 1829 insertions, 479 deletions
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 8c7e4b0fc..0e896d6d2 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -38,6 +38,7 @@ build-essential \
bundler \
crudini \
curl \
+dnsmasq \
gcc \
git \
libffi-dev \
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index 1b553b1c5..1257206d2 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -1,39 +1,25 @@
-From: Delia Popescu <delia.popescu@enea.com>
-Date: Thu, 20 Jul 2017 17:36:13 +0300
-Subject: [PATCH] Modified Dockerfile.aarch to a patch
-
-Docker image for functest on ARM was build using a different Dockerfile.
-Now the ARM Dockerfile is created with a patch,
-in order to avoid modifying both files.
-This Dockerfile.aarch64.patch is applied by opnfv-docker.sh from releng project.
-
-Signed-off-by: Delia Popescu <delia.popescu@enea.com>
----
- docker/Dockerfile | 15 +++++++++------
- 1 file changed, 9 insertions(+), 6 deletions(-)
-
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 924da68..dd87e6c 100644
+index 0e896d6d..2a8f2b66 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,5 +1,5 @@
########################################
-# Docker container for FUNCTEST
-+# Aarch64 Docker container for FUNCTEST
++# Aarch64 Docker container for FUNCTEST
########################################
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -7,9 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-
+
-FROM ubuntu:14.04
-MAINTAINER Jose Lausuch <jose.lausuch@ericsson.com>
-LABEL version="0.1" description="OPNFV Functest Docker container"
+FROM aarch64/ubuntu:14.04
+MAINTAINER Armband team <armband@enea.com>
+LABEL version="0.1" description="OPNFV Functest Aarch64 Docker container"
-
+
# Environment variables
ARG BRANCH=master
@@ -43,6 +43,7 @@ gcc \
@@ -44,18 +30,33 @@ index 924da68..dd87e6c 100644
libpq-dev \
libssl-dev \
libxml2-dev \
-@@ -117,11 +118,13 @@ RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
- && cd ${REPOS_VNFS_DIR}/vims-test \
- && rvm use 1.9.3"
- RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
-+ && gem install bundler \
- && cd ${REPOS_VNFS_DIR}/vims-test \
-+ && bundle config build.nokogiri --use-system-libraries \
- && bundle install"
-
+@@ -103,10 +104,26 @@ RUN /bin/bash -c ". /usr/local/lib/python2.7/dist-packages/sfc/tests/functest/se
+ RUN ln -s /src/tempest /src/refstack-client/.tempest \
+ && virtualenv --system-site-packages /src/tempest/.venv
+
+-RUN cd /src/vims-test && bundle install
++RUN gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
++RUN curl -L https://get.rvm.io | bash -s stable
+
-RUN sh -c 'curl -sL https://deb.nodesource.com/setup_4.x | sudo -E bash -' \
- && sudo apt-get install -y nodejs \
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && cd /src/vims-test \
++ && rvm autolibs enable"
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && cd /src/vims-test \
++ && rvm install 1.9.3"
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && cd /src/vims-test \
++ && rvm use 1.9.3"
++RUN /bin/bash -c ". /etc/profile.d/rvm.sh \
++ && gem install bundler \
++ && cd /src/vims-test \
++ && bundle config build.nokogiri --use-system-libraries \
++ && bundle install"
++
+RUN sh -c 'wget -qO- https://nodejs.org/dist/v4.7.2/node-v4.7.2-linux-arm64.tar.gz | \
+ tar -xz -C /usr/local --exclude=CHANGELOG.md --exclude=LICENSE --exclude=README.md --strip-components 1 '\
- && cd ${REPOS_DIR}/promise && sudo npm -g install npm@latest \
- && cd ${REPOS_DIR}/promise/source && npm install
+ && cd /src/promise && sudo npm -g install npm@latest \
+ && cd /src/promise/source && npm install
+
diff --git a/docker/features/testcases.yaml b/docker/features/testcases.yaml
index 052bd47f3..69da9350e 100644
--- a/docker/features/testcases.yaml
+++ b/docker/features/testcases.yaml
@@ -92,18 +92,17 @@ tiers:
-
case_name: barometercollectd
- enabled: false
+ enabled: true
project_name: barometer
criteria: 100
blocking: false
description: >-
- Test suite for the Barometer project. Separate tests verify the
- proper configuration and functionality of the following
- collectd plugins Ceilometer, Hugepages, Memory RAS (mcelog),
- and OVS Events
+ Test suite for the Barometer project. Separate tests verify
+ the proper configuration and basic functionality of all the
+ collectd plugins as described in the Project Release Plan
dependencies:
- installer: 'fuel'
- scenario: 'kvm_ovs_dpdk_bar'
+ installer: 'apex'
+ scenario: 'bar'
run:
module: 'baro_tests.barometer'
class: 'BarometerCollectd'
diff --git a/docker/vnf/Dockerfile b/docker/vnf/Dockerfile
index 70c13ab29..d4f18c476 100644
--- a/docker/vnf/Dockerfile
+++ b/docker/vnf/Dockerfile
@@ -3,7 +3,7 @@ FROM opnfv/functest-core
ARG VIMS_TAG=stable
RUN apk --no-cache add --update \
- ruby ruby-dev ruby-bundler ruby-irb ruby-rdoc \
+ ruby ruby-dev ruby-bundler ruby-irb ruby-rdoc dnsmasq \
procps git g++ make libxslt-dev libxml2-dev zlib-dev libffi-dev && \
git clone --depth 1 -b $VIMS_TAG https://github.com/boucherv-orange/clearwater-live-test /src/vims-test && \
rm -r /src/vims-test/.git && \
diff --git a/docker/vnf/testcases.yaml b/docker/vnf/testcases.yaml
index 1a42101e9..9f6533930 100644
--- a/docker/vnf/testcases.yaml
+++ b/docker/vnf/testcases.yaml
@@ -20,22 +20,6 @@ tiers:
run:
module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
class: 'CloudifyIms'
-
- -
- case_name: aaa
- enabled: false
- project_name: functest
- criteria: 100
- blocking: false
- description: >-
- Simple VNF.
- dependencies:
- installer: ''
- scenario: ''
- run:
- module: 'functest.opnfv_tests.vnf.aaa.aaa'
- class: 'AaaVnf'
-
-
case_name: orchestra_openims
project_name: functest
diff --git a/functest/api/base.py b/functest/api/base.py
index efeab8243..ffc567860 100644
--- a/functest/api/base.py
+++ b/functest/api/base.py
@@ -17,7 +17,7 @@ import logging
from flask import request
from flask_restful import Resource
-from functest.api.common import api_utils, error
+from functest.api.common import api_utils
LOGGER = logging.getLogger(__name__)
@@ -58,7 +58,7 @@ class ApiResource(Resource):
try:
return getattr(self, action)(args)
except AttributeError:
- error.result_handler(status=1, data='No such action')
+ api_utils.result_handler(status=1, data='No such action')
# Import modules from package "functest.api.resources"
diff --git a/functest/api/common/api_utils.py b/functest/api/common/api_utils.py
index f518e777c..d85acf927 100644
--- a/functest/api/common/api_utils.py
+++ b/functest/api/common/api_utils.py
@@ -18,6 +18,7 @@ import os
import sys
from oslo_utils import importutils
+from flask import jsonify
import six
import functest
@@ -89,3 +90,12 @@ def change_obj_to_dict(obj):
for key, value in vars(obj).items():
dic.update({key: value})
return dic
+
+
+def result_handler(status, data):
+ """ Return the json format of result in dict """
+ result = {
+ 'status': status,
+ 'result': data
+ }
+ return jsonify(result)
diff --git a/functest/api/common/error.py b/functest/api/common/error.py
deleted file mode 100644
index d00452252..000000000
--- a/functest/api/common/error.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""
-Used to handle results
-
-"""
-
-from flask import jsonify
-
-
-def result_handler(status, data):
- """ Return the json format of result in dict """
- result = {
- 'status': status,
- 'result': data
- }
- return jsonify(result)
diff --git a/functest/api/common/thread.py b/functest/api/common/thread.py
new file mode 100644
index 000000000..fb60aaac7
--- /dev/null
+++ b/functest/api/common/thread.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to handle multi-thread tasks
+"""
+
+import logging
+import threading
+
+from oslo_serialization import jsonutils
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class TaskThread(threading.Thread):
+ """ Task Thread Class """
+
+ def __init__(self, target, args, handler):
+ super(TaskThread, self).__init__(target=target, args=args)
+ self.target = target
+ self.args = args
+ self.handler = handler
+
+ def run(self):
+ """ Override the function run: run testcase and update database """
+ update_data = {'task_id': self.args.get('task_id'),
+ 'status': 'IN PROGRESS'}
+ self.handler.insert(update_data)
+
+ LOGGER.info('Starting running test case')
+
+ try:
+ data = self.target(self.args)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.exception('Task Failed')
+ update_data = {'status': 'FAIL', 'error': str(err)}
+ self.handler.update_attr(self.args.get('task_id'), update_data)
+ else:
+ LOGGER.info('Task Finished')
+ LOGGER.debug('Result: %s', data)
+ new_data = {'status': 'FINISHED',
+ 'result': jsonutils.dumps(data.get('result', {}))}
+
+ self.handler.update_attr(self.args.get('task_id'), new_data)
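The TaskThread above only depends on an object exposing insert() and update_attr(), so its behaviour can be sketched without the real database. A minimal, hypothetical usage sketch (DummyHandler and fake_case are stand-ins for illustration, not part of Functest):

    from functest.api.common.thread import TaskThread

    class DummyHandler(object):
        """Stand-in for TasksHandler: just prints what would be stored."""
        def insert(self, data):
            print('insert: %s' % data)

        def update_attr(self, task_id, attr):
            print('update %s: %s' % (task_id, attr))

    def fake_case(args):
        """Pretend test run returning the dict shape _run() produces."""
        return {'result': {'case_name': args['testcase'], 'result': 'PASS'}}

    task = TaskThread(fake_case, {'task_id': '42', 'testcase': 'vping_ssh'},
                      DummyHandler())
    task.start()
    task.join()
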
diff --git a/functest/opnfv_tests/vnf/aaa/__init__.py b/functest/api/database/__init__.py
index e69de29bb..e69de29bb 100644
--- a/functest/opnfv_tests/vnf/aaa/__init__.py
+++ b/functest/api/database/__init__.py
diff --git a/functest/api/database/db.py b/functest/api/database/db.py
new file mode 100644
index 000000000..ea861ddbd
--- /dev/null
+++ b/functest/api/database/db.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Create database to store task results using sqlalchemy
+"""
+
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import scoped_session, sessionmaker
+
+
+SQLITE = 'sqlite:////tmp/functest.db'
+
+ENGINE = create_engine(SQLITE, convert_unicode=True)
+DB_SESSION = scoped_session(sessionmaker(autocommit=False,
+ autoflush=False,
+ bind=ENGINE))
+BASE = declarative_base()
+BASE.query = DB_SESSION.query_property()
diff --git a/functest/api/database/v1/__init__.py b/functest/api/database/v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/api/database/v1/__init__.py
diff --git a/functest/api/database/v1/handlers.py b/functest/api/database/v1/handlers.py
new file mode 100644
index 000000000..7bd286ded
--- /dev/null
+++ b/functest/api/database/v1/handlers.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to handle tasks: insert the task info into database and update it
+"""
+
+from functest.api.database.db import DB_SESSION
+from functest.api.database.v1.models import Tasks
+
+
+class TasksHandler(object):
+ """ Tasks Handler Class """
+
+ def insert(self, kwargs): # pylint: disable=no-self-use
+ """ To insert the task info into database """
+ task = Tasks(**kwargs)
+ DB_SESSION.add(task) # pylint: disable=maybe-no-member
+ DB_SESSION.commit() # pylint: disable=maybe-no-member
+ return task
+
+ def get_task_by_taskid(self, task_id): # pylint: disable=no-self-use
+ """ Obtain the task by task id """
+ # pylint: disable=maybe-no-member
+ task = Tasks.query.filter_by(task_id=task_id).first()
+ if not task:
+ raise ValueError
+
+ return task
+
+ def update_attr(self, task_id, attr):
+ """ Update the required attributes of the task """
+ task = self.get_task_by_taskid(task_id)
+
+ for key, value in attr.items():
+ setattr(task, key, value)
+ DB_SESSION.commit() # pylint: disable=maybe-no-member
diff --git a/functest/api/database/v1/models.py b/functest/api/database/v1/models.py
new file mode 100644
index 000000000..c5de91bca
--- /dev/null
+++ b/functest/api/database/v1/models.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Define tables for tasks
+"""
+
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import String
+from sqlalchemy import Text
+
+from functest.api.database.db import BASE
+
+
+class Tasks(BASE): # pylint: disable=too-few-public-methods, no-init
+ """ Create a table for tasks"""
+
+ __tablename__ = 'tasks'
+ id = Column(Integer, primary_key=True) # pylint: disable=invalid-name
+ task_id = Column(String(50))
+ status = Column(Integer)
+ error = Column(String(120))
+ result = Column(Text)
+
+ def __repr__(self):
+ return '<Task %r>' % Tasks.task_id
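Taken together, db.py, models.py and handlers.py form a small sqlite-backed task store. A minimal wiring sketch, assuming /tmp/functest.db is writable (the create_all() call mirrors what init_db() in functest/api/server.py does):

    from functest.api.database.db import BASE, ENGINE
    from functest.api.database.v1 import models  # registers the Tasks model
    from functest.api.database.v1.handlers import TasksHandler

    BASE.metadata.create_all(bind=ENGINE)   # creates the 'tasks' table

    handler = TasksHandler()
    handler.insert({'task_id': 'demo-task', 'status': 'IN PROGRESS'})
    handler.update_attr('demo-task', {'status': 'FINISHED', 'result': '{}'})
    print(handler.get_task_by_taskid('demo-task').status)   # FINISHED
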
diff --git a/functest/api/resources/v1/creds.py b/functest/api/resources/v1/creds.py
index e402d7e3a..45e4559f4 100644
--- a/functest/api/resources/v1/creds.py
+++ b/functest/api/resources/v1/creds.py
@@ -11,13 +11,19 @@
Resources to handle openstack related requests
"""
+import collections
+import logging
+
from flask import jsonify
from functest.api.base import ApiResource
+from functest.api.common import api_utils
from functest.cli.commands.cli_os import OpenStack
from functest.utils import openstack_utils as os_utils
from functest.utils.constants import CONST
+LOGGER = logging.getLogger(__name__)
+
class V1Creds(ApiResource):
""" V1Creds Resource class"""
@@ -27,3 +33,35 @@ class V1Creds(ApiResource):
os_utils.source_credentials(CONST.__getattribute__('openstack_creds'))
credentials_show = OpenStack.show_credentials()
return jsonify(credentials_show)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def update_openrc(self, args): # pylint: disable=no-self-use
+ """ Used to update the OpenStack RC file """
+ try:
+ openrc_vars = args['openrc']
+ except KeyError:
+ return api_utils.result_handler(
+ status=0, data='openrc must be provided')
+ else:
+ if not isinstance(openrc_vars, collections.Mapping):
+ return api_utils.result_handler(
+ status=0, data='args should be a dict')
+
+ lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+
+ rc_file = CONST.__getattribute__('openstack_creds')
+ with open(rc_file, 'w') as creds_file:
+ creds_file.writelines(lines)
+
+ LOGGER.info("Sourcing the OpenStack RC file...")
+ try:
+ os_utils.source_credentials(rc_file)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.exception('Failed to source the OpenStack RC file')
+ return api_utils.result_handler(status=0, data=str(err))
+
+ return api_utils.result_handler(
+ status=0, data='Update openrc successfully')
diff --git a/functest/api/resources/v1/envs.py b/functest/api/resources/v1/envs.py
index 35bffb04f..9c455198d 100644
--- a/functest/api/resources/v1/envs.py
+++ b/functest/api/resources/v1/envs.py
@@ -14,6 +14,7 @@ from flask import jsonify
from functest.api.base import ApiResource
from functest.cli.commands.cli_env import Env
+from functest.api.common import api_utils
import functest.utils.functest_utils as ft_utils
@@ -31,4 +32,9 @@ class V1Envs(ApiResource):
def prepare(self, args): # pylint: disable=no-self-use, unused-argument
""" Prepare environment """
- ft_utils.execute_command("prepare_env start")
+ try:
+ ft_utils.execute_command("prepare_env start")
+ except Exception as err: # pylint: disable=broad-except
+ return api_utils.result_handler(status=1, data=str(err))
+ return api_utils.result_handler(
+ status=0, data="Prepare env successfully")
diff --git a/functest/api/resources/v1/tasks.py b/functest/api/resources/v1/tasks.py
new file mode 100644
index 000000000..7086e7075
--- /dev/null
+++ b/functest/api/resources/v1/tasks.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to retrieve the task results
+"""
+
+
+import json
+import logging
+import uuid
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.api.database.v1.handlers import TasksHandler
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Tasks(ApiResource):
+ """ V1Tasks Resource class"""
+
+ def get(self, task_id): # pylint: disable=no-self-use
+ """ GET the result of the task id """
+ try:
+ uuid.UUID(task_id)
+ except ValueError:
+ return api_utils.result_handler(status=1, data='Invalid task id')
+
+ task_handler = TasksHandler()
+ try:
+ task = task_handler.get_task_by_taskid(task_id)
+ except ValueError:
+ return api_utils.result_handler(status=1, data='No such task id')
+
+ status = task.status
+ LOGGER.debug('Task status is: %s', status)
+
+ if status not in ['IN PROGRESS', 'FAIL', 'FINISHED']:
+ return api_utils.result_handler(status=1,
+ data='internal server error')
+ if status == 'IN PROGRESS':
+ result = {'status': status, 'result': ''}
+ elif status == 'FAIL':
+ result = {'status': status, 'error': task.error}
+ else:
+ result = {'status': status, 'result': json.loads(task.result)}
+
+ return jsonify(result)
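A hedged sketch of polling such a task over HTTP, assuming the API is served by the default Flask server on port 5000; the path comes from functest/api/urls.py and the uuid is a hypothetical id returned when a test case is launched:

    import requests

    task_id = '5f1a2b3c-...'   # hypothetical uuid taken from the POST reply
    url = 'http://localhost:5000/api/v1/functest/tasks/{}'.format(task_id)
    print(requests.get(url).json())
    # e.g. {'status': 'IN PROGRESS', 'result': ''} while the case is running
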
diff --git a/functest/api/resources/v1/testcases.py b/functest/api/resources/v1/testcases.py
index c3b8217a1..f146c24ce 100644
--- a/functest/api/resources/v1/testcases.py
+++ b/functest/api/resources/v1/testcases.py
@@ -11,11 +11,20 @@
Resources to handle testcase related requests
"""
+import os
+import logging
+import uuid
+
from flask import abort, jsonify
from functest.api.base import ApiResource
-from functest.api.common import api_utils
+from functest.api.common import api_utils, thread
from functest.cli.commands.cli_testcase import Testcase
+from functest.api.database.v1.handlers import TasksHandler
+from functest.utils.constants import CONST
+import functest.utils.functest_utils as ft_utils
+
+LOGGER = logging.getLogger(__name__)
class V1Testcases(ApiResource):
@@ -46,3 +55,61 @@ class V1Testcase(ApiResource):
result.update(testcase_info)
result.update({'dependency': dependency_dict})
return jsonify(result)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def run_test_case(self, args):
+ """ Run a testcase """
+ try:
+ case_name = args['testcase']
+ except KeyError:
+ return api_utils.result_handler(
+ status=1, data='testcase name must be provided')
+
+ task_id = str(uuid.uuid4())
+
+ task_args = {'testcase': case_name, 'task_id': task_id}
+
+ task_args.update(args.get('opts', {}))
+
+ task_thread = thread.TaskThread(self._run, task_args, TasksHandler())
+ task_thread.start()
+
+ results = {'testcase': case_name, 'task_id': task_id}
+ return jsonify(results)
+
+ def _run(self, args): # pylint: disable=no-self-use
+ """ The built_in function to run a test case """
+
+ case_name = args.get('testcase')
+
+ if not os.path.isfile(CONST.__getattribute__('env_active')):
+ raise Exception("Functest environment is not ready.")
+ else:
+ try:
+ cmd = "run_tests -t {}".format(case_name)
+ runner = ft_utils.execute_command(cmd)
+ except Exception: # pylint: disable=broad-except
+ result = 'FAIL'
+ LOGGER.exception("Running test case %s failed!", case_name)
+ if runner == os.EX_OK:
+ result = 'PASS'
+ else:
+ result = 'FAIL'
+
+ env_info = {
+ 'installer': CONST.__getattribute__('INSTALLER_TYPE'),
+ 'scenario': CONST.__getattribute__('DEPLOY_SCENARIO'),
+ 'build_tag': CONST.__getattribute__('BUILD_TAG'),
+ 'ci_loop': CONST.__getattribute__('CI_LOOP')
+ }
+ result = {
+ 'task_id': args.get('task_id'),
+ 'case_name': case_name,
+ 'env_info': env_info,
+ 'result': result
+ }
+
+ return {'result': result}
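A hedged sketch of launching a test case through this resource, again assuming the default Flask server on port 5000; the payload layout is taken from the comments in functest/api/urls.py:

    import requests

    payload = {'action': 'run_test_case',
               'args': {'testcase': 'vping_ssh', 'opts': {}}}
    resp = requests.post(
        'http://localhost:5000/api/v1/functest/testcases/action',
        json=payload)
    print(resp.json())   # {'testcase': 'vping_ssh', 'task_id': '<uuid>'}
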
diff --git a/functest/api/server.py b/functest/api/server.py
index e246333ef..1d47b0dcb 100644
--- a/functest/api/server.py
+++ b/functest/api/server.py
@@ -12,6 +12,7 @@ Used to launch Functest RestApi
"""
+import inspect
import logging
import socket
from urlparse import urljoin
@@ -21,12 +22,28 @@ from flask import Flask
from flask_restful import Api
from functest.api.base import ApiResource
-from functest.api.urls import URLPATTERNS
from functest.api.common import api_utils
+from functest.api.database.db import BASE
+from functest.api.database.db import DB_SESSION
+from functest.api.database.db import ENGINE
+from functest.api.database.v1 import models
+from functest.api.urls import URLPATTERNS
LOGGER = logging.getLogger(__name__)
+APP = Flask(__name__)
+API = Api(APP)
+
+
+@APP.teardown_request
+def shutdown_session(exception=None): # pylint: disable=unused-argument
+ """
+ To be called at the end of each request whether it is successful
+ or an exception is raised
+ """
+ DB_SESSION.remove()
+
def get_resource(resource_name):
""" Obtain the required resource according to resource name """
@@ -41,7 +58,7 @@ def get_endpoint(url):
return urljoin('http://{}:5000'.format(address), url)
-def api_add_resource(api):
+def api_add_resource():
"""
The resource has multiple URLs and you can pass multiple URLs to the
add_resource() method on the Api object. Each one will be routed to
@@ -49,19 +66,38 @@ def api_add_resource(api):
"""
for url_pattern in URLPATTERNS:
try:
- api.add_resource(
+ API.add_resource(
get_resource(url_pattern.target), url_pattern.url,
endpoint=get_endpoint(url_pattern.url))
except StopIteration:
LOGGER.error('url resource not found: %s', url_pattern.url)
+def init_db():
+ """
+ Import all modules here that might define models so that
+ they will be registered properly on the metadata, and then
+ create a database
+ """
+ def func(subcls):
+ """ To check the subclasses of BASE"""
+ try:
+ if issubclass(subcls[1], BASE):
+ return True
+ except TypeError:
+ pass
+ return False
+ # pylint: disable=bad-builtin
+ subclses = filter(func, inspect.getmembers(models, inspect.isclass))
+ LOGGER.debug('Import models: %s', [subcls[1] for subcls in subclses])
+ BASE.metadata.create_all(bind=ENGINE)
+
+
def main():
"""Entry point"""
logging.config.fileConfig(pkg_resources.resource_filename(
'functest', 'ci/logging.ini'))
LOGGER.info('Starting Functest server')
- app = Flask(__name__)
- api = Api(app)
- api_add_resource(api)
- app.run(host='0.0.0.0')
+ api_add_resource()
+ init_db()
+ APP.run(host='0.0.0.0')
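A rough sketch of what main() now wires together (logging setup omitted; APP, api_add_resource and init_db are module-level names in functest/api/server.py):

    from functest.api import server

    server.api_add_resource()    # bind every URL pattern to its resource
    server.init_db()             # create the sqlite tables from the models
    server.APP.run(host='0.0.0.0')
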
diff --git a/functest/api/urls.py b/functest/api/urls.py
index ca45b4beb..f7bcae389 100644
--- a/functest/api/urls.py
+++ b/functest/api/urls.py
@@ -32,6 +32,10 @@ URLPATTERNS = [
# GET /api/v1/functest/openstack/credentials => GET credentials
Url('/api/v1/functest/openstack/credentials', 'v1_creds'),
+ # POST /api/v1/functest/openstack/action
+ # {"action":"update_openrc", "args": {"openrc": {}}} => Update openrc
+ Url('/api/v1/functest/openstack/action', 'v1_creds'),
+
# GET /api/v1/functest/testcases => GET all testcases
Url('/api/v1/functest/testcases', 'v1_test_cases'),
@@ -39,6 +43,11 @@ URLPATTERNS = [
# => GET the info of one testcase
Url('/api/v1/functest/testcases/<testcase_name>', 'v1_testcase'),
+ # POST /api/v1/functest/testcases/action
+ # {"action":"run_test_case", "args": {"opts": {}, "testcase": "vping_ssh"}}
+ # => Run a testcase
+ Url('/api/v1/functest/testcases/action', 'v1_testcase'),
+
# GET /api/v1/functest/testcases => GET all tiers
Url('/api/v1/functest/tiers', 'v1_tiers'),
@@ -48,5 +57,10 @@ URLPATTERNS = [
# GET /api/v1/functest/tiers/<tier_name>/testcases
# => GET all testcases within given tier
- Url('/api/v1/functest/tiers/<tier_name>/testcases', 'v1_testcases_in_tier')
+ Url('/api/v1/functest/tiers/<tier_name>/testcases',
+ 'v1_testcases_in_tier'),
+
+ # GET /api/v1/functest/tasks/<task_id>
+ # => GET the result of the task id
+ Url('/api/v1/functest/tasks/<task_id>', 'v1_tasks')
]
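
A hedged sketch of the new update_openrc action documented above, assuming the default Flask server on port 5000; the openrc keys shown are the usual OpenStack variables and the values are placeholders:

    import requests

    payload = {'action': 'update_openrc',
               'args': {'openrc': {'OS_AUTH_URL': 'http://keystone:5000/v3',
                                   'OS_USERNAME': 'admin',
                                   'OS_PASSWORD': 'secret',
                                   'OS_PROJECT_NAME': 'admin'}}}
    requests.post('http://localhost:5000/api/v1/functest/openstack/action',
                  json=payload)
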
diff --git a/functest/ci/config_aarch64_patch.yaml b/functest/ci/config_aarch64_patch.yaml
index de82dbc70..6b3699b4d 100644
--- a/functest/ci/config_aarch64_patch.yaml
+++ b/functest/ci/config_aarch64_patch.yaml
@@ -18,6 +18,14 @@ os:
hw_firmware_type: 'uefi'
short_id: 'ubuntu16.04'
hw_video_model: 'vga'
+ ubuntu:
+ disk_file: /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ hw_video_model: 'vga'
+ centos:
+ disk_file: /home/opnfv/functest/images/CentOS-7-aarch64-GenericCloud.qcow2
+
vping:
image_name: TestVM
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index 3352889c4..cf63e1edd 100644
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -88,6 +88,9 @@ vping:
vm_name_2: opnfv-vping-2
image_name: functest-vping
private_net_name: vping-net
+ # network_type: vlan
+ # physical_network: physnet2
+ # segmentation_id: 2366
private_subnet_name: vping-subnet
private_subnet_cidr: 192.168.130.0/24
router_name: vping-router
@@ -115,6 +118,11 @@ odl_sfc:
tempest:
deployment_name: opnfv-tempest
+ identity:
+ tenant_name: tempest
+ tenant_description: Tenant for Tempest test suite
+ user_name: tempest
+ user_password: Tempest123!
validation:
ssh_timeout: 130
object_storage:
@@ -135,10 +143,6 @@ rally:
router_name: rally-router
vnf:
- aaa:
- tenant_name: aaa
- tenant_description: Freeradius server
- tenant_images: {}
juju_epc:
tenant_name: epc
tenant_description: OAI EPC deployed with Juju
diff --git a/functest/ci/download_images.sh b/functest/ci/download_images.sh
index dd3e37894..86f37a3f5 100644
--- a/functest/ci/download_images.sh
+++ b/functest/ci/download_images.sh
@@ -1,62 +1,25 @@
#!/bin/bash
-CIRROS_REPO_URL=http://download.cirros-cloud.net
-CIRROS_AARCH64_TAG=161201
-CIRROS_X86_64_TAG=0.3.5
-
-RED='\033[1;31m'
-NC='\033[0m' # No Color
-
-function usage(){
- echo -e "${RED}USAGE: $script <destination_folder> <scenario_name> [arch]${NC}"
- exit 0
-}
-
-script=`basename "$0"`
-IMAGES_FOLDER_DIR=$1
-SCENARIO=$2
-ARCH=$3
-
-if [[ -z $IMAGES_FOLDER_DIR ]]; then usage; fi;
-
set -ex
-mkdir -p ${IMAGES_FOLDER_DIR}
-
-
-####################
-# MANDATORY IMAGES #
-####################
-# These images should be present in Functest for the tests to work
-
-# Functest:
-wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${IMAGES_FOLDER_DIR}
-
-# SNAPS:
-wget -nc http://uec-images.ubuntu.com/releases/trusty/14.04/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
-wget -nc http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 -P ${IMAGES_FOLDER_DIR}
-
-
-###################
-# OPTIONAL IMAGES #
-###################
-# Optional images can be commented if they are not going to be used by the tests
-
-# SDNVPN (odl-bgpvpn scenarios):
-if [[ ${SCENARIO} == *"bgpvpn"* ]]; then
- wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
-fi
-
-# ONOS (onos-sfc scenarios):
-if [[ ${SCENARIO} == *"onos-sfc"* ]]; then
- wget -nc http://artifacts.opnfv.org/onosfw/images/firewall_block_image.img -P ${IMAGES_FOLDER_DIR}
-fi
-
-if [[ ${ARCH} == "arm" ]] || [[ ${ARCH} == "aarch64" ]]; then
- # ARM (aarch64 cirros images):
- wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${IMAGES_FOLDER_DIR}
- wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${IMAGES_FOLDER_DIR}
- wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${IMAGES_FOLDER_DIR}
-fi
-set +ex
\ No newline at end of file
+wget_opts="-N --tries=1 --connect-timeout=30"
+
+cat << EOF | wget ${wget_opts} -i - -P ${1:-/home/opnfv/functest/images}
+http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
+https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
+http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
+http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-lxc.tar.gz
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-initramfs
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-kernel
+https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+http://cloud.centos.org/altarch/7/images/aarch64/CentOS-7-aarch64-GenericCloud.qcow2.xz
+EOF
+
+xz --decompress ${1:-/home/opnfv/functest/images}/CentOS-7-aarch64-GenericCloud.qcow2.xz
+
+exit $?
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index c40e32660..9ed585f3d 100644
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -33,7 +33,7 @@ actions = ['start', 'check']
logger = logging.getLogger('functest.ci.prepare_env')
handler = None
# set the architecture to default
-pod_arch = None
+pod_arch = os.getenv("HOST_ARCH", None)
arch_filter = ['aarch64']
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index 7c51987cf..fac81267f 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -385,18 +385,17 @@ tiers:
-
case_name: barometercollectd
- enabled: false
+ enabled: true
project_name: barometer
criteria: 100
blocking: false
description: >-
- Test suite for the Barometer project. Separate tests verify the
- proper configuration and functionality of the following
- collectd plugins Ceilometer, Hugepages, Memory RAS (mcelog),
- and OVS Events
+ Test suite for the Barometer project. Separate tests verify
+ the proper configuration and basic functionality of all the
+ collectd plugins as described in the Project Release Plan
dependencies:
- installer: 'fuel'
- scenario: 'kvm_ovs_dpdk_bar'
+ installer: 'apex'
+ scenario: 'bar'
run:
module: 'baro_tests.barometer'
class: 'BarometerCollectd'
@@ -478,22 +477,6 @@ tiers:
run:
module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
class: 'CloudifyIms'
-
- -
- case_name: aaa
- enabled: false
- project_name: functest
- criteria: 100
- blocking: false
- description: >-
- Simple VNF.
- dependencies:
- installer: ''
- scenario: ''
- run:
- module: 'functest.opnfv_tests.vnf.aaa.aaa'
- class: 'AaaVnf'
-
-
case_name: orchestra_openims
project_name: functest
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt
index 95bea2b7a..099d6864d 100644
--- a/functest/opnfv_tests/openstack/rally/blacklist.txt
+++ b/functest/opnfv_tests/openstack/rally/blacklist.txt
@@ -6,6 +6,38 @@ scenario:
- joid
tests:
- NovaServers.boot_server_from_volume_and_delete
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+ # Following tests currently fail due to required Gnocchi API:
+ # HTTP 410: "This telemetry installation is configured to use
+ # Gnocchi. Please use the Gnocchi API available on the
+ # metric endpoint to retrieve data."
+ # Issue: https://bugs.launchpad.net/rally/+bug/1704322
+ - CeilometerMeters.list_matched_meters
+ - CeilometerMeters.list_meters
+ - CeilometerQueries.create_and_query_samples
+ - CeilometerResource.get_tenant_resources
+ - CeilometerResource.list_matched_resources
+ - CeilometerResource.list_resources
+ - CeilometerSamples.list_matched_samples
+ - CeilometerSamples.list_samples
+ - CeilometerStats.create_meter_and_get_stats
+ - CeilometerStats.get_stats
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+      # Following test currently fails due to a bug in
+ # python-ceilometerclient during fetching of event_types
+ # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138
+ # Fix: https://review.openstack.org/#/c/483402/
+ - CeilometerEvents.create_user_and_list_event_types
functionality:
-
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 6b7c49ca7..fdef8bed0 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__)
class RallyBase(testcase.OSGCTestCase):
"""Base class form Rally testcases implementation."""
- TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'vm', 'all']
+ TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+ 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
GLANCE_IMAGE_PATH = os.path.join(
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
new file mode 100644
index 000000000..7efb5a83b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
@@ -0,0 +1,458 @@
+ CeilometerMeters.list_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.create_meter_and_get_stats:
+ -
+ args:
+ user_id: "user-id"
+ resource_id: "resource-id"
+ counter_volume: 1.0
+ counter_unit: ""
+ counter_type: "cumulative"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_matched_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerMeters.list_matched_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_matched_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ batch_size: 5
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
new file mode 100644
index 000000000..bb070cd3a
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
@@ -0,0 +1,247 @@
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml
index 033edb831..65f101fbe 100644
--- a/functest/opnfv_tests/openstack/rally/task.yaml
+++ b/functest/opnfv_tests/openstack/rally/task.yaml
@@ -31,6 +31,10 @@
{%- include "var/opnfv-neutron.yaml"-%}
{% endif %}
+{% if "ceilometer" in service_list %}
+{%- include "var/opnfv-ceilometer.yaml"-%}
+{% endif %}
+
{% if "quotas" in service_list %}
{%- include "var/opnfv-quotas.yaml"-%}
{% endif %}
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index 86053ccf3..4f71b5f5d 100644
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
# logging configuration """
LOGGER = logging.getLogger(__name__)
-class RefstackClient(testcase.OSGCTestCase):
+class RefstackClient(testcase.TestCase):
"""RefstackClient testcase implementation class."""
def __init__(self, **kwargs):
@@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase):
if "case_name" not in kwargs:
kwargs["case_name"] = "refstack_defcore"
super(RefstackClient, self).__init__(**kwargs)
+ self.tempestconf = None
self.conf_path = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
@@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase):
CONST.__getattribute__('OS_INSECURE').lower() == 'true'):
self.insecure = '-k'
+    def generate_conf(self):
+        """Generate the reference tempest.conf used by the defcore run."""
+ if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
+ os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
+
+ self.tempestconf = TempestConf()
+ self.tempestconf.generate_tempestconf()
+
def run_defcore(self, conf, testlist):
"""Run defcore sys command."""
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
@@ -87,7 +96,7 @@ class RefstackClient(testcase.OSGCTestCase):
stderr=subprocess.STDOUT)
def parse_refstack_result(self):
- """Parse Refstact results."""
+ """Parse Refstack results."""
try:
with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
"refstack.log"), 'r') as logfile:
@@ -144,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase):
"""
self.start_time = time.time()
- if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
- os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
-
try:
- tempestconf = TempestConf()
- tempestconf.generate_tempestconf()
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
self.run_defcore_default()
self.parse_refstack_result()
res = testcase.TestCase.EX_OK
except Exception:
LOGGER.exception("Error with run")
res = testcase.TestCase.EX_RUN_ERROR
+        finally:
+            if self.tempestconf:
+                self.tempestconf.clean()
self.stop_time = time.time()
return res
@@ -194,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase):
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+
+ :return: TestCase.EX_OK
+ """
+ LOGGER.info("Initializing the saved state of the OpenStack deployment")
+
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
+
+ os_utils.init_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(RefstackClient, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+ """
+ LOGGER.info("Destroying the resources created for tempest")
+
+ os_utils.perform_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(RefstackClient, self).clean()
+
class RefstackClientParser(object): # pylint: disable=too-few-public-methods
"""Command line argument parser helper."""
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
index 30590b9eb..db7452271 100644
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
@@ -11,13 +11,15 @@ import pkg_resources
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils import openstack_utils
from functest.utils.constants import CONST
+from functest.opnfv_tests.openstack.tempest.tempest \
+ import TempestResourcesManager
""" logging configuration """
logger = logging.getLogger(__name__)
class TempestConf(object):
- def __init__(self):
+ def __init__(self, **kwargs):
self.VERIFIER_ID = conf_utils.get_verifier_id()
self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
self.VERIFIER_ID)
@@ -27,15 +29,22 @@ class TempestConf(object):
self.confpath = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
+ self.resources = TempestResourcesManager(**kwargs)
def generate_tempestconf(self):
try:
openstack_utils.source_credentials(
CONST.__getattribute__('openstack_creds'))
- img_flavor_dict = conf_utils.create_tempest_resources(
- use_custom_images=True, use_custom_flavors=True)
+ resources = self.resources.create(create_project=True,
+ use_custom_images=True,
+ use_custom_flavors=True)
conf_utils.configure_tempest_defcore(
- self.DEPLOYMENT_DIR, img_flavor_dict)
+ self.DEPLOYMENT_DIR,
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ image_id_alt=resources.get("image_id_alt"),
+ flavor_id_alt=resources.get("flavor_id_alt"),
+ tenant_id=resources.get("project_id"))
except Exception as e:
logger.error("error with generating refstack client "
"reference tempest conf file: %s", e)
@@ -48,6 +57,9 @@ class TempestConf(object):
except Exception as e:
logger.error('Error with run: %s', e)
+ def clean(self):
+ self.resources.cleanup()
+
def main():
logging.basicConfig()
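
A hedged sketch of the TempestConf lifecycle after this change: resource creation moved into TempestResourcesManager and a clean() hook was added, so a caller pairs generate_tempestconf() with clean(). It assumes the usual functest environment (rally verifier installed, credentials sourced).

# Sketch only: pairs the new generate_tempestconf()/clean() calls.
from functest.opnfv_tests.openstack.refstack_client.tempest_conf import \
    TempestConf

conf = TempestConf()
try:
    # Creates project/user/network/images/flavors via TempestResourcesManager
    # and writes the defcore-oriented tempest.conf plus test_accounts.yaml.
    conf.generate_tempestconf()
finally:
    # Releases everything registered by the resources manager.
    conf.clean()
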
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index fd3785b99..52fa60032 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -41,6 +41,9 @@ REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'refstack')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
+TEST_ACCOUNTS_FILE = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml')
CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
@@ -49,77 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
logger = logging.getLogger(__name__)
-def create_tempest_resources(use_custom_images=False,
- use_custom_flavors=False):
-
- logger.debug("Creating private network for Tempest suite")
- network_dic = os_utils.create_shared_network_full(
- CONST.__getattribute__('tempest_private_net_name'),
- CONST.__getattribute__('tempest_private_subnet_name'),
- CONST.__getattribute__('tempest_router_name'),
- CONST.__getattribute__('tempest_private_subnet_cidr'))
- if network_dic is None:
- raise Exception('Failed to create private network')
-
- image_id = ""
- image_id_alt = ""
- flavor_id = ""
- flavor_id_alt = ""
-
- if (CONST.__getattribute__('tempest_use_custom_images') or
- use_custom_images):
- # adding alternative image should be trivial should we need it
- logger.debug("Creating image for Tempest suite")
- _, image_id = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id is None:
- raise Exception('Failed to create image')
-
- if use_custom_images:
- logger.debug("Creating 2nd image for Tempest suite")
- _, image_id_alt = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name_alt'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id_alt is None:
- raise Exception('Failed to create image')
-
- if (CONST.__getattribute__('tempest_use_custom_flavors') or
- use_custom_flavors):
- # adding alternative flavor should be trivial should we need it
- logger.debug("Creating flavor for Tempest suite")
- _, flavor_id = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id is None:
- raise Exception('Failed to create flavor')
-
- if use_custom_flavors:
- logger.debug("Creating 2nd flavor for tempest_defcore")
- _, flavor_id_alt = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name_alt'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id_alt is None:
- raise Exception('Failed to create flavor')
-
- img_flavor_dict = {}
- img_flavor_dict['image_id'] = image_id
- img_flavor_dict['image_id_alt'] = image_id_alt
- img_flavor_dict['flavor_id'] = flavor_id
- img_flavor_dict['flavor_id_alt'] = flavor_id_alt
-
- return img_flavor_dict
-
-
def get_verifier_id():
"""
- Returns verifer id for current Tempest
+ Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
CONST.__getattribute__('tempest_deployment_name') +
@@ -153,7 +88,7 @@ def get_verifier_deployment_id():
def get_verifier_repo_dir(verifier_id):
"""
- Returns installed verfier repo directory for Tempest
+ Returns installed verifier repo directory for Tempest
"""
if not verifier_id:
verifier_id = get_verifier_id()
@@ -195,32 +130,27 @@ def backup_tempest_config(conf_file):
"""
Copy config file to tempest results directory
"""
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
shutil.copyfile(conf_file,
os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf'))
-def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None,
- MODE=None):
+def configure_tempest(deployment_dir, image_id=None, flavor_id=None,
+ mode=None):
"""
Calls rally verify and updates the generated tempest.conf with
given parameters
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- IMAGE_ID, FLAVOR_ID)
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
-def configure_tempest_defcore(deployment_dir, img_flavor_dict):
+def configure_tempest_defcore(deployment_dir, image_id, flavor_id,
+ image_id_alt, flavor_id_alt, tenant_id):
"""
Add/update needed parameters into tempest.conf file
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- img_flavor_dict.get("image_id"),
- img_flavor_dict.get("flavor_id"))
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
logger.debug("Updating selected tempest.conf parameters for defcore...")
config = ConfigParser.RawConfigParser()
@@ -228,14 +158,14 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
config.set('oslo_concurrency', 'lock_path',
'{}/lock_files'.format(deployment_dir))
+ generate_test_accounts_file(tenant_id=tenant_id)
+ config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE)
config.set('scenario', 'img_dir', '{}'.format(deployment_dir))
config.set('scenario', 'img_file', 'tempest-image')
- config.set('compute', 'image_ref', img_flavor_dict.get("image_id"))
- config.set('compute', 'image_ref_alt',
- img_flavor_dict['image_id_alt'])
- config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id"))
- config.set('compute', 'flavor_ref_alt',
- img_flavor_dict['flavor_id_alt'])
+ config.set('compute', 'image_ref', image_id)
+ config.set('compute', 'image_ref_alt', image_id_alt)
+ config.set('compute', 'flavor_ref', flavor_id)
+ config.set('compute', 'flavor_ref_alt', flavor_id_alt)
with open(conf_file, 'wb') as config_file:
config.write(config_file)
@@ -246,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
shutil.copyfile(conf_file, confpath)
+def generate_test_accounts_file(tenant_id):
+ """
+ Add needed tenant and user params into test_accounts.yaml
+ """
+
+ logger.debug("Add needed params into test_accounts.yaml...")
+ accounts_list = [
+ {
+ 'tenant_name':
+ CONST.__getattribute__('tempest_identity_tenant_name'),
+ 'tenant_id': str(tenant_id),
+ 'username': CONST.__getattribute__('tempest_identity_user_name'),
+ 'password':
+ CONST.__getattribute__('tempest_identity_user_password')
+ }
+ ]
+
+ with open(TEST_ACCOUNTS_FILE, "w") as f:
+ yaml.dump(accounts_list, f, default_flow_style=False)
+
+
def configure_tempest_update_params(tempest_conf_file,
- IMAGE_ID=None, FLAVOR_ID=None):
+ image_id=None, flavor_id=None):
"""
Add/update needed parameters into tempest.conf file
"""
@@ -261,13 +212,13 @@ def configure_tempest_update_params(tempest_conf_file,
config.set('compute', 'volume_device_name',
CONST.__getattribute__('tempest_volume_device_name'))
if CONST.__getattribute__('tempest_use_custom_images'):
- if IMAGE_ID is not None:
- config.set('compute', 'image_ref', IMAGE_ID)
+ if image_id is not None:
+ config.set('compute', 'image_ref', image_id)
if IMAGE_ID_ALT is not None:
config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
if CONST.__getattribute__('tempest_use_custom_flavors'):
- if FLAVOR_ID is not None:
- config.set('compute', 'flavor_ref', FLAVOR_ID)
+ if flavor_id is not None:
+ config.set('compute', 'flavor_ref', flavor_id)
if FLAVOR_ID_ALT is not None:
config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
config.set('identity', 'region', 'RegionOne')
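
For reference, a standalone sketch of the file written by generate_test_accounts_file(); the tenant name, id and credentials below are placeholders for the tempest_identity_* values read from CONST and for the project created at runtime.

# Standalone reproduction of the accounts file layout; all values are
# placeholders, the real ones come from CONST and the created project id.
import yaml

accounts = [{
    'tenant_name': 'tempest',        # tempest_identity_tenant_name
    'tenant_id': '<project-uuid>',   # id of the project created for Tempest
    'username': 'tempest',           # tempest_identity_user_name
    'password': 'secret',            # tempest_identity_user_password
}]

with open('test_accounts.yaml', 'w') as accounts_file:
    yaml.dump(accounts, accounts_file, default_flow_style=False)
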
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 003d4ea41..c7ad4df2f 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -23,15 +23,26 @@ from functest.core import testcase
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+from snaps.openstack import create_flavor
+from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
+from snaps.openstack.create_project import ProjectSettings
+from snaps.openstack.create_network import NetworkSettings, SubnetSettings
+from snaps.openstack.create_user import UserSettings
+from snaps.openstack.tests import openstack_tests
+from snaps.openstack.utils import deploy_utils
+
""" logging configuration """
logger = logging.getLogger(__name__)
-class TempestCommon(testcase.OSGCTestCase):
+class TempestCommon(testcase.TestCase):
def __init__(self, **kwargs):
super(TempestCommon, self).__init__(**kwargs)
+ self.resources = TempestResourcesManager(**kwargs)
self.MODE = ""
self.OPTION = ""
self.VERIFIER_ID = conf_utils.get_verifier_id()
@@ -184,9 +195,12 @@ class TempestCommon(testcase.OSGCTestCase):
try:
self.result = 100 * int(num_success) / int(num_executed)
except ZeroDivisionError:
- logger.error("No test has been executed")
self.result = 0
- return
+ if int(num_tests) > 0:
+ logger.info("All tests have been skipped")
+ else:
+ logger.error("No test has been executed")
+ return
with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
"tempest.log"), 'r') as logfile:
@@ -219,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase):
try:
if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
- image_and_flavor = conf_utils.create_tempest_resources()
+ resources = self.resources.create()
conf_utils.configure_tempest(
self.DEPLOYMENT_DIR,
- IMAGE_ID=image_and_flavor.get("image_id"),
- FLAVOR_ID=image_and_flavor.get("flavor_id"),
- MODE=self.MODE)
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ mode=self.MODE)
self.generate_test_list(self.VERIFIER_REPO_DIR)
self.apply_tempest_blacklist()
self.run_verifier_tests()
@@ -233,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase):
except Exception as e:
logger.error('Error with run: %s' % e)
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ self.resources.cleanup()
self.stop_time = time.time()
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+
+ :return: TestCase.EX_OK
+ """
+ logger.info("Initializing the saved state of the OpenStack deployment")
+
+ if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
+ os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
+
+ # Make sure that the verifier is configured
+ conf_utils.configure_verifier(self.DEPLOYMENT_DIR)
+
+ os_utils.init_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(TempestCommon, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources
+ created by Tempest.
+ """
+ logger.info("Destroying the resources created for refstack")
+
+ os_utils.perform_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(TempestCommon, self).clean()
+
class TempestSmokeSerial(TempestCommon):
@@ -285,3 +338,170 @@ class TempestDefcore(TempestCommon):
TempestCommon.__init__(self, **kwargs)
self.MODE = "defcore"
self.OPTION = "--concurrency 1"
+
+
+class TempestResourcesManager(object):
+
+ def __init__(self, **kwargs):
+ self.os_creds = None
+ if 'os_creds' in kwargs:
+ self.os_creds = kwargs['os_creds']
+ else:
+ self.os_creds = openstack_tests.get_credentials(
+ os_env_file=CONST.__getattribute__('openstack_creds'))
+
+ self.creators = list()
+
+ if hasattr(CONST, 'snaps_images_cirros'):
+ self.cirros_image_config = CONST.__getattribute__(
+ 'snaps_images_cirros')
+ else:
+ self.cirros_image_config = None
+
+ def create(self, use_custom_images=False, use_custom_flavors=False,
+ create_project=False):
+ if create_project:
+ logger.debug("Creating project (tenant) for Tempest suite")
+ project_name = CONST.__getattribute__(
+ 'tempest_identity_tenant_name')
+ project_creator = deploy_utils.create_project(
+ self.os_creds, ProjectSettings(
+ name=project_name,
+ description=CONST.__getattribute__(
+ 'tempest_identity_tenant_description')))
+ if (project_creator is None or
+ project_creator.get_project() is None):
+ raise Exception("Failed to create tenant")
+ project_id = project_creator.get_project().id
+ self.creators.append(project_creator)
+
+ logger.debug("Creating user for Tempest suite")
+ user_creator = deploy_utils.create_user(
+ self.os_creds, UserSettings(
+ name=CONST.__getattribute__('tempest_identity_user_name'),
+ password=CONST.__getattribute__(
+ 'tempest_identity_user_password'),
+ project_name=project_name))
+ if user_creator is None or user_creator.get_user() is None:
+ raise Exception("Failed to create user")
+ user_id = user_creator.get_user().id
+ self.creators.append(user_creator)
+ else:
+ project_name = None
+ project_id = None
+ user_id = None
+
+ logger.debug("Creating private network for Tempest suite")
+ network_creator = deploy_utils.create_network(
+ self.os_creds, NetworkSettings(
+ name=CONST.__getattribute__('tempest_private_net_name'),
+ project_name=project_name,
+ subnet_settings=[SubnetSettings(
+ name=CONST.__getattribute__('tempest_private_subnet_name'),
+ cidr=CONST.__getattribute__('tempest_private_subnet_cidr'))
+ ]))
+ if network_creator is None or network_creator.get_network() is None:
+ raise Exception("Failed to create private network")
+ self.creators.append(network_creator)
+
+ image_id = None
+ image_id_alt = None
+ flavor_id = None
+ flavor_id_alt = None
+
+ if (CONST.__getattribute__('tempest_use_custom_images') or
+ use_custom_images):
+ logger.debug("Creating image for Tempest suite")
+ image_base_name = CONST.__getattribute__('openstack_image_name')
+ os_image_settings = openstack_tests.cirros_image_settings(
+ image_base_name, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating image for Tempest suite")
+ image_creator = deploy_utils.create_image(
+ self.os_creds, os_image_settings)
+ if image_creator is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator)
+ image_id = image_creator.get_image().id
+
+ if use_custom_images:
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_base_name_alt = CONST.__getattribute__(
+ 'openstack_image_name_alt')
+ os_image_settings_alt = openstack_tests.cirros_image_settings(
+ image_base_name_alt, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_creator_alt = deploy_utils.create_image(
+ self.os_creds, os_image_settings_alt)
+ if image_creator_alt is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator_alt)
+ image_id_alt = image_creator_alt.get_image().id
+
+ if (CONST.__getattribute__('tempest_use_custom_flavors') or
+ use_custom_flavors):
+ logger.info("Creating flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata))
+ flavor = flavor_creator.create()
+ if flavor is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator)
+ flavor_id = flavor.id
+
+ if use_custom_flavors:
+ logger.info("Creating 2nd flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata_alt = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator_alt = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name_alt'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata_alt))
+ flavor_alt = flavor_creator_alt.create()
+ if flavor_alt is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator_alt)
+ flavor_id_alt = flavor_alt.id
+
+ print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, "
+ "flavor_id: %s, flavor_id_alt: %s" % (
+ image_id, image_id_alt, flavor_id, flavor_id_alt,))
+
+ result = {
+ 'image_id': image_id,
+ 'image_id_alt': image_id_alt,
+ 'flavor_id': flavor_id,
+ 'flavor_id_alt': flavor_id_alt
+ }
+
+ if create_project:
+ result['project_id'] = project_id
+ result['tenant_id'] = project_id # for compatibility
+ result['user_id'] = user_id
+
+ return result
+
+ def cleanup(self):
+ """
+ Cleanup all OpenStack objects. Should be called on completion.
+ """
+ for creator in reversed(self.creators):
+ try:
+ creator.clean()
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
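
A minimal sketch of driving the new TempestResourcesManager directly; when os_creds is not passed it is read from the openstack_creds RC file, and cleanup() should always run since every creator is registered in order. This is illustration only, not part of the patch.

# Sketch only: create() / cleanup() are the two entry points added above.
from functest.opnfv_tests.openstack.tempest.tempest import \
    TempestResourcesManager

mgr = TempestResourcesManager()
try:
    # create_project=True is the refstack/defcore variant; plain Tempest runs
    # use the defaults and only get network/image/flavor identifiers back.
    resources = mgr.create(use_custom_images=True,
                           use_custom_flavors=True,
                           create_project=True)
    image_id = resources.get("image_id")
    flavor_id = resources.get("flavor_id")
finally:
    # Deletes the creators in reverse order of creation.
    mgr.cleanup()
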
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 967472fac..40fcb07f0 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -108,14 +108,33 @@ class VPingBase(testcase.TestCase):
'vping_private_subnet_name') + self.guid
private_subnet_cidr = CONST.__getattribute__(
'vping_private_subnet_cidr')
+
+ vping_network_type = None
+ vping_physical_network = None
+ vping_segmentation_id = None
+
+        if hasattr(CONST, 'vping_network_type'):
+            vping_network_type = CONST.__getattribute__(
+                'vping_network_type')
+        if hasattr(CONST, 'vping_physical_network'):
+            vping_physical_network = CONST.__getattribute__(
+                'vping_physical_network')
+        if hasattr(CONST, 'vping_segmentation_id'):
+            vping_segmentation_id = CONST.__getattribute__(
+                'vping_segmentation_id')
+
self.logger.info(
"Creating network with name: '%s'" % private_net_name)
self.network_creator = deploy_utils.create_network(
self.os_creds,
- NetworkSettings(name=private_net_name,
- subnet_settings=[SubnetSettings(
- name=private_subnet_name,
- cidr=private_subnet_cidr)]))
+ NetworkSettings(
+ name=private_net_name,
+ network_type=vping_network_type,
+ physical_network=vping_physical_network,
+ segmentation_id=vping_segmentation_id,
+ subnet_settings=[SubnetSettings(
+ name=private_subnet_name,
+ cidr=private_subnet_cidr)]))
self.creators.append(self.network_creator)
self.logger.info(
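
For context, a small sketch of what the optional vping_network_type / vping_physical_network / vping_segmentation_id settings map to on the SNAPS side; the concrete values (vlan, physnet1, 100) and names are placeholders that depend on the deployment.

# Placeholder values only: mirrors how VPingBase now forwards the optional
# provider-network settings to the SNAPS NetworkSettings.
from snaps.openstack.create_network import NetworkSettings, SubnetSettings

net_settings = NetworkSettings(
    name='vping-net-example',
    network_type='vlan',            # deployment specific
    physical_network='physnet1',    # deployment specific
    segmentation_id=100,            # deployment specific
    subnet_settings=[SubnetSettings(name='vping-subnet-example',
                                    cidr='192.168.130.0/24')])
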
diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py
deleted file mode 100644
index 71e3c972a..000000000
--- a/functest/opnfv_tests/vnf/aaa/aaa.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-
-import functest.core.vnf as vnf
-
-
-class AaaVnf(vnf.VnfOnBoarding):
- """AAA VNF sample"""
-
- logger = logging.getLogger(__name__)
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "aaa"
- super(AaaVnf, self).__init__(**kwargs)
-
- def deploy_orchestrator(self):
- self.logger.info("No VNFM needed to deploy a free radius here")
- return True
-
- def deploy_vnf(self):
- self.logger.info("Freeradius VNF deployment")
- # find a way to deploy freeradius and tester (heat,manual, ..)
- deploy_vnf = {'status': 'PASS', 'version': 'xxxx'}
- self.details['deploy_vnf'] = deploy_vnf
- return True
-
- def test_vnf(self):
- self.logger.info("Run test towards freeradius")
- # once the freeradius is deployed..make some tests
- test_vnf = {'status': 'PASS', 'version': 'xxxx'}
- self.details['test_vnf'] = test_vnf
- return True
diff --git a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
index 1c3f69c6d..8851f7a48 100644
--- a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
+++ b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
@@ -10,7 +10,9 @@ import json
import logging
import os
import pkg_resources
+import shlex
import shutil
+import subprocess
import time
import requests
@@ -109,18 +111,17 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
bono_ip=None, ellis_ip=None,
signup_code='secret'):
self.logger.info('Run Clearwater live test')
- nameservers = ft_utils.get_resolvconf_ns()
- resolvconf = ['{0}{1}{2}'.format(os.linesep, 'nameserver ', ns)
- for ns in nameservers]
- self.logger.debug('resolvconf: %s', resolvconf)
dns_file = '/etc/resolv.conf'
dns_file_bak = '/etc/resolv.conf.bak'
+ self.logger.debug('Backup %s -> %s', dns_file, dns_file_bak)
shutil.copy(dns_file, dns_file_bak)
- script = ('echo -e "nameserver {0}{1}" > {2};'
- 'cd {3};'
- 'rake test[{4}] SIGNUP_CODE={5}'
- .format(dns_ip,
- ''.join(resolvconf),
+ cmd = ("dnsmasq -d -u root --server=/clearwater.opnfv/{0} "
+ "-r /etc/resolv.conf.bak".format(dns_ip))
+ dnsmasq_process = subprocess.Popen(shlex.split(cmd))
+ script = ('echo -e "nameserver {0}" > {1};'
+ 'cd {2};'
+ 'rake test[{3}] SIGNUP_CODE={4}'
+ .format('127.0.0.1',
dns_file,
self.test_dir,
public_domain,
@@ -135,7 +136,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
ft_utils.execute_command(cmd,
error_msg='Clearwater live test failed',
output_file=output_file)
-
+ dnsmasq_process.kill()
with open(dns_file_bak, 'r') as bak_file:
result = bak_file.read()
with open(dns_file, 'w') as f:
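
A sketch of the DNS redirection introduced above, shown outside the test class: a local dnsmasq forwards only the clearwater.opnfv zone to the deployed DNS VNF and everything else to the saved resolvers, while /etc/resolv.conf temporarily points at it. The address is a placeholder and root privileges are assumed.

# Sketch of the dnsmasq-based DNS override; 203.0.113.10 is a placeholder for
# the address of the DNS service deployed with the Clearwater vIMS.
import shlex
import shutil
import subprocess

dns_ip = '203.0.113.10'
shutil.copy('/etc/resolv.conf', '/etc/resolv.conf.bak')
dnsmasq = subprocess.Popen(shlex.split(
    'dnsmasq -d -u root --server=/clearwater.opnfv/{0} '
    '-r /etc/resolv.conf.bak'.format(dns_ip)))
try:
    with open('/etc/resolv.conf', 'w') as resolv:
        # Point local resolution at dnsmasq for the duration of the live test.
        resolv.write('nameserver 127.0.0.1\n')
    # ... run the Clearwater live test (rake test[...]) here ...
finally:
    dnsmasq.kill()
    shutil.copy('/etc/resolv.conf.bak', '/etc/resolv.conf')
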
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index 8f6fcec89..b07eaee23 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -110,15 +110,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
# needs some images
self.__logger.info("Upload some OS images if it doesn't exist")
- for image_name, image_url in self.images.iteritems():
- self.__logger.info("image: %s, url: %s", image_name, image_url)
- if image_url and image_name:
+ for image_name, image_file in self.images.iteritems():
+ self.__logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
image_creator = OpenStackImage(
self.snaps_creds,
ImageSettings(name=image_name,
image_user='cloud',
img_format='qcow2',
- url=image_url))
+ image_file=image_file))
image_creator.create()
# self.created_object.append(image_creator)
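
The url= to image_file= switch above relies on the images being present locally; a hedged sketch of the resulting call follows, with the credential file path as a placeholder and the snaps import locations assumed from the library used by these tests.

# Sketch of creating a glance image from a locally stored file instead of a
# remote URL; the credentials path is a placeholder for CONST openstack_creds.
from snaps.openstack.create_image import ImageSettings, OpenStackImage
from snaps.openstack.tests import openstack_tests

os_creds = openstack_tests.get_credentials(
    os_env_file='/home/opnfv/functest/conf/openstack.creds')
image_creator = OpenStackImage(
    os_creds,
    ImageSettings(name='ubuntu_14.04',
                  image_user='cloud',
                  img_format='qcow2',
                  image_file='/home/opnfv/functest/images/'
                             'trusty-server-cloudimg-amd64-disk1.img'))
image_creator.create()
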
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
index 743c6ddd8..280e0a6b8 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
@@ -1,6 +1,6 @@
tenant_images:
- ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- cloudify_manager_4.0: http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
+ ubuntu_14.04: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+ cloudify_manager_4.0: /home/opnfv/functest/images/cloudify-manager-premium-4.0.1.qcow2
orchestrator:
name: cloudify
version: '4.0'
diff --git a/functest/opnfv_tests/vnf/ims/orchestra.yaml b/functest/opnfv_tests/vnf/ims/orchestra.yaml
index 7b43c0012..4cd18e72b 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra.yaml
+++ b/functest/opnfv_tests/vnf/ims/orchestra.yaml
@@ -1,10 +1,10 @@
tenant_images:
orchestrator:
- ubuntu-14.04-server-cloudimg-amd64-disk1: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
orchestra_openims:
- openims: http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
+ openims: /home/opnfv/functest/images/img
orchestra_clearwaterims:
- ubuntu-14.04-server-cloudimg-amd64-disk1: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
mano:
name: OpenBaton
version: '3.2.0'
@@ -58,4 +58,4 @@ orchestra_clearwaterims:
ram_min: 2048
disk: 5
vcpus: 2
- test: \ No newline at end of file
+ test:
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
index 9e14711c1..a54059966 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
@@ -195,9 +195,7 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
- self.images = get_config(
- "tenant_images.%s" %
- self.case_name, config_file)
+ self.images = get_config("tenant_images.orchestrator", config_file)
self.images.update(
get_config(
"tenant_images.%s" %
@@ -228,15 +226,15 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
def prepare_images(self):
"""Upload images if they doen't exist yet"""
self.logger.info("Upload images if they doen't exist yet")
- for image_name, image_url in self.images.iteritems():
- self.logger.info("image: %s, url: %s", image_name, image_url)
- if image_url and image_name:
+ for image_name, image_file in self.images.iteritems():
+ self.logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
image = OpenStackImage(
self.snaps_creds,
ImageSettings(name=image_name,
image_user='cloud',
img_format='qcow2',
- url=image_url))
+ image_file=image_file))
image.create()
# self.created_resources.append(image);
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_openims.py b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
index f9a81f225..f8acada44 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_openims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
@@ -195,8 +195,7 @@ class OpenImsVnf(vnf.VnfOnBoarding):
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
- self.images = get_config("tenant_images.%s" %
- self.case_name, config_file)
+ self.images = get_config("tenant_images.orchestrator", config_file)
self.images.update(get_config("tenant_images.%s" %
self.case_name, config_file))
self.snaps_creds = None
@@ -224,15 +223,15 @@ class OpenImsVnf(vnf.VnfOnBoarding):
def prepare_images(self):
"""Upload images if they doen't exist yet"""
self.logger.info("Upload images if they doen't exist yet")
- for image_name, image_url in self.images.iteritems():
- self.logger.info("image: %s, url: %s", image_name, image_url)
- if image_url and image_name:
+ for image_name, image_file in self.images.iteritems():
+ self.logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
image = OpenStackImage(
self.snaps_creds,
ImageSettings(name=image_name,
image_user='cloud',
img_format='qcow2',
- url=image_url))
+ image_file=image_file))
image.create()
# self.created_resources.append(image);
diff --git a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
index c5601075e..ca0974832 100644
--- a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
+++ b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
@@ -12,9 +12,12 @@ import pkg_resources
import unittest
from functest.core import testcase
-from functest.opnfv_tests.openstack.refstack_client import refstack_client
+from functest.opnfv_tests.openstack.refstack_client.refstack_client import \
+ RefstackClient, RefstackClientParser
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
+
class OSRefstackClientTesting(unittest.TestCase):
@@ -25,34 +28,44 @@ class OSRefstackClientTesting(unittest.TestCase):
'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
def setUp(self):
- self.defaultargs = {'config': self._config,
- 'testlist': self._testlist}
+ self.default_args = {'config': self._config,
+ 'testlist': self._testlist}
CONST.__setattr__('OS_AUTH_URL', 'https://ip:5000/v3')
CONST.__setattr__('OS_INSECURE', 'true')
- self.refstackclient = refstack_client.RefstackClient()
+ self.os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
+ @mock.patch('functest.opnfv_tests.openstack.refstack_client.tempest_conf.'
+ 'TempestConf', return_value=mock.Mock())
+ def _create_client(self, mock_conf):
+ with mock.patch('snaps.openstack.tests.openstack_tests.'
+ 'get_credentials', return_value=self.os_creds):
+ return RefstackClient()
def test_run_defcore_insecure(self):
insecure = '-k'
config = 'tempest.conf'
testlist = 'testlist'
+ client = self._create_client()
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
.format(insecure, config, testlist))
- self.refstackclient.run_defcore(config, testlist)
+ client.run_defcore(config, testlist)
m.assert_any_call(cmd)
def test_run_defcore(self):
CONST.__setattr__('OS_AUTH_URL', 'http://ip:5000/v3')
- refstackclient = refstack_client.RefstackClient()
insecure = ''
config = 'tempest.conf'
testlist = 'testlist'
+ client = self._create_client()
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
.format(insecure, config, testlist))
- refstackclient.run_defcore(config, testlist)
+ client.run_defcore(config, testlist)
m.assert_any_call(cmd)
@mock.patch('functest.opnfv_tests.openstack.refstack_client.'
@@ -62,7 +75,7 @@ class OSRefstackClientTesting(unittest.TestCase):
mock_logger_info):
self.case_name = 'refstack_defcore'
self.result = 0
- self.refstackclient.parse_refstack_result()
+ self._create_client().parse_refstack_result()
mock_logger_info.assert_called_once_with(
"Testcase %s success_rate is %s%%",
self.case_name, self.result)
@@ -82,10 +95,11 @@ class OSRefstackClientTesting(unittest.TestCase):
"success": ['tempest.api.compute [18.464988s]'],
"errors": ['tempest.api.volume [0.230334s]'],
"skipped": ['tempest.api.network [1.265828s]']}
+ client = self._create_client()
with mock.patch('__builtin__.open',
mock.mock_open(read_data=log_file)):
- self.refstackclient.parse_refstack_result()
- self.assertEqual(self.refstackclient.details, self.details)
+ client.parse_refstack_result()
+ self.assertEqual(client.details, self.details)
def _get_main_kwargs(self, key=None):
kwargs = {'config': self._config,
@@ -96,16 +110,18 @@ class OSRefstackClientTesting(unittest.TestCase):
def _test_main(self, status, *args):
kwargs = self._get_main_kwargs()
- self.assertEqual(self.refstackclient.main(**kwargs), status)
+ client = self._create_client()
+ self.assertEqual(client.main(**kwargs), status)
if len(args) > 0:
args[0].assert_called_once_with(
- refstack_client.RefstackClient.result_dir)
+ RefstackClient.result_dir)
if len(args) > 1:
args
def _test_main_missing_keyword(self, key):
kwargs = self._get_main_kwargs(key)
- self.assertEqual(self.refstackclient.main(**kwargs),
+ client = self._create_client()
+ self.assertEqual(client.main(**kwargs),
testcase.TestCase.EX_RUN_ERROR)
def test_main_missing_conf(self):
@@ -115,10 +131,10 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_main_missing_keyword('testlist')
def _test_argparser(self, arg, value):
- self.defaultargs[arg] = value
- parser = refstack_client.RefstackClientParser()
+ self.default_args[arg] = value
+ parser = RefstackClientParser()
self.assertEqual(parser.parse_args(["--{}={}".format(arg, value)]),
- self.defaultargs)
+ self.default_args)
def test_argparser_conf(self):
self._test_argparser('config', self._config)
@@ -127,13 +143,13 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_argparser('testlist', self._testlist)
def test_argparser_multiple_args(self):
- self.defaultargs['config'] = self._config
- self.defaultargs['testlist'] = self._testlist
- parser = refstack_client.RefstackClientParser()
+ self.default_args['config'] = self._config
+ self.default_args['testlist'] = self._testlist
+ parser = RefstackClientParser()
self.assertEqual(parser.parse_args(
["--config={}".format(self._config),
"--testlist={}".format(self._testlist)
- ]), self.defaultargs)
+ ]), self.default_args)
if __name__ == "__main__":
diff --git a/functest/tests/unit/openstack/tempest/test_conf_utils.py b/functest/tests/unit/openstack/tempest/test_conf_utils.py
index bbfcc57d5..77558086b 100644
--- a/functest/tests/unit/openstack/tempest/test_conf_utils.py
+++ b/functest/tests/unit/openstack/tempest/test_conf_utils.py
@@ -10,71 +10,83 @@ import unittest
import mock
-from functest.opnfv_tests.openstack.tempest import conf_utils
+from functest.opnfv_tests.openstack.tempest import tempest, conf_utils
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
class OSTempestConfUtilsTesting(unittest.TestCase):
- def test_create_tempest_resources_missing_network_dic(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=None), \
- self.assertRaises(Exception) as context:
- conf_utils.create_tempest_resources()
- msg = 'Failed to create private network'
- self.assertTrue(msg in context)
-
- def test_create_tempest_resources_missing_image(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_image',
- return_value=(mock.Mock(), None)), \
- self.assertRaises(Exception) as context:
-
- CONST.__setattr__('tempest_use_custom_images', True)
- conf_utils.create_tempest_resources()
- msg = 'Failed to create image'
- self.assertTrue(msg in context)
-
- CONST.__setattr__('tempest_use_custom_images', False)
- conf_utils.create_tempest_resources(use_custom_images=True)
- msg = 'Failed to create image'
- self.assertTrue(msg in context)
-
- def test_create_tempest_resources_missing_flavor(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_image',
- return_value=(mock.Mock(), 'image_id')), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_flavor',
- return_value=(mock.Mock(), None)), \
- self.assertRaises(Exception) as context:
- CONST.__setattr__('tempest_use_custom_images', True)
- CONST.__setattr__('tempest_use_custom_flavors', True)
- conf_utils.create_tempest_resources()
- msg = 'Failed to create flavor'
- self.assertTrue(msg in context)
-
- CONST.__setattr__('tempest_use_custom_images', True)
- CONST.__setattr__('tempest_use_custom_flavors', False)
- conf_utils.create_tempest_resources(use_custom_flavors=False)
- msg = 'Failed to create flavor'
- self.assertTrue(msg in context)
+ def setUp(self):
+ self.os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=None)
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=mock.Mock())
+ def test_create_tempest_resources_missing_network_dic(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(os_creds={})
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create private network'
+ self.assertTrue(msg in context.exception)
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=None)
+ def test_create_tempest_resources_missing_image(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(os_creds={})
+
+        CONST.__setattr__('tempest_use_custom_images', True)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create image'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+        CONST.__setattr__('tempest_use_custom_images', False)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create(use_custom_images=True)
+ msg = 'Failed to create image'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create',
+ return_value=None)
+ def test_create_tempest_resources_missing_flavor(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(
+ os_creds=self.os_creds)
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', True)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create flavor'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', False)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create(use_custom_flavors=True)
+ msg = 'Failed to create flavor'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
def test_get_verifier_id_missing_verifier(self):
CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
@@ -158,23 +170,9 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
def test_backup_tempest_config_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.path.exists',
- return_value=False), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.makedirs') as m1, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile') as m2:
+ 'conf_utils.shutil.copyfile') as m1:
conf_utils.backup_tempest_config('test_conf_file')
self.assertTrue(m1.called)
- self.assertTrue(m2.called)
-
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.path.exists',
- return_value=True), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile') as m2:
- conf_utils.backup_tempest_config('test_conf_file')
- self.assertTrue(m2.called)
def test_configure_tempest_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
@@ -186,10 +184,6 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
self.assertTrue(m1.called)
def test_configure_tempest_defcore_default(self):
- img_flavor_dict = {'image_id': 'test_image_id',
- 'flavor_id': 'test_flavor_id',
- 'image_id_alt': 'test_image_alt_id',
- 'flavor_id_alt': 'test_flavor_alt_id'}
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_verifier',
return_value='test_conf_file'), \
@@ -206,9 +200,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'write') as mwrite, \
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
+ 'conf_utils.generate_test_accounts_file'), \
+ mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.shutil.copyfile'):
- conf_utils.configure_tempest_defcore('test_dep_dir',
- img_flavor_dict)
+ conf_utils.configure_tempest_defcore(
+ 'test_dep_dir', 'test_image_id', 'test_flavor_id',
+ 'test_image_alt_id', 'test_flavor_alt_id', 'test_tenant_id')
mset.assert_any_call('compute', 'image_ref', 'test_image_id')
mset.assert_any_call('compute', 'image_ref_alt',
'test_image_alt_id')
@@ -218,6 +215,13 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
self.assertTrue(mread.called)
self.assertTrue(mwrite.called)
+ def test_generate_test_accounts_file_default(self):
+ with mock.patch("__builtin__.open", mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
+ 'yaml.dump') as mock_dump:
+ conf_utils.generate_test_accounts_file('test_tenant_id')
+ self.assertTrue(mock_dump.called)
+
def _test_missing_param(self, params, image_id, flavor_id):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.ConfigParser.RawConfigParser.'
@@ -236,8 +240,8 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
CONST.__setattr__('OS_ENDPOINT_TYPE', None)
conf_utils.\
configure_tempest_update_params('test_conf_file',
- IMAGE_ID=image_id,
- FLAVOR_ID=flavor_id)
+ image_id=image_id,
+ flavor_id=flavor_id)
mset.assert_any_call(params[0], params[1], params[2])
self.assertTrue(mread.called)
self.assertTrue(mwrite.called)
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index d5016f71b..54d7d49bb 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -15,10 +15,16 @@ from functest.opnfv_tests.openstack.tempest import tempest
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
+
class OSTempestTesting(unittest.TestCase):
def setUp(self):
+ os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.get_verifier_id',
return_value='test_deploy_id'), \
@@ -30,7 +36,9 @@ class OSTempestTesting(unittest.TestCase):
return_value='test_verifier_repo_dir'), \
mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.get_verifier_deployment_dir',
- return_value='test_verifier_deploy_dir'):
+ return_value='test_verifier_deploy_dir'), \
+ mock.patch('snaps.openstack.tests.openstack_tests.get_credentials',
+ return_value=os_creds):
self.tempestcommon = tempest.TempestCommon()
self.tempestsmoke_serial = tempest.TempestSmokeSerial()
self.tempestsmoke_parallel = tempest.TempestSmokeParallel()
@@ -153,8 +161,8 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', side_effect=Exception)
- def test_run_create_tempest_resources_ko(self, *args):
+ 'TempestResourcesManager.create', side_effect=Exception)
+ def test_run_tempest_create_resources_ko(self, *args):
self.assertEqual(self.tempestcommon.run(),
testcase.TestCase.EX_RUN_ERROR)
@@ -162,7 +170,7 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', return_value={})
+ 'TempestResourcesManager.create', return_value={})
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.configure_tempest', side_effect=Exception)
def test_run_configure_tempest_ko(self, *args):
@@ -173,7 +181,7 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', return_value={})
+ 'TempestResourcesManager.create', return_value={})
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.configure_tempest')
def _test_run(self, status, *args):
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index bf68e43a3..a766ef953 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -267,14 +267,14 @@ def get_ci_envvars():
def execute_command_raise(cmd, info=False, error_msg="",
- verbose=True, output_file=None):
- ret = execute_command(cmd, info, error_msg, verbose, output_file)
+ verbose=True, output_file=None, env=None):
+ ret = execute_command(cmd, info, error_msg, verbose, output_file, env)
if ret != 0:
raise Exception(error_msg)
def execute_command(cmd, info=False, error_msg="",
- verbose=True, output_file=None):
+ verbose=True, output_file=None, env=None):
if not error_msg:
error_msg = ("The command '%s' failed." % cmd)
msg_exec = ("Executing command: '%s'" % cmd)
@@ -283,7 +283,7 @@ def execute_command(cmd, info=False, error_msg="",
logger.info(msg_exec)
else:
logger.debug(msg_exec)
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ p = subprocess.Popen(cmd, env=env, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if output_file:
f = open(output_file, "w")
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
index 335f14cd1..73d1cde49 100644
--- a/functest/utils/openstack_utils.py
+++ b/functest/utils/openstack_utils.py
@@ -22,8 +22,8 @@ from heatclient import client as heatclient
from novaclient import client as novaclient
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
-from functest.utils.constants import CONST
+from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
logger = logging.getLogger(__name__)
@@ -713,6 +713,8 @@ def get_private_net(neutron_client):
def get_external_net(neutron_client):
+ if (hasattr(CONST, 'EXTERNAL_NETWORK')):
+ return CONST.__getattribute__('EXTERNAL_NETWORK')
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
return network['name']
@@ -720,6 +722,11 @@ def get_external_net(neutron_client):
def get_external_net_id(neutron_client):
+ if (hasattr(CONST, 'EXTERNAL_NETWORK')):
+ networks = neutron_client.list_networks(
+ name=CONST.__getattribute__('EXTERNAL_NETWORK'))
+ net_id = networks['networks'][0]['id']
+ return net_id
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
return network['id']
@@ -1554,3 +1561,62 @@ def get_resource(heat_client, stack_id, resource):
except Exception as e:
logger.error("Error [get_resource]: %s" % e)
return None
+
+
+# *********************************************
+# TEMPEST
+# *********************************************
+def init_tempest_cleanup(tempest_config_dir=None,
+ tempest_config_filename='tempest.conf',
+ output_file=None):
+ """
+ Initialize the Tempest Cleanup utility.
+ See https://docs.openstack.org/tempest/latest/cleanup.html for docs.
+
+ :param tempest_config_dir: The directory where the Tempest config file is
+ located. If not specified, we let Tempest pick both the directory
+ and the filename (i.e. second parameter is ignored)
+ :param tempest_config_filename: The filename of the Tempest config file
+ :param output_file: Optional file where to save output
+ """
+ # The Tempest cleanup utility currently offers no cmd argument to specify
+ # the config file, therefore it has to be configured with env variables
+ env = None
+ if tempest_config_dir:
+ env = os.environ.copy()
+ env['TEMPEST_CONFIG_DIR'] = tempest_config_dir
+ env['TEMPEST_CONFIG'] = tempest_config_filename
+
+ # If this command fails, an exception must be raised to stop the script
+    # otherwise the later cleanup could also destroy pre-existing resources
+ cmd_line = "tempest cleanup --init-saved-state"
+ ft_utils.execute_command_raise(cmd_line, env=env, output_file=output_file,
+ error_msg="Tempest cleanup init failed")
+
+
+def perform_tempest_cleanup(tempest_config_dir=None,
+ tempest_config_filename='tempest.conf',
+ output_file=None):
+ """
+ Perform cleanup using the Tempest Cleanup utility.
+ See https://docs.openstack.org/tempest/latest/cleanup.html for docs.
+
+ :param tempest_config_dir: The directory where the Tempest config file is
+ located. If not specified, we let Tempest pick both the directory
+ and the filename (i.e. second parameter is ignored)
+ :param tempest_config_filename: The filename of the Tempest config file
+ :param output_file: Optional file where to save output
+ """
+ # The Tempest cleanup utility currently offers no cmd argument to specify
+ # the config file, therefore it has to be configured with env variables
+ env = None
+ if tempest_config_dir:
+ env = os.environ.copy()
+ env['TEMPEST_CONFIG_DIR'] = tempest_config_dir
+ env['TEMPEST_CONFIG'] = tempest_config_filename
+
+    # Unlike the init step, a failure here should not abort the run, so the
+    # non-raising execute_command is used and errors are only logged
+ cmd_line = "tempest cleanup"
+ ft_utils.execute_command(cmd_line, env=env, output_file=output_file,
+ error_msg="Tempest cleanup failed")
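
A short sketch of how the two helpers above are meant to bracket a Tempest-based run; the deployment directory and log paths are placeholders, and the config file is selected purely through the TEMPEST_CONFIG_DIR / TEMPEST_CONFIG environment variables set inside the helpers.

# Sketch only: init_tempest_cleanup() records the saved state before the run,
# perform_tempest_cleanup() removes whatever was created afterwards.
import functest.utils.openstack_utils as os_utils

deployment_dir = '/root/.rally/verification/verifier-<id>/for-deployment-<id>'

os_utils.init_tempest_cleanup(deployment_dir, 'tempest.conf',
                              '/tmp/tempest-cleanup-init.log')
# ... run Tempest / refstack-client here ...
os_utils.perform_tempest_cleanup(deployment_dir, 'tempest.conf',
                                 '/tmp/tempest-cleanup.log')
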
diff --git a/tox.ini b/tox.ini
index 94063c293..9fc18b39f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = docs,pep8,pylint,py35,py27,perm
+envlist = docs,pep8,pylint,py35,py27,perm,aarch64
[testenv]
usedevelop = True
@@ -60,3 +60,12 @@ commands =
-exec ls -l \{\} + | grep '.' && exit 1 || exit 0"
bash -c "\
find {[testenv:perm]path} -exec file \{\} + | grep CRLF && exit 1 || exit 0"
+
+[testenv:aarch64]
+basepython = python2.7
+whitelist_externals =
+ bash
+ git
+commands =
+ bash -c "patch -f -p1 < docker/Dockerfile.aarch64.patch"
+ git checkout docker/Dockerfile