Diffstat (limited to 'functest')
-rw-r--r--  functest/api/__init__.py (renamed from functest/opnfv_tests/vnf/aaa/__init__.py)  0
-rw-r--r--  functest/api/base.py  66
-rw-r--r--  functest/api/common/__init__.py (renamed from functest/tests/unit/vnf/rnc/__init__.py)  0
-rw-r--r--  functest/api/common/api_utils.py  101
-rw-r--r--  functest/api/common/thread.py  52
-rw-r--r--  functest/api/database/__init__.py  0
-rw-r--r--  functest/api/database/db.py  26
-rw-r--r--  functest/api/database/v1/__init__.py  0
-rw-r--r--  functest/api/database/v1/handlers.py  43
-rw-r--r--  functest/api/database/v1/models.py  33
-rw-r--r--  functest/api/resources/__init__.py  0
-rw-r--r--  functest/api/resources/v1/__init__.py  0
-rw-r--r--  functest/api/resources/v1/creds.py  67
-rw-r--r--  functest/api/resources/v1/envs.py  40
-rw-r--r--  functest/api/resources/v1/tasks.py  58
-rw-r--r--  functest/api/resources/v1/testcases.py  115
-rw-r--r--  functest/api/resources/v1/tiers.py  67
-rw-r--r--  functest/api/server.py  103
-rw-r--r--  functest/api/urls.py  66
-rw-r--r--  functest/ci/config_aarch64_patch.yaml  21
-rw-r--r--  functest/ci/config_functest.yaml  58
-rw-r--r--  functest/ci/config_patch.yaml  3
-rw-r--r--  functest/ci/download_images.sh  55
-rw-r--r--  functest/ci/logging.ini  7
-rw-r--r--  functest/ci/prepare_env.py  2
-rw-r--r--  functest/ci/run_tests.py  152
-rw-r--r--  functest/ci/testcases.yaml  125
-rw-r--r--  functest/ci/tier_builder.py  9
-rw-r--r--  functest/ci/tier_handler.py  89
-rw-r--r--  functest/cli/commands/cli_env.py  37
-rw-r--r--  functest/cli/commands/cli_os.py  19
-rw-r--r--  functest/cli/commands/cli_testcase.py  27
-rw-r--r--  functest/cli/commands/cli_tier.py  44
-rw-r--r--  functest/energy/energy.py  227
-rw-r--r--  functest/opnfv_tests/openstack/rally/blacklist.txt  32
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py  4
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml  458
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml  247
-rw-r--r--  functest/opnfv_tests/openstack/rally/task.yaml  4
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/refstack_client.py  126
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/tempest_conf.py  20
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_test_runner.py  7
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_utils.py  2
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py  283
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml  13
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt  4
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py  269
-rw-r--r--  functest/opnfv_tests/openstack/vping/ping.sh  19
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py  35
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_userdata.py  2
-rw-r--r--  functest/opnfv_tests/sdn/odl/odl.py  5
-rw-r--r--  functest/opnfv_tests/sdn/onos/teston/adapters/connection.py  2
-rw-r--r--  functest/opnfv_tests/sdn/onos/teston/adapters/environment.py  22
-rw-r--r--  functest/opnfv_tests/vnf/aaa/aaa.py  41
-rw-r--r--  functest/opnfv_tests/vnf/ims/clearwater_ims_base.py  36
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.py  66
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.yaml  6
-rw-r--r--  functest/opnfv_tests/vnf/ims/opera_ims.py  131
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra.yaml  61
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py  682
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_ims.py  487
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_ims.yaml  21
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_openims.py  718
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py  171
-rw-r--r--  functest/tests/unit/ci/test_tier_builder.py  3
-rw-r--r--  functest/tests/unit/core/test_feature.py  4
-rw-r--r--  functest/tests/unit/energy/test_functest_energy.py  74
-rw-r--r--  functest/tests/unit/odl/test_odl.py  2
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py  553
-rw-r--r--  functest/tests/unit/openstack/refstack_client/test_refstack_client.py  82
-rw-r--r--  functest/tests/unit/openstack/tempest/test_conf_utils.py  249
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py  26
-rw-r--r--  functest/tests/unit/openstack/vping/test_vping.py  6
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py  42
-rw-r--r--  functest/tests/unit/utils/test_openstack_utils.py  17
-rw-r--r--  functest/tests/unit/vnf/ims/test_cloudify_ims.py  9
-rw-r--r--  functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py  227
-rw-r--r--  functest/tests/unit/vnf/ims/test_orchestra_openims.py  229
-rw-r--r--  functest/utils/env.py  3
-rw-r--r--  functest/utils/functest_utils.py  27
-rw-r--r--  functest/utils/openstack_utils.py  91
81 files changed, 5197 insertions, 2133 deletions
diff --git a/functest/opnfv_tests/vnf/aaa/__init__.py b/functest/api/__init__.py
index e69de29b..e69de29b 100644
--- a/functest/opnfv_tests/vnf/aaa/__init__.py
+++ b/functest/api/__init__.py
diff --git a/functest/api/base.py b/functest/api/base.py
new file mode 100644
index 00000000..ffc56786
--- /dev/null
+++ b/functest/api/base.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+The base class to dispatch request
+
+"""
+
+import logging
+
+from flask import request
+from flask_restful import Resource
+
+from functest.api.common import api_utils
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class ApiResource(Resource):
+ """ API Resource class"""
+
+ def __init__(self):
+ super(ApiResource, self).__init__()
+
+ def _post_args(self): # pylint: disable=no-self-use
+ # pylint: disable=maybe-no-member
+ """ Return action and args after parsing request """
+
+ data = request.json if request.json else {}
+ params = api_utils.change_to_str_in_dict(data)
+ action = params.get('action', request.form.get('action', ''))
+ args = params.get('args', {})
+ try:
+ args['file'] = request.files['file']
+ except KeyError:
+ pass
+ LOGGER.debug('Input args are: action: %s, args: %s', action, args)
+
+ return action, args
+
+ def _dispatch_post(self):
+ """ Dispatch request """
+ action, args = self._post_args()
+ return self._dispatch(args, action)
+
+ def _dispatch(self, args, action):
+ """
+ Dynamically load the classes with reflection and
+ obtain corresponding methods
+ """
+ try:
+ return getattr(self, action)(args)
+ except AttributeError:
+ api_utils.result_handler(status=1, data='No such action')
+
+
+# Import modules from package "functest.api.resources"
+# and append them into sys.modules
+api_utils.import_modules_from_package("functest.api.resources")
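Note on the dispatch pattern above: a concrete resource only has to expose a method named after the requested "action"; _dispatch_post() parses the body and routes to it. A minimal sketch, assuming the class and action names (V1Demo, do_something) are purely illustrative and not part of this change, and that a real resource would also need a matching entry in functest/api/urls.py (introduced later in this diff) to be registered:

from functest.api.base import ApiResource
from functest.api.common import api_utils


class V1Demo(ApiResource):
    """ Illustrative resource: POST {"action": "do_something", "args": {...}} """

    def post(self):
        # ApiResource._dispatch_post() parses the JSON body and calls the
        # method named by its "action" field via getattr().
        return self._dispatch_post()

    def do_something(self, args):
        # "args" is the parsed "args" dict taken from the request body.
        return api_utils.result_handler(status=0, data=args)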
diff --git a/functest/tests/unit/vnf/rnc/__init__.py b/functest/api/common/__init__.py
index e69de29b..e69de29b 100644
--- a/functest/tests/unit/vnf/rnc/__init__.py
+++ b/functest/api/common/__init__.py
diff --git a/functest/api/common/api_utils.py b/functest/api/common/api_utils.py
new file mode 100644
index 00000000..d85acf92
--- /dev/null
+++ b/functest/api/common/api_utils.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Utils for functest restapi
+
+"""
+
+import collections
+import logging
+import os
+import sys
+from oslo_utils import importutils
+
+from flask import jsonify
+import six
+
+import functest
+
+LOGGER = logging.getLogger(__name__)
+
+
+def change_to_str_in_dict(obj):
+ """
+ Return a dict with key and value both in string if they are in Unicode
+ """
+ if isinstance(obj, collections.Mapping):
+ return {str(k): change_to_str_in_dict(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [change_to_str_in_dict(ele) for ele in obj]
+ elif isinstance(obj, six.text_type):
+ return str(obj)
+ return obj
+
+
+def itersubclasses(cls, _seen=None):
+ """ Generator over all subclasses of a given class in depth first order """
+
+ if not isinstance(cls, type):
+ raise TypeError("itersubclasses must be called with "
+ "new-style classes, not %.100r" % cls)
+ _seen = _seen or set()
+ try:
+ subs = cls.__subclasses__()
+ except TypeError: # fails only when cls is type
+ subs = cls.__subclasses__(cls)
+ for sub in subs:
+ if sub not in _seen:
+ _seen.add(sub)
+ yield sub
+ for itersub in itersubclasses(sub, _seen):
+ yield itersub
+
+
+def import_modules_from_package(package):
+ """
+ Import modules from package and append into sys.modules
+ :param: package - Full package name. For example: functest.api.resources
+ """
+ path = [os.path.dirname(functest.__file__), ".."] + package.split(".")
+ path = os.path.join(*path)
+ for root, _, files in os.walk(path):
+ for filename in files:
+ if filename.startswith("__") or not filename.endswith(".py"):
+ continue
+ new_package = ".".join(root.split(os.sep)).split("....")[1]
+ module_name = "%s.%s" % (new_package, filename[:-3])
+ try:
+ try_append_module(module_name, sys.modules)
+ except ImportError:
+ LOGGER.exception("unable to import %s", module_name)
+
+
+def try_append_module(name, modules):
+ """ Append the module into specified module system """
+
+ if name not in modules:
+ modules[name] = importutils.import_module(name)
+
+
+def change_obj_to_dict(obj):
+ """ Transfer the object into dict """
+ dic = {}
+ for key, value in vars(obj).items():
+ dic.update({key: value})
+ return dic
+
+
+def result_handler(status, data):
+ """ Return the json format of result in dict """
+ result = {
+ 'status': status,
+ 'result': data
+ }
+ return jsonify(result)
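A quick illustration of the two pure helpers above, which need no Flask application context; the sample values and the _Sample placeholder class are made up:

from functest.api.common.api_utils import (change_obj_to_dict,
                                            change_to_str_in_dict)


class _Sample(object):  # placeholder object, for illustration only
    def __init__(self):
        self.case_name = u'vping_ssh'
        self.criteria = 100


# Unicode keys and values are converted to plain str, recursively.
data = {u'action': u'run_test_case', u'args': {u'testcase': u'vping_ssh'}}
print(change_to_str_in_dict(data))
# e.g. {'action': 'run_test_case', 'args': {'testcase': 'vping_ssh'}}

# An object's attributes are flattened into a plain dict.
print(change_obj_to_dict(_Sample()))
# e.g. {'case_name': u'vping_ssh', 'criteria': 100}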
diff --git a/functest/api/common/thread.py b/functest/api/common/thread.py
new file mode 100644
index 00000000..fb60aaac
--- /dev/null
+++ b/functest/api/common/thread.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to handle multi-thread tasks
+"""
+
+import logging
+import threading
+
+from oslo_serialization import jsonutils
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class TaskThread(threading.Thread):
+ """ Task Thread Class """
+
+ def __init__(self, target, args, handler):
+ super(TaskThread, self).__init__(target=target, args=args)
+ self.target = target
+ self.args = args
+ self.handler = handler
+
+ def run(self):
+ """ Override the function run: run testcase and update database """
+ update_data = {'task_id': self.args.get('task_id'),
+ 'status': 'IN PROGRESS'}
+ self.handler.insert(update_data)
+
+ LOGGER.info('Starting running test case')
+
+ try:
+ data = self.target(self.args)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.exception('Task Failed')
+ update_data = {'status': 'FAIL', 'error': str(err)}
+ self.handler.update_attr(self.args.get('task_id'), update_data)
+ else:
+ LOGGER.info('Task Finished')
+ LOGGER.debug('Result: %s', data)
+ new_data = {'status': 'FINISHED',
+ 'result': jsonutils.dumps(data.get('result', {}))}
+
+ self.handler.update_attr(self.args.get('task_id'), new_data)
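This thread wrapper is what the testcase resource (functest/api/resources/v1/testcases.py, later in this diff) uses to run a case asynchronously while tracking its state in the tasks table. A minimal usage sketch, assuming the table has already been created (init_db() in functest/api/server.py) and that run_case is an illustrative target function, not part of this change:

import uuid

from functest.api.common.thread import TaskThread
from functest.api.database.v1.handlers import TasksHandler


def run_case(args):
    # Illustrative target: it must return a dict carrying a 'result' key,
    # since run() stores jsonutils.dumps(data.get('result', {})).
    return {'result': {'testcase': args.get('testcase'), 'status': 'PASS'}}


task_args = {'testcase': 'vping_ssh', 'task_id': str(uuid.uuid4())}
TaskThread(run_case, task_args, TasksHandler()).start()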
diff --git a/functest/api/database/__init__.py b/functest/api/database/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/api/database/__init__.py
diff --git a/functest/api/database/db.py b/functest/api/database/db.py
new file mode 100644
index 00000000..ea861ddb
--- /dev/null
+++ b/functest/api/database/db.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Create database to store task results using sqlalchemy
+"""
+
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import scoped_session, sessionmaker
+
+
+SQLITE = 'sqlite:////tmp/functest.db'
+
+ENGINE = create_engine(SQLITE, convert_unicode=True)
+DB_SESSION = scoped_session(sessionmaker(autocommit=False,
+ autoflush=False,
+ bind=ENGINE))
+BASE = declarative_base()
+BASE.query = DB_SESSION.query_property()
diff --git a/functest/api/database/v1/__init__.py b/functest/api/database/v1/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/api/database/v1/__init__.py
diff --git a/functest/api/database/v1/handlers.py b/functest/api/database/v1/handlers.py
new file mode 100644
index 00000000..7bd286de
--- /dev/null
+++ b/functest/api/database/v1/handlers.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to handle tasks: insert the task info into database and update it
+"""
+
+from functest.api.database.db import DB_SESSION
+from functest.api.database.v1.models import Tasks
+
+
+class TasksHandler(object):
+ """ Tasks Handler Class """
+
+ def insert(self, kwargs): # pylint: disable=no-self-use
+ """ To insert the task info into database """
+ task = Tasks(**kwargs)
+ DB_SESSION.add(task) # pylint: disable=maybe-no-member
+ DB_SESSION.commit() # pylint: disable=maybe-no-member
+ return task
+
+ def get_task_by_taskid(self, task_id): # pylint: disable=no-self-use
+ """ Obtain the task by task id """
+ # pylint: disable=maybe-no-member
+ task = Tasks.query.filter_by(task_id=task_id).first()
+ if not task:
+ raise ValueError
+
+ return task
+
+ def update_attr(self, task_id, attr):
+ """ Update the required attributes of the task """
+ task = self.get_task_by_taskid(task_id)
+
+ for key, value in attr.items():
+ setattr(task, key, value)
+ DB_SESSION.commit() # pylint: disable=maybe-no-member
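Together with db.py above, the handler gives a small insert/update/read surface over the tasks table. A usage sketch, assuming the table exists (created by init_db() in functest/api/server.py, later in this diff) and using a placeholder task id:

from functest.api.database.v1.handlers import TasksHandler

handler = TasksHandler()

# Insert a new row into the 'tasks' table.
handler.insert({'task_id': '1234', 'status': 'IN PROGRESS'})

# Update one or more columns of that row.
handler.update_attr('1234', {'status': 'FINISHED', 'result': '{}'})

# Read it back; get_task_by_taskid() raises ValueError for unknown ids.
print(handler.get_task_by_taskid('1234').status)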
diff --git a/functest/api/database/v1/models.py b/functest/api/database/v1/models.py
new file mode 100644
index 00000000..c5de91bc
--- /dev/null
+++ b/functest/api/database/v1/models.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Define tables for tasks
+"""
+
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import String
+from sqlalchemy import Text
+
+from functest.api.database.db import BASE
+
+
+class Tasks(BASE): # pylint: disable=too-few-public-methods, no-init
+ """ Create a table for tasks"""
+
+ __tablename__ = 'tasks'
+ id = Column(Integer, primary_key=True) # pylint: disable=invalid-name
+ task_id = Column(String(50))
+ status = Column(Integer)
+ error = Column(String(120))
+ result = Column(Text)
+
+ def __repr__(self):
+ return '<Task %r>' % Tasks.task_id
diff --git a/functest/api/resources/__init__.py b/functest/api/resources/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/api/resources/__init__.py
diff --git a/functest/api/resources/v1/__init__.py b/functest/api/resources/v1/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/api/resources/v1/__init__.py
diff --git a/functest/api/resources/v1/creds.py b/functest/api/resources/v1/creds.py
new file mode 100644
index 00000000..45e4559f
--- /dev/null
+++ b/functest/api/resources/v1/creds.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle openstack related requests
+"""
+
+import collections
+import logging
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.cli.commands.cli_os import OpenStack
+from functest.utils import openstack_utils as os_utils
+from functest.utils.constants import CONST
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Creds(ApiResource):
+ """ V1Creds Resource class"""
+
+ def get(self): # pylint: disable=no-self-use
+ """ Get credentials """
+ os_utils.source_credentials(CONST.__getattribute__('openstack_creds'))
+ credentials_show = OpenStack.show_credentials()
+ return jsonify(credentials_show)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def update_openrc(self, args): # pylint: disable=no-self-use
+ """ Used to update the OpenStack RC file """
+ try:
+ openrc_vars = args['openrc']
+ except KeyError:
+ return api_utils.result_handler(
+ status=0, data='openrc must be provided')
+ else:
+ if not isinstance(openrc_vars, collections.Mapping):
+ return api_utils.result_handler(
+ status=0, data='args should be a dict')
+
+ lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+
+ rc_file = CONST.__getattribute__('openstack_creds')
+ with open(rc_file, 'w') as creds_file:
+ creds_file.writelines(lines)
+
+ LOGGER.info("Sourcing the OpenStack RC file...")
+ try:
+ os_utils.source_credentials(rc_file)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.exception('Failed to source the OpenStack RC file')
+ return api_utils.result_handler(status=0, data=str(err))
+
+ return api_utils.result_handler(
+ status=0, data='Update openrc successfully')
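Assuming the API server from functest/api/server.py is running locally on the default Flask port (5000), the credential resource maps onto the /openstack URLs declared in functest/api/urls.py. A sketch using the third-party requests library purely as an illustrative client; the OpenStack values are examples:

import requests

BASE_URL = 'http://127.0.0.1:5000/api/v1/functest/openstack'

# GET the credentials currently sourced by the server.
print(requests.get('{}/credentials'.format(BASE_URL)).json())

# Rewrite the OpenStack RC file with new variables.
body = {'action': 'update_openrc',
        'args': {'openrc': {'OS_USERNAME': 'admin',
                            'OS_PASSWORD': 'secret',
                            'OS_AUTH_URL': 'http://10.0.0.1:5000/v3'}}}
print(requests.post('{}/action'.format(BASE_URL), json=body).json())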
diff --git a/functest/api/resources/v1/envs.py b/functest/api/resources/v1/envs.py
new file mode 100644
index 00000000..9c455198
--- /dev/null
+++ b/functest/api/resources/v1/envs.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle environment related requests
+"""
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.cli.commands.cli_env import Env
+from functest.api.common import api_utils
+import functest.utils.functest_utils as ft_utils
+
+
+class V1Envs(ApiResource):
+ """ V1Envs Resource class"""
+
+ def get(self): # pylint: disable=no-self-use
+ """ Get environment """
+ environment_show = Env().show()
+ return jsonify(environment_show)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def prepare(self, args): # pylint: disable=no-self-use, unused-argument
+ """ Prepare environment """
+ try:
+ ft_utils.execute_command("prepare_env start")
+ except Exception as err: # pylint: disable=broad-except
+ return api_utils.result_handler(status=1, data=str(err))
+ return api_utils.result_handler(
+ status=0, data="Prepare env successfully")
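The environment resource follows the same convention; a short requests-based sketch (local server address assumed, requests used only as an illustrative client):

import requests

BASE_URL = 'http://127.0.0.1:5000/api/v1/functest/envs'

# GET the current environment status.
print(requests.get(BASE_URL).json())

# Trigger "prepare_env start" on the server side.
print(requests.post('{}/action'.format(BASE_URL),
                    json={'action': 'prepare'}).json())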
diff --git a/functest/api/resources/v1/tasks.py b/functest/api/resources/v1/tasks.py
new file mode 100644
index 00000000..7086e707
--- /dev/null
+++ b/functest/api/resources/v1/tasks.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to retrieve the task results
+"""
+
+
+import json
+import logging
+import uuid
+
+from flask import jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.api.database.v1.handlers import TasksHandler
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Tasks(ApiResource):
+ """ V1Tasks Resource class"""
+
+ def get(self, task_id): # pylint: disable=no-self-use
+ """ GET the result of the task id """
+ try:
+ uuid.UUID(task_id)
+ except ValueError:
+ return api_utils.result_handler(status=1, data='Invalid task id')
+
+ task_handler = TasksHandler()
+ try:
+ task = task_handler.get_task_by_taskid(task_id)
+ except ValueError:
+ return api_utils.result_handler(status=1, data='No such task id')
+
+ status = task.status
+ LOGGER.debug('Task status is: %s', status)
+
+ if status not in ['IN PROGRESS', 'FAIL', 'FINISHED']:
+ return api_utils.result_handler(status=1,
+ data='internal server error')
+ if status == 'IN PROGRESS':
+ result = {'status': status, 'result': ''}
+ elif status == 'FAIL':
+ result = {'status': status, 'error': task.error}
+ else:
+ result = {'status': status, 'result': json.loads(task.result)}
+
+ return jsonify(result)
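The task id returned when a test case is started (see testcases.py below) can then be polled against this resource until it leaves the IN PROGRESS state. A polling sketch with a placeholder id, local server address assumed:

import time

import requests

TASK_URL = 'http://127.0.0.1:5000/api/v1/functest/tasks/{}'
task_id = 'd1f61b6c-0000-0000-0000-000000000000'  # placeholder

while True:
    reply = requests.get(TASK_URL.format(task_id)).json()
    if reply.get('status') != 'IN PROGRESS':
        break
    time.sleep(10)
print(reply)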
diff --git a/functest/api/resources/v1/testcases.py b/functest/api/resources/v1/testcases.py
new file mode 100644
index 00000000..f146c24c
--- /dev/null
+++ b/functest/api/resources/v1/testcases.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle testcase related requests
+"""
+
+import os
+import logging
+import uuid
+
+from flask import abort, jsonify
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils, thread
+from functest.cli.commands.cli_testcase import Testcase
+from functest.api.database.v1.handlers import TasksHandler
+from functest.utils.constants import CONST
+import functest.utils.functest_utils as ft_utils
+
+LOGGER = logging.getLogger(__name__)
+
+
+class V1Testcases(ApiResource):
+ """ V1Testcases Resource class"""
+
+ def get(self): # pylint: disable=no-self-use
+ """ GET all testcases """
+ testcases_list = Testcase().list()
+ result = {'testcases': testcases_list.split('\n')[:-1]}
+ return jsonify(result)
+
+
+class V1Testcase(ApiResource):
+ """ V1Testcase Resource class"""
+
+ def get(self, testcase_name): # pylint: disable=no-self-use
+ """ GET the info of one testcase"""
+ testcase = Testcase().show(testcase_name)
+ if not testcase:
+ abort(404, "The test case '%s' does not exist or is not supported"
+ % testcase_name)
+ testcase_info = api_utils.change_obj_to_dict(testcase)
+ dependency_dict = api_utils.change_obj_to_dict(
+ testcase_info.get('dependency'))
+ testcase_info.pop('name')
+ testcase_info.pop('dependency')
+ result = {'testcase': testcase_name}
+ result.update(testcase_info)
+ result.update({'dependency': dependency_dict})
+ return jsonify(result)
+
+ def post(self):
+ """ Used to handle post request """
+ return self._dispatch_post()
+
+ def run_test_case(self, args):
+ """ Run a testcase """
+ try:
+ case_name = args['testcase']
+ except KeyError:
+ return api_utils.result_handler(
+ status=1, data='testcase name must be provided')
+
+ task_id = str(uuid.uuid4())
+
+ task_args = {'testcase': case_name, 'task_id': task_id}
+
+ task_args.update(args.get('opts', {}))
+
+ task_thread = thread.TaskThread(self._run, task_args, TasksHandler())
+ task_thread.start()
+
+ results = {'testcase': case_name, 'task_id': task_id}
+ return jsonify(results)
+
+ def _run(self, args): # pylint: disable=no-self-use
+ """ The built_in function to run a test case """
+
+ case_name = args.get('testcase')
+
+ if not os.path.isfile(CONST.__getattribute__('env_active')):
+ raise Exception("Functest environment is not ready.")
+ else:
+ try:
+ cmd = "run_tests -t {}".format(case_name)
+ runner = ft_utils.execute_command(cmd)
+ except Exception: # pylint: disable=broad-except
+ result = 'FAIL'
+ LOGGER.exception("Running test case %s failed!", case_name)
+ if runner == os.EX_OK:
+ result = 'PASS'
+ else:
+ result = 'FAIL'
+
+ env_info = {
+ 'installer': CONST.__getattribute__('INSTALLER_TYPE'),
+ 'scenario': CONST.__getattribute__('DEPLOY_SCENARIO'),
+ 'build_tag': CONST.__getattribute__('BUILD_TAG'),
+ 'ci_loop': CONST.__getattribute__('CI_LOOP')
+ }
+ result = {
+ 'task_id': args.get('task_id'),
+ 'case_name': case_name,
+ 'env_info': env_info,
+ 'result': result
+ }
+
+ return {'result': result}
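End to end, running a test case through the API therefore looks like the sketch below; the test case name is an example and the server is assumed to run locally on port 5000:

import requests

BASE_URL = 'http://127.0.0.1:5000/api/v1/functest/testcases'

# List the available test cases.
print(requests.get(BASE_URL).json())

# Start one asynchronously; the reply carries the task_id to poll
# via /api/v1/functest/tasks/<task_id>.
body = {'action': 'run_test_case',
        'args': {'testcase': 'vping_ssh', 'opts': {}}}
reply = requests.post('{}/action'.format(BASE_URL), json=body).json()
print(reply['task_id'])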
diff --git a/functest/api/resources/v1/tiers.py b/functest/api/resources/v1/tiers.py
new file mode 100644
index 00000000..71a98bea
--- /dev/null
+++ b/functest/api/resources/v1/tiers.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Resources to handle tier related requests
+"""
+
+import re
+
+from flask import abort, jsonify
+
+from functest.api.base import ApiResource
+from functest.cli.commands.cli_tier import Tier
+
+
+class V1Tiers(ApiResource):
+ """ V1Tiers Resource class """
+
+ def get(self):
+ # pylint: disable=no-self-use
+ """ GET all tiers """
+ tiers_list = Tier().list()
+ data = re.split("[\n\t]", tiers_list)
+ data = [i.strip() for i in data if i != '']
+ data_dict = dict()
+ for i in range(len(data) / 2):
+ one_data = {data[i * 2]: data[i * 2 + 1]}
+ if i == 0:
+ data_dict = one_data
+ else:
+ data_dict.update(one_data)
+ result = {'tiers': data_dict}
+ return jsonify(result)
+
+
+class V1Tier(ApiResource):
+ """ V1Tier Resource class """
+
+ def get(self, tier_name): # pylint: disable=no-self-use
+ """ GET the info of one tier """
+ testcases = Tier().gettests(tier_name)
+ if not testcases:
+ abort(404, "The tier with name '%s' does not exist." % tier_name)
+ tier_info = Tier().show(tier_name)
+ tier_info.__dict__.pop('name')
+ tier_info.__dict__.pop('tests_array')
+ result = {'tier': tier_name, 'testcases': testcases}
+ result.update(tier_info.__dict__)
+ return jsonify(result)
+
+
+class V1TestcasesinTier(ApiResource):
+ """ V1TestcasesinTier Resource class """
+
+ def get(self, tier_name): # pylint: disable=no-self-use
+ """ GET all testcases within given tier """
+ testcases = Tier().gettests(tier_name)
+ if not testcases:
+ abort(404, "The tier with name '%s' does not exist." % tier_name)
+ result = {'tier': tier_name, 'testcases': testcases}
+ return jsonify(result)
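The tier resources are read-only views over the tiers defined in functest/ci/testcases.yaml; for example, with the vnf tier from that file (local server address assumed):

import requests

BASE_URL = 'http://127.0.0.1:5000/api/v1/functest/tiers'

print(requests.get(BASE_URL).json())                     # all tiers
print(requests.get('{}/vnf'.format(BASE_URL)).json())    # one tier
print(requests.get('{}/vnf/testcases'.format(BASE_URL)).json())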
diff --git a/functest/api/server.py b/functest/api/server.py
new file mode 100644
index 00000000..1d47b0dc
--- /dev/null
+++ b/functest/api/server.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Used to launch Functest RestApi
+
+"""
+
+import inspect
+import logging
+import socket
+from urlparse import urljoin
+import pkg_resources
+
+from flask import Flask
+from flask_restful import Api
+
+from functest.api.base import ApiResource
+from functest.api.common import api_utils
+from functest.api.database.db import BASE
+from functest.api.database.db import DB_SESSION
+from functest.api.database.db import ENGINE
+from functest.api.database.v1 import models
+from functest.api.urls import URLPATTERNS
+
+
+LOGGER = logging.getLogger(__name__)
+
+APP = Flask(__name__)
+API = Api(APP)
+
+
+@APP.teardown_request
+def shutdown_session(exception=None): # pylint: disable=unused-argument
+ """
+ To be called at the end of each request whether it is successful
+ or an exception is raised
+ """
+ DB_SESSION.remove()
+
+
+def get_resource(resource_name):
+ """ Obtain the required resource according to resource name """
+ name = ''.join(resource_name.split('_'))
+ return next((r for r in api_utils.itersubclasses(ApiResource)
+ if r.__name__.lower() == name))
+
+
+def get_endpoint(url):
+ """ Obtain the endpoint of url """
+ address = socket.gethostbyname(socket.gethostname())
+ return urljoin('http://{}:5000'.format(address), url)
+
+
+def api_add_resource():
+ """
+ The resource has multiple URLs and you can pass multiple URLs to the
+ add_resource() method on the Api object. Each one will be routed to
+ your Resource
+ """
+ for url_pattern in URLPATTERNS:
+ try:
+ API.add_resource(
+ get_resource(url_pattern.target), url_pattern.url,
+ endpoint=get_endpoint(url_pattern.url))
+ except StopIteration:
+ LOGGER.error('url resource not found: %s', url_pattern.url)
+
+
+def init_db():
+ """
+ Import all modules here that might define models so that
+ they will be registered properly on the metadata, and then
+ create a database
+ """
+ def func(subcls):
+ """ To check the subclasses of BASE"""
+ try:
+ if issubclass(subcls[1], BASE):
+ return True
+ except TypeError:
+ pass
+ return False
+ # pylint: disable=bad-builtin
+ subclses = filter(func, inspect.getmembers(models, inspect.isclass))
+ LOGGER.debug('Import models: %s', [subcls[1] for subcls in subclses])
+ BASE.metadata.create_all(bind=ENGINE)
+
+
+def main():
+ """Entry point"""
+ logging.config.fileConfig(pkg_resources.resource_filename(
+ 'functest', 'ci/logging.ini'))
+ LOGGER.info('Starting Functest server')
+ api_add_resource()
+ init_db()
+ APP.run(host='0.0.0.0')
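A minimal way to launch the server for local experimentation is to call main() directly, since this hunk does not define a console script entry point; the wrapper module below is illustrative only:

from functest.api.server import main

if __name__ == '__main__':
    # main() registers every URL pattern from functest/api/urls.py against
    # the matching ApiResource subclass, creates the SQLite tables under
    # /tmp/functest.db and serves the API on 0.0.0.0 (default port 5000).
    main()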
diff --git a/functest/api/urls.py b/functest/api/urls.py
new file mode 100644
index 00000000..f7bcae38
--- /dev/null
+++ b/functest/api/urls.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Define multiple URLs
+"""
+
+
+class Url(object): # pylint: disable=too-few-public-methods
+ """ Url Class """
+
+ def __init__(self, url, target):
+ super(Url, self).__init__()
+ self.url = url
+ self.target = target
+
+
+URLPATTERNS = [
+ # GET /api/v1/functest/envs => GET environment
+ Url('/api/v1/functest/envs', 'v1_envs'),
+
+ # POST /api/v1/functest/envs/action , {"action":"prepare"}
+ # => Prepare environment
+ Url('/api/v1/functest/envs/action', 'v1_envs'),
+
+ # GET /api/v1/functest/openstack/credentials => GET credentials
+ Url('/api/v1/functest/openstack/credentials', 'v1_creds'),
+
+ # POST /api/v1/functest/openstack/action
+ # {"action":"update_openrc", "args": {"openrc": {}}} => Update openrc
+ Url('/api/v1/functest/openstack/action', 'v1_creds'),
+
+ # GET /api/v1/functest/testcases => GET all testcases
+ Url('/api/v1/functest/testcases', 'v1_test_cases'),
+
+ # GET /api/v1/functest/testcases/<testcase_name>
+ # => GET the info of one testcase
+ Url('/api/v1/functest/testcases/<testcase_name>', 'v1_testcase'),
+
+ # POST /api/v1/functest/testcases/action
+ # {"action":"run_test_case", "args": {"opts": {}, "testcase": "vping_ssh"}}
+ # => Run a testcase
+ Url('/api/v1/functest/testcases/action', 'v1_testcase'),
+
+ # GET /api/v1/functest/testcases => GET all tiers
+ Url('/api/v1/functest/tiers', 'v1_tiers'),
+
+ # GET /api/v1/functest/tiers/<tier_name>
+ # => GET the info of one tier
+ Url('/api/v1/functest/tiers/<tier_name>', 'v1_tier'),
+
+ # GET /api/v1/functest/tiers/<tier_name>/testcases
+ # => GET all testcases within given tier
+ Url('/api/v1/functest/tiers/<tier_name>/testcases',
+ 'v1_testcases_in_tier'),
+
+ # GET /api/v1/functest/tasks/<task_id>
+ # => GET the result of the task id
+ Url('/api/v1/functest/tasks/<task_id>', 'v1_tasks')
+]
diff --git a/functest/ci/config_aarch64_patch.yaml b/functest/ci/config_aarch64_patch.yaml
index 45af8d74..6b3699b4 100644
--- a/functest/ci/config_aarch64_patch.yaml
+++ b/functest/ci/config_aarch64_patch.yaml
@@ -4,6 +4,27 @@ os:
image_name: TestVM
image_file_name: cirros-d161201-aarch64-disk.img
image_password: gocubsgo
+ snaps:
+ images:
+ glance_tests:
+ disk_file: /home/opnfv/functest/images/cirros-d161201-aarch64-disk.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ short_id: 'ubuntu16.04'
+ hw_video_model: 'vga'
+ cirros:
+ disk_file: /home/opnfv/functest/images/cirros-d161201-aarch64-disk.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ short_id: 'ubuntu16.04'
+ hw_video_model: 'vga'
+ ubuntu:
+ disk_file: /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+ extra_properties:
+ hw_firmware_type: 'uefi'
+ hw_video_model: 'vga'
+ centos:
+ disk_file: /home/opnfv/functest/images/CentOS-7-aarch64-GenericCloud.qcow2
vping:
image_name: TestVM
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index e26b3139..cf63e1ed 100644
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -5,14 +5,12 @@ general:
dir_repo_rally: /home/opnfv/repos/rally
repo_tempest: /src/tempest
dir_repo_releng: /home/opnfv/repos/releng
- repo_vims_test: /home/opnfv/repos/vnfs/vims-test
+ repo_vims_test: /src/vims-test
repo_onos: /home/opnfv/repos/onos
- repo_netready: /home/opnfv/repos/netready
repo_barometer: /home/opnfv/repos/barometer
repo_doctor: /home/opnfv/repos/doctor
- repo_copper: /home/opnfv/repos/copper
- repo_domino: /home/opnfv/repos/domino
- repo_fds: /home/opnfv/repos/fds
+ repo_odl_test: /src/odl_test
+ repo_fds: /src/fds
repo_securityscan: /home/opnfv/repos/securityscanning
repo_vrouter: /home/opnfv/repos/vnfs/vrouter
functest: /home/opnfv/functest
@@ -63,6 +61,25 @@ snaps:
disk_file: /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-amd64-disk1.img
centos:
disk_file: /home/opnfv/functest/images/CentOS-7-x86_64-GenericCloud.qcow2
+ # All of these values are optional and will override the values retrieved
+ # by the RC file
+# os_creds_override:
+# username: {user}
+# password: {password}
+# auth_url: {auth_url}
+# project_name: {project_name}
+# identity_api_version: {2|3}
+# network_api_version: {2}
+# compute_api_version: {2}
+# image_api_version: {1|2}
+# user_domain_id: {user_domain_id}
+# project_domain_id: {projects_domain_id}
+# interface: {interface}
+# cacert: {True|False}
+# proxy_settings:
+# host: {proxy_host}
+# port: {proxy_port}
+# ssh_proxy_cmd: {OpenSSH -o ProxyCommand value}
vping:
ping_timeout: 200
@@ -71,6 +88,9 @@ vping:
vm_name_2: opnfv-vping-2
image_name: functest-vping
private_net_name: vping-net
+ # network_type: vlan
+ # physical_network: physnet2
+ # segmentation_id: 2366
private_subnet_name: vping-subnet
private_subnet_cidr: 192.168.130.0/24
router_name: vping-router
@@ -123,10 +143,6 @@ rally:
router_name: rally-router
vnf:
- aaa:
- tenant_name: aaa
- tenant_description: Freeradius server
- tenant_images: {}
juju_epc:
tenant_name: epc
tenant_description: OAI EPC deployed with Juju
@@ -135,13 +151,14 @@ vnf:
tenant_name: cloudify_ims
tenant_description: vIMS
config: cloudify_ims.yaml
- orchestra_ims:
- tenant_name: orchestra_ims
- tenant_description: ims deployed with openbaton
- config: orchestra_ims.yaml
- opera_ims:
- tenant_name: opera_ims
- tenant_description: ims deployed with open-o
+ orchestra_openims:
+ tenant_name: orchestra_openims
+ tenant_description: OpenIMS deployed with Open Baton
+ config: orchestra.yaml
+ orchestra_clearwaterims:
+ tenant_name: orchestra_clearwaterims
+ tenant_description: Clearwater IMS deployed with Open Baton
+ config: orchestra.yaml
ONOS:
general:
@@ -160,14 +177,6 @@ ONOS:
installer_master: '10.20.0.2'
installer_master_username: 'root'
installer_master_password: 'r00tme'
-multisite:
- fuel:
- installer_username: 'root'
- installer_password: 'r00tme'
- compass:
- installer_username: 'root'
- installer_password: 'root'
- multisite_controller_ip: '10.1.0.50'
promise:
tenant_name: promise
tenant_description: promise Functionality Testing
@@ -203,3 +212,4 @@ energy_recorder:
api_url: http://energy.opnfv.fr/resources
api_user: ""
api_password: ""
+
diff --git a/functest/ci/config_patch.yaml b/functest/ci/config_patch.yaml
index ad8b0889..865a564e 100644
--- a/functest/ci/config_patch.yaml
+++ b/functest/ci/config_patch.yaml
@@ -20,6 +20,3 @@ ovs:
image_properties: {'hw_mem_page_size':'large'}
tempest:
use_custom_flavors: True
-multisite:
- tempest:
- use_custom_flavors: True
diff --git a/functest/ci/download_images.sh b/functest/ci/download_images.sh
index 23e09c10..86f37a3f 100644
--- a/functest/ci/download_images.sh
+++ b/functest/ci/download_images.sh
@@ -1,38 +1,25 @@
#!/bin/bash
-CIRROS_REPO_URL=http://download.cirros-cloud.net
-CIRROS_AARCH64_TAG=161201
-CIRROS_X86_64_TAG=0.3.5
-
-RED='\033[1;31m'
-NC='\033[0m' # No Color
-
-function usage(){
- echo -e "${RED}USAGE: $script <destination_folder>${NC}"
- exit 0
-}
-
-script=`basename "$0"`
-IMAGES_FOLDER_DIR=$1
-
-if [[ -z $IMAGES_FOLDER_DIR ]]; then usage; fi;
-
set -ex
-mkdir -p ${IMAGES_FOLDER_DIR}
-
-wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-disk.img -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/${CIRROS_X86_64_TAG}/cirros-${CIRROS_X86_64_TAG}-x86_64-lxc.tar.gz -P ${IMAGES_FOLDER_DIR}
-wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
-
-# Add 3rd-party images for aarch64, since Functest can be run on an x86 machine to test an aarch64 POD
-wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-disk.img -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-initramfs -P ${IMAGES_FOLDER_DIR}
-wget -nc ${CIRROS_REPO_URL}/daily/20${CIRROS_AARCH64_TAG}/cirros-d${CIRROS_AARCH64_TAG}-aarch64-kernel -P ${IMAGES_FOLDER_DIR}
-
-# Add Ubuntu 14 qcow2 image
-wget -nc http://uec-images.ubuntu.com/releases/trusty/14.04/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${IMAGES_FOLDER_DIR}
-
-# Add Centos 7 qcow2 image
-wget -nc http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 -P ${IMAGES_FOLDER_DIR}
-set +ex
\ No newline at end of file
+wget_opts="-N --tries=1 --connect-timeout=30"
+
+cat << EOF | wget ${wget_opts} -i - -P ${1:-/home/opnfv/functest/images}
+http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
+https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
+http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
+http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-lxc.tar.gz
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-initramfs
+http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-kernel
+https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+http://cloud.centos.org/altarch/7/images/aarch64/CentOS-7-aarch64-GenericCloud.qcow2.xz
+EOF
+
+xz --decompress ${1:-/home/opnfv/functest/images}/CentOS-7-aarch64-GenericCloud.qcow2.xz
+
+exit $?
diff --git a/functest/ci/logging.ini b/functest/ci/logging.ini
index 210c8f5f..f1ab7241 100644
--- a/functest/ci/logging.ini
+++ b/functest/ci/logging.ini
@@ -1,5 +1,5 @@
[loggers]
-keys=root,functest,ci,cli,core,energy,opnfv_tests,utils
+keys=root,functest,api,ci,cli,core,energy,opnfv_tests,utils
[handlers]
keys=console,wconsole,file,null
@@ -16,6 +16,11 @@ level=NOTSET
handlers=file
qualname=functest
+[logger_api]
+level=NOTSET
+handlers=wconsole
+qualname=functest.api
+
[logger_ci]
level=NOTSET
handlers=console
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index c40e3266..9ed585f3 100644
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -33,7 +33,7 @@ actions = ['start', 'check']
logger = logging.getLogger('functest.ci.prepare_env')
handler = None
# set the architecture to default
-pod_arch = None
+pod_arch = os.getenv("HOST_ARCH", None)
arch_filter = ['aarch64']
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index b95e1008..63a50dea 100644
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python
-#
-# Author: Jose Lausuch (jose.lausuch@ericsson.com)
+
+# Copyright (c) 2016 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import argparse
import enum
@@ -17,6 +16,7 @@ import os
import pkg_resources
import re
import sys
+import textwrap
import prettytable
@@ -66,17 +66,14 @@ class RunTestsParser(object):
class Runner(object):
def __init__(self):
- self.executed_test_cases = []
+ self.executed_test_cases = {}
self.overall_result = Result.EX_OK
self.clean_flag = True
self.report_flag = False
-
- @staticmethod
- def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
+ self._tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
@staticmethod
def source_rc_file():
@@ -95,6 +92,8 @@ class Runner(object):
CONST.__setattr__('OS_TENANT_NAME', value)
elif key == 'OS_PASSWORD':
CONST.__setattr__('OS_PASSWORD', value)
+ elif key == "OS_PROJECT_DOMAIN_NAME":
+ CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', value)
@staticmethod
def get_run_dict(testname):
@@ -109,21 +108,11 @@ class Runner(object):
logger.exception("Cannot get {}'s config options".format(testname))
return None
- def run_test(self, test, tier_name, testcases=None):
+ def run_test(self, test):
if not test.is_enabled():
raise TestNotEnabled(
"The test case {} is not enabled".format(test.get_name()))
- logger.info("\n") # blank line
- self.print_separator("=")
logger.info("Running test case '%s'...", test.get_name())
- self.print_separator("=")
- logger.debug("\n%s" % test)
- self.source_rc_file()
-
- flags = " -t %s" % test.get_name()
- if self.report_flag:
- flags += " -r"
-
result = testcase.TestCase.EX_RUN_ERROR
run_dict = self.get_run_dict(test.get_name())
if run_dict:
@@ -132,7 +121,7 @@ class Runner(object):
cls = getattr(module, run_dict['class'])
test_dict = ft_utils.get_dict_by_test(test.get_name())
test_case = cls(**test_dict)
- self.executed_test_cases.append(test_case)
+ self.executed_test_cases[test.get_name()] = test_case
if self.clean_flag:
if test_case.create_snapshot() != test_case.EX_OK:
return result
@@ -156,7 +145,6 @@ class Runner(object):
run_dict['class']))
else:
raise Exception("Cannot import the class for the test case.")
-
return result
def run_tier(self, tier):
@@ -165,68 +153,60 @@ class Runner(object):
if tests is None or len(tests) == 0:
logger.info("There are no supported test cases in this tier "
"for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- self.print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- self.print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- result = self.run_test(test, tier_name)
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed.", test.get_name())
- self.overall_result = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed(
- "The test case {} failed and is blocking".format(
- test.get_name()))
+ self.overall_result = Result.EX_ERROR
+ else:
+ logger.info("Running tier '%s'" % tier_name)
+ for test in tests:
+ result = self.run_test(test)
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed.", test.get_name())
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+ return self.overall_result
- def run_all(self, tiers):
- summary = ""
+ def run_all(self):
tiers_to_run = []
-
- for tier in tiers.get_tiers():
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['tiers', 'order', 'CI Loop', 'description',
+ 'testcases'])
+ for tier in self._tiers.get_tiers():
if (len(tier.get_tests()) != 0 and
re.search(CONST.__getattribute__('CI_LOOP'),
tier.get_ci_loop()) is not None):
tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
+ msg.add_row([tier.get_name(), tier.get_order(),
+ tier.get_ci_loop(),
+ textwrap.fill(tier.description, width=40),
+ textwrap.fill(' '.join([str(x.get_name(
+ )) for x in tier.get_tests()]), width=40)])
+ logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
for tier in tiers_to_run:
self.run_tier(tier)
def main(self, **kwargs):
- _tiers = tb.TierBuilder(
- CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
-
if kwargs['noclean']:
self.clean_flag = False
-
if kwargs['report']:
self.report_flag = True
-
try:
if kwargs['test']:
self.source_rc_file()
logger.debug("Test args: %s", kwargs['test'])
- if _tiers.get_tier(kwargs['test']):
- self.run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
+ if self._tiers.get_tier(kwargs['test']):
+ self.run_tier(self._tiers.get_tier(kwargs['test']))
+ elif self._tiers.get_test(kwargs['test']):
result = self.run_test(
- _tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
+ self._tiers.get_test(kwargs['test']))
if result != testcase.TestCase.EX_OK:
logger.error("The test case '%s' failed.",
kwargs['test'])
self.overall_result = Result.EX_ERROR
elif kwargs['test'] == "all":
- self.run_all(_tiers)
+ self.run_all()
else:
logger.error("Unknown test case or tier '%s', "
"or not supported by "
@@ -234,39 +214,51 @@ class Runner(object):
% (kwargs['test'],
CONST.__getattribute__('DEPLOY_SCENARIO')))
logger.debug("Available tiers are:\n\n%s",
- _tiers)
+ self._tiers)
return Result.EX_ERROR
else:
- self.run_all(_tiers)
+ self.run_all()
except BlockingTestFailed:
pass
except Exception:
logger.exception("Failures when running testcase(s)")
self.overall_result = Result.EX_ERROR
+ if not self._tiers.get_test(kwargs['test']):
+ self.summary(self._tiers.get_tier(kwargs['test']))
+ logger.info("Execution exit value: %s" % self.overall_result)
+ return self.overall_result
+ def summary(self, tier=None):
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
field_names=['env var', 'value'])
for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
'CI_LOOP']:
msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
- if len(self.executed_test_cases) > 1:
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier',
- 'duration', 'result'])
- for test_case in self.executed_test_cases:
- result = 'PASS' if(test_case.is_successful(
+ logger.info("Deployment description:\n\n%s\n", msg)
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier',
+ 'duration', 'result'])
+ tiers = [tier] if tier else self._tiers.get_tiers()
+ for tier in tiers:
+ for test in tier.get_tests():
+ try:
+ test_case = self.executed_test_cases[test.get_name()]
+ except KeyError:
+ msg.add_row([test.get_name(), test.get_project(),
+ tier.get_name(), "00:00", "SKIP"])
+ else:
+ result = 'PASS' if(test_case.is_successful(
) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
- test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % self.overall_result)
- return self.overall_result
+ msg.add_row(
+ [test_case.case_name, test_case.project_name,
+ self._tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ for test in tier.get_skipped_test():
+ msg.add_row([test.get_name(), test.get_project(),
+ tier.get_name(), "00:00", "SKIP"])
+ logger.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index d0b2785c..fac81267 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -149,7 +149,7 @@ tiers:
case_name: odl
project_name: functest
criteria: 100
- blocking: true
+ blocking: false
description: >-
Test Suite for the OpenDaylight SDN Controller. It
integrates some test suites from upstream using
@@ -162,8 +162,8 @@ tiers:
class: 'ODLTests'
args:
suites:
- - /home/opnfv/repos/odl_test/csit/suites/integration/basic
- - /home/opnfv/repos/odl_test/csit/suites/openstack/neutron
+ - /src/odl_test/csit/suites/integration/basic
+ - /src/odl_test/csit/suites/openstack/neutron
-
case_name: odl_netvirt
@@ -183,9 +183,9 @@ tiers:
class: 'ODLTests'
args:
suites:
- - /home/opnfv/repos/odl_test/csit/suites/integration/basic
- - /home/opnfv/repos/odl_test/csit/suites/openstack/neutron
- - /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity
+ - /src/odl_test/csit/suites/integration/basic
+ - /src/odl_test/csit/suites/openstack/neutron
+ - /src/odl_test/csit/suites/openstack/connectivity
-
case_name: fds
@@ -204,7 +204,7 @@ tiers:
class: 'ODLTests'
args:
suites:
- - /home/opnfv/repos/fds/testing/robot
+ - /src/fds/testing/robot
-
case_name: onos
@@ -265,10 +265,11 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /home/opnfv/repos/promise/promise/test/functest && python ./run_tests.py'
+ cmd: 'run_promise_tests.py'
-
case_name: doctor-notification
+ enabled: false
project_name: doctor
criteria: 100
blocking: false
@@ -297,7 +298,7 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest && python ./run_tests.py'
+ cmd: 'run_sdnvpn_tests.py'
-
case_name: security_scan
@@ -317,38 +318,6 @@ tiers:
cmd: '. /home/opnfv/functest/conf/stackrc && security_scan --config /usr/local/etc/securityscanning/config.ini'
-
- case_name: copper
- enabled: false
- project_name: copper
- criteria: 100
- blocking: false
- description: >-
- Test suite for policy management based on OpenStack Congress
- dependencies:
- installer: 'apex'
- scenario: '^((?!fdio).)*$'
- run:
- module: 'functest.core.feature'
- class: 'BashFeature'
- args:
- cmd: 'cd /home/opnfv/repos/copper/tests && bash run.sh && cd -'
-
- -
- case_name: multisite
- enabled: false
- project_name: multisite
- criteria: 100
- blocking: false
- description: >-
- Test suite from kingbird
- dependencies:
- installer: '(fuel)|(compass)'
- scenario: 'multisite'
- run:
- module: 'functest.opnfv_tests.openstack.tempest.tempest'
- class: 'TempestMultisite'
-
- -
case_name: functest-odl-sfc
enabled: false
project_name: sfc
@@ -363,7 +332,7 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /usr/local/lib/python2.7/dist-packages/sfc/tests/functest && python ./run_tests.py'
+ cmd: 'run_sfc_tests.py'
-
case_name: onos_sfc
@@ -412,39 +381,21 @@ tiers:
module: 'functest.core.feature'
class: 'BashFeature'
args:
- cmd: 'cd /home/opnfv/repos/domino && ./tests/run_multinode.sh'
-
- -
- case_name: gluon_vping
- enabled: false
- project_name: netready
- criteria: 100
- blocking: false
- description: >-
- Test suite from Netready project.
- dependencies:
- installer: 'apex'
- scenario: 'gluon'
- run:
- module: 'functest.core.feature'
- class: 'BashFeature'
- args:
- cmd: 'cd /home/opnfv/repos/netready/test/functest && python ./gluon-test-suite.py'
+ cmd: 'cd /src/domino && ./tests/run_multinode.sh'
-
case_name: barometercollectd
- enabled: false
+ enabled: true
project_name: barometer
criteria: 100
blocking: false
description: >-
- Test suite for the Barometer project. Separate tests verify the
- proper configuration and functionality of the following
- collectd plugins Ceilometer, Hugepages, Memory RAS (mcelog),
- and OVS Events
+ Test suite for the Barometer project. Separate tests verify
+ the proper configuration and basic functionality of all the
+ collectd plugins as described in the Project Release Plan
dependencies:
- installer: 'fuel'
- scenario: 'kvm_ovs_dpdk_bar'
+ installer: 'apex'
+ scenario: 'bar'
run:
module: 'baro_tests.barometer'
class: 'BarometerCollectd'
@@ -508,7 +459,7 @@ tiers:
-
name: vnf
order: 4
- ci_loop: 'daily'
+ ci_loop: '(daily)|(weekly)'
description : >-
Collection of VNF test cases.
testcases:
@@ -526,51 +477,33 @@ tiers:
run:
module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
class: 'CloudifyIms'
-
-
- case_name: aaa
- enabled: false
+ case_name: orchestra_openims
project_name: functest
criteria: 100
blocking: false
description: >-
- Test suite from Parser project.
+ OpenIMS VNF deployment with Open Baton (Orchestra)
dependencies:
installer: ''
- scenario: ''
+ scenario: 'os-nosdn-nofeature-ha'
run:
- module: 'functest.opnfv_tests.vnf.aaa.aaa'
- class: 'AaaVnf'
+ module: 'functest.opnfv_tests.vnf.ims.orchestra_openims'
+ class: 'OpenImsVnf'
-
- case_name: orchestra_ims
- enabled: false
+ case_name: orchestra_clearwaterims
project_name: functest
criteria: 100
blocking: false
description: >-
- VNF deployment with OpenBaton (Orchestra)
+ ClearwaterIMS VNF deployment with Open Baton (Orchestra)
dependencies:
installer: ''
- scenario: ''
- run:
- module: 'functest.opnfv_tests.vnf.ims.orchestra_ims'
- class: 'ImsVnf'
-
- -
- case_name: opera_vims
- enabled: false
- project_name: opera
- criteria: 100
- blocking: false
- description: >-
- VNF deployment with OPEN-O
- dependencies:
- installer: 'compass'
- scenario: 'os-nosdn-openo-ha'
+ scenario: 'os-nosdn-nofeature-ha'
run:
- module: 'functest.opnfv_tests.vnf.ims.opera_ims'
- class: 'OperaIms'
+ module: 'functest.opnfv_tests.vnf.ims.orchestra_clearwaterims'
+ class: 'ClearwaterImsVnf'
-
case_name: vyos_vrouter
diff --git a/functest/ci/tier_builder.py b/functest/ci/tier_builder.py
index f8038468..d2722dc2 100644
--- a/functest/ci/tier_builder.py
+++ b/functest/ci/tier_builder.py
@@ -1,11 +1,11 @@
#!/usr/bin/env python
+
+# Copyright (c) 2016 Ericsson AB and others.
#
-# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import tier_handler as th
import yaml
@@ -52,11 +52,14 @@ class TierBuilder(object):
dependency=dep,
criteria=dic_testcase['criteria'],
blocking=dic_testcase['blocking'],
- description=dic_testcase['description'])
+ description=dic_testcase['description'],
+ project=dic_testcase['project_name'])
if (testcase.is_compatible(self.ci_installer,
self.ci_scenario) and
testcase.is_enabled()):
tier.add_test(testcase)
+ else:
+ tier.skip_test(testcase)
self.tier_objects.append(tier)
diff --git a/functest/ci/tier_handler.py b/functest/ci/tier_handler.py
index 4f2f14ec..dd3e77ce 100644
--- a/functest/ci/tier_handler.py
+++ b/functest/ci/tier_handler.py
@@ -1,14 +1,18 @@
#!/usr/bin/env python
+
+# Copyright (c) 2016 Ericsson AB and others.
#
-# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-#
import re
+import textwrap
+
+import prettytable
+
LINE_LENGTH = 72
@@ -32,6 +36,7 @@ class Tier(object):
def __init__(self, name, order, ci_loop, description=""):
self.tests_array = []
+ self.skipped_tests_array = []
self.name = name
self.order = order
self.ci_loop = ci_loop
@@ -40,12 +45,18 @@ class Tier(object):
def add_test(self, testcase):
self.tests_array.append(testcase)
+ def skip_test(self, testcase):
+ self.skipped_tests_array.append(testcase)
+
def get_tests(self):
array_tests = []
for test in self.tests_array:
array_tests.append(test)
return array_tests
+ def get_skipped_test(self):
+ return self.skipped_tests_array
+
def get_test_names(self):
array_tests = []
for test in self.tests_array:
@@ -75,31 +86,16 @@ class Tier(object):
return self.ci_loop
def __str__(self):
- lines = split_text(self.description, LINE_LENGTH - 6)
-
- out = ""
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Tier: " + self.name.ljust(LINE_LENGTH - 10) + "|\n")
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Order: " + str(self.order).ljust(LINE_LENGTH - 10) + "|\n")
- out += ("| CI Loop: " + str(self.ci_loop).ljust(LINE_LENGTH - 12) +
- "|\n")
- out += ("| Description:".ljust(LINE_LENGTH - 1) + "|\n")
- for line in lines:
- out += ("| " + line.ljust(LINE_LENGTH - 7) + " |\n")
- out += ("| Test cases:".ljust(LINE_LENGTH - 1) + "|\n")
- tests = self.get_test_names()
- if len(tests) > 0:
- for i in range(len(tests)):
- out += ("| - %s |\n" % tests[i].ljust(LINE_LENGTH - 9))
- else:
- out += ("| (There are no supported test cases "
- .ljust(LINE_LENGTH - 1) + "|\n")
- out += ("| in this tier for the given scenario) "
- .ljust(LINE_LENGTH - 1) + "|\n")
- out += ("|".ljust(LINE_LENGTH - 1) + "|\n")
- out += ("+%s+\n" % ("-" * (LINE_LENGTH - 2)))
- return out
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['tiers', 'order', 'CI Loop', 'description',
+ 'testcases'])
+ msg.add_row(
+ [self.name, self.order, self.ci_loop,
+ textwrap.fill(self.description, width=40),
+ textwrap.fill(' '.join([str(x.get_name(
+ )) for x in self.get_tests()]), width=40)])
+ return msg.get_string()
class TestCase(object):
@@ -109,13 +105,15 @@ class TestCase(object):
dependency,
criteria,
blocking,
- description=""):
+ description="",
+ project=""):
self.name = name
self.enabled = enabled
self.dependency = dependency
self.criteria = criteria
self.blocking = blocking
self.description = description
+ self.project = project
@staticmethod
def is_none(item):
@@ -147,26 +145,16 @@ class TestCase(object):
def is_blocking(self):
return self.blocking
+ def get_project(self):
+ return self.project
+
def __str__(self):
- lines = split_text(self.description, LINE_LENGTH - 6)
-
- out = ""
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Testcase: " + self.name.ljust(LINE_LENGTH - 14) + "|\n")
- out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2)))
- out += ("| Description:".ljust(LINE_LENGTH - 1) + "|\n")
- for line in lines:
- out += ("| " + line.ljust(LINE_LENGTH - 7) + " |\n")
- out += ("| Criteria: " +
- str(self.criteria).ljust(LINE_LENGTH - 14) + "|\n")
- out += ("| Dependencies:".ljust(LINE_LENGTH - 1) + "|\n")
- installer = self.dependency.get_installer()
- scenario = self.dependency.get_scenario()
- out += ("| - Installer:" + installer.ljust(LINE_LENGTH - 17) + "|\n")
- out += ("| - Scenario :" + scenario.ljust(LINE_LENGTH - 17) + "|\n")
- out += ("|".ljust(LINE_LENGTH - 1) + "|\n")
- out += ("+%s+\n" % ("-" * (LINE_LENGTH - 2)))
- return out
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'description', 'criteria', 'dependency'])
+ msg.add_row([self.name, textwrap.fill(self.description, width=40),
+ self.criteria, self.dependency])
+ return msg.get_string()
class Dependency(object):
@@ -182,6 +170,7 @@ class Dependency(object):
return self.scenario
def __str__(self):
- return ("Dependency info:\n"
- " installer: " + self.installer + "\n"
- " scenario: " + self.scenario + "\n")
+ delimitator = "\n" if self.get_installer(
+ ) and self.get_scenario() else ""
+ return "{}{}{}".format(self.get_installer(), delimitator,
+ self.get_scenario())
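
For reference, the table-based __str__() implementations above render with prettytable and textwrap rather than hand-built ASCII boxes. A standalone sketch using the same calls, with sample values taken from the vnf tier defined earlier in this change:

    import textwrap

    import prettytable

    # Mirrors the new Tier.__str__(); the row values are illustrative only.
    msg = prettytable.PrettyTable(
        header_style='upper', padding_width=5,
        field_names=['tiers', 'order', 'CI Loop', 'description', 'testcases'])
    msg.add_row(
        ['vnf', 4, '(daily)|(weekly)',
         textwrap.fill('Collection of VNF test cases.', width=40),
         textwrap.fill('cloudify_ims orchestra_openims '
                       'orchestra_clearwaterims vyos_vrouter', width=40)])
    print(msg.get_string())
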
diff --git a/functest/cli/commands/cli_env.py b/functest/cli/commands/cli_env.py
index 99d36996..72a870b5 100644
--- a/functest/cli/commands/cli_env.py
+++ b/functest/cli/commands/cli_env.py
@@ -16,7 +16,7 @@ from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
-class CliEnv(object):
+class Env(object):
def __init__(self):
pass
@@ -56,17 +56,14 @@ class CliEnv(object):
if self.status(verbose=False) == 0:
STATUS = "ready"
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['Functest Environment', 'value'])
- msg.add_row(['INSTALLER', installer_info])
- msg.add_row(['SCENARIO', scenario])
- msg.add_row(['POD', node])
- if build_tag:
- msg.add_row(['BUILD TAG', build_tag])
- msg.add_row(['DEBUG FLAG', is_debug])
- msg.add_row(['STATUS', STATUS])
- click.echo(msg.get_string())
+ env_info = {'INSTALLER': installer_info,
+ 'SCENARIO': scenario,
+ 'POD': node,
+ 'DEBUG FLAG': is_debug,
+ 'BUILD_TAG': build_tag,
+ 'STATUS': STATUS}
+
+ return env_info
def status(self, verbose=True):
ret_val = 0
@@ -78,3 +75,19 @@ class CliEnv(object):
click.echo("Functest environment ready to run tests.\n")
return ret_val
+
+
+class CliEnv(Env):
+
+ def __init__(self):
+ super(CliEnv, self).__init__()
+
+ def show(self):
+ env_info = super(CliEnv, self).show()
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['Functest Environment', 'value'])
+ for key, value in env_info.iteritems():
+            if value is not None:
+ msg.add_row([key, value])
+ click.echo(msg.get_string())
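
The cli_env.py change introduces the pattern reused by cli_os.py, cli_testcase.py and cli_tier.py below: the base class returns plain data and only the Cli* subclass formats and prints it. A generic, self-contained sketch of that split (Base/Cli and the sample values are illustrative, not the functest classes):

    import prettytable

    class Base(object):
        def show(self):
            # The real Env.show() collects these from the environment.
            return {'INSTALLER': 'apex, 192.168.0.1',
                    'SCENARIO': 'os-nosdn-nofeature-ha',
                    'STATUS': 'ready'}

    class Cli(Base):
        def show(self):
            info = super(Cli, self).show()
            msg = prettytable.PrettyTable(
                header_style='upper', padding_width=5,
                field_names=['Functest Environment', 'value'])
            for key, value in info.items():
                if value is not None:
                    msg.add_row([key, value])
            print(msg.get_string())

    Cli().show()
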
diff --git a/functest/cli/commands/cli_os.py b/functest/cli/commands/cli_os.py
index f4ec1661..e97ab080 100644
--- a/functest/cli/commands/cli_os.py
+++ b/functest/cli/commands/cli_os.py
@@ -18,7 +18,7 @@ import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
-class CliOpenStack(object):
+class OpenStack(object):
def __init__(self):
self.os_auth_url = CONST.__getattribute__('OS_AUTH_URL')
@@ -43,9 +43,11 @@ class CliOpenStack(object):
@staticmethod
def show_credentials():
+ dic_credentials = {}
for key, value in os.environ.items():
if key.startswith('OS_'):
- click.echo("{}={}".format(key, value))
+ dic_credentials.update({key: value})
+ return dic_credentials
def check(self):
self.ping_endpoint()
@@ -88,3 +90,16 @@ class CliOpenStack(object):
"'functest openstack snapshot-create'")
return
os_clean.main()
+
+
+class CliOpenStack(OpenStack):
+
+ def __init__(self):
+ super(CliOpenStack, self).__init__()
+
+ @staticmethod
+ def show_credentials():
+ dic_credentials = OpenStack.show_credentials()
+ for key, value in dic_credentials.items():
+ if key.startswith('OS_'):
+ click.echo("{}={}".format(key, value))
diff --git a/functest/cli/commands/cli_testcase.py b/functest/cli/commands/cli_testcase.py
index cb3d4739..65dd9ab7 100644
--- a/functest/cli/commands/cli_testcase.py
+++ b/functest/cli/commands/cli_testcase.py
@@ -20,7 +20,7 @@ import functest.utils.functest_utils as ft_utils
import functest.utils.functest_vacation as vacation
-class CliTestcase(object):
+class Testcase(object):
def __init__(self):
self.tiers = tb.TierBuilder(
@@ -33,15 +33,11 @@ class CliTestcase(object):
for tier in self.tiers.get_tiers():
for test in tier.get_tests():
summary += (" %s\n" % test.get_name())
- click.echo(summary)
+ return summary
def show(self, testname):
description = self.tiers.get_test(testname)
- if description is None:
- click.echo("The test case '%s' does not exist or is not supported."
- % testname)
-
- click.echo(description)
+ return description
@staticmethod
def run(testname, noclean=False, report=False):
@@ -62,3 +58,20 @@ class CliTestcase(object):
for test in tests:
cmd = "run_tests {}-t {}".format(flags, test)
ft_utils.execute_command(cmd)
+
+
+class CliTestcase(Testcase):
+
+ def __init__(self):
+ super(CliTestcase, self).__init__()
+
+ def list(self):
+ click.echo(super(CliTestcase, self).list())
+
+ def show(self, testname):
+ testcase_show = super(CliTestcase, self).show(testname)
+ if testcase_show:
+ click.echo(testcase_show)
+ else:
+ click.echo("The test case '%s' does not exist or is not supported."
+ % testname)
diff --git a/functest/cli/commands/cli_tier.py b/functest/cli/commands/cli_tier.py
index 9b2e60ba..995354bb 100644
--- a/functest/cli/commands/cli_tier.py
+++ b/functest/cli/commands/cli_tier.py
@@ -19,7 +19,7 @@ from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
-class CliTier(object):
+class Tier(object):
def __init__(self):
self.tiers = tb.TierBuilder(
@@ -34,26 +34,23 @@ class CliTier(object):
% (tier.get_order(),
tier.get_name(),
tier.get_test_names()))
- click.echo(summary)
+ return summary
def show(self, tiername):
tier = self.tiers.get_tier(tiername)
if tier is None:
- tier_names = self.tiers.get_tier_names()
- click.echo("The tier with name '%s' does not exist. "
- "Available tiers are:\n %s\n" % (tiername, tier_names))
+ return None
else:
- click.echo(self.tiers.get_tier(tiername))
+ tier_info = self.tiers.get_tier(tiername)
+ return tier_info
def gettests(self, tiername):
tier = self.tiers.get_tier(tiername)
if tier is None:
- tier_names = self.tiers.get_tier_names()
- click.echo("The tier with name '%s' does not exist. "
- "Available tiers are:\n %s\n" % (tiername, tier_names))
+ return None
else:
tests = tier.get_test_names()
- click.echo("Test cases in tier '%s':\n %s\n" % (tiername, tests))
+ return tests
@staticmethod
def run(tiername, noclean=False, report=False):
@@ -70,3 +67,30 @@ class CliTier(object):
else:
cmd = "run_tests {}-t {}".format(flags, tiername)
ft_utils.execute_command(cmd)
+
+
+class CliTier(Tier):
+
+ def __init__(self):
+ super(CliTier, self).__init__()
+
+ def list(self):
+ click.echo(super(CliTier, self).list())
+
+ def show(self, tiername):
+ tier_info = super(CliTier, self).show(tiername)
+ if tier_info:
+ click.echo(tier_info)
+ else:
+ tier_names = self.tiers.get_tier_names()
+ click.echo("The tier with name '%s' does not exist. "
+ "Available tiers are:\n %s\n" % (tiername, tier_names))
+
+ def gettests(self, tiername):
+ tests = super(CliTier, self).gettests(tiername)
+ if tests:
+ click.echo("Test cases in tier '%s':\n %s\n" % (tiername, tests))
+ else:
+ tier_names = self.tiers.get_tier_names()
+ click.echo("The tier with name '%s' does not exist. "
+ "Available tiers are:\n %s\n" % (tiername, tier_names))
diff --git a/functest/energy/energy.py b/functest/energy/energy.py
index 372c1d32..c410e84f 100644
--- a/functest/energy/energy.py
+++ b/functest/energy/energy.py
@@ -16,6 +16,7 @@ import urllib
from functools import wraps
import requests
+import urllib3
import functest.utils.functest_utils as ft_utils
@@ -55,9 +56,10 @@ def enable_recording(method):
try:
return_value = method(*args)
finish_session(current_scenario)
- except Exception: # pylint: disable=broad-except
+ except Exception as exc: # pylint: disable=broad-except
+ EnergyRecorder.logger.exception(exc)
finish_session(current_scenario)
- raise
+ raise exc
return return_value
return wrapper
@@ -74,6 +76,9 @@ class EnergyRecorder(object):
# Default initial step
INITIAL_STEP = "running"
+ # Default connection timeout
+ CONNECTION_TIMOUT = urllib3.Timeout(connect=1, read=3)
+
@staticmethod
def load_config():
"""
@@ -94,27 +99,41 @@ class EnergyRecorder(object):
assert energy_recorder_uri
assert environment
- energy_recorder_uri += "/recorders/environment/"
- energy_recorder_uri += urllib.quote_plus(environment)
+ uri_comp = "/recorders/environment/"
+ uri_comp += urllib.quote_plus(environment)
EnergyRecorder.logger.debug(
- "API recorder at: " + energy_recorder_uri)
+ "API recorder at: " + energy_recorder_uri + uri_comp)
# Creds
- user = ft_utils.get_functest_config(
+ creds_usr = ft_utils.get_functest_config(
"energy_recorder.api_user")
- password = ft_utils.get_functest_config(
+ creds_pass = ft_utils.get_functest_config(
"energy_recorder.api_password")
- if user != "" and password != "":
- energy_recorder_api_auth = (user, password)
+ if creds_usr != "" and creds_pass != "":
+ energy_recorder_api_auth = (creds_usr, creds_pass)
else:
energy_recorder_api_auth = None
+ try:
+ resp = requests.get(energy_recorder_uri + "/monitoring/ping",
+ auth=energy_recorder_api_auth,
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT)
+ api_available = json.loads(resp.text)["status"] == "OK"
+ except Exception: # pylint: disable=broad-except
+ EnergyRecorder.logger.error(
+ "Energy recorder API is not available")
+ api_available = False
# Final config
EnergyRecorder.energy_recorder_api = {
- "uri": energy_recorder_uri,
- "auth": energy_recorder_api_auth
+ "uri": energy_recorder_uri + uri_comp,
+ "auth": energy_recorder_api_auth,
+ "available": api_available
}
+ return EnergyRecorder.energy_recorder_api["available"]
@staticmethod
def submit_scenario(scenario, step):
@@ -126,31 +145,36 @@ class EnergyRecorder(object):
param step: Step name
:type step: string
"""
- return_status = True
try:
- EnergyRecorder.logger.debug("Submitting scenario")
+ return_status = True
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Submitting scenario")
- # Create API payload
- payload = {
- "step": step,
- "scenario": scenario
- }
- # Call API to start energy recording
- response = requests.post(
- EnergyRecorder.energy_recorder_api["uri"],
- data=json.dumps(payload),
- auth=EnergyRecorder.energy_recorder_api["auth"],
- headers={
- 'content-type': 'application/json'
+ # Create API payload
+ payload = {
+ "step": step,
+ "scenario": scenario
}
- )
- if response.status_code != 200:
- log_msg = "Error while submitting scenario\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.info(log_msg)
- return_status = False
+ # Call API to start energy recording
+ response = requests.post(
+ EnergyRecorder.energy_recorder_api["uri"],
+ data=json.dumps(payload),
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code != 200:
+ EnergyRecorder.logger.error(
+ "Error while submitting scenario\n%s",
+ response.text)
+ return_status = False
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+ "submit_scenario: Unable to connect energy recorder API")
+ return_status = False
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
@@ -170,11 +194,12 @@ class EnergyRecorder(object):
"""
return_status = True
try:
- EnergyRecorder.logger.debug("Starting recording")
- return_status = EnergyRecorder.submit_scenario(
- scenario,
- EnergyRecorder.INITIAL_STEP
- )
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Starting recording")
+ return_status = EnergyRecorder.submit_scenario(
+ scenario,
+ EnergyRecorder.INITIAL_STEP
+ )
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
@@ -188,25 +213,30 @@ class EnergyRecorder(object):
@staticmethod
def stop():
"""Stop current recording session."""
- EnergyRecorder.logger.debug("Stopping recording")
return_status = True
try:
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
-
- # Call API to stop energy recording
- response = requests.delete(
- EnergyRecorder.energy_recorder_api["uri"],
- auth=EnergyRecorder.energy_recorder_api["auth"],
- headers={
- 'content-type': 'application/json'
- }
- )
- if response.status_code != 200:
- log_msg = "Error while stating energy recording session\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.error(log_msg)
- return_status = False
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Stopping recording")
+
+ # Call API to stop energy recording
+ response = requests.delete(
+ EnergyRecorder.energy_recorder_api["uri"],
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code != 200:
+ EnergyRecorder.logger.error(
+                        "Error while stopping energy recording session\n%s",
+ response.text)
+ return_status = False
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+ "stop: Unable to connect energy recorder API")
+ return_status = False
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
@@ -219,31 +249,36 @@ class EnergyRecorder(object):
@staticmethod
def set_step(step):
"""Notify energy recording service of current step of the testcase."""
- EnergyRecorder.logger.debug("Setting step")
return_status = True
try:
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Setting step")
- # Create API payload
- payload = {
- "step": step,
- }
-
- # Call API to define step
- response = requests.post(
- EnergyRecorder.energy_recorder_api["uri"] + "/step",
- data=json.dumps(payload),
- auth=EnergyRecorder.energy_recorder_api["auth"],
- headers={
- 'content-type': 'application/json'
+ # Create API payload
+ payload = {
+ "step": step,
}
- )
- if response.status_code != 200:
- log_msg = "Error while setting current step of testcase\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.error(log_msg)
- return_status = False
+
+ # Call API to define step
+ response = requests.post(
+ EnergyRecorder.energy_recorder_api["uri"] + "/step",
+ data=json.dumps(payload),
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ headers={
+ 'content-type': 'application/json'
+ },
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code != 200:
+ EnergyRecorder.logger.error(
+ "Error while setting current step of testcase\n%s",
+ response.text)
+ return_status = False
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+ "set_step: Unable to connect energy recorder API")
+ return_status = False
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
@@ -256,30 +291,34 @@ class EnergyRecorder(object):
@staticmethod
def get_current_scenario():
"""Get current running scenario (if any, None else)."""
- EnergyRecorder.logger.debug("Getting current scenario")
return_value = None
try:
# Ensure that connectyvity settings are loaded
- EnergyRecorder.load_config()
-
- # Call API get running scenario
- response = requests.get(
- EnergyRecorder.energy_recorder_api["uri"],
- auth=EnergyRecorder.energy_recorder_api["auth"]
- )
- if response.status_code == 200:
- return_value = json.loads(response.text)
- elif response.status_code == 404:
- log_msg = "No current running scenario at {}"
- log_msg = log_msg.format(
- EnergyRecorder.energy_recorder_api["uri"])
- EnergyRecorder.logger.error(log_msg)
- return_value = None
- else:
- log_msg = "Error while getting current scenario\n{}"
- log_msg = log_msg.format(response.text)
- EnergyRecorder.logger.error(log_msg)
- return_value = None
+ if EnergyRecorder.load_config():
+ EnergyRecorder.logger.debug("Getting current scenario")
+
+ # Call API get running scenario
+ response = requests.get(
+ EnergyRecorder.energy_recorder_api["uri"],
+ auth=EnergyRecorder.energy_recorder_api["auth"],
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
+ )
+ if response.status_code == 200:
+ return_value = json.loads(response.text)
+ elif response.status_code == 404:
+ EnergyRecorder.logger.info(
+ "No current running scenario at %s",
+ EnergyRecorder.energy_recorder_api["uri"])
+ return_value = None
+ else:
+ EnergyRecorder.logger.error(
+ "Error while getting current scenario\n%s",
+ response.text)
+ return_value = None
+ except requests.exceptions.ConnectionError:
+ EnergyRecorder.logger.warning(
+                "get_current_scenario: Unable to connect energy recorder API")
+ return_value = None
except Exception: # pylint: disable=broad-except
# Default exception handler to ensure that method
# is safe for caller
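
The recurring pattern in the energy.py changes: load_config() now pings the recorder once with a short connect/read timeout, and the other methods only talk to the API when that check succeeded. A standalone sketch of the availability check (the URI below is a placeholder, not a real endpoint):

    import json

    import requests
    import urllib3

    # Same connect/read budget as EnergyRecorder.CONNECTION_TIMOUT above.
    TIMEOUT = urllib3.Timeout(connect=1, read=3)

    def recorder_available(uri, auth=None):
        """Return True only if the ping route answers {"status": "OK"}."""
        try:
            resp = requests.get(uri + "/monitoring/ping", auth=auth,
                                headers={'content-type': 'application/json'},
                                timeout=TIMEOUT)
            return json.loads(resp.text)["status"] == "OK"
        except Exception:  # broad catch keeps callers safe, as in energy.py
            return False

    if recorder_available("http://energy.recorder.example/api"):
        print("energy recording enabled")
    else:
        print("energy recording skipped")
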
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt
index 95bea2b7..099d6864 100644
--- a/functest/opnfv_tests/openstack/rally/blacklist.txt
+++ b/functest/opnfv_tests/openstack/rally/blacklist.txt
@@ -6,6 +6,38 @@ scenario:
- joid
tests:
- NovaServers.boot_server_from_volume_and_delete
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+            # Following tests currently fail due to the required Gnocchi API:
+ # HTTP 410: "This telemetry installation is configured to use
+ # Gnocchi. Please use the Gnocchi API available on the
+ # metric endpoint to retrieve data."
+ # Issue: https://bugs.launchpad.net/rally/+bug/1704322
+ - CeilometerMeters.list_matched_meters
+ - CeilometerMeters.list_meters
+ - CeilometerQueries.create_and_query_samples
+ - CeilometerResource.get_tenant_resources
+ - CeilometerResource.list_matched_resources
+ - CeilometerResource.list_resources
+ - CeilometerSamples.list_matched_samples
+ - CeilometerSamples.list_samples
+ - CeilometerStats.create_meter_and_get_stats
+ - CeilometerStats.get_stats
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+            # Following test currently fails due to a bug in
+            # python-ceilometerclient when fetching event_types
+ # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138
+ # Fix: https://review.openstack.org/#/c/483402/
+ - CeilometerEvents.create_user_and_list_event_types
functionality:
-
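
Hedged sketch of how a regex-style entry like the ones just added is expected to be applied: the 'scenarios' and 'installers' values ('^os-', '.+') read as regular expressions matched against the current DEPLOY_SCENARIO and INSTALLER_TYPE, and any match blacklists the listed Rally tests. The matching code below illustrates that assumption; it is not the rally.py implementation:

    import re

    entry = {'scenarios': ['^os-'], 'installers': ['.+'],
             'tests': ['CeilometerMeters.list_meters',
                       'CeilometerStats.get_stats']}
    deploy_scenario, installer = 'os-nosdn-nofeature-ha', 'apex'
    # An entry applies when both the scenario and the installer match.
    if (any(re.search(pat, deploy_scenario) for pat in entry['scenarios']) and
            any(re.search(pat, installer) for pat in entry['installers'])):
        print("blacklisted: %s" % ', '.join(entry['tests']))
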
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 6b7c49ca..fdef8bed 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__)
class RallyBase(testcase.OSGCTestCase):
"""Base class form Rally testcases implementation."""
- TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'vm', 'all']
+ TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+ 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
GLANCE_IMAGE_PATH = os.path.join(
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
new file mode 100644
index 00000000..7efb5a83
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
@@ -0,0 +1,458 @@
+ CeilometerMeters.list_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.create_meter_and_get_stats:
+ -
+ args:
+ user_id: "user-id"
+ resource_id: "resource-id"
+ counter_volume: 1.0
+ counter_unit: ""
+ counter_type: "cumulative"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_matched_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerMeters.list_matched_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_matched_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ batch_size: 5
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
new file mode 100644
index 00000000..bb070cd3
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
@@ -0,0 +1,247 @@
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml
index 033edb83..65f101fb 100644
--- a/functest/opnfv_tests/openstack/rally/task.yaml
+++ b/functest/opnfv_tests/openstack/rally/task.yaml
@@ -31,6 +31,10 @@
{%- include "var/opnfv-neutron.yaml"-%}
{% endif %}
+{% if "ceilometer" in service_list %}
+{%- include "var/opnfv-ceilometer.yaml"-%}
+{% endif %}
+
{% if "quotas" in service_list %}
{%- include "var/opnfv-quotas.yaml"-%}
{% endif %}
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index 6ac72176..4f71b5f5 100644
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
# logging configuration """
LOGGER = logging.getLogger(__name__)
-class RefstackClient(testcase.OSGCTestCase):
+class RefstackClient(testcase.TestCase):
"""RefstackClient testcase implementation class."""
def __init__(self, **kwargs):
@@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase):
if "case_name" not in kwargs:
kwargs["case_name"] = "refstack_defcore"
super(RefstackClient, self).__init__(**kwargs)
+ self.tempestconf = None
self.conf_path = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
@@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase):
CONST.__getattribute__('OS_INSECURE').lower() == 'true'):
self.insecure = '-k'
+ def generate_conf(self):
+ if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
+ os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
+
+ self.tempestconf = TempestConf()
+ self.tempestconf.generate_tempestconf()
+
def run_defcore(self, conf, testlist):
"""Run defcore sys command."""
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
@@ -65,42 +74,29 @@ class RefstackClient(testcase.OSGCTestCase):
ft_utils.execute_command(cmd)
def run_defcore_default(self):
- """Run default defcare sys command."""
- cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
- .format(self.insecure, self.confpath, self.defcorelist))
+ """Run default defcore sys command."""
+ options = ["-v"] if not self.insecure else ["-v", self.insecure]
+ cmd = (["refstack-client", "test", "-c", self.confpath] +
+ options + ["--test-list", self.defcorelist])
LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd)
- header = ("Refstack environment:\n"
- " SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- CONST.__getattribute__('NODE_NAME'),
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- f_stdout = open(
- os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "refstack.log"), 'w+')
- f_env = open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "environment.log"), 'w+')
- f_env.write(header)
-
- process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, bufsize=1)
-
- with process.stdout:
- for line in iter(process.stdout.readline, b''):
- if 'Tests' in line:
- break
- if re.search(r"\} tempest\.", line):
- LOGGER.info(line.replace('\n', ''))
- f_stdout.write(line)
- process.wait()
-
- f_stdout.close()
- f_env.close()
+ with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "environment.log"), 'w+') as f_env:
+ f_env.write(
+ ("Refstack environment:\n"
+ " SUT: {}\n Scenario: {}\n Node: {}\n Date: {}\n").format(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__('NODE_NAME'),
+ time.strftime("%a %b %d %H:%M:%S %Z %Y")))
+
+ with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "refstack.log"), 'w+') as f_stdout:
+ subprocess.call(cmd, shell=False, stdout=f_stdout,
+ stderr=subprocess.STDOUT)
def parse_refstack_result(self):
- """Parse Refstact results."""
+ """Parse Refstack results."""
try:
with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
"refstack.log"), 'r') as logfile:
@@ -119,15 +115,15 @@ class RefstackClient(testcase.OSGCTestCase):
for match in re.findall(r"(- Failed: )(\d+)", output):
num_failures = match[1]
LOGGER.info("".join(match))
- success_testcases = ""
- for match in re.findall(r"\{0\}(.*?)[. ]*ok", output):
- success_testcases += match + ", "
- failed_testcases = ""
- for match in re.findall(r"\{0\}(.*?)[. ]*FAILED", output):
- failed_testcases += match + ", "
- skipped_testcases = ""
- for match in re.findall(r"\{0\}(.*?)[. ]*SKIPPED:", output):
- skipped_testcases += match + ", "
+ success_testcases = []
+ for match in re.findall(r"\{0\} (.*?)[. ]*ok", output):
+ success_testcases.append(match)
+ failed_testcases = []
+ for match in re.findall(r"\{0\} (.*?)[. ]*FAILED", output):
+ failed_testcases.append(match)
+ skipped_testcases = []
+ for match in re.findall(r"\{0\} (.*?)[. ]*SKIPPED:", output):
+ skipped_testcases.append(match)
num_executed = int(num_tests) - int(num_skipped)
@@ -157,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase):
"""
self.start_time = time.time()
- if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
- os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
-
try:
- tempestconf = TempestConf()
- tempestconf.generate_tempestconf()
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
self.run_defcore_default()
self.parse_refstack_result()
res = testcase.TestCase.EX_OK
except Exception:
LOGGER.exception("Error with run")
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ self.tempestconf.clean()
self.stop_time = time.time()
return res
@@ -207,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase):
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+
+ :return: TestCase.EX_OK
+ """
+ LOGGER.info("Initializing the saved state of the OpenStack deployment")
+
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
+
+ os_utils.init_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(RefstackClient, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+ """
+ LOGGER.info("Destroying the resources created for tempest")
+
+ os_utils.perform_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(RefstackClient, self).clean()
+
class RefstackClientParser(object): # pylint: disable=too-few-public-methods
"""Command line argument parser helper."""
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
index 30590b9e..db745227 100644
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
@@ -11,13 +11,15 @@ import pkg_resources
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils import openstack_utils
from functest.utils.constants import CONST
+from functest.opnfv_tests.openstack.tempest.tempest \
+ import TempestResourcesManager
""" logging configuration """
logger = logging.getLogger(__name__)
class TempestConf(object):
- def __init__(self):
+ def __init__(self, **kwargs):
self.VERIFIER_ID = conf_utils.get_verifier_id()
self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
self.VERIFIER_ID)
@@ -27,15 +29,22 @@ class TempestConf(object):
self.confpath = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
+ self.resources = TempestResourcesManager(**kwargs)
def generate_tempestconf(self):
try:
openstack_utils.source_credentials(
CONST.__getattribute__('openstack_creds'))
- img_flavor_dict = conf_utils.create_tempest_resources(
- use_custom_images=True, use_custom_flavors=True)
+ resources = self.resources.create(create_project=True,
+ use_custom_images=True,
+ use_custom_flavors=True)
conf_utils.configure_tempest_defcore(
- self.DEPLOYMENT_DIR, img_flavor_dict)
+ self.DEPLOYMENT_DIR,
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ image_id_alt=resources.get("image_id_alt"),
+ flavor_id_alt=resources.get("flavor_id_alt"),
+ tenant_id=resources.get("project_id"))
except Exception as e:
logger.error("error with generating refstack client "
"reference tempest conf file: %s", e)
@@ -48,6 +57,9 @@ class TempestConf(object):
except Exception as e:
logger.error('Error with run: %s', e)
+ def clean(self):
+ self.resources.cleanup()
+
def main():
logging.basicConfig()
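
Assuming a deployed functest environment with OpenStack credentials sourced, the reworked TempestConf is meant to be driven as a generate/clean pair; a usage sketch relying only on the methods visible in this diff:

    from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
        import TempestConf

    conf = TempestConf()
    conf.generate_tempestconf()   # creates the resources and the defcore conf
    try:
        pass   # run refstack-client against conf.confpath here
    finally:
        conf.clean()              # releases the resources created above
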
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
index 0b87440b..19c6a87f 100644
--- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
+++ b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
@@ -28,9 +28,14 @@ class SnapsTestRunner(unit.Suite):
if 'os_creds' in kwargs:
self.os_creds = kwargs['os_creds']
else:
+ creds_override = None
+ if hasattr(CONST, 'snaps_os_creds_override'):
+ creds_override = CONST.__getattribute__(
+ 'snaps_os_creds_override')
self.os_creds = openstack_tests.get_credentials(
os_env_file=CONST.__getattribute__('openstack_creds'),
- proxy_settings_str=None, ssh_proxy_cmd=None)
+ proxy_settings_str=None, ssh_proxy_cmd=None,
+ overrides=creds_override)
if 'ext_net_name' in kwargs:
self.ext_net_name = kwargs['ext_net_name']
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
index 327ba073..309f9db1 100644
--- a/functest/opnfv_tests/openstack/snaps/snaps_utils.py
+++ b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
@@ -16,4 +16,4 @@ def get_ext_net_name(os_creds):
"""
neutron = neutron_utils.neutron_client(os_creds)
ext_nets = neutron_utils.get_external_networks(neutron)
- return ext_nets[0]['network']['name']
+ return ext_nets[0].name
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index fa8f00fc..52fa6003 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -11,10 +11,11 @@ import ConfigParser
import logging
import os
import pkg_resources
-import re
import shutil
import subprocess
+import yaml
+
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
@@ -28,16 +29,21 @@ GLANCE_IMAGE_PATH = os.path.join(
TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'tempest')
TEMPEST_CUSTOM = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
+ 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt')
+ 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt')
TEMPEST_DEFCORE = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt')
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt')
TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'refstack')
+TEMPEST_CONF_YAML = pkg_resources.resource_filename(
+ 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
+TEST_ACCOUNTS_FILE = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml')
CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
@@ -46,96 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
logger = logging.getLogger(__name__)
-def create_tempest_resources(use_custom_images=False,
- use_custom_flavors=False):
- keystone_client = os_utils.get_keystone_client()
-
- logger.debug("Creating tenant and user for Tempest suite")
- tenant_id = os_utils.create_tenant(
- keystone_client,
- CONST.__getattribute__('tempest_identity_tenant_name'),
- CONST.__getattribute__('tempest_identity_tenant_description'))
- if not tenant_id:
- logger.error("Failed to create %s tenant"
- % CONST.__getattribute__('tempest_identity_tenant_name'))
-
- user_id = os_utils.create_user(
- keystone_client,
- CONST.__getattribute__('tempest_identity_user_name'),
- CONST.__getattribute__('tempest_identity_user_password'),
- None, tenant_id)
- if not user_id:
- logger.error("Failed to create %s user" %
- CONST.__getattribute__('tempest_identity_user_name'))
-
- logger.debug("Creating private network for Tempest suite")
- network_dic = os_utils.create_shared_network_full(
- CONST.__getattribute__('tempest_private_net_name'),
- CONST.__getattribute__('tempest_private_subnet_name'),
- CONST.__getattribute__('tempest_router_name'),
- CONST.__getattribute__('tempest_private_subnet_cidr'))
- if network_dic is None:
- raise Exception('Failed to create private network')
-
- image_id = ""
- image_id_alt = ""
- flavor_id = ""
- flavor_id_alt = ""
-
- if (CONST.__getattribute__('tempest_use_custom_images') or
- use_custom_images):
- # adding alternative image should be trivial should we need it
- logger.debug("Creating image for Tempest suite")
- _, image_id = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id is None:
- raise Exception('Failed to create image')
-
- if use_custom_images:
- logger.debug("Creating 2nd image for Tempest suite")
- _, image_id_alt = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name_alt'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id_alt is None:
- raise Exception('Failed to create image')
-
- if (CONST.__getattribute__('tempest_use_custom_flavors') or
- use_custom_flavors):
- # adding alternative flavor should be trivial should we need it
- logger.debug("Creating flavor for Tempest suite")
- _, flavor_id = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id is None:
- raise Exception('Failed to create flavor')
-
- if use_custom_flavors:
- logger.debug("Creating 2nd flavor for tempest_defcore")
- _, flavor_id_alt = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name_alt'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id_alt is None:
- raise Exception('Failed to create flavor')
-
- img_flavor_dict = {}
- img_flavor_dict['image_id'] = image_id
- img_flavor_dict['image_id_alt'] = image_id_alt
- img_flavor_dict['flavor_id'] = flavor_id
- img_flavor_dict['flavor_id_alt'] = flavor_id_alt
-
- return img_flavor_dict
-
-
def get_verifier_id():
"""
- Returns verifer id for current Tempest
+ Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
CONST.__getattribute__('tempest_deployment_name') +
@@ -169,7 +88,7 @@ def get_verifier_deployment_id():
def get_verifier_repo_dir(verifier_id):
"""
- Returns installed verfier repo directory for Tempest
+ Returns installed verifier repo directory for Tempest
"""
if not verifier_id:
verifier_id = get_verifier_id()
@@ -211,44 +130,42 @@ def backup_tempest_config(conf_file):
"""
Copy config file to tempest results directory
"""
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
shutil.copyfile(conf_file,
os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf'))
-def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None,
- MODE=None):
+def configure_tempest(deployment_dir, image_id=None, flavor_id=None,
+ mode=None):
"""
Calls rally verify and updates the generated tempest.conf with
given parameters
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- IMAGE_ID, FLAVOR_ID)
- if MODE == 'feature_multisite':
- configure_tempest_multisite_params(conf_file)
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
-def configure_tempest_defcore(deployment_dir, img_flavor_dict):
+def configure_tempest_defcore(deployment_dir, image_id, flavor_id,
+ image_id_alt, flavor_id_alt, tenant_id):
"""
Add/update needed parameters into tempest.conf file
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- img_flavor_dict.get("image_id"),
- img_flavor_dict.get("flavor_id"))
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
logger.debug("Updating selected tempest.conf parameters for defcore...")
config = ConfigParser.RawConfigParser()
config.read(conf_file)
- config.set('compute', 'image_ref', img_flavor_dict.get("image_id"))
- config.set('compute', 'image_ref_alt',
- img_flavor_dict['image_id_alt'])
- config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id"))
- config.set('compute', 'flavor_ref_alt',
- img_flavor_dict['flavor_id_alt'])
+ config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
+ config.set('oslo_concurrency', 'lock_path',
+ '{}/lock_files'.format(deployment_dir))
+ generate_test_accounts_file(tenant_id=tenant_id)
+ config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE)
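+    # Setting auth.test_accounts_file makes Tempest use the pre-provisioned
+    # credentials dumped into test_accounts.yaml below instead of creating
+    # its own accounts.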
+ config.set('scenario', 'img_dir', '{}'.format(deployment_dir))
+ config.set('scenario', 'img_file', 'tempest-image')
+ config.set('compute', 'image_ref', image_id)
+ config.set('compute', 'image_ref_alt', image_id_alt)
+ config.set('compute', 'flavor_ref', flavor_id)
+ config.set('compute', 'flavor_ref_alt', flavor_id_alt)
with open(conf_file, 'wb') as config_file:
config.write(config_file)
@@ -259,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
shutil.copyfile(conf_file, confpath)
+def generate_test_accounts_file(tenant_id):
+ """
+ Add needed tenant and user params into test_accounts.yaml
+ """
+
+ logger.debug("Add needed params into test_accounts.yaml...")
+ accounts_list = [
+ {
+ 'tenant_name':
+ CONST.__getattribute__('tempest_identity_tenant_name'),
+ 'tenant_id': str(tenant_id),
+ 'username': CONST.__getattribute__('tempest_identity_user_name'),
+ 'password':
+ CONST.__getattribute__('tempest_identity_user_password')
+ }
+ ]
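+    # For illustration, the dumped test_accounts.yaml is expected to look
+    # roughly like:
+    #   - tenant_name: <tempest tenant name>
+    #     tenant_id: <tenant uuid>
+    #     username: <tempest user name>
+    #     password: <tempest user password>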
+
+ with open(TEST_ACCOUNTS_FILE, "w") as f:
+ yaml.dump(accounts_list, f, default_flow_style=False)
+
+
def configure_tempest_update_params(tempest_conf_file,
- IMAGE_ID=None, FLAVOR_ID=None):
+ image_id=None, flavor_id=None):
"""
Add/update needed parameters into tempest.conf file
"""
@@ -274,21 +212,15 @@ def configure_tempest_update_params(tempest_conf_file,
config.set('compute', 'volume_device_name',
CONST.__getattribute__('tempest_volume_device_name'))
if CONST.__getattribute__('tempest_use_custom_images'):
- if IMAGE_ID is not None:
- config.set('compute', 'image_ref', IMAGE_ID)
+ if image_id is not None:
+ config.set('compute', 'image_ref', image_id)
if IMAGE_ID_ALT is not None:
config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
if CONST.__getattribute__('tempest_use_custom_flavors'):
- if FLAVOR_ID is not None:
- config.set('compute', 'flavor_ref', FLAVOR_ID)
+ if flavor_id is not None:
+ config.set('compute', 'flavor_ref', flavor_id)
if FLAVOR_ID_ALT is not None:
config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- config.set('identity', 'tenant_name',
- CONST.__getattribute__('tempest_identity_tenant_name'))
- config.set('identity', 'username',
- CONST.__getattribute__('tempest_identity_user_name'))
- config.set('identity', 'password',
- CONST.__getattribute__('tempest_identity_user_password'))
config.set('identity', 'region', 'RegionOne')
if os_utils.is_keystone_v3():
auth_version = 'v3'
@@ -323,6 +255,19 @@ def configure_tempest_update_params(tempest_conf_file,
config.set(service, 'endpoint_type',
CONST.__getattribute__('OS_ENDPOINT_TYPE'))
+ logger.debug('Add/Update required params defined in tempest_conf.yaml '
+ 'into tempest.conf file')
+ with open(TEMPEST_CONF_YAML) as f:
+ conf_yaml = yaml.safe_load(f)
+ if conf_yaml:
+ sections = config.sections()
+ for section in conf_yaml:
+ if section not in sections:
+ config.add_section(section)
+ sub_conf = conf_yaml.get(section)
+ for key, value in sub_conf.items():
+ config.set(section, key, value)
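+    # For example, an uncommented entry in tempest_conf.yaml such as
+    #   validation:
+    #     ssh_timeout: 300
+    # ends up as "ssh_timeout = 300" under the [validation] section of the
+    # generated tempest.conf.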
+
with open(tempest_conf_file, 'wb') as config_file:
config.write(config_file)
@@ -351,93 +296,3 @@ def configure_verifier(deployment_dir):
% tempest_conf_file)
else:
return tempest_conf_file
-
-
-def configure_tempest_multisite_params(tempest_conf_file):
- """
- Add/update multisite parameters into tempest.conf file generated by Rally
- """
- logger.debug("Updating multisite tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
-
- config.set('service_available', 'kingbird', 'true')
- # cmd = ("openstack endpoint show kingbird | grep publicurl |"
- # "awk '{print $4}' | awk -F '/' '{print $4}'")
- # kingbird_api_version = os.popen(cmd).read()
- # kingbird_api_version = os_utils.get_endpoint(service_type='multisite')
-
- if CI_INSTALLER_TYPE == 'fuel':
- # For MOS based setup, the service is accessible
- # via bind host
- kingbird_conf_path = "/etc/kingbird/kingbird.conf"
- installer_type = CI_INSTALLER_TYPE
- installer_ip = CI_INSTALLER_IP
- installer_username = CONST.__getattribute__(
- 'multisite_{}_installer_username'.format(installer_type))
- installer_password = CONST.__getattribute__(
- 'multisite_{}_installer_password'.format(installer_type))
-
- ssh_options = ("-o UserKnownHostsFile=/dev/null -o "
- "StrictHostKeyChecking=no")
-
- # Get the controller IP from the fuel node
- cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s '
- '\'fuel node --env 1| grep controller | grep "True\| 1" '
- '| awk -F\| "{print \$5}"\'' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip))
- multisite_controller_ip = "".join(os.popen(cmd).read().split())
-
- # Login to controller and get bind host details
- cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" '
- 'grep -e "^bind_" %s \\""' % (installer_password,
- ssh_options,
- installer_username,
- installer_ip,
- multisite_controller_ip,
- kingbird_conf_path))
- bind_details = os.popen(cmd).read()
- bind_details = "".join(bind_details.split())
- # Extract port number from the bind details
- bind_port = re.findall(r"\D(\d{4})", bind_details)[0]
- # Extract ip address from the bind details
- bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
- bind_details)[0]
- kingbird_endpoint_url = "http://%s:%s/" % (bind_host, bind_port)
- else:
- # cmd = "openstack endpoint show kingbird | grep publicurl |\
- # awk '{print $4}' | awk -F '/' '{print $3}'"
- # kingbird_endpoint_url = os.popen(cmd).read()
- kingbird_endpoint_url = os_utils.get_endpoint(service_type='kingbird')
-
- try:
- config.add_section("kingbird")
- except Exception:
- logger.info('kingbird section exist')
-
- # set the domain id
- config.set('auth', 'admin_domain_name', 'default')
-
- config.set('kingbird', 'endpoint_type', 'publicURL')
- config.set('kingbird', 'TIME_TO_SYNC', '120')
- config.set('kingbird', 'endpoint_url', kingbird_endpoint_url)
- config.set('kingbird', 'api_version', 'v1.0')
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- backup_tempest_config(tempest_conf_file)
-
-
-def install_verifier_ext(path):
- """
- Install extension to active verifier
- """
- logger.info("Installing verifier from existing repo...")
- tag = get_repo_tag(path)
- cmd = ("rally verify add-verifier-ext --source {0} "
- "--version {1}"
- .format(path, tag))
- error_msg = ("Problem while adding verifier extension from %s" % path)
- ft_utils.execute_command_raise(cmd, error_msg=error_msg)
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
new file mode 100644
index 00000000..b47a9736
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
@@ -0,0 +1,13 @@
+# This is an empty configuration file to be filled up with the desired options
+# to generate a custom tempest.conf
+# Examples:
+# network-feature-enabled:
+# port_security: True
+
+# volume-feature-enabled:
+# api_v1: False
+
+# validation:
+# image_ssh_user: root
+# ssh_timeout: 300
+
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
index ac4e3728..df2c3126 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
@@ -1,4 +1,4 @@
# This is an empty file to be filled up with the desired tempest test cases
# Examples:
-#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
-#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops \ No newline at end of file
+#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke]
+#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index e565f5f9..c7ad4df2 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -12,7 +12,6 @@ from __future__ import division
import logging
import os
-import pkg_resources
import re
import shutil
import subprocess
@@ -24,15 +23,26 @@ from functest.core import testcase
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+from snaps.openstack import create_flavor
+from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
+from snaps.openstack.create_project import ProjectSettings
+from snaps.openstack.create_network import NetworkSettings, SubnetSettings
+from snaps.openstack.create_user import UserSettings
+from snaps.openstack.tests import openstack_tests
+from snaps.openstack.utils import deploy_utils
+
""" logging configuration """
logger = logging.getLogger(__name__)
-class TempestCommon(testcase.OSGCTestCase):
+class TempestCommon(testcase.TestCase):
def __init__(self, **kwargs):
super(TempestCommon, self).__init__(**kwargs)
+ self.resources = TempestResourcesManager(**kwargs)
self.MODE = ""
self.OPTION = ""
self.VERIFIER_ID = conf_utils.get_verifier_id()
@@ -63,8 +73,6 @@ class TempestCommon(testcase.OSGCTestCase):
else:
if self.MODE == 'smoke':
testr_mode = "smoke"
- elif self.MODE == 'feature_multisite':
- testr_mode = "'[Kk]ingbird'"
elif self.MODE == 'full':
testr_mode = ""
else:
@@ -187,25 +195,32 @@ class TempestCommon(testcase.OSGCTestCase):
try:
self.result = 100 * int(num_success) / int(num_executed)
except ZeroDivisionError:
- logger.error("No test has been executed")
self.result = 0
- return
+ if int(num_tests) > 0:
+ logger.info("All tests have been skipped")
+ else:
+ logger.error("No test has been executed")
+ return
with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
"tempest.log"), 'r') as logfile:
output = logfile.read()
- error_logs = ""
- for match in re.findall('(.*?)[. ]*fail ', output):
- error_logs += match
- skipped_testcase = ""
- for match in re.findall('(.*?)[. ]*skip:', output):
- skipped_testcase += match
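+            # The "\{0\}" in the patterns below matches the literal worker tag
+            # "{0}" that the test runner prints before each test name in
+            # tempest.log; it is not a Python format placeholder.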
+ success_testcases = []
+ for match in re.findall('.*\{0\} (.*?)[. ]*success ', output):
+ success_testcases.append(match)
+ failed_testcases = []
+ for match in re.findall('.*\{0\} (.*?)[. ]*fail ', output):
+ failed_testcases.append(match)
+ skipped_testcases = []
+ for match in re.findall('.*\{0\} (.*?)[. ]*skip:', output):
+ skipped_testcases.append(match)
self.details = {"tests": int(num_tests),
"failures": int(num_failures),
- "errors": error_logs,
- "skipped": skipped_testcase}
+ "success": success_testcases,
+ "errors": failed_testcases,
+ "skipped": skipped_testcases}
except Exception:
self.result = 0
@@ -218,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase):
try:
if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
- image_and_flavor = conf_utils.create_tempest_resources()
+ resources = self.resources.create()
conf_utils.configure_tempest(
self.DEPLOYMENT_DIR,
- IMAGE_ID=image_and_flavor.get("image_id"),
- FLAVOR_ID=image_and_flavor.get("flavor_id"),
- MODE=self.MODE)
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ mode=self.MODE)
self.generate_test_list(self.VERIFIER_REPO_DIR)
self.apply_tempest_blacklist()
self.run_verifier_tests()
@@ -232,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase):
except Exception as e:
logger.error('Error with run: %s' % e)
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ self.resources.cleanup()
self.stop_time = time.time()
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+
+ :return: TestCase.EX_OK
+ """
+ logger.info("Initializing the saved state of the OpenStack deployment")
+
+ if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
+ os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
+
+ # Make sure that the verifier is configured
+ conf_utils.configure_verifier(self.DEPLOYMENT_DIR)
+
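+        # Running Tempest's cleanup utility in "init saved state" mode records
+        # the resources that already exist, so a later cleanup pass only
+        # removes what the test run itself created.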
+ os_utils.init_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(TempestCommon, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources
+ created by Tempest.
+ """
+ logger.info("Destroying the resources created for refstack")
+
+ os_utils.perform_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(TempestCommon, self).clean()
+
class TempestSmokeSerial(TempestCommon):
@@ -266,18 +320,6 @@ class TempestFullParallel(TempestCommon):
self.MODE = "full"
-class TempestMultisite(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'multisite'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "feature_multisite"
- self.OPTION = "--concurrency 1"
- conf_utils.install_verifier_ext(
- pkg_resources.resource_filename('kingbird', '..'))
-
-
class TempestCustom(TempestCommon):
def __init__(self, **kwargs):
@@ -296,3 +338,170 @@ class TempestDefcore(TempestCommon):
TempestCommon.__init__(self, **kwargs)
self.MODE = "defcore"
self.OPTION = "--concurrency 1"
+
+
+class TempestResourcesManager(object):
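+    """Manage the OpenStack resources required by the Tempest suite.
+
+    create() builds the image, flavor, network and (optionally) project and
+    user through SNAPS creators; cleanup() removes them in reverse order.
+    """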
+
+ def __init__(self, **kwargs):
+ self.os_creds = None
+ if 'os_creds' in kwargs:
+ self.os_creds = kwargs['os_creds']
+ else:
+ self.os_creds = openstack_tests.get_credentials(
+ os_env_file=CONST.__getattribute__('openstack_creds'))
+
+ self.creators = list()
+
+ if hasattr(CONST, 'snaps_images_cirros'):
+ self.cirros_image_config = CONST.__getattribute__(
+ 'snaps_images_cirros')
+ else:
+ self.cirros_image_config = None
+
+ def create(self, use_custom_images=False, use_custom_flavors=False,
+ create_project=False):
+ if create_project:
+ logger.debug("Creating project (tenant) for Tempest suite")
+ project_name = CONST.__getattribute__(
+ 'tempest_identity_tenant_name')
+ project_creator = deploy_utils.create_project(
+ self.os_creds, ProjectSettings(
+ name=project_name,
+ description=CONST.__getattribute__(
+ 'tempest_identity_tenant_description')))
+ if (project_creator is None or
+ project_creator.get_project() is None):
+ raise Exception("Failed to create tenant")
+ project_id = project_creator.get_project().id
+ self.creators.append(project_creator)
+
+ logger.debug("Creating user for Tempest suite")
+ user_creator = deploy_utils.create_user(
+ self.os_creds, UserSettings(
+ name=CONST.__getattribute__('tempest_identity_user_name'),
+ password=CONST.__getattribute__(
+ 'tempest_identity_user_password'),
+ project_name=project_name))
+ if user_creator is None or user_creator.get_user() is None:
+ raise Exception("Failed to create user")
+ user_id = user_creator.get_user().id
+ self.creators.append(user_creator)
+ else:
+ project_name = None
+ project_id = None
+ user_id = None
+
+ logger.debug("Creating private network for Tempest suite")
+ network_creator = deploy_utils.create_network(
+ self.os_creds, NetworkSettings(
+ name=CONST.__getattribute__('tempest_private_net_name'),
+ project_name=project_name,
+ subnet_settings=[SubnetSettings(
+ name=CONST.__getattribute__('tempest_private_subnet_name'),
+ cidr=CONST.__getattribute__('tempest_private_subnet_cidr'))
+ ]))
+ if network_creator is None or network_creator.get_network() is None:
+ raise Exception("Failed to create private network")
+ self.creators.append(network_creator)
+
+ image_id = None
+ image_id_alt = None
+ flavor_id = None
+ flavor_id_alt = None
+
+ if (CONST.__getattribute__('tempest_use_custom_images') or
+ use_custom_images):
+ logger.debug("Creating image for Tempest suite")
+ image_base_name = CONST.__getattribute__('openstack_image_name')
+ os_image_settings = openstack_tests.cirros_image_settings(
+ image_base_name, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating image for Tempest suite")
+ image_creator = deploy_utils.create_image(
+ self.os_creds, os_image_settings)
+ if image_creator is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator)
+ image_id = image_creator.get_image().id
+
+ if use_custom_images:
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_base_name_alt = CONST.__getattribute__(
+ 'openstack_image_name_alt')
+ os_image_settings_alt = openstack_tests.cirros_image_settings(
+ image_base_name_alt, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_creator_alt = deploy_utils.create_image(
+ self.os_creds, os_image_settings_alt)
+ if image_creator_alt is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator_alt)
+ image_id_alt = image_creator_alt.get_image().id
+
+ if (CONST.__getattribute__('tempest_use_custom_flavors') or
+ use_custom_flavors):
+ logger.info("Creating flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
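+            # OVS-DPDK and FD.io (VPP) scenarios rely on vhost-user
+            # interfaces, which need hugepage-backed guests, hence the large
+            # mem_page_size flavor metadata.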
+ flavor_creator = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata))
+ flavor = flavor_creator.create()
+ if flavor is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator)
+ flavor_id = flavor.id
+
+ if use_custom_flavors:
+ logger.info("Creating 2nd flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata_alt = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator_alt = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name_alt'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata_alt))
+ flavor_alt = flavor_creator_alt.create()
+ if flavor_alt is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator_alt)
+ flavor_id_alt = flavor_alt.id
+
+ print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, "
+ "flavor_id: %s, flavor_id_alt: %s" % (
+ image_id, image_id_alt, flavor_id, flavor_id_alt,))
+
+ result = {
+ 'image_id': image_id,
+ 'image_id_alt': image_id_alt,
+ 'flavor_id': flavor_id,
+ 'flavor_id_alt': flavor_id_alt
+ }
+
+ if create_project:
+ result['project_id'] = project_id
+ result['tenant_id'] = project_id # for compatibility
+ result['user_id'] = user_id
+
+ return result
+
+ def cleanup(self):
+ """
+ Cleanup all OpenStack objects. Should be called on completion.
+ """
+ for creator in reversed(self.creators):
+ try:
+ creator.clean()
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
diff --git a/functest/opnfv_tests/openstack/vping/ping.sh b/functest/opnfv_tests/openstack/vping/ping.sh
index 693b8682..15f5e84e 100644
--- a/functest/opnfv_tests/openstack/vping/ping.sh
+++ b/functest/opnfv_tests/openstack/vping/ping.sh
@@ -1,13 +1,10 @@
#!/bin/sh
-while true; do
- ping -c 1 $1 2>&1 >/dev/null
- RES=$?
- if [ "Z$RES" = "Z0" ] ; then
- echo 'vPing OK'
- break
- else
- echo 'vPing KO'
- fi
- sleep 1
-done \ No newline at end of file
+
+ping -c 1 $1 2>&1 >/dev/null
+RES=$?
+if [ "Z$RES" = "Z0" ] ; then
+ echo 'vPing OK'
+else
+ echo 'vPing KO'
+fi
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 74fbce1b..40fcb07f 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -43,8 +43,14 @@ class VPingBase(testcase.TestCase):
if 'os_creds' in kwargs:
self.os_creds = kwargs['os_creds']
else:
+ creds_override = None
+ if hasattr(CONST, 'snaps_os_creds_override'):
+ creds_override = CONST.__getattribute__(
+ 'snaps_os_creds_override')
+
self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'))
+ os_env_file=CONST.__getattribute__('openstack_creds'),
+ overrides=creds_override)
self.creators = list()
self.image_creator = None
@@ -102,14 +108,33 @@ class VPingBase(testcase.TestCase):
'vping_private_subnet_name') + self.guid
private_subnet_cidr = CONST.__getattribute__(
'vping_private_subnet_cidr')
+
+ vping_network_type = None
+ vping_physical_network = None
+ vping_segmentation_id = None
+
+        if hasattr(CONST, 'vping_network_type'):
+            vping_network_type = CONST.__getattribute__(
+                'vping_network_type')
+        if hasattr(CONST, 'vping_physical_network'):
+            vping_physical_network = CONST.__getattribute__(
+                'vping_physical_network')
+        if hasattr(CONST, 'vping_segmentation_id'):
+            vping_segmentation_id = CONST.__getattribute__(
+                'vping_segmentation_id')
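+        # When these values are present in the configuration, the vPing
+        # network is created as a provider network (network type, physical
+        # network, segmentation id) instead of a plain tenant network.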
+
self.logger.info(
"Creating network with name: '%s'" % private_net_name)
self.network_creator = deploy_utils.create_network(
self.os_creds,
- NetworkSettings(name=private_net_name,
- subnet_settings=[SubnetSettings(
- name=private_subnet_name,
- cidr=private_subnet_cidr)]))
+ NetworkSettings(
+ name=private_net_name,
+ network_type=vping_network_type,
+ physical_network=vping_physical_network,
+ segmentation_id=vping_segmentation_id,
+ subnet_settings=[SubnetSettings(
+ name=private_subnet_name,
+ cidr=private_subnet_cidr)]))
self.creators.append(self.network_creator)
self.logger.info(
diff --git a/functest/opnfv_tests/openstack/vping/vping_userdata.py b/functest/opnfv_tests/openstack/vping/vping_userdata.py
index 9aed4c10..8088a4db 100644
--- a/functest/opnfv_tests/openstack/vping/vping_userdata.py
+++ b/functest/opnfv_tests/openstack/vping/vping_userdata.py
@@ -94,7 +94,7 @@ class VPingUserdata(vping_base.VPingBase):
while True:
time.sleep(1)
- p_console = vm_creator.get_os_vm_server_obj().get_console_output()
+ p_console = vm_creator.get_console_output()
if "vPing OK" in p_console:
self.logger.info("vPing detected!")
exit_code = TestCase.EX_OK
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index ede0fc50..841da834 100644
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -66,8 +66,7 @@ class ODLResultVisitor(robot.api.ResultVisitor):
class ODLTests(testcase.TestCase):
"""ODL test runner."""
- odl_test_repo = os.path.join(
- constants.CONST.__getattribute__('dir_repos'), 'odl_test')
+ odl_test_repo = constants.CONST.__getattribute__('dir_repo_odl_test')
neutron_suite_dir = os.path.join(odl_test_repo,
"csit/suites/openstack/neutron")
basic_suite_dir = os.path.join(odl_test_repo,
@@ -234,7 +233,7 @@ class ODLTests(testcase.TestCase):
elif installer_type == 'joid':
kwargs['odlip'] = os.environ['SDN_CONTROLLER']
elif installer_type == 'compass':
- kwargs['odlwebport'] = '8181'
+ kwargs['odlrestconfport'] = '8080'
elif installer_type == 'daisy':
kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
kwargs['odlwebport'] = '8181'
diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py b/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py
index dfaa5cc1..a6d192ee 100644
--- a/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py
+++ b/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py
@@ -64,7 +64,7 @@ class Connection(Foundation):
"""
os.getenv only returns current user value
GetEnvValue returns a environment value of
- current handle
+ current handle
eg: GetEnvValue(handle,'HOME')
"""
envhandle = handle
diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py b/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py
index cb75b5c3..875a2dc9 100644
--- a/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py
+++ b/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py
@@ -1,11 +1,11 @@
"""
Description:
- This file is used to setup the running environment
- Include Download code,setup environment variable
- Set onos running config
- Set user name/password
- Onos-push-keys and so on
- lanqinglong@huawei.com
+This file is used to setup the running environment
+Include Download code,setup environment variable
+Set onos running config
+Set user name/password
+Onos-push-keys and so on
+lanqinglong@huawei.com
#
# All rights reserved. This program and the accompanying materials
@@ -17,7 +17,7 @@ Description:
import logging
import pexpect
-import pxssh
+from pexpect import pxssh
import re
import os
import sys
@@ -196,10 +196,10 @@ class Environment(Connection):
def ChangeTestCasePara(self, testcase, user, password):
"""
- When running test script, there's something need
- to change in every test folder's *.param & *.topo files
- user: onos&compute node user
- password: onos&compute node password
+ When running test script, there\'s something need
+ to change in every test folder\'s \*.param & \*.topo files
+ user: onos\&compute node user
+ password: onos\&compute node password
"""
self.logger.info("Now Changing " + testcase + " name&password")
if self.masterusername == 'root':
diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py
deleted file mode 100644
index 71e3c972..00000000
--- a/functest/opnfv_tests/vnf/aaa/aaa.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-
-import functest.core.vnf as vnf
-
-
-class AaaVnf(vnf.VnfOnBoarding):
- """AAA VNF sample"""
-
- logger = logging.getLogger(__name__)
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "aaa"
- super(AaaVnf, self).__init__(**kwargs)
-
- def deploy_orchestrator(self):
- self.logger.info("No VNFM needed to deploy a free radius here")
- return True
-
- def deploy_vnf(self):
- self.logger.info("Freeradius VNF deployment")
- # find a way to deploy freeradius and tester (heat,manual, ..)
- deploy_vnf = {'status': 'PASS', 'version': 'xxxx'}
- self.details['deploy_vnf'] = deploy_vnf
- return True
-
- def test_vnf(self):
- self.logger.info("Run test towards freeradius")
- # once the freeradius is deployed..make some tests
- test_vnf = {'status': 'PASS', 'version': 'xxxx'}
- self.details['test_vnf'] = test_vnf
- return True
diff --git a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
index 25ddca21..8851f7a4 100644
--- a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
+++ b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
@@ -10,7 +10,9 @@ import json
import logging
import os
import pkg_resources
+import shlex
import shutil
+import subprocess
import time
import requests
@@ -43,7 +45,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
def config_ellis(self, ellis_ip, signup_code='secret', two_numbers=False):
output_dict = {}
- self.logger.info('Configure Ellis: %s', ellis_ip)
+ self.logger.debug('Configure Ellis: %s', ellis_ip)
output_dict['ellis_ip'] = ellis_ip
account_url = 'http://{0}/accounts'.format(ellis_ip)
params = {"password": "functest",
@@ -54,7 +56,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
output_dict['login'] = params
if rq.status_code != 201 and rq.status_code != 409:
raise Exception("Unable to create an account for number provision")
- self.logger.info('Account is created on Ellis: %s', params)
+ self.logger.debug('Account is created on Ellis: %s', params)
session_url = 'http://{0}/session'.format(ellis_ip)
session_data = {
@@ -66,13 +68,13 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
if rq.status_code != 201:
raise Exception('Failed to get cookie for Ellis')
cookies = rq.cookies
- self.logger.info('Cookies: %s', cookies)
+ self.logger.debug('Cookies: %s', cookies)
number_url = 'http://{0}/accounts/{1}/numbers'.format(
ellis_ip,
params['email'])
- self.logger.info('Create 1st calling number on Ellis')
- i = 24
+ self.logger.debug('Create 1st calling number on Ellis')
+ i = 30
while rq.status_code != 200 and i > 0:
try:
number_res = self.create_ellis_number(number_url, cookies)
@@ -86,7 +88,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
output_dict['number'] = number_res
if two_numbers:
- self.logger.info('Create 2nd calling number on Ellis')
+ self.logger.debug('Create 2nd calling number on Ellis')
number_res = self.create_ellis_number(number_url, cookies)
output_dict['number2'] = number_res
@@ -109,19 +111,17 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
bono_ip=None, ellis_ip=None,
signup_code='secret'):
self.logger.info('Run Clearwater live test')
- nameservers = ft_utils.get_resolvconf_ns()
- resolvconf = ['{0}{1}{2}'.format(os.linesep, 'nameserver ', ns)
- for ns in nameservers]
- self.logger.debug('resolvconf: %s', resolvconf)
dns_file = '/etc/resolv.conf'
dns_file_bak = '/etc/resolv.conf.bak'
+ self.logger.debug('Backup %s -> %s', dns_file, dns_file_bak)
shutil.copy(dns_file, dns_file_bak)
- script = ('echo -e "nameserver {0}{1}" > {2};'
- 'source /etc/profile.d/rvm.sh;'
- 'cd {3};'
- 'rake test[{4}] SIGNUP_CODE={5}'
- .format(dns_ip,
- ''.join(resolvconf),
+ cmd = ("dnsmasq -d -u root --server=/clearwater.opnfv/{0} "
+ "-r /etc/resolv.conf.bak".format(dns_ip))
+ dnsmasq_process = subprocess.Popen(shlex.split(cmd))
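+        # The local dnsmasq instance forwards lookups for the clearwater.opnfv
+        # domain to the vIMS DNS (dns_ip) and resolves everything else through
+        # the nameservers saved in /etc/resolv.conf.bak, so the live test can
+        # point /etc/resolv.conf at 127.0.0.1 below.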
+ script = ('echo -e "nameserver {0}" > {1};'
+ 'cd {2};'
+ 'rake test[{3}] SIGNUP_CODE={4}'
+ .format('127.0.0.1',
dns_file,
self.test_dir,
public_domain,
@@ -131,12 +131,12 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
script = '{0}{1}'.format(script, subscript)
script = ('{0}{1}'.format(script, ' --trace'))
cmd = "/bin/bash -c '{0}'".format(script)
- self.logger.info('Live test cmd: %s', cmd)
+ self.logger.debug('Live test cmd: %s', cmd)
output_file = os.path.join(self.result_dir, "ims_test_output.txt")
ft_utils.execute_command(cmd,
error_msg='Clearwater live test failed',
output_file=output_file)
-
+ dnsmasq_process.kill()
with open(dns_file_bak, 'r') as bak_file:
result = bak_file.read()
with open(dns_file, 'w') as f:
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index fafc77e1..b07eaee2 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-# Copyright (c) 2016 Orange and others.
+# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -25,16 +25,16 @@ from functest.utils.constants import CONST
import functest.utils.openstack_utils as os_utils
from snaps.openstack.os_credentials import OSCreds
-from snaps.openstack.create_network import NetworkSettings, SubnetSettings, \
- OpenStackNetwork
-from snaps.openstack.create_security_group import SecurityGroupSettings, \
- SecurityGroupRuleSettings,\
- Direction, Protocol, \
- OpenStackSecurityGroup
+from snaps.openstack.create_network import (NetworkSettings, SubnetSettings,
+ OpenStackNetwork)
+from snaps.openstack.create_security_group import (SecurityGroupSettings,
+ SecurityGroupRuleSettings,
+ Direction, Protocol,
+ OpenStackSecurityGroup)
from snaps.openstack.create_router import RouterSettings, OpenStackRouter
-from snaps.openstack.create_instance import VmInstanceSettings, \
- FloatingIpSettings, \
- OpenStackVmInstance
+from snaps.openstack.create_instance import (VmInstanceSettings,
+ FloatingIpSettings,
+ OpenStackVmInstance)
from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
from snaps.openstack.create_image import ImageSettings, OpenStackImage
from snaps.openstack.create_keypairs import KeypairSettings, OpenStackKeypair
@@ -110,15 +110,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
# needs some images
self.__logger.info("Upload some OS images if it doesn't exist")
- for image_name, image_url in self.images.iteritems():
- self.__logger.info("image: %s, url: %s", image_name, image_url)
- if image_url and image_name:
+ for image_name, image_file in self.images.iteritems():
+ self.__logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
image_creator = OpenStackImage(
self.snaps_creds,
ImageSettings(name=image_name,
image_user='cloud',
img_format='qcow2',
- url=image_url))
+ image_file=image_file))
image_creator.create()
# self.created_object.append(image_creator)
@@ -239,6 +239,8 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
while str(cfy_status) != 'running' and retry:
try:
cfy_status = cfy_client.manager.get_status()['status']
+ self.__logger.debug("The current manager status is %s",
+ cfy_status)
except Exception: # pylint: disable=broad-except
self.__logger.warning("Cloudify Manager isn't " +
"up and running. Retrying ...")
@@ -263,14 +265,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
self.__logger.info("Put private keypair in manager")
if manager_creator.vm_ssh_active(block=True):
ssh = manager_creator.ssh_client()
- scp = SCPClient(ssh.get_transport())
+ scp = SCPClient(ssh.get_transport(), socket_timeout=15.0)
scp.put(kp_file, '~/')
cmd = "sudo cp ~/cloudify_ims.pem /etc/cloudify/"
- ssh.exec_command(cmd)
+ run_blocking_ssh_command(ssh, cmd)
cmd = "sudo chmod 444 /etc/cloudify/cloudify_ims.pem"
- ssh.exec_command(cmd)
+ run_blocking_ssh_command(ssh, cmd)
cmd = "sudo yum install -y gcc python-devel"
- ssh.exec_command(cmd)
+            run_blocking_ssh_command(
+                ssh, cmd, "Unable to install packages on manager")
self.details['orchestrator'].update(status='PASS', duration=duration)
@@ -292,15 +295,17 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
descriptor.get('file_name'))
self.__logger.info("Get or create flavor for all clearwater vm")
- self.exist_obj['flavor2'], flavor_id = os_utils.get_or_create_flavor(
- self.vnf['requirements']['flavor']['name'],
- self.vnf['requirements']['flavor']['ram_min'],
- '30',
- '1',
- public=True)
+ flavor_settings = FlavorSettings(
+ name=self.vnf['requirements']['flavor']['name'],
+ ram=self.vnf['requirements']['flavor']['ram_min'],
+ disk=25,
+ vcpus=1)
+ flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_creator.create()
+ self.created_object.append(flavor_creator)
self.vnf['inputs'].update(dict(
- flavor_id=flavor_id,
+ flavor_id=self.vnf['requirements']['flavor']['name'],
))
self.__logger.info("Create VNF Instance")
@@ -371,7 +376,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
try:
cfy_client.executions.cancel(execution['id'],
force=True)
- except:
+ except: # pylint: disable=broad-except
self.__logger.warn("Can't cancel the current exec")
execution = cfy_client.executions.start(
@@ -383,7 +388,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
wait_for_execution(cfy_client, execution, self.__logger)
cfy_client.deployments.delete(self.vnf['descriptor'].get('name'))
cfy_client.blueprints.delete(self.vnf['descriptor'].get('name'))
- except:
+ except: # pylint: disable=broad-except
self.__logger.warn("Some issue during the undeployment ..")
self.__logger.warn("Tenant clean continue ..")
@@ -507,3 +512,10 @@ def sig_test_format(sig_test):
total_sig_test_result['failures'] = nb_failures
total_sig_test_result['skipped'] = nb_skipped
return total_sig_test_result
+
+
+def run_blocking_ssh_command(ssh, cmd, error_msg="Unable to run this command"):
+ """Command to run ssh command with the exit status."""
+ stdin, stdout, stderr = ssh.exec_command(cmd)
+ if stdout.channel.recv_exit_status() != 0:
+ raise Exception(error_msg)
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
index f1028ce7..280e0a6b 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml
@@ -1,6 +1,6 @@
tenant_images:
- ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- cloudify_manager_4.0: http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2
+ ubuntu_14.04: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+ cloudify_manager_4.0: /home/opnfv/functest/images/cloudify-manager-premium-4.0.1.qcow2
orchestrator:
name: cloudify
version: '4.0'
@@ -19,7 +19,7 @@ vnf:
version: '122'
requirements:
flavor:
- name: m1.medium
+ name: m1.small
ram_min: 2048
inputs:
image_id: 'ubuntu_14.04'
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
deleted file mode 100644
index d420705a..00000000
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import json
-import logging
-import os
-import time
-
-from opera import openo_connect
-import requests
-
-import functest.opnfv_tests.vnf.ims.clearwater_ims_base as clearwater_ims_base
-from functest.utils.constants import CONST
-
-
-class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "opera_ims"
- super(OperaIms, self).__init__(**kwargs)
- self.logger = logging.getLogger(__name__)
- self.ellis_file = os.path.join(
- CONST.__getattribute__('dir_results'), 'ellis.info')
- self.live_test_file = os.path.join(
- CONST.__getattribute__('dir_results'), 'live_test_report.json')
- try:
- self.openo_msb_endpoint = os.environ['OPENO_MSB_ENDPOINT']
- except KeyError:
- raise Exception('OPENO_MSB_ENDPOINT is not specified,'
- ' put it as <OPEN-O ip>:<port>')
- else:
- self.logger.info('OPEN-O endpoint is: %s', self.openo_msb_endpoint)
-
- def prepare(self):
- pass
-
- def clean(self):
- pass
-
- def deploy_vnf(self):
- try:
- openo_connect.create_service(self.openo_msb_endpoint,
- 'functest_opera',
- 'VNF for functest testing')
- except Exception as e:
- self.logger.error(e)
- return {'status': 'FAIL', 'result': e}
- else:
- self.logger.info('vIMS deployment is kicked off')
- return {'status': 'PASS', 'result': ''}
-
- def dump_info(self, info_file, result):
- with open(info_file, 'w') as f:
- self.logger.debug('Save information to file: %s', info_file)
- json.dump(result, f)
-
- def test_vnf(self):
- vnfm_ip = openo_connect.get_vnfm_ip(self.openo_msb_endpoint)
- self.logger.info('VNFM IP: %s', vnfm_ip)
- vnf_status_url = 'http://{0}:5000/api/v1/model/status'.format(vnfm_ip)
- vnf_alive = False
- retry = 40
-
- self.logger.info('Check the VNF status')
- while retry > 0:
- rq = requests.get(vnf_status_url, timeout=90)
- response = rq.json()
- vnf_alive = response['vnf_alive']
- msg = response['msg']
- self.logger.info(msg)
- if vnf_alive:
- break
- self.logger.info('check again in one and half a minute...')
- retry = retry - 1
- time.sleep(90)
-
- if not vnf_alive:
- raise Exception('VNF failed to start: {0}'.format(msg))
-
- ellis_config_url = ('http://{0}:5000/api/v1/model/ellis/configure'
- .format(vnfm_ip))
- rq = requests.get(ellis_config_url, timeout=90)
- if rq.json() and not rq.json()['ellis_ok']:
- self.logger.error(rq.json()['data'])
- raise Exception('Failed to configure Ellis')
-
- self.logger.info('Get Clearwater deployment detail')
- vnf_info_url = ('http://{0}:5000/api/v1/model/output'
- .format(vnfm_ip))
- rq = requests.get(vnf_info_url, timeout=90)
- data = rq.json()['data']
- self.logger.info(data)
- bono_ip = data['bono_ip']
- ellis_ip = data['ellis_ip']
- dns_ip = data['dns_ip']
- result = self.config_ellis(ellis_ip, 'signup', True)
- self.logger.debug('Ellis Result: %s', result)
- self.dump_info(self.ellis_file, result)
-
- if dns_ip:
- vims_test_result = self.run_clearwater_live_test(
- dns_ip,
- 'clearwater.local',
- bono_ip,
- ellis_ip,
- 'signup')
- if vims_test_result != '':
- self.dump_info(self.live_test_file, vims_test_result)
- return {'status': 'PASS', 'result': vims_test_result}
- else:
- return {'status': 'FAIL', 'result': ''}
-
- def main(self, **kwargs):
- self.logger.info("Start to run Opera vIMS VNF onboarding test")
- self.execute()
- self.logger.info("Opera vIMS VNF onboarding test finished")
- if self.result is "PASS":
- return self.EX_OK
- else:
- return self.EX_RUN_ERROR
-
- def run(self):
- kwargs = {}
- return self.main(**kwargs)
diff --git a/functest/opnfv_tests/vnf/ims/orchestra.yaml b/functest/opnfv_tests/vnf/ims/orchestra.yaml
new file mode 100644
index 00000000..4cd18e72
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra.yaml
@@ -0,0 +1,61 @@
+tenant_images:
+ orchestrator:
+ ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+ orchestra_openims:
+ openims: /home/opnfv/functest/images/img
+ orchestra_clearwaterims:
+ ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img
+mano:
+ name: OpenBaton
+ version: '3.2.0'
+ requirements:
+ flavor:
+ name: openbaton
+ ram_min: 4096
+ disk: 5
+ vcpus: 2
+ image: 'ubuntu-14.04-server-cloudimg-amd64-disk1'
+ bootstrap:
+ url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap
+ config:
+ url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file
+ gvnfm:
+ userdata:
+ url: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh
+ credentials:
+ username: admin
+ password: openbaton
+
+orchestra_openims:
+ name: OpenIMS
+ descriptor:
+ url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json
+ requirements:
+ flavor:
+ name: m1.small
+ ram_min: 2048
+ disk: 5
+ vcpus: 2
+ test:
+ scscf:
+ ports: [3870, 6060]
+ pcscf:
+ ports: [4060]
+ icscf:
+ ports: [3869, 5060]
+ fhoss:
+ ports: [3868]
+ bind9:
+ ports: []
+
+orchestra_clearwaterims:
+ name: Clearwater IMS
+ descriptor:
+ url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/ClearwaterIMS/3.2.0/json
+ requirements:
+ flavor:
+ name: m1.small
+ ram_min: 2048
+ disk: 5
+ vcpus: 2
+ test:
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
new file mode 100644
index 00000000..a5405996
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
@@ -0,0 +1,682 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Orchestra Clearwater IMS testcase implementation."""
+
+import json
+import logging
+import os
+import socket
+import time
+import pkg_resources
+import yaml
+
+from snaps.openstack.create_image import OpenStackImage, ImageSettings
+from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings
+from snaps.openstack.create_security_group import (
+ OpenStackSecurityGroup,
+ SecurityGroupSettings,
+ SecurityGroupRuleSettings,
+ Direction,
+ Protocol)
+from snaps.openstack.create_network import (
+ OpenStackNetwork,
+ NetworkSettings,
+ SubnetSettings,
+ PortSettings)
+from snaps.openstack.create_router import OpenStackRouter, RouterSettings
+from snaps.openstack.os_credentials import OSCreds
+from snaps.openstack.create_instance import (
+ VmInstanceSettings,
+ OpenStackVmInstance)
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+
+import functest.core.vnf as vnf
+import functest.utils.openstack_utils as os_utils
+from functest.utils.constants import CONST
+
+from org.openbaton.cli.errors.errors import NfvoException
+from org.openbaton.cli.agents.agents import MainAgent
+
+
+__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>"
+# ----------------------------------------------------------
+#
+# UTILS
+#
+# -----------------------------------------------------------
+
+
+def get_config(parameter, file_path):
+ """
+ Get config parameter.
+
+ Returns the value of a given parameter in file.yaml
+ parameter must be given in string format with dots
+ Example: general.openstack.image_name
+ """
+ with open(file_path) as config_file:
+ file_yaml = yaml.safe_load(config_file)
+ value = file_yaml
+ for element in parameter.split("."):
+ value = value.get(element)
+ if value is None:
+ raise ValueError("The parameter %s is not defined in"
+ " reporting.yaml", parameter)
+ return value
+
+
+def servertest(host, port):
+ """Method to test that a server is reachable at IP:port"""
+ args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in args:
+ sock = socket.socket(family, socktype, proto)
+ try:
+ sock.connect(sockaddr)
+ except socket.error:
+ return False
+ else:
+ sock.close()
+ return True
+
+
+def get_userdata(orchestrator=dict):
+ """Build userdata for Open Baton machine"""
+ userdata = "#!/bin/bash\n"
+ userdata += "echo \"Executing userdata...\"\n"
+ userdata += "set -x\n"
+ userdata += "set -e\n"
+ userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
+ userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
+ userdata += "echo \"Install curl...\"\n"
+ userdata += "apt-get install curl\n"
+ userdata += "echo \"Inject public key...\"\n"
+ userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
+ "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
+ "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
+ "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
+ "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
+ "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
+ "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
+ "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
+ "horized_keys\n")
+ userdata += "echo \"Download bootstrap...\"\n"
+ userdata += ("curl -s %s "
+ "> ./bootstrap\n" % orchestrator['bootstrap']['url'])
+ userdata += ("curl -s %s" "> ./config_file\n" %
+ orchestrator['bootstrap']['config']['url'])
+ userdata += ("echo \"Disable usage of mysql...\"\n")
+ userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
+ userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
+ % orchestrator['details']['fip'].ip)
+ userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
+ "=%s/g /config_file\n" % orchestrator['details']['fip'].ip)
+ userdata += "echo \"Set autostart of components to 'false'\"\n"
+ userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
+ userdata += "echo \"Execute bootstrap...\"\n"
+ bootstrap = "sh ./bootstrap release -configFile=./config_file"
+ userdata += bootstrap + "\n"
+ userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
+ userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
+ "/etc/openbaton/openbaton-nfvo.properties\n")
+ userdata += (
+ "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
+ orchestrator['gvnfm']['userdata']['url'])
+ userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \
+ "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
+ userdata += "echo \"Starting NFVO\"\n"
+ userdata += "service openbaton-nfvo restart\n"
+ userdata += "echo \"Starting Generic VNFM\"\n"
+ userdata += "service openbaton-vnfm-generic restart\n"
+ userdata += "echo \"...end of userdata...\"\n"
+ return userdata
+
+
+class ClearwaterImsVnf(vnf.VnfOnBoarding):
+ """Clearwater IMS VNF deployed with openBaton orchestrator"""
+
+ logger = logging.getLogger(__name__)
+
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "orchestra_clearwaterims"
+ super(ClearwaterImsVnf, self).__init__(**kwargs)
+ # self.logger = logging.getLogger("functest.ci.run_tests.orchestra")
+ self.logger.info("kwargs %s", (kwargs))
+
+ self.case_dir = pkg_resources.resource_filename(
+ 'functest', 'opnfv_tests/vnf/ims/')
+ self.data_dir = CONST.__getattribute__('dir_ims_data')
+ self.test_dir = CONST.__getattribute__('dir_repo_vims_test')
+ self.created_resources = []
+ self.logger.info("%s VNF onboarding test starting", self.case_name)
+
+ try:
+ self.config = CONST.__getattribute__(
+ 'vnf_{}_config'.format(self.case_name))
+ except BaseException:
+ raise Exception("Orchestra VNF config file not found")
+ config_file = self.case_dir + self.config
+
+ self.mano = dict(
+ get_config("mano", config_file),
+ details={}
+ )
+ self.logger.debug("Orchestrator configuration %s", self.mano)
+
+ self.details['orchestrator'] = dict(
+ name=self.mano['name'],
+ version=self.mano['version'],
+ status='ERROR',
+ result=''
+ )
+
+ self.vnf = dict(
+ get_config(self.case_name, config_file),
+ )
+ self.logger.debug("VNF configuration: %s", self.vnf)
+
+ self.details['vnf'] = dict(
+ name=self.vnf['name'],
+ )
+
+ self.details['test_vnf'] = dict(
+ name=self.case_name,
+ )
+
+ # Orchestra base Data directory creation
+ if not os.path.exists(self.data_dir):
+ os.makedirs(self.data_dir)
+
+ self.images = get_config("tenant_images.orchestrator", config_file)
+ self.images.update(
+ get_config(
+ "tenant_images.%s" %
+ self.case_name,
+ config_file))
+ self.snaps_creds = None
+
+ def prepare(self):
+ """Prepare testscase (Additional pre-configuration steps)."""
+ super(ClearwaterImsVnf, self).prepare()
+
+ self.logger.info("Additional pre-configuration steps")
+ self.logger.info("creds %s", (self.creds))
+
+ self.snaps_creds = OSCreds(
+ username=self.creds['username'],
+ password=self.creds['password'],
+ auth_url=self.creds['auth_url'],
+ project_name=self.creds['tenant'],
+ identity_api_version=int(os_utils.get_keystone_client_version()))
+
+ self.prepare_images()
+ self.prepare_flavor()
+ self.prepare_security_groups()
+ self.prepare_network()
+ self.prepare_floating_ip()
+
+ def prepare_images(self):
+ """Upload images if they doen't exist yet"""
+ self.logger.info("Upload images if they doen't exist yet")
+ for image_name, image_file in self.images.iteritems():
+ self.logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
+ image = OpenStackImage(
+ self.snaps_creds,
+ ImageSettings(name=image_name,
+ image_user='cloud',
+ img_format='qcow2',
+ image_file=image_file))
+ image.create()
+ # self.created_resources.append(image);
+
+ def prepare_security_groups(self):
+ """Create Open Baton security group if it doesn't exist yet"""
+ self.logger.info(
+ "Creating security group for Open Baton if not yet existing...")
+ sg_rules = list()
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.icmp))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.icmp))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.ingress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.egress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+
+ security_group = OpenStackSecurityGroup(
+ self.snaps_creds,
+ SecurityGroupSettings(
+ name="orchestra-sec-group-allowall",
+ rule_settings=sg_rules))
+
+ security_group_info = security_group.create()
+ self.created_resources.append(security_group)
+ self.mano['details']['sec_group'] = security_group_info.name
+ self.logger.info(
+ "Security group orchestra-sec-group-allowall prepared")
+
+ def prepare_flavor(self):
+ """Create Open Baton flavor if it doesn't exist yet"""
+ self.logger.info(
+ "Create Flavor for Open Baton NFVO if not yet existing")
+
+ flavor_settings = FlavorSettings(
+ name=self.mano['requirements']['flavor']['name'],
+ ram=self.mano['requirements']['flavor']['ram_min'],
+ disk=self.mano['requirements']['flavor']['disk'],
+ vcpus=self.mano['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.created_resources.append(flavor)
+ self.mano['details']['flavor'] = {}
+ self.mano['details']['flavor']['name'] = flavor_settings.name
+ self.mano['details']['flavor']['id'] = flavor_info.id
+
+ def prepare_network(self):
+ """Create network/subnet/router if they doen't exist yet"""
+ self.logger.info(
+ "Creating network/subnet/router if they doen't exist yet...")
+ subnet_settings = SubnetSettings(
+ name='%s_subnet' %
+ self.case_name,
+ cidr="192.168.100.0/24")
+ network_settings = NetworkSettings(
+ name='%s_net' %
+ self.case_name,
+ subnet_settings=[subnet_settings])
+ orchestra_network = OpenStackNetwork(
+ self.snaps_creds, network_settings)
+ orchestra_network_info = orchestra_network.create()
+ self.mano['details']['network'] = {}
+ self.mano['details']['network']['id'] = orchestra_network_info.id
+ self.mano['details']['network']['name'] = orchestra_network_info.name
+ self.mano['details']['external_net_name'] = snaps_utils.\
+ get_ext_net_name(self.snaps_creds)
+ self.created_resources.append(orchestra_network)
+ orchestra_router = OpenStackRouter(
+ self.snaps_creds,
+ RouterSettings(
+ name='%s_router' %
+ self.case_name,
+ external_gateway=self.mano['details']['external_net_name'],
+ internal_subnets=[
+ subnet_settings.name]))
+ orchestra_router.create()
+ self.created_resources.append(orchestra_router)
+ self.logger.info("Created network and router for Open Baton NFVO...")
+
+ def prepare_floating_ip(self):
+ """Select/Create Floating IP if it doesn't exist yet"""
+ self.logger.info("Retrieving floating IP for Open Baton NFVO")
+ neutron_client = snaps_utils.neutron_utils.neutron_client(
+ self.snaps_creds)
+ # Finding Tenant ID to check to which tenant the Floating IP belongs
+ tenant_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(self.creds),
+ self.tenant_name)
+ # Use os_utils to retrieve complete information of Floating IPs
+ floating_ips = os_utils.get_floating_ips(neutron_client)
+ my_floating_ips = []
+ # Filter Floating IPs with tenant id
+ for floating_ip in floating_ips:
+ # self.logger.info("Floating IP: %s", floating_ip)
+ if floating_ip.get('tenant_id') == tenant_id:
+ my_floating_ips.append(floating_ip.get('floating_ip_address'))
+ # Select if Floating IP exist else create new one
+ if len(my_floating_ips) >= 1:
+ # Get Floating IP object from snaps for clean up
+ snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
+ neutron_client)
+ for my_floating_ip in my_floating_ips:
+ for snaps_floating_ip in snaps_floating_ips:
+ if snaps_floating_ip.ip == my_floating_ip:
+ self.mano['details']['fip'] = snaps_floating_ip
+ self.logger.info(
+ "Selected floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+ break
+ if self.mano['details']['fip'] is not None:
+ break
+ else:
+ self.logger.info("Creating floating IP for Open Baton NFVO")
+ self.mano['details']['fip'] = snaps_utils.neutron_utils.\
+ create_floating_ip(
+ neutron_client,
+ self.mano['details']['external_net_name'])
+ self.logger.info(
+ "Created floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+
+ def get_vim_descriptor(self):
+ """"Create VIM descriptor to be used for onboarding"""
+ self.logger.info(
+ "Building VIM descriptor with PoP creds: %s",
+ self.creds)
+ # Depending on API version either tenant ID or project name must be
+ # used
+ if os_utils.is_keystone_v3():
+ self.logger.info(
+ "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
+ project_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(),
+ self.creds.get("project_name"))
+ else:
+ self.logger.info(
+ "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
+ project_id = self.creds.get("tenant_name")
+ self.logger.debug("VIM project/tenant id: %s", project_id)
+ vim_json = {
+ "name": "vim-instance",
+ "authUrl": self.creds.get("auth_url"),
+ "tenant": project_id,
+ "username": self.creds.get("username"),
+ "password": self.creds.get("password"),
+ "securityGroups": [
+ self.mano['details']['sec_group']
+ ],
+ "type": "openstack",
+ "location": {
+ "name": "opnfv",
+ "latitude": "52.525876",
+ "longitude": "13.314400"
+ }
+ }
+ self.logger.info("Built VIM descriptor: %s", vim_json)
+ return vim_json
+
+ def deploy_orchestrator(self):
+ self.logger.info("Deploying Open Baton...")
+ self.logger.info("Details: %s", self.mano['details'])
+ start_time = time.time()
+
+ self.logger.info("Creating orchestra instance...")
+ userdata = get_userdata(self.mano)
+ self.logger.info("flavor: %s\n"
+ "image: %s\n"
+ "network_id: %s\n",
+ self.mano['details']['flavor']['name'],
+ self.mano['requirements']['image'],
+ self.mano['details']['network']['id'])
+ self.logger.debug("userdata: %s\n", userdata)
+ # setting up image
+ image_settings = ImageSettings(
+ name=self.mano['requirements']['image'],
+ image_user='ubuntu',
+ exists=True)
+ # setting up port
+ port_settings = PortSettings(
+ name='%s_port' % self.case_name,
+ network_name=self.mano['details']['network']['name'])
+ # build configuration of vm
+ orchestra_settings = VmInstanceSettings(
+ name=self.case_name,
+ flavor=self.mano['details']['flavor']['name'],
+ port_settings=[port_settings],
+ security_group_names=[self.mano['details']['sec_group']],
+ userdata=userdata)
+ orchestra_vm = OpenStackVmInstance(self.snaps_creds,
+ orchestra_settings,
+ image_settings)
+
+ orchestra_vm.create()
+ self.created_resources.append(orchestra_vm)
+ self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
+ self.logger.info(
+ "Created orchestra instance: %s",
+ self.mano['details']['id'])
+
+ self.logger.info("Associating floating ip: '%s' to VM '%s' ",
+ self.mano['details']['fip'].ip,
+ self.case_name)
+ nova_client = os_utils.get_nova_client()
+ if not os_utils.add_floating_ip(
+ nova_client,
+ self.mano['details']['id'],
+ self.mano['details']['fip'].ip):
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Cannot associate floating IP to VM.")
+ return False
+
+ self.logger.info("Waiting for Open Baton NFVO to be up and running...")
+ timeout = 0
+ while timeout < 200:
+ if servertest(
+ self.mano['details']['fip'].ip,
+ "8080"):
+ break
+ else:
+ self.logger.info(
+ "Open Baton NFVO is not started yet (%ss)",
+ (timeout * 5))
+ time.sleep(5)
+ timeout += 1
+
+ if timeout >= 200:
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Open Baton is not started correctly")
+ return False
+
+ self.logger.info("Waiting for all components to be up and running...")
+ time.sleep(60)
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(status='PASS', duration=duration)
+ self.logger.info("Deploy Open Baton NFVO: OK")
+ return True
+
+ def deploy_vnf(self):
+ start_time = time.time()
+ self.logger.info("Deploying %s...", self.vnf['name'])
+
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+
+ self.logger.info(
+ "Create %s Flavor if not existing", self.vnf['name'])
+ flavor_settings = FlavorSettings(
+ name=self.vnf['requirements']['flavor']['name'],
+ ram=self.vnf['requirements']['flavor']['ram_min'],
+ disk=self.vnf['requirements']['flavor']['disk'],
+ vcpus=self.vnf['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.logger.debug("Flavor id: %s", flavor_info.id)
+
+ self.logger.info("Getting project 'default'...")
+ project_agent = main_agent.get_agent("project", "")
+ for project in json.loads(project_agent.find()):
+ if project.get("name") == "default":
+ self.mano['details']['project_id'] = project.get("id")
+ self.logger.info("Found project 'default': %s", project)
+ break
+
+ vim_json = self.get_vim_descriptor()
+ self.logger.info("Registering VIM: %s", vim_json)
+
+ main_agent.get_agent(
+ "vim", project_id=self.mano['details']['project_id']).create(
+ entity=json.dumps(vim_json))
+
+ market_agent = main_agent.get_agent(
+ "market", project_id=self.mano['details']['project_id'])
+
+ try:
+ self.logger.info("sending: %s", self.vnf['descriptor']['url'])
+ nsd = market_agent.create(entity=self.vnf['descriptor']['url'])
+ if nsd.get('id') is None:
+ self.logger.error("NSD not onboarded correctly")
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ self.mano['details']['nsd_id'] = nsd.get('id')
+ self.logger.info("Onboarded NSD: " + nsd.get("name"))
+
+ nsr_agent = main_agent.get_agent(
+ "nsr", project_id=self.mano['details']['project_id'])
+
+ self.mano['details']['nsr'] = nsr_agent.create(
+ self.mano['details']['nsd_id'])
+ except NfvoException as exc:
+ self.logger.error(exc.message)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ if self.mano['details']['nsr'].get('code') is not None:
+ self.logger.error(
+ "%s cannot be deployed: %s -> %s",
+ self.vnf['name'],
+ self.mano['details']['nsr'].get('code'),
+ self.mano['details']['nsr'].get('message'))
+ self.logger.error("%s cannot be deployed", self.vnf['name'])
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ timeout = 0
+ self.logger.info("Waiting for NSR to go to ACTIVE...")
+ while self.mano['details']['nsr'].get("status") != 'ACTIVE' \
+ and self.mano['details']['nsr'].get("status") != 'ERROR':
+ timeout += 1
+ self.logger.info("NSR is not yet ACTIVE... (%ss)", 5 * timeout)
+ if timeout == 300:
+ self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ time.sleep(5)
+ self.mano['details']['nsr'] = json.loads(
+ nsr_agent.find(self.mano['details']['nsr'].get('id')))
+
+ duration = time.time() - start_time
+ if self.mano['details']['nsr'].get("status") == 'ACTIVE':
+ self.details["vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Sleep for 60s to ensure that all "
+ "services are up and running...")
+ time.sleep(60)
+ result = True
+ else:
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ self.logger.error("NSR: %s", self.mano['details'].get('nsr'))
+ result = False
+ return result
+
+ def test_vnf(self):
+ self.logger.info(
+ "Testing VNF Clearwater IMS is not yet implemented...")
+ start_time = time.time()
+
+ duration = time.time() - start_time
+ self.details["test_vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Test VNF: OK")
+ return True
+
+ def clean(self):
+ self.logger.info("Cleaning %s...", self.case_name)
+ try:
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+ self.logger.info("Terminating %s...", self.vnf['name'])
+ if (self.mano['details'].get('nsr')):
+ main_agent.get_agent(
+ "nsr",
+ project_id=self.mano['details']['project_id']).delete(
+ self.mano['details']['nsr'].get('id'))
+ self.logger.info("Sleeping 60 seconds...")
+ time.sleep(60)
+ else:
+ self.logger.info("No need to terminate the VNF...")
+ # os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+ # instance_id=self.mano_instance_id)
+ except (NfvoException, KeyError) as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ try:
+ neutron_client = os_utils.get_neutron_client(self.creds)
+ self.logger.info("Deleting Open Baton Port...")
+ port = snaps_utils.neutron_utils.get_port_by_name(
+ neutron_client, '%s_port' % self.case_name)
+ snaps_utils.neutron_utils.delete_port(neutron_client, port)
+ time.sleep(10)
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ try:
+ self.logger.info("Deleting Open Baton Floating IP...")
+ snaps_utils.neutron_utils.delete_floating_ip(
+ neutron_client, self.mano['details']['fip'])
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ for resource in reversed(self.created_resources):
+ try:
+ self.logger.info("Cleaning %s", str(resource))
+ resource.clean()
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ super(ClearwaterImsVnf, self).clean()
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
deleted file mode 100644
index 7b1ea9ad..00000000
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py
+++ /dev/null
@@ -1,487 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import json
-import logging
-import os
-import pkg_resources
-import socket
-import sys
-import time
-import yaml
-
-import functest.core.vnf as vnf
-import functest.utils.openstack_utils as os_utils
-from functest.utils.constants import CONST
-
-from org.openbaton.cli.agents.agents import MainAgent
-from org.openbaton.cli.errors.errors import NfvoException
-
-
-__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>"
-# ----------------------------------------------------------
-#
-# UTILS
-#
-# -----------------------------------------------------------
-
-
-def get_config(parameter, my_file):
- """
- Returns the value of a given parameter in file.yaml
- parameter must be given in string format with dots
- Example: general.openstack.image_name
- """
- with open(file) as f:
- file_yaml = yaml.safe_load(f)
- f.close()
- value = file_yaml
- for element in parameter.split("."):
- value = value.get(element)
- if value is None:
- raise ValueError("The parameter %s is not defined in"
- " %s" % (parameter, my_file))
- return value
-
-
-def servertest(host, port):
- args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
- for family, socktype, proto, canonname, sockaddr in args:
- s = socket.socket(family, socktype, proto)
- try:
- s.connect(sockaddr)
- except socket.error:
- return False
- else:
- s.close()
- return True
-
-
-class ImsVnf(vnf.VnfOnBoarding):
- """OpenIMS VNF deployed with openBaton orchestrator"""
-
- def __init__(self, project='functest', case_name='orchestra_ims',
- repo='', cmd=''):
- super(ImsVnf, self).__init__(project, case_name, repo, cmd)
- self.logger = logging.getLogger(__name__)
- self.logger.info("Orchestra IMS VNF onboarding test starting")
- self.ob_password = "openbaton"
- self.ob_username = "admin"
- self.ob_https = False
- self.ob_port = "8080"
- self.ob_ip = "localhost"
- self.ob_instance_id = ""
- self.case_dir = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/vnf/ims/')
- self.data_dir = CONST.__getattribute__('dir_ims_data')
- self.test_dir = CONST.__getattribute__('dir_repo_vims_test')
- self.ob_projectid = ""
- self.keystone_client = os_utils.get_keystone_client()
- self.ob_nsr_id = ""
- self.nsr = None
- self.main_agent = None
- # vIMS Data directory creation
- if not os.path.exists(self.data_dir):
- os.makedirs(self.data_dir)
- # Retrieve the configuration
- try:
- self.config = CONST.__getattribute__(
- 'vnf_{}_config'.format(self.case_name))
- except BaseException:
- raise Exception("Orchestra VNF config file not found")
- config_file = self.case_dir + self.config
- self.imagename = get_config("openbaton.imagename", config_file)
- self.bootstrap_link = get_config("openbaton.bootstrap_link",
- config_file)
- self.bootstrap_config_link = get_config(
- "openbaton.bootstrap_config_link", config_file)
- self.market_link = get_config("openbaton.marketplace_link",
- config_file)
- self.images = get_config("tenant_images", config_file)
- self.ims_conf = get_config("vIMS", config_file)
- self.userdata_file = get_config("openbaton.userdata.file",
- config_file)
-
- def deploy_orchestrator(self):
- self.logger.info("Additional pre-configuration steps")
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
-
- # Import images if needed
- # needs some images
- self.logger.info("Upload some OS images if it doesn't exist")
- temp_dir = os.path.join(self.data_dir, "tmp/")
- for image_name, image_url in self.images.iteritems():
- self.logger.info("image: %s, url: %s", image_name, image_url)
- try:
- image_id = os_utils.get_image_id(glance_client,
- image_name)
- self.logger.info("image_id: %s", image_id)
- except BaseException:
- self.logger.error("Unexpected error: %s", sys.exc_info()[0])
-
- if image_id == '':
- self.logger.info("""%s image doesn't exist on glance
- repository. Try downloading this image
- and upload on glance !""" % image_name)
- image_id = os_utils.download_and_add_image_on_glance(
- glance_client,
- image_name,
- image_url,
- temp_dir)
- if image_id == '':
- self.logger.error("Failed to find or upload required OS "
- "image for this deployment")
- return False
-
- network_dic = os_utils.create_network_full(neutron_client,
- "openbaton_mgmt",
- "openbaton_mgmt_subnet",
- "openbaton_router",
- "192.168.100.0/24")
-
- # orchestrator VM flavor
- self.logger.info(
- "Check if orchestra Flavor is available, if not create one")
- flavor_exist, flavor_id = os_utils.get_or_create_flavor(
- "orchestra",
- "4096",
- '20',
- '2',
- public=True)
- self.logger.debug("Flavor id: %s" % flavor_id)
-
- if not network_dic:
- self.logger.error("There has been a problem when creating the "
- "neutron network")
-
- network_id = network_dic["net_id"]
-
- self.logger.info("Creating floating IP for VM in advance...")
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
-
- if floatip is None:
- self.logger.error("Cannot create floating IP.")
- return False
-
- userdata = "#!/bin/bash\n"
- userdata += "echo \"Executing userdata...\"\n"
- userdata += "set -x\n"
- userdata += "set -e\n"
- userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
- userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
- userdata += "echo \"Install curl...\"\n"
- userdata += "apt-get install curl\n"
- userdata += "echo \"Inject public key...\"\n"
- userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
- "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
- "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
- "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
- "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
- "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
- "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
- "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
- "horized_keys\n")
- userdata += "echo \"Download bootstrap...\"\n"
- userdata += ("curl -s %s "
- "> ./bootstrap\n" % self.bootstrap_link)
- userdata += ("curl -s %s"
- "> ./config_file\n" % self.bootstrap_config_link)
- userdata += ("echo \"Disable usage of mysql...\"\n")
- userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
- userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
- % floatip)
- userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
- "=%s/g /config_file\n" % floatip)
- userdata += "echo \"Set autostart of components to 'false'\"\n"
- userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
- userdata += "echo \"Execute bootstrap...\"\n"
- bootstrap = "sh ./bootstrap release -configFile=./config_file"
- userdata += bootstrap + "\n"
- userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
- userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
- "/etc/openbaton/openbaton-nfvo.properties\n")
- userdata += (
- "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
- self.userdata_file)
- userdata += "sed -i '113i\ \ \ \ sleep 60' " \
- "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
- userdata += "echo \"Starting NFVO\"\n"
- userdata += "service openbaton-nfvo restart\n"
- userdata += "echo \"Starting Generic VNFM\"\n"
- userdata += "service openbaton-vnfm-generic restart\n"
- userdata += "echo \"...end of userdata...\"\n"
-
- sg_id = os_utils.create_security_group_full(neutron_client,
- "orchestra-sec-group",
- "allowall")
-
- os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
- "icmp", 0, 255)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
- "icmp", 0, 255)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
- "tcp", 1, 65535)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress",
- "udp", 1, 65535)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
- "tcp", 1, 65535)
- os_utils.create_secgroup_rule(neutron_client, sg_id, "egress",
- "udp", 1, 65535)
-
- self.logger.info("Security group set")
-
- self.logger.info("Create instance....")
- self.logger.info("flavor: m1.medium\n"
- "image: %s\n"
- "network_id: %s\n"
- "userdata: %s\n",
- self.imagename,
- network_id,
- userdata)
-
- instance = os_utils.create_instance_and_wait_for_active(
- "orchestra",
- os_utils.get_image_id(glance_client, self.imagename),
- network_id,
- "orchestra-openbaton",
- config_drive=False,
- userdata=userdata)
-
- self.ob_instance_id = instance.id
-
- self.logger.info("Adding sec group to orchestra instance")
- os_utils.add_secgroup_to_instance(nova_client,
- self.ob_instance_id, sg_id)
-
- self.logger.info("Associating floating ip: '%s' to VM '%s' ",
- floatip,
- "orchestra-openbaton")
- if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
- self.logger.error("Cannot associate floating IP to VM.")
- return False
-
- self.logger.info("Waiting for Open Baton NFVO to be up and running...")
- x = 0
- while x < 200:
- if servertest(floatip, "8080"):
- break
- else:
- self.logger.debug(
- "Open Baton NFVO is not started yet (%ss)" %
- (x * 5))
- time.sleep(5)
- x += 1
-
- if x == 200:
- self.logger.error("Open Baton is not started correctly")
-
- self.ob_ip = floatip
- self.ob_password = "openbaton"
- self.ob_username = "admin"
- self.ob_https = False
- self.ob_port = "8080"
- self.logger.info("Waiting for all components up and running...")
- time.sleep(60)
- self.details["orchestrator"] = {
- 'status': "PASS", 'result': "Deploy Open Baton NFVO: OK"}
- self.logger.info("Deploy Open Baton NFVO: OK")
- return True
-
- def deploy_vnf(self):
- self.logger.info("Starting vIMS Deployment...")
-
- self.main_agent = MainAgent(nfvo_ip=self.ob_ip,
- nfvo_port=self.ob_port,
- https=self.ob_https,
- version=1,
- username=self.ob_username,
- password=self.ob_password)
-
- self.logger.info(
- "Check if openims Flavor is available, if not create one")
- flavor_exist, flavor_id = os_utils.get_or_create_flavor(
- "m1.small",
- "2048",
- '20',
- '1',
- public=True)
- self.logger.debug("Flavor id: %s", flavor_id)
-
- self.logger.info("Getting project 'default'...")
- project_agent = self.main_agent.get_agent("project", self.ob_projectid)
- for p in json.loads(project_agent.find()):
- if p.get("name") == "default":
- self.ob_projectid = p.get("id")
- self.logger.info("Found project 'default': %s", p)
- break
-
- self.logger.debug("project id: %s", self.ob_projectid)
- if self.ob_projectid == "":
- self.logger.error("Default project id was not found!")
-
- creds = os_utils.get_credentials()
- self.logger.info("PoP creds: %s", creds)
-
- if os_utils.is_keystone_v3():
- self.logger.info(
- "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
- project_id = os_utils.get_tenant_id(
- os_utils.get_keystone_client(),
- creds.get("project_name"))
- else:
- self.logger.info(
- "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
- project_id = creds.get("tenant_name")
-
- self.logger.debug("project id: %s", project_id)
-
- vim_json = {
- "name": "vim-instance",
- "authUrl": creds.get("auth_url"),
- "tenant": project_id,
- "username": creds.get("username"),
- "password": creds.get("password"),
- "securityGroups": [
- "default",
- "orchestra-sec-group"
- ],
- "type": "openstack",
- "location": {
- "name": "opnfv",
- "latitude": "52.525876",
- "longitude": "13.314400"
- }
- }
-
- self.logger.debug("Registering VIM: %s", vim_json)
-
- self.main_agent.get_agent(
- "vim",
- project_id=self.ob_projectid).create(entity=json.dumps(vim_json))
-
- market_agent = self.main_agent.get_agent("market",
- project_id=self.ob_projectid)
-
- nsd = {}
- try:
- self.logger.info("sending: %s", self.market_link)
- nsd = market_agent.create(entity=self.market_link)
- self.logger.info("Onboarded NSD: " + nsd.get("name"))
- except NfvoException as e:
- self.logger.error(e.message)
-
- nsr_agent = self.main_agent.get_agent("nsr",
- project_id=self.ob_projectid)
- nsd_id = nsd.get('id')
- if nsd_id is None:
- self.logger.error("NSD not onboarded correctly")
-
- try:
- self.nsr = nsr_agent.create(nsd_id)
- except NfvoException as e:
- self.logger.error(e.message)
-
- if self.nsr.get('code') is not None:
- self.logger.error(
- "vIMS cannot be deployed: %s -> %s",
- self.nsr.get('code'),
- self.nsr.get('message'))
- self.logger.error("vIMS cannot be deployed")
-
- i = 0
- self.logger.info("Waiting for NSR to go to ACTIVE...")
- while self.nsr.get("status") != 'ACTIVE' and self.nsr.get(
- "status") != 'ERROR':
- i += 1
- if i == 150:
- self.logger.error("INACTIVE NSR after %s sec..", 5 * i)
-
- time.sleep(5)
- self.nsr = json.loads(nsr_agent.find(self.nsr.get('id')))
-
- if self.nsr.get("status") == 'ACTIVE':
- self.details["vnf"] = {'status': "PASS", 'result': self.nsr}
- self.logger.info("Deploy VNF: OK")
- else:
- self.details["vnf"] = {'status': "FAIL", 'result': self.nsr}
- self.logger.error(self.nsr)
- self.logger.error("Deploy VNF: ERROR")
- return False
-
- self.ob_nsr_id = self.nsr.get("id")
- self.logger.info(
- "Sleep for 60s to ensure that all services are up and running...")
- time.sleep(60)
- return True
-
- def test_vnf(self):
- # Adaptations probably needed
- # code used for cloudify_ims
- # ruby client on jumphost calling the vIMS on the SUT
- self.logger.info(
- "Testing if %s works properly...", self.nsr.get('name'))
- for vnfr in self.nsr.get('vnfr'):
- self.logger.info(
- "Checking ports %s of VNF %s",
- self.ims_conf.get(vnfr.get('name')).get('ports'),
- vnfr.get('name'))
- for vdu in vnfr.get('vdu'):
- for vnfci in vdu.get('vnfc_instance'):
- self.logger.debug(
- "Checking ports of VNFC instance %s",
- vnfci.get('hostname'))
- for floatingIp in vnfci.get('floatingIps'):
- self.logger.debug(
- "Testing %s:%s",
- vnfci.get('hostname'),
- floatingIp.get('ip'))
- for port in self.ims_conf.get(
- vnfr.get('name')).get('ports'):
- if servertest(floatingIp.get('ip'), port):
- self.logger.info(
- "VNFC instance %s is reachable at %s:%s",
- vnfci.get('hostname'),
- floatingIp.get('ip'),
- port)
- else:
- self.logger.error(
- "VNFC instance %s is not reachable "
- "at %s:%s",
- vnfci.get('hostname'),
- floatingIp.get('ip'),
- port)
- self.details["test_vnf"] = {
- 'status': "FAIL", 'result': (
- "Port %s of server %s -> %s is "
- "not reachable",
- port,
- vnfci.get('hostname'),
- floatingIp.get('ip'))}
- self.logger.error("Test VNF: ERROR")
- return False
-
- self.details["test_vnf"] = {
- 'status': "PASS",
- 'result': "All tests have been executed successfully"}
- self.logger.info("Test VNF: OK")
- return True
-
- def clean(self):
- self.main_agent.get_agent(
- "nsr",
- project_id=self.ob_projectid).delete(self.ob_nsr_id)
- time.sleep(5)
- os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
- instance_id=self.ob_instance_id)
- # question is the clean removing also the VM?
- # I think so since is goinf to remove the tenant...
- super(ImsVnf, self).clean()
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml b/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml
deleted file mode 100644
index 5b25d3c9..00000000
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-tenant_images:
- ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- openims: http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img
-openbaton:
- bootstrap_link: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap
- bootstrap_config_link: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file
- userdata:
- file: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh
- marketplace_link: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json
- imagename: ubuntu_14.04
-vIMS:
- scscf:
- ports: [3870, 6060]
- pcscf:
- ports: [4060]
- icscf:
- ports: [3869, 5060]
- fhoss:
- ports: [3868]
- bind9:
- ports: [] \ No newline at end of file
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_openims.py b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
new file mode 100644
index 00000000..f8acada4
--- /dev/null
+++ b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
@@ -0,0 +1,718 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Orchestra OpenIMS testcase implementation."""
+
+import json
+import logging
+import os
+import socket
+import time
+import pkg_resources
+import yaml
+
+
+from snaps.openstack.create_image import OpenStackImage, ImageSettings
+from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings
+from snaps.openstack.create_security_group import (
+ OpenStackSecurityGroup,
+ SecurityGroupSettings,
+ SecurityGroupRuleSettings,
+ Direction,
+ Protocol)
+from snaps.openstack.create_network import (
+ OpenStackNetwork,
+ NetworkSettings,
+ SubnetSettings,
+ PortSettings)
+from snaps.openstack.create_router import OpenStackRouter, RouterSettings
+from snaps.openstack.os_credentials import OSCreds
+from snaps.openstack.create_instance import (
+ VmInstanceSettings, OpenStackVmInstance)
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+
+import functest.core.vnf as vnf
+import functest.utils.openstack_utils as os_utils
+from functest.utils.constants import CONST
+
+from org.openbaton.cli.errors.errors import NfvoException
+from org.openbaton.cli.agents.agents import MainAgent
+
+
+__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>"
+# ----------------------------------------------------------
+#
+# UTILS
+#
+# -----------------------------------------------------------
+
+
+def get_config(parameter, file_path):
+ """
+ Get config parameter.
+
+ Returns the value of a given parameter in file.yaml
+ parameter must be given in string format with dots
+ Example: general.openstack.image_name
+ """
+ with open(file_path) as config_file:
+ file_yaml = yaml.safe_load(config_file)
+ config_file.close()
+ value = file_yaml
+ for element in parameter.split("."):
+ value = value.get(element)
+ if value is None:
+ raise ValueError("The parameter %s is not defined in"
+ " reporting.yaml", parameter)
+ return value
+
+
+def servertest(host, port):
+ """Method to test that a server is reachable at IP:port"""
+ args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in args:
+ sock = socket.socket(family, socktype, proto)
+ try:
+ sock.connect(sockaddr)
+ except socket.error:
+ return False
+ else:
+ sock.close()
+ return True
+
+
+def get_userdata(orchestrator=dict):
+ """Build userdata for Open Baton machine"""
+ userdata = "#!/bin/bash\n"
+ userdata += "echo \"Executing userdata...\"\n"
+ userdata += "set -x\n"
+ userdata += "set -e\n"
+ userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
+ userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
+ userdata += "echo \"Install curl...\"\n"
+ userdata += "apt-get install curl\n"
+ userdata += "echo \"Inject public key...\"\n"
+ userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
+ "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
+ "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
+ "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
+ "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
+ "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
+ "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
+ "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
+ "horized_keys\n")
+ userdata += "echo \"Download bootstrap...\"\n"
+ userdata += ("curl -s %s "
+ "> ./bootstrap\n" % orchestrator['bootstrap']['url'])
+ userdata += ("curl -s %s" "> ./config_file\n" %
+ orchestrator['bootstrap']['config']['url'])
+ userdata += ("echo \"Disable usage of mysql...\"\n")
+ userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
+ userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
+ % orchestrator['details']['fip'].ip)
+ userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
+ "=%s/g /config_file\n" % orchestrator['details']['fip'].ip)
+ userdata += "echo \"Set autostart of components to 'false'\"\n"
+ userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
+ userdata += "echo \"Execute bootstrap...\"\n"
+ bootstrap = "sh ./bootstrap release -configFile=./config_file"
+ userdata += bootstrap + "\n"
+ userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
+ userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
+ "/etc/openbaton/openbaton-nfvo.properties\n")
+ userdata += (
+ "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
+ orchestrator['gvnfm']['userdata']['url'])
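+ # Inject a 60s sleep at line 113 of the generic VNFM user-data script.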
+ userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \
+ "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
+ userdata += "echo \"Starting NFVO\"\n"
+ userdata += "service openbaton-nfvo restart\n"
+ userdata += "echo \"Starting Generic VNFM\"\n"
+ userdata += "service openbaton-vnfm-generic restart\n"
+ userdata += "echo \"...end of userdata...\"\n"
+ return userdata
+
+
+class OpenImsVnf(vnf.VnfOnBoarding):
+ """OpenIMS VNF deployed with openBaton orchestrator"""
+
+ logger = logging.getLogger(__name__)
+
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "orchestra_openims"
+ super(OpenImsVnf, self).__init__(**kwargs)
+ # self.logger = logging.getLogger("functest.ci.run_tests.orchestra")
+ self.logger.info("kwargs %s", (kwargs))
+
+ self.case_dir = pkg_resources.resource_filename(
+ 'functest', 'opnfv_tests/vnf/ims/')
+ self.data_dir = CONST.__getattribute__('dir_ims_data')
+ self.test_dir = CONST.__getattribute__('dir_repo_vims_test')
+ self.created_resources = []
+ self.logger.info("%s VNF onboarding test starting", self.case_name)
+
+ try:
+ self.config = CONST.__getattribute__(
+ 'vnf_{}_config'.format(self.case_name))
+ except BaseException:
+ raise Exception("Orchestra VNF config file not found")
+ config_file = self.case_dir + self.config
+
+ self.mano = dict(
+ get_config("mano", config_file),
+ details={}
+ )
+ self.logger.debug("Orchestrator configuration %s", self.mano)
+
+ self.details['orchestrator'] = dict(
+ name=self.mano['name'],
+ version=self.mano['version'],
+ status='ERROR',
+ result=''
+ )
+
+ self.vnf = dict(
+ get_config(self.case_name, config_file),
+ )
+ self.logger.debug("VNF configuration: %s", self.vnf)
+
+ self.details['vnf'] = dict(
+ name=self.vnf['name'],
+ )
+
+ self.details['test_vnf'] = dict(
+ name=self.case_name,
+ )
+
+ # Orchestra base Data directory creation
+ if not os.path.exists(self.data_dir):
+ os.makedirs(self.data_dir)
+
+ self.images = get_config("tenant_images.orchestrator", config_file)
+ self.images.update(get_config("tenant_images.%s" %
+ self.case_name, config_file))
+ self.snaps_creds = None
+
+ def prepare(self):
+ """Prepare testscase (Additional pre-configuration steps)."""
+ super(OpenImsVnf, self).prepare()
+
+ self.logger.info("Additional pre-configuration steps")
+ self.logger.info("creds %s", (self.creds))
+
+ self.snaps_creds = OSCreds(
+ username=self.creds['username'],
+ password=self.creds['password'],
+ auth_url=self.creds['auth_url'],
+ project_name=self.creds['tenant'],
+ identity_api_version=int(os_utils.get_keystone_client_version()))
+
+ self.prepare_images()
+ self.prepare_flavor()
+ self.prepare_security_groups()
+ self.prepare_network()
+ self.prepare_floating_ip()
+
+ def prepare_images(self):
+ """Upload images if they doen't exist yet"""
+ self.logger.info("Upload images if they doen't exist yet")
+ for image_name, image_file in self.images.iteritems():
+ self.logger.info("image: %s, file: %s", image_name, image_file)
+ if image_file and image_name:
+ image = OpenStackImage(
+ self.snaps_creds,
+ ImageSettings(name=image_name,
+ image_user='cloud',
+ img_format='qcow2',
+ image_file=image_file))
+ image.create()
+ # self.created_resources.append(image);
+
+ def prepare_security_groups(self):
+ """Create Open Baton security group if it doesn't exist yet"""
+ self.logger.info(
+ "Creating security group for Open Baton if not yet existing...")
+ sg_rules = list()
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.tcp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.udp,
+ port_range_min=1,
+ port_range_max=65535))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.ingress,
+ protocol=Protocol.icmp))
+ sg_rules.append(
+ SecurityGroupRuleSettings(
+ sec_grp_name="orchestra-sec-group-allowall",
+ direction=Direction.egress,
+ protocol=Protocol.icmp))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.ingress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+ # sg_rules.append(
+ # SecurityGroupRuleSettings(
+ # sec_grp_name="orchestra-sec-group-allowall",
+ # direction=Direction.egress,
+ # protocol=Protocol.icmp,
+ # port_range_min=-1,
+ # port_range_max=-1))
+
+ security_group = OpenStackSecurityGroup(
+ self.snaps_creds,
+ SecurityGroupSettings(
+ name="orchestra-sec-group-allowall",
+ rule_settings=sg_rules))
+
+ security_group_info = security_group.create()
+ self.created_resources.append(security_group)
+ self.mano['details']['sec_group'] = security_group_info.name
+ self.logger.info(
+ "Security group orchestra-sec-group-allowall prepared")
+
+ def prepare_flavor(self):
+ """Create Open Baton flavor if it doesn't exist yet"""
+ self.logger.info(
+ "Create Flavor for Open Baton NFVO if not yet existing")
+
+ flavor_settings = FlavorSettings(
+ name=self.mano['requirements']['flavor']['name'],
+ ram=self.mano['requirements']['flavor']['ram_min'],
+ disk=self.mano['requirements']['flavor']['disk'],
+ vcpus=self.mano['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.created_resources.append(flavor)
+ self.mano['details']['flavor'] = {}
+ self.mano['details']['flavor']['name'] = flavor_settings.name
+ self.mano['details']['flavor']['id'] = flavor_info.id
+
+ def prepare_network(self):
+ """Create network/subnet/router if they doen't exist yet"""
+ self.logger.info(
+ "Creating network/subnet/router if they doen't exist yet...")
+ subnet_settings = SubnetSettings(
+ name='%s_subnet' %
+ self.case_name,
+ cidr="192.168.100.0/24")
+ network_settings = NetworkSettings(
+ name='%s_net' %
+ self.case_name,
+ subnet_settings=[subnet_settings])
+ orchestra_network = OpenStackNetwork(
+ self.snaps_creds, network_settings)
+ orchestra_network_info = orchestra_network.create()
+ self.mano['details']['network'] = {}
+ self.mano['details']['network']['id'] = orchestra_network_info.id
+ self.mano['details']['network']['name'] = orchestra_network_info.name
+ self.mano['details']['external_net_name'] = \
+ snaps_utils.get_ext_net_name(self.snaps_creds)
+ self.created_resources.append(orchestra_network)
+ orchestra_router = OpenStackRouter(
+ self.snaps_creds,
+ RouterSettings(
+ name='%s_router' %
+ self.case_name,
+ external_gateway=self.mano['details']['external_net_name'],
+ internal_subnets=[
+ subnet_settings.name]))
+ orchestra_router.create()
+ self.created_resources.append(orchestra_router)
+ self.logger.info("Created network and router for Open Baton NFVO...")
+
+ def prepare_floating_ip(self):
+ """Select/Create Floating IP if it doesn't exist yet"""
+ self.logger.info("Retrieving floating IP for Open Baton NFVO")
+ neutron_client = snaps_utils.neutron_utils.neutron_client(
+ self.snaps_creds)
+ # Finding Tenant ID to check to which tenant the Floating IP belongs
+ tenant_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(self.creds),
+ self.tenant_name)
+ # Use os_utils to retrieve complete information of Floating IPs
+ floating_ips = os_utils.get_floating_ips(neutron_client)
+ my_floating_ips = []
+ # Filter Floating IPs with tenant id
+ for floating_ip in floating_ips:
+ # self.logger.info("Floating IP: %s", floating_ip)
+ if floating_ip.get('tenant_id') == tenant_id:
+ my_floating_ips.append(floating_ip.get('floating_ip_address'))
+ # Select if Floating IP exist else create new one
+ if len(my_floating_ips) >= 1:
+ # Get Floating IP object from snaps for clean up
+ snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
+ neutron_client)
+ for my_floating_ip in my_floating_ips:
+ for snaps_floating_ip in snaps_floating_ips:
+ if snaps_floating_ip.ip == my_floating_ip:
+ self.mano['details']['fip'] = snaps_floating_ip
+ self.logger.info(
+ "Selected floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+ break
+ if self.mano['details']['fip'] is not None:
+ break
+ else:
+ self.logger.info("Creating floating IP for Open Baton NFVO")
+ self.mano['details']['fip'] = (
+ snaps_utils.neutron_utils.create_floating_ip(
+ neutron_client, self.mano['details']['external_net_name']))
+ self.logger.info(
+ "Created floating IP for Open Baton NFVO %s",
+ (self.mano['details']['fip'].ip))
+
+ def get_vim_descriptor(self):
+ """"Create VIM descriptor to be used for onboarding"""
+ self.logger.info(
+ "Building VIM descriptor with PoP creds: %s",
+ self.creds)
+ # Depending on API version either tenant ID or project name must be
+ # used
+ if os_utils.is_keystone_v3():
+ self.logger.info(
+ "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
+ project_id = os_utils.get_tenant_id(
+ os_utils.get_keystone_client(),
+ self.creds.get("project_name"))
+ else:
+ self.logger.info(
+ "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
+ project_id = self.creds.get("tenant_name")
+ self.logger.debug("VIM project/tenant id: %s", project_id)
+ vim_json = {
+ "name": "vim-instance",
+ "authUrl": self.creds.get("auth_url"),
+ "tenant": project_id,
+ "username": self.creds.get("username"),
+ "password": self.creds.get("password"),
+ "securityGroups": [
+ self.mano['details']['sec_group']
+ ],
+ "type": "openstack",
+ "location": {
+ "name": "opnfv",
+ "latitude": "52.525876",
+ "longitude": "13.314400"
+ }
+ }
+ self.logger.info("Built VIM descriptor: %s", vim_json)
+ return vim_json
+
+ def deploy_orchestrator(self):
+ self.logger.info("Deploying Open Baton...")
+ self.logger.info("Details: %s", self.mano['details'])
+ start_time = time.time()
+
+ self.logger.info("Creating orchestra instance...")
+ userdata = get_userdata(self.mano)
+ self.logger.info("flavor: %s\n"
+ "image: %s\n"
+ "network_id: %s\n",
+ self.mano['details']['flavor']['name'],
+ self.mano['requirements']['image'],
+ self.mano['details']['network']['id'])
+ self.logger.debug("userdata: %s\n", userdata)
+ # setting up image
+ image_settings = ImageSettings(
+ name=self.mano['requirements']['image'],
+ image_user='ubuntu',
+ exists=True)
+ # setting up port
+ port_settings = PortSettings(
+ name='%s_port' % self.case_name,
+ network_name=self.mano['details']['network']['name'])
+ # build configuration of vm
+ orchestra_settings = VmInstanceSettings(
+ name=self.case_name,
+ flavor=self.mano['details']['flavor']['name'],
+ port_settings=[port_settings],
+ security_group_names=[self.mano['details']['sec_group']],
+ userdata=userdata)
+ orchestra_vm = OpenStackVmInstance(self.snaps_creds,
+ orchestra_settings,
+ image_settings)
+
+ orchestra_vm.create()
+ self.created_resources.append(orchestra_vm)
+ self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
+ self.logger.info(
+ "Created orchestra instance: %s",
+ self.mano['details']['id'])
+
+ self.logger.info("Associating floating ip: '%s' to VM '%s' ",
+ self.mano['details']['fip'].ip,
+ self.case_name)
+ nova_client = os_utils.get_nova_client()
+ if not os_utils.add_floating_ip(
+ nova_client,
+ self.mano['details']['id'],
+ self.mano['details']['fip'].ip):
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Cannot associate floating IP to VM.")
+ return False
+
+ self.logger.info("Waiting for Open Baton NFVO to be up and running...")
+ timeout = 0
+ while timeout < 200:
+ if servertest(
+ self.mano['details']['fip'].ip,
+ "8080"):
+ break
+ else:
+ self.logger.info("Open Baton NFVO is not started yet (%ss)",
+ (timeout * 5))
+ time.sleep(5)
+ timeout += 1
+
+ if timeout >= 200:
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(
+ status='FAIL', duration=duration)
+ self.logger.error("Open Baton is not started correctly")
+ return False
+
+ self.logger.info("Waiting for all components to be up and running...")
+ time.sleep(60)
+ duration = time.time() - start_time
+ self.details["orchestrator"].update(status='PASS', duration=duration)
+ self.logger.info("Deploy Open Baton NFVO: OK")
+ return True
+
+ def deploy_vnf(self):
+ start_time = time.time()
+ self.logger.info("Deploying %s...", self.vnf['name'])
+
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+
+ self.logger.info(
+ "Create %s Flavor if not existing", self.vnf['name'])
+ flavor_settings = FlavorSettings(
+ name=self.vnf['requirements']['flavor']['name'],
+ ram=self.vnf['requirements']['flavor']['ram_min'],
+ disk=self.vnf['requirements']['flavor']['disk'],
+ vcpus=self.vnf['requirements']['flavor']['vcpus'])
+ flavor = OpenStackFlavor(self.snaps_creds, flavor_settings)
+ flavor_info = flavor.create()
+ self.logger.debug("Flavor id: %s", flavor_info.id)
+
+ self.logger.info("Getting project 'default'...")
+ project_agent = main_agent.get_agent("project", "")
+ for project in json.loads(project_agent.find()):
+ if project.get("name") == "default":
+ self.mano['details']['project_id'] = project.get("id")
+ self.logger.info("Found project 'default': %s", project)
+ break
+
+ vim_json = self.get_vim_descriptor()
+ self.logger.info("Registering VIM: %s", vim_json)
+
+ main_agent.get_agent(
+ "vim", project_id=self.mano['details']['project_id']).create(
+ entity=json.dumps(vim_json))
+
+ market_agent = main_agent.get_agent(
+ "market", project_id=self.mano['details']['project_id'])
+
+ try:
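+ # Onboard the NSD from the descriptor URL via the marketplace agent,
+ # then instantiate an NSR from the returned NSD id.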
+ self.logger.info("sending: %s", self.vnf['descriptor']['url'])
+ nsd = market_agent.create(entity=self.vnf['descriptor']['url'])
+ if nsd.get('id') is None:
+ self.logger.error("NSD not onboarded correctly")
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ self.mano['details']['nsd_id'] = nsd.get('id')
+ self.logger.info("Onboarded NSD: " + nsd.get("name"))
+
+ nsr_agent = main_agent.get_agent(
+ "nsr", project_id=self.mano['details']['project_id'])
+
+ self.mano['details']['nsr'] = nsr_agent.create(
+ self.mano['details']['nsd_id'])
+ except NfvoException as exc:
+ self.logger.error(exc.message)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ if self.mano['details']['nsr'].get('code') is not None:
+ self.logger.error(
+ "%s cannot be deployed: %s -> %s",
+ self.vnf['name'],
+ self.mano['details']['nsr'].get('code'),
+ self.mano['details']['nsr'].get('message'))
+ self.logger.error("%s cannot be deployed", self.vnf['name'])
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+
+ timeout = 0
+ self.logger.info("Waiting for NSR to go to ACTIVE...")
+ while self.mano['details']['nsr'].get("status") != 'ACTIVE' \
+ and self.mano['details']['nsr'].get("status") != 'ERROR':
+ timeout += 1
+ self.logger.info("NSR is not yet ACTIVE... (%ss)", 5 * timeout)
+ if timeout == 300:
+ self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout)
+ duration = time.time() - start_time
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ return False
+ time.sleep(5)
+ self.mano['details']['nsr'] = json.loads(
+ nsr_agent.find(self.mano['details']['nsr'].get('id')))
+
+ duration = time.time() - start_time
+ if self.mano['details']['nsr'].get("status") == 'ACTIVE':
+ self.details["vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Sleep for 60s to ensure that all "
+ "services are up and running...")
+ time.sleep(60)
+ result = True
+ else:
+ self.details["vnf"].update(status='FAIL', duration=duration)
+ self.logger.error("NSR: %s", self.mano['details'].get('nsr'))
+ result = False
+ return result
+
+ def test_vnf(self):
+ self.logger.info("Testing VNF OpenIMS...")
+ start_time = time.time()
+ self.logger.info(
+ "Testing if %s works properly...",
+ self.mano['details']['nsr'].get('name'))
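+ # Probe every configured port on each VNFC instance's floating IP(s);
+ # any unreachable port marks the whole test as FAIL.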
+ for vnfr in self.mano['details']['nsr'].get('vnfr'):
+ self.logger.info(
+ "Checking ports %s of VNF %s",
+ self.vnf['test'][vnfr.get('name')]['ports'],
+ vnfr.get('name'))
+ for vdu in vnfr.get('vdu'):
+ for vnfci in vdu.get('vnfc_instance'):
+ self.logger.debug(
+ "Checking ports of VNFC instance %s",
+ vnfci.get('hostname'))
+ for floating_ip in vnfci.get('floatingIps'):
+ self.logger.debug(
+ "Testing %s:%s",
+ vnfci.get('hostname'),
+ floating_ip.get('ip'))
+ for port in self.vnf['test'][vnfr.get(
+ 'name')]['ports']:
+ if servertest(floating_ip.get('ip'), port):
+ self.logger.info(
+ "VNFC instance %s is reachable at %s:%s",
+ vnfci.get('hostname'),
+ floating_ip.get('ip'),
+ port)
+ else:
+ self.logger.error(
+ "VNFC instance %s is not reachable "
+ "at %s:%s",
+ vnfci.get('hostname'),
+ floating_ip.get('ip'),
+ port)
+ duration = time.time() - start_time
+ self.details["test_vnf"].update(
+ status='FAIL', duration=duration, result=(
+ "Port %s of server %s -> %s is "
+ "not reachable",
+ port,
+ vnfci.get('hostname'),
+ floating_ip.get('ip')))
+ self.logger.error("Test VNF: ERROR")
+ return False
+ duration = time.time() - start_time
+ self.details["test_vnf"].update(status='PASS', duration=duration)
+ self.logger.info("Test VNF: OK")
+ return True
+
+ def clean(self):
+ self.logger.info("Cleaning %s...", self.case_name)
+ try:
+ main_agent = MainAgent(
+ nfvo_ip=self.mano['details']['fip'].ip,
+ nfvo_port=8080,
+ https=False,
+ version=1,
+ username=self.mano['credentials']['username'],
+ password=self.mano['credentials']['password'])
+ self.logger.info("Terminating %s...", self.vnf['name'])
+ if (self.mano['details'].get('nsr')):
+ main_agent.get_agent(
+ "nsr",
+ project_id=self.mano['details']['project_id']).\
+ delete(self.mano['details']['nsr'].get('id'))
+ self.logger.info("Sleeping 60 seconds...")
+ time.sleep(60)
+ else:
+ self.logger.info("No need to terminate the VNF...")
+ # os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+ # instance_id=self.mano_instance_id)
+ except (NfvoException, KeyError) as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ try:
+ neutron_client = os_utils.get_neutron_client(self.creds)
+ self.logger.info("Deleting Open Baton Port...")
+ port = snaps_utils.neutron_utils.get_port_by_name(
+ neutron_client, '%s_port' % self.case_name)
+ snaps_utils.neutron_utils.delete_port(neutron_client, port)
+ time.sleep(10)
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ try:
+ self.logger.info("Deleting Open Baton Floating IP...")
+ snaps_utils.neutron_utils.delete_floating_ip(
+ neutron_client, self.mano['details']['fip'])
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+
+ for resource in reversed(self.created_resources):
+ try:
+ self.logger.info("Cleaning %s", str(resource))
+ resource.clean()
+ except Exception as exc:
+ self.logger.error('Unexpected error cleaning - %s', exc)
+ super(OpenImsVnf, self).clean()
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index fb8cb391..7495c40e 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -54,11 +54,6 @@ class RunTestsTesting(unittest.TestCase):
self.run_tests_parser = run_tests.RunTestsParser()
- @mock.patch('functest.ci.run_tests.logger.info')
- def test_print_separator(self, mock_logger_info):
- self.runner.print_separator(self.sep)
- mock_logger_info.assert_called_once_with(self.sep * 44)
-
@mock.patch('functest.ci.run_tests.logger.error')
def test_source_rc_file_missing_file(self, mock_logger_error):
with mock.patch('functest.ci.run_tests.os.path.isfile',
@@ -120,8 +115,7 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_name.return_value': 'test_name',
'needs_clean.return_value': False}
mock_test.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.Runner.print_separator'),\
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ with mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=None), \
self.assertRaises(Exception) as context:
@@ -129,7 +123,6 @@ class RunTestsTesting(unittest.TestCase):
msg = "Cannot import the class for the test case."
self.assertTrue(msg in context)
- @mock.patch('functest.ci.run_tests.Runner.print_separator')
@mock.patch('functest.ci.run_tests.Runner.source_rc_file')
@mock.patch('importlib.import_module', name="module",
return_value=mock.Mock(test_class=mock.Mock(
@@ -145,123 +138,107 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=test_run_dict):
self.runner.clean_flag = True
- self.runner.run_test(mock_test, 'tier_name')
+ self.runner.run_test(mock_test)
self.assertEqual(self.runner.overall_result,
run_tests.Result.EX_OK)
- @mock.patch('functest.ci.run_tests.logger.info')
- def test_run_tier_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.Runner.print_separator'), \
- mock.patch(
- 'functest.ci.run_tests.Runner.run_test',
- return_value=TestCase.EX_OK) as mock_method:
- self.runner.run_tier(self.tier)
- mock_method.assert_any_call(mock.ANY, 'test_tier')
- self.assertTrue(mock_logger_info.called)
+ @mock.patch('functest.ci.run_tests.Runner.run_test',
+ return_value=TestCase.EX_OK)
+ def test_run_tier_default(self, *mock_methods):
+ self.assertEqual(self.runner.run_tier(self.tier),
+ run_tests.Result.EX_OK)
+ mock_methods[0].assert_called_with(mock.ANY)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_missing_test(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.Runner.print_separator'):
- self.tier.get_tests.return_value = None
- self.assertEqual(self.runner.run_tier(self.tier), 0)
- self.assertTrue(mock_logger_info.called)
+ self.tier.get_tests.return_value = None
+ self.assertEqual(self.runner.run_tier(self.tier),
+ run_tests.Result.EX_ERROR)
+ self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
- def test_run_all_default(self, mock_logger_info):
- with mock.patch(
- 'functest.ci.run_tests.Runner.run_tier') as mock_method:
- CONST.__setattr__('CI_LOOP', 'test_ci_loop')
- self.runner.run_all(self.tiers)
- mock_method.assert_any_call(self.tier)
- self.assertTrue(mock_logger_info.called)
+ @mock.patch('functest.ci.run_tests.Runner.run_tier')
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_run_all_default(self, *mock_methods):
+ CONST.__setattr__('CI_LOOP', 'test_ci_loop')
+ self.runner.run_all()
+ mock_methods[1].assert_not_called()
+ self.assertTrue(mock_methods[2].called)
@mock.patch('functest.ci.run_tests.logger.info')
- def test_run_all_missing_tier(self, mock_logger_info):
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_run_all_missing_tier(self, *mock_methods):
CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
- self.runner.run_all(self.tiers)
- self.assertTrue(mock_logger_info.called)
+ self.runner.run_all()
+ self.assertTrue(mock_methods[1].called)
- def test_main_failed(self):
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file',
+ side_effect=Exception)
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_failed(self, *mock_methods):
kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': False,
'get_test.return_value': False}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder'), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file',
- side_effect=Exception):
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_ERROR)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file',
- side_effect=Exception):
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_ERROR)
-
- def test_main_tier(self, *args):
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_ERROR)
+ mock_methods[1].assert_called_once_with()
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.run_test',
+ return_value=TestCase.EX_OK)
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_tier(self, *mock_methods):
mock_tier = mock.Mock()
- args = {'get_name.return_value': 'tier_name'}
+ args = {'get_name.return_value': 'tier_name',
+ 'get_tests.return_value': ['test_name']}
mock_tier.configure_mock(**args)
kwargs = {'test': 'tier_name', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': mock_tier,
'get_test.return_value': None}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.Runner.run_tier') as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_OK)
- self.assertTrue(m.called)
-
- def test_main_test(self, *args):
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_OK)
+ mock_methods[1].assert_called_once_with('test_name')
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.run_test',
+ return_value=TestCase.EX_OK)
+ def test_main_test(self, *mock_methods):
kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
- mock_test = mock.Mock()
- args = {'get_name.return_value': 'test_name',
- 'needs_clean.return_value': True}
- mock_test.configure_mock(**args)
- mock_obj = mock.Mock()
args = {'get_tier.return_value': None,
- 'get_test.return_value': mock_test}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.Runner.run_test',
- return_value=TestCase.EX_OK) as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_OK)
- self.assertTrue(m.called)
-
- def test_main_all_tier(self, *args):
+ 'get_test.return_value': 'test_name'}
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_OK)
+ mock_methods[0].assert_called_once_with('test_name')
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.run_all')
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_all_tier(self, *mock_methods):
kwargs = {'test': 'all', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': None,
'get_test.return_value': None}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.Runner.run_all') as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_OK)
- self.assertTrue(m.called)
-
- def test_main_any_tier_test_ko(self, *args):
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_OK)
+ mock_methods[1].assert_called_once_with()
+
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.summary')
+ def test_main_any_tier_test_ko(self, *mock_methods):
kwargs = {'test': 'any', 'noclean': True, 'report': True}
- mock_obj = mock.Mock()
args = {'get_tier.return_value': None,
'get_test.return_value': None}
- mock_obj.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.tb.TierBuilder',
- return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
- mock.patch('functest.ci.run_tests.logger.debug') as m:
- self.assertEqual(self.runner.main(**kwargs),
- run_tests.Result.EX_ERROR)
- self.assertTrue(m.called)
+ self.runner._tiers = mock.Mock()
+ self.runner._tiers.configure_mock(**args)
+ self.assertEqual(self.runner.main(**kwargs),
+ run_tests.Result.EX_ERROR)
if __name__ == "__main__":
diff --git a/functest/tests/unit/ci/test_tier_builder.py b/functest/tests/unit/ci/test_tier_builder.py
index ab75e15b..700c6e91 100644
--- a/functest/tests/unit/ci/test_tier_builder.py
+++ b/functest/tests/unit/ci/test_tier_builder.py
@@ -24,7 +24,8 @@ class TierBuilderTesting(unittest.TestCase):
'case_name': 'test_name',
'criteria': 'test_criteria',
'blocking': 'test_blocking',
- 'description': 'test_desc'}
+ 'description': 'test_desc',
+ 'project_name': 'project_name'}
self.dic_tier = {'name': 'test_tier',
'order': 'test_order',
diff --git a/functest/tests/unit/core/test_feature.py b/functest/tests/unit/core/test_feature.py
index 988981ef..553a5dfa 100644
--- a/functest/tests/unit/core/test_feature.py
+++ b/functest/tests/unit/core/test_feature.py
@@ -22,8 +22,8 @@ class FeatureTestingBase(unittest.TestCase):
_case_name = "foo"
_project_name = "bar"
- _repo = "dir_repo_copper"
- _cmd = "cd /home/opnfv/repos/foo/tests && bash run.sh && cd -"
+ _repo = "dir_repo_bar"
+ _cmd = "cd /home/opnfv/repos/bar/tests && bash run.sh && cd -"
_output_file = '/home/opnfv/functest/results/foo.log'
feature = None
diff --git a/functest/tests/unit/energy/test_functest_energy.py b/functest/tests/unit/energy/test_functest_energy.py
index f8bb13c9..a576e2c3 100644
--- a/functest/tests/unit/energy/test_functest_energy.py
+++ b/functest/tests/unit/energy/test_functest_energy.py
@@ -35,6 +35,15 @@ class MockHttpResponse(object): # pylint: disable=too-few-public-methods
self.status_code = status_code
+API_OK = MockHttpResponse(
+ '{"status": "OK"}',
+ 200
+)
+API_KO = MockHttpResponse(
+ '{"message": "API-KO"}',
+ 500
+)
+
RECORDER_OK = MockHttpResponse(
'{"environment": "UNIT_TEST",'
' "step": "string",'
@@ -81,7 +90,7 @@ class EnergyRecorderTest(unittest.TestCase):
@mock.patch('functest.energy.energy.requests.post',
return_value=RECORDER_OK)
- def test_start(self, post_mock=None):
+ def test_start(self, post_mock=None, get_mock=None):
"""EnergyRecorder.start method (regular case)."""
self.test_load_config()
self.assertTrue(EnergyRecorder.start(self.case_name))
@@ -89,7 +98,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -102,7 +112,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -115,7 +126,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -128,7 +140,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"] + "/step",
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -141,7 +154,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"] + "/step",
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.post',
@@ -154,7 +168,8 @@ class EnergyRecorderTest(unittest.TestCase):
EnergyRecorder.energy_recorder_api["uri"] + "/step",
auth=EnergyRecorder.energy_recorder_api["auth"],
data=mock.ANY,
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.delete',
@@ -166,7 +181,8 @@ class EnergyRecorderTest(unittest.TestCase):
delete_mock.assert_called_once_with(
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.delete',
@@ -178,7 +194,8 @@ class EnergyRecorderTest(unittest.TestCase):
delete_mock.assert_called_once_with(
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@mock.patch('functest.energy.energy.requests.delete',
@@ -190,7 +207,8 @@ class EnergyRecorderTest(unittest.TestCase):
delete_mock.assert_called_once_with(
EnergyRecorder.energy_recorder_api["uri"],
auth=EnergyRecorder.energy_recorder_api["auth"],
- headers=self.request_headers
+ headers=self.request_headers,
+ timeout=EnergyRecorder.CONNECTION_TIMOUT
)
@energy.enable_recording
@@ -206,13 +224,7 @@ class EnergyRecorderTest(unittest.TestCase):
@mock.patch("functest.energy.energy.EnergyRecorder.get_current_scenario",
return_value=None)
@mock.patch("functest.energy.energy.EnergyRecorder")
- @mock.patch("functest.utils.functest_utils.get_pod_name",
- return_value="MOCK_POD")
- @mock.patch("functest.utils.functest_utils.get_functest_config",
- side_effect=config_loader_mock)
def test_decorators(self,
- loader_mock=None,
- pod_mock=None,
recorder_mock=None,
cur_scenario_mock=None):
"""Test energy module decorators."""
@@ -264,10 +276,14 @@ class EnergyRecorderTest(unittest.TestCase):
side_effect=config_loader_mock)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
- def test_load_config(self, loader_mock=None, pod_mock=None):
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_OK)
+ def test_load_config(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
"""Test load config."""
EnergyRecorder.energy_recorder_api = None
EnergyRecorder.load_config()
+
self.assertEquals(
EnergyRecorder.energy_recorder_api["auth"],
("user", "password")
@@ -281,7 +297,10 @@ class EnergyRecorderTest(unittest.TestCase):
side_effect=config_loader_mock_no_creds)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
- def test_load_config_no_creds(self, loader_mock=None, pod_mock=None):
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_OK)
+ def test_load_config_no_creds(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
"""Test load config without creds."""
EnergyRecorder.energy_recorder_api = None
EnergyRecorder.load_config()
@@ -295,7 +314,10 @@ class EnergyRecorderTest(unittest.TestCase):
return_value=None)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
- def test_load_config_ex(self, loader_mock=None, pod_mock=None):
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_OK)
+ def test_load_config_ex(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
"""Test load config with exception."""
with self.assertRaises(AssertionError):
EnergyRecorder.energy_recorder_api = None
@@ -303,6 +325,20 @@ class EnergyRecorderTest(unittest.TestCase):
self.assertEquals(EnergyRecorder.energy_recorder_api, None)
@mock.patch("functest.utils.functest_utils.get_functest_config",
+ side_effect=config_loader_mock)
+ @mock.patch("functest.utils.functest_utils.get_pod_name",
+ return_value="MOCK_POD")
+ @mock.patch("functest.energy.energy.requests.get",
+ return_value=API_KO)
+ def test_load_config_api_ko(self, loader_mock=None, pod_mock=None,
+ get_mock=None):
+ """Test load config with API unavailable."""
+ EnergyRecorder.energy_recorder_api = None
+ EnergyRecorder.load_config()
+ self.assertEquals(EnergyRecorder.energy_recorder_api["available"],
+ False)
+
+ @mock.patch("functest.utils.functest_utils.get_functest_config",
return_value=None)
@mock.patch("functest.utils.functest_utils.get_pod_name",
return_value="MOCK_POD")
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index 8c8a6cec..8aeea41d 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -511,7 +511,7 @@ class ODLRunTesting(ODLTesting):
def test_compass(self):
os.environ["INSTALLER_TYPE"] = "compass"
self._test_run(testcase.TestCase.EX_OK,
- odlip=self._neutron_ip, odlwebport='8181')
+ odlip=self._neutron_ip, odlrestconfport='8080')
def test_daisy_no_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 32cc1513..05311c3f 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -18,75 +18,73 @@ from functest.utils.constants import CONST
class OSRallyTesting(unittest.TestCase):
-
- def setUp(self):
- self.nova_client = mock.Mock()
- self.neutron_client = mock.Mock()
- self.cinder_client = mock.Mock()
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_nova_client',
- return_value=self.nova_client), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_neutron_client',
- return_value=self.neutron_client), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_cinder_client',
- return_value=self.cinder_client):
- self.rally_base = rally.RallyBase()
- self.rally_base.network_dict['net_id'] = 'test_net_id'
- self.polling_iter = 2
-
- def test_build_task_args_missing_floating_network(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_nova_client', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_neutron_client', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_cinder_client', return_value=mock.Mock())
+ def setUp(self, mock_func1, mock_func2, mock_func3):
+ self.rally_base = rally.RallyBase()
+ self.rally_base.network_dict['net_id'] = 'test_net_id'
+ self.polling_iter = 2
+ mock_func1.assert_called()
+ mock_func2.assert_called()
+ mock_func3.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_external_net', return_value=None)
+ def test_build_task_args_missing_floating_network(self, mock_func):
CONST.__setattr__('OS_AUTH_URL', None)
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_external_net',
- return_value=None):
- task_args = self.rally_base._build_task_args('test_file_name')
- self.assertEqual(task_args['floating_network'], '')
+ task_args = self.rally_base._build_task_args('test_file_name')
+ self.assertEqual(task_args['floating_network'], '')
+ mock_func.assert_called()
- def test_build_task_args_missing_net_id(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_external_net', return_value='test_floating_network')
+ def test_build_task_args_missing_net_id(self, mock_func):
CONST.__setattr__('OS_AUTH_URL', None)
self.rally_base.network_dict['net_id'] = ''
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_external_net',
- return_value='test_floating_network'):
- task_args = self.rally_base._build_task_args('test_file_name')
- self.assertEqual(task_args['netid'], '')
+ task_args = self.rally_base._build_task_args('test_file_name')
+ self.assertEqual(task_args['netid'], '')
+ mock_func.assert_called()
- def check_scenario_file(self, value):
+ @staticmethod
+ def check_scenario_file(value):
yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
if yaml_file in value:
return False
return True
- def test_prepare_test_list_missing_scenario_file(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- side_effect=self.check_scenario_file), \
- self.assertRaises(Exception):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists')
+ def test_prepare_test_list_missing_scenario_file(self, mock_func):
+ mock_func.side_effect = self.check_scenario_file
+ with self.assertRaises(Exception):
self.rally_base._prepare_test_list('test_file_name')
+ mock_func.assert_called()
- def check_temp_dir(self, value):
+ @staticmethod
+ def check_temp_dir(value):
yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
if yaml_file in value:
return True
return False
- def test_prepare_test_list_missing_temp_dir(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- side_effect=self.check_temp_dir), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.makedirs') as mock_os_makedir, \
- mock.patch.object(self.rally_base, 'apply_blacklist',
- return_value=mock.Mock()) as mock_method:
- yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
- ret_val = os.path.join(self.rally_base.TEMP_DIR, yaml_file)
- self.assertEqual(self.rally_base.
- _prepare_test_list('test_file_name'),
- ret_val)
- self.assertTrue(mock_method.called)
- self.assertTrue(mock_os_makedir.called)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.makedirs')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'apply_blacklist', return_value=mock.Mock())
+ def test_prepare_test_list_missing_temp_dir(
+ self, mock_method, mock_os_makedirs, mock_path_exists):
+ mock_path_exists.side_effect = self.check_temp_dir
+
+ yaml_file = 'opnfv-{}.yaml'.format('test_file_name')
+ ret_val = os.path.join(self.rally_base.TEMP_DIR, yaml_file)
+ self.assertEqual(self.rally_base._prepare_test_list('test_file_name'),
+ ret_val)
+ mock_path_exists.assert_called()
+ mock_method.assert_called()
+ mock_os_makedirs.assert_called()
def test_get_task_id_default(self):
cmd_raw = 'Task 1: started'
@@ -125,170 +123,163 @@ class OSRallyTesting(unittest.TestCase):
self.assertEqual(self.rally_base.get_cmd_output(proc),
'lineline')
- def test_excl_scenario_default(self):
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.yaml.safe_load',
+ return_value={'scenario': [
+ {'scenarios': ['test_scenario'],
+ 'installers': ['test_installer'],
+ 'tests': ['test']},
+ {'scenarios': ['other_scenario'],
+ 'installers': ['test_installer'],
+ 'tests': ['other_test']}]})
+ def test_excl_scenario_default(self, mock_func):
CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
- dic = {'scenario': [{'scenarios': ['test_scenario'],
- 'installers': ['test_installer'],
- 'tests': ['test']},
- {'scenarios': ['other_scenario'],
- 'installers': ['test_installer'],
- 'tests': ['other_test']}]}
- with mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'yaml.safe_load',
- return_value=dic):
- self.assertEqual(self.rally_base.excl_scenario(),
- ['test'])
-
- def test_excl_scenario_regex(self):
+ self.assertEqual(self.rally_base.excl_scenario(), ['test'])
+ mock_func.assert_called()
+
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.yaml.safe_load',
+ return_value={'scenario': [
+ {'scenarios': ['^os-[^-]+-featT-modeT$'],
+ 'installers': ['test_installer'],
+ 'tests': ['test1']},
+ {'scenarios': ['^os-ctrlT-[^-]+-modeT$'],
+ 'installers': ['test_installer'],
+ 'tests': ['test2']},
+ {'scenarios': ['^os-ctrlT-featT-[^-]+$'],
+ 'installers': ['test_installer'],
+ 'tests': ['test3']},
+ {'scenarios': ['^os-'],
+ 'installers': ['test_installer'],
+ 'tests': ['test4']},
+ {'scenarios': ['other_scenario'],
+ 'installers': ['test_installer'],
+ 'tests': ['test0a']},
+ {'scenarios': [''], # empty scenario
+ 'installers': ['test_installer'],
+ 'tests': ['test0b']}]})
+ def test_excl_scenario_regex(self, mock_func):
CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
CONST.__setattr__('DEPLOY_SCENARIO', 'os-ctrlT-featT-modeT')
- dic = {'scenario': [{'scenarios': ['^os-[^-]+-featT-modeT$'],
- 'installers': ['test_installer'],
- 'tests': ['test1']},
- {'scenarios': ['^os-ctrlT-[^-]+-modeT$'],
- 'installers': ['test_installer'],
- 'tests': ['test2']},
- {'scenarios': ['^os-ctrlT-featT-[^-]+$'],
- 'installers': ['test_installer'],
- 'tests': ['test3']},
- {'scenarios': ['^os-'],
- 'installers': ['test_installer'],
- 'tests': ['test4']},
- {'scenarios': ['other_scenario'],
- 'installers': ['test_installer'],
- 'tests': ['test0a']},
- {'scenarios': [''], # empty scenario
- 'installers': ['test_installer'],
- 'tests': ['test0b']}]}
- with mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'yaml.safe_load',
- return_value=dic):
- self.assertEqual(self.rally_base.excl_scenario(),
- ['test1', 'test2', 'test3', 'test4'])
-
- def test_excl_scenario_exception(self):
- with mock.patch('__builtin__.open', side_effect=Exception):
- self.assertEqual(self.rally_base.excl_scenario(),
- [])
-
- def test_excl_func_default(self):
+ self.assertEqual(self.rally_base.excl_scenario(),
+ ['test1', 'test2', 'test3', 'test4'])
+ mock_func.assert_called()
+
+ @mock.patch('__builtin__.open', side_effect=Exception)
+ def test_excl_scenario_exception(self, mock_open):
+ self.assertEqual(self.rally_base.excl_scenario(), [])
+ mock_open.assert_called()
+
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.yaml.safe_load',
+ return_value={'functionality': [
+ {'functions': ['no_live_migration'], 'tests': ['test']}]})
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'live_migration_supported', return_value=False)
+ def test_excl_func_default(self, mock_func, mock_yaml_load):
CONST.__setattr__('INSTALLER_TYPE', 'test_installer')
CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
- dic = {'functionality': [{'functions': ['no_live_migration'],
- 'tests': ['test']}]}
- with mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'yaml.safe_load',
- return_value=dic), \
- mock.patch.object(self.rally_base, 'live_migration_supported',
- return_value=False):
- self.assertEqual(self.rally_base.excl_func(),
- ['test'])
-
- def test_excl_func_exception(self):
- with mock.patch('__builtin__.open', side_effect=Exception):
- self.assertEqual(self.rally_base.excl_func(),
- [])
-
- def test_file_is_empty_default(self):
- mock_obj = mock.Mock()
+ self.assertEqual(self.rally_base.excl_func(), ['test'])
+ mock_func.assert_called()
+ mock_yaml_load.assert_called()
+
+ @mock.patch('__builtin__.open', side_effect=Exception)
+ def test_excl_func_exception(self, mock_open):
+ self.assertEqual(self.rally_base.excl_func(), [])
+ mock_open.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.stat',
+ return_value=mock.Mock())
+ def test_file_is_empty_default(self, mock_os_stat):
attrs = {'st_size': 10}
- mock_obj.configure_mock(**attrs)
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.stat',
- return_value=mock_obj):
- self.assertEqual(self.rally_base.file_is_empty('test_file_name'),
- False)
-
- def test_file_is_empty_exception(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.stat',
- side_effect=Exception):
- self.assertEqual(self.rally_base.file_is_empty('test_file_name'),
- True)
-
- def test_run_task_missing_task_file(self):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=False), \
- self.assertRaises(Exception):
- self.rally_base._run_task('test_name')
+ mock_os_stat.return_value.configure_mock(**attrs)
+ self.assertEqual(self.rally_base.file_is_empty('test_file_name'),
+ False)
+ mock_os_stat.assert_called()
- @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
- def test_run_task_no_tests_for_scenario(self, mock_logger_info):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=True), \
- mock.patch.object(self.rally_base, '_prepare_test_list',
- return_value='test_file_name'), \
- mock.patch.object(self.rally_base, 'file_is_empty',
- return_value=True):
- self.rally_base._run_task('test_name')
- mock_logger_info.assert_any_call('No tests for scenario \"%s\"',
- 'test_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.stat',
+ side_effect=Exception)
+ def test_file_is_empty_exception(self, mock_os_stat):
+ self.assertEqual(self.rally_base.file_is_empty('test_file_name'), True)
+ mock_os_stat.assert_called()
- @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
- def test_run_task_taskid_missing(self, mock_logger_error):
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=True), \
- mock.patch.object(self.rally_base, '_prepare_test_list',
- return_value='test_file_name'), \
- mock.patch.object(self.rally_base, 'file_is_empty',
- return_value=False), \
- mock.patch.object(self.rally_base, '_build_task_args',
- return_value={}), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'subprocess.Popen'), \
- mock.patch.object(self.rally_base, '_get_output',
- return_value=mock.Mock()), \
- mock.patch.object(self.rally_base, 'get_task_id',
- return_value=None), \
- mock.patch.object(self.rally_base, 'get_cmd_output',
- return_value=''):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=False)
+ def test_run_task_missing_task_file(self, mock_path_exists):
+ with self.assertRaises(Exception):
self.rally_base._run_task('test_name')
- str = 'Failed to retrieve task_id, validating task...'
- mock_logger_error.assert_any_call(str)
-
+ mock_path_exists.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_test_list', return_value='test_file_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'file_is_empty', return_value=True)
@mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
+ def test_run_task_no_tests_for_scenario(self, mock_logger_info,
+ mock_file_empty, mock_prep_list,
+ mock_path_exists):
+ self.rally_base._run_task('test_name')
+ mock_logger_info.assert_any_call('No tests for scenario \"%s\"',
+ 'test_name')
+ mock_file_empty.assert_called()
+ mock_prep_list.assert_called()
+ mock_path_exists.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_test_list', return_value='test_file_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'file_is_empty', return_value=False)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_build_task_args', return_value={})
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_get_output', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_task_id', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_cmd_output', return_value='')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.subprocess.Popen')
@mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
- def test_run_task_default(self, mock_logger_error,
- mock_logger_info):
- popen = mock.Mock()
+ def test_run_task_taskid_missing(self, mock_logger_error, *args):
+ self.rally_base._run_task('test_name')
+ text = 'Failed to retrieve task_id, validating task...'
+ mock_logger_error.assert_any_call(text)
+
+ @mock.patch('__builtin__.open', mock.mock_open())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_test_list', return_value='test_file_name')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'file_is_empty', return_value=False)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_build_task_args', return_value={})
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_get_output', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_task_id', return_value='1')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'get_cmd_output', return_value='')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ 'task_succeed', return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+ return_value=True)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.subprocess.Popen')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.makedirs')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.popen',
+ return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
+ def test_run_task_default(self, mock_logger_error, mock_logger_info,
+ mock_popen, *args):
attrs = {'read.return_value': 'json_result'}
- popen.configure_mock(**attrs)
-
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.path.exists',
- return_value=True), \
- mock.patch.object(self.rally_base, '_prepare_test_list',
- return_value='test_file_name'), \
- mock.patch.object(self.rally_base, 'file_is_empty',
- return_value=False), \
- mock.patch.object(self.rally_base, '_build_task_args',
- return_value={}), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'subprocess.Popen'), \
- mock.patch.object(self.rally_base, '_get_output',
- return_value=mock.Mock()), \
- mock.patch.object(self.rally_base, 'get_task_id',
- return_value='1'), \
- mock.patch.object(self.rally_base, 'get_cmd_output',
- return_value=''), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.makedirs'), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os.popen',
- return_value=popen), \
- mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch.object(self.rally_base, 'task_succeed',
- return_value=True):
- self.rally_base._run_task('test_name')
- str = 'Test scenario: "test_name" OK.\n'
- mock_logger_info.assert_any_call(str)
+ mock_popen.return_value.configure_mock(**attrs)
+ self.rally_base._run_task('test_name')
+ text = 'Test scenario: "test_name" OK.\n'
+ mock_logger_info.assert_any_call(text)
+ mock_logger_error.assert_not_called()
def test_prepare_env_testname_invalid(self):
self.rally_base.TESTS = ['test1', 'test2']
@@ -296,103 +287,103 @@ class OSRallyTesting(unittest.TestCase):
with self.assertRaises(Exception):
self.rally_base._prepare_env()
- def test_prepare_env_volume_creation_failed(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'list_volume_types', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_volume_type', return_value=None)
+ def test_prepare_env_volume_creation_failed(self, mock_create, mock_list):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- volume_type = None
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.list_volume_types',
- return_value=None), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_volume_type',
- return_value=volume_type), \
- self.assertRaises(Exception):
+ with self.assertRaises(Exception):
self.rally_base._prepare_env()
-
- def test_prepare_env_image_missing(self):
+ mock_list.assert_called()
+ mock_create.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'list_volume_types', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_volume_type', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_or_create_image', return_value=(True, None))
+ def test_prepare_env_image_missing(self, mock_get_img, mock_create_vt,
+ mock_list_vt):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- volume_type = mock.Mock()
- image_id = None
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.list_volume_types',
- return_value=None), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_volume_type',
- return_value=volume_type), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_or_create_image',
- return_value=(True, image_id)), \
- self.assertRaises(Exception):
+ with self.assertRaises(Exception):
self.rally_base._prepare_env()
-
- def test_prepare_env_image_shared_network_creation_failed(self):
+ mock_get_img.assert_called()
+ mock_create_vt.assert_called()
+ mock_list_vt.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'list_volume_types', return_value=None)
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_volume_type', return_value=mock.Mock())
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'get_or_create_image', return_value=(True, 'image_id'))
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'create_shared_network_full', return_value=None)
+ def test_prepare_env_image_shared_network_creation_failed(
+ self, mock_create_net, mock_get_img, mock_create_vt, mock_list_vt):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- volume_type = mock.Mock()
- image_id = 'image_id'
- network_dict = None
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.list_volume_types',
- return_value=None), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_volume_type',
- return_value=volume_type), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.get_or_create_image',
- return_value=(True, image_id)), \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.create_shared_network_full',
- return_value=network_dict), \
- self.assertRaises(Exception):
+ with self.assertRaises(Exception):
self.rally_base._prepare_env()
-
- def test_run_tests_all(self):
+ mock_create_net.assert_called()
+ mock_get_img.assert_called()
+ mock_create_vt.assert_called()
+ mock_list_vt.assert_called()
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_run_task', return_value=mock.Mock())
+ def test_run_tests_all(self, mock_run_task):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'all'
- with mock.patch.object(self.rally_base, '_run_task',
- return_value=mock.Mock()):
- self.rally_base._run_tests()
- self.rally_base._run_task.assert_any_call('test1')
- self.rally_base._run_task.assert_any_call('test2')
+ self.rally_base._run_tests()
+ mock_run_task.assert_any_call('test1')
+ mock_run_task.assert_any_call('test2')
- def test_run_tests_default(self):
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_run_task', return_value=mock.Mock())
+ def test_run_tests_default(self, mock_run_task):
self.rally_base.TESTS = ['test1', 'test2']
self.rally_base.test_name = 'test1'
- with mock.patch.object(self.rally_base, '_run_task',
- return_value=mock.Mock()):
- self.rally_base._run_tests()
- self.rally_base._run_task.assert_any_call('test1')
-
- def test_clean_up_default(self):
+ self.rally_base._run_tests()
+ mock_run_task.assert_any_call('test1')
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'delete_volume_type')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.os_utils.'
+ 'delete_glance_image')
+ def test_clean_up_default(self, mock_glance_method, mock_vol_method):
self.rally_base.volume_type = mock.Mock()
self.rally_base.cinder_client = mock.Mock()
self.rally_base.image_exists = False
self.rally_base.image_id = 1
self.rally_base.nova_client = mock.Mock()
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.delete_volume_type') as mock_vol_method, \
- mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'os_utils.delete_glance_image') as mock_glance_method:
- self.rally_base._clean_up()
- mock_vol_method.assert_any_call(self.rally_base.cinder_client,
- self.rally_base.volume_type)
- mock_glance_method.assert_any_call(self.rally_base.nova_client,
- 1)
-
- def test_run_default(self):
- with mock.patch.object(self.rally_base, '_prepare_env'), \
- mock.patch.object(self.rally_base, '_run_tests'), \
- mock.patch.object(self.rally_base, '_generate_report'), \
- mock.patch.object(self.rally_base, '_clean_up'):
- self.assertEqual(self.rally_base.run(),
- testcase.TestCase.EX_OK)
-
- def test_run_exception(self):
- with mock.patch.object(self.rally_base, '_prepare_env',
- side_effect=Exception):
- self.assertEqual(self.rally_base.run(),
- testcase.TestCase.EX_RUN_ERROR)
+ self.rally_base._clean_up()
+ mock_vol_method.assert_any_call(self.rally_base.cinder_client,
+ self.rally_base.volume_type)
+ mock_glance_method.assert_any_call(self.rally_base.nova_client,
+ 1)
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_env')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_run_tests')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_generate_report')
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_clean_up')
+ def test_run_default(self, *args):
+ self.assertEqual(self.rally_base.run(), testcase.TestCase.EX_OK)
+ map(lambda m: m.assert_called(), args)
+
+ @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+ '_prepare_env', side_effect=Exception)
+ def test_run_exception(self, mock_prep_env):
+ self.assertEqual(self.rally_base.run(), testcase.TestCase.EX_RUN_ERROR)
+ mock_prep_env.assert_called()
if __name__ == "__main__":
diff --git a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
index 51dbb640..ca097483 100644
--- a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
+++ b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
@@ -12,9 +12,12 @@ import pkg_resources
import unittest
from functest.core import testcase
-from functest.opnfv_tests.openstack.refstack_client import refstack_client
+from functest.opnfv_tests.openstack.refstack_client.refstack_client import \
+ RefstackClient, RefstackClientParser
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
+
class OSRefstackClientTesting(unittest.TestCase):
@@ -25,36 +28,79 @@ class OSRefstackClientTesting(unittest.TestCase):
'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
def setUp(self):
- self.defaultargs = {'config': self._config,
- 'testlist': self._testlist}
+ self.default_args = {'config': self._config,
+ 'testlist': self._testlist}
CONST.__setattr__('OS_AUTH_URL', 'https://ip:5000/v3')
CONST.__setattr__('OS_INSECURE', 'true')
- self.refstackclient = refstack_client.RefstackClient()
+ self.os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
+ @mock.patch('functest.opnfv_tests.openstack.refstack_client.tempest_conf.'
+ 'TempestConf', return_value=mock.Mock())
+ def _create_client(self, mock_conf):
+ with mock.patch('snaps.openstack.tests.openstack_tests.'
+ 'get_credentials', return_value=self.os_creds):
+ return RefstackClient()
def test_run_defcore_insecure(self):
insecure = '-k'
config = 'tempest.conf'
testlist = 'testlist'
+ client = self._create_client()
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
.format(insecure, config, testlist))
- self.refstackclient.run_defcore(config, testlist)
+ client.run_defcore(config, testlist)
m.assert_any_call(cmd)
def test_run_defcore(self):
CONST.__setattr__('OS_AUTH_URL', 'http://ip:5000/v3')
- refstackclient = refstack_client.RefstackClient()
insecure = ''
config = 'tempest.conf'
testlist = 'testlist'
+ client = self._create_client()
with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.ft_utils.execute_command') as m:
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
.format(insecure, config, testlist))
- refstackclient.run_defcore(config, testlist)
+ client.run_defcore(config, testlist)
m.assert_any_call(cmd)
+ @mock.patch('functest.opnfv_tests.openstack.refstack_client.'
+ 'refstack_client.LOGGER.info')
+ @mock.patch('__builtin__.open', side_effect=Exception)
+ def test_parse_refstack_result_missing_log_file(self, mock_open,
+ mock_logger_info):
+ self.case_name = 'refstack_defcore'
+ self.result = 0
+ self._create_client().parse_refstack_result()
+ mock_logger_info.assert_called_once_with(
+ "Testcase %s success_rate is %s%%",
+ self.case_name, self.result)
+
+ def test_parse_refstack_result_default(self):
+ log_file = ('''
+ {0} tempest.api.compute [18.464988s] ... ok
+ {0} tempest.api.volume [0.230334s] ... FAILED
+ {0} tempest.api.network [1.265828s] ... SKIPPED:
+ Ran: 3 tests in 1259.0000 sec.
+ - Passed: 1
+ - Skipped: 1
+ - Failed: 1
+ ''')
+ self.details = {"tests": 3,
+ "failures": 1,
+ "success": ['tempest.api.compute [18.464988s]'],
+ "errors": ['tempest.api.volume [0.230334s]'],
+ "skipped": ['tempest.api.network [1.265828s]']}
+ client = self._create_client()
+ with mock.patch('__builtin__.open',
+ mock.mock_open(read_data=log_file)):
+ client.parse_refstack_result()
+ self.assertEqual(client.details, self.details)
+
def _get_main_kwargs(self, key=None):
kwargs = {'config': self._config,
'testlist': self._testlist}
@@ -64,16 +110,18 @@ class OSRefstackClientTesting(unittest.TestCase):
def _test_main(self, status, *args):
kwargs = self._get_main_kwargs()
- self.assertEqual(self.refstackclient.main(**kwargs), status)
+ client = self._create_client()
+ self.assertEqual(client.main(**kwargs), status)
if len(args) > 0:
args[0].assert_called_once_with(
- refstack_client.RefstackClient.result_dir)
+ RefstackClient.result_dir)
if len(args) > 1:
args
def _test_main_missing_keyword(self, key):
kwargs = self._get_main_kwargs(key)
- self.assertEqual(self.refstackclient.main(**kwargs),
+ client = self._create_client()
+ self.assertEqual(client.main(**kwargs),
testcase.TestCase.EX_RUN_ERROR)
def test_main_missing_conf(self):
@@ -83,10 +131,10 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_main_missing_keyword('testlist')
def _test_argparser(self, arg, value):
- self.defaultargs[arg] = value
- parser = refstack_client.RefstackClientParser()
+ self.default_args[arg] = value
+ parser = RefstackClientParser()
self.assertEqual(parser.parse_args(["--{}={}".format(arg, value)]),
- self.defaultargs)
+ self.default_args)
def test_argparser_conf(self):
self._test_argparser('config', self._config)
@@ -95,13 +143,13 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_argparser('testlist', self._testlist)
def test_argparser_multiple_args(self):
- self.defaultargs['config'] = self._config
- self.defaultargs['testlist'] = self._testlist
- parser = refstack_client.RefstackClientParser()
+ self.default_args['config'] = self._config
+ self.default_args['testlist'] = self._testlist
+ parser = RefstackClientParser()
self.assertEqual(parser.parse_args(
["--config={}".format(self._config),
"--testlist={}".format(self._testlist)
- ]), self.defaultargs)
+ ]), self.default_args)
if __name__ == "__main__":
diff --git a/functest/tests/unit/openstack/tempest/test_conf_utils.py b/functest/tests/unit/openstack/tempest/test_conf_utils.py
index 37904965..77558086 100644
--- a/functest/tests/unit/openstack/tempest/test_conf_utils.py
+++ b/functest/tests/unit/openstack/tempest/test_conf_utils.py
@@ -10,89 +10,83 @@ import unittest
import mock
-from functest.opnfv_tests.openstack.tempest import conf_utils
+from functest.opnfv_tests.openstack.tempest import tempest, conf_utils
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
class OSTempestConfUtilsTesting(unittest.TestCase):
- def test_create_tempest_resources_missing_network_dic(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_tenant',
- return_value='test_tenant_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_user',
- return_value='test_user_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=None), \
- self.assertRaises(Exception) as context:
- conf_utils.create_tempest_resources()
- msg = 'Failed to create private network'
- self.assertTrue(msg in context)
-
- def test_create_tempest_resources_missing_image(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_tenant',
- return_value='test_tenant_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_user',
- return_value='test_user_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_image',
- return_value=(mock.Mock(), None)), \
- self.assertRaises(Exception) as context:
-
- CONST.__setattr__('tempest_use_custom_images', True)
- conf_utils.create_tempest_resources()
- msg = 'Failed to create image'
- self.assertTrue(msg in context)
-
- CONST.__setattr__('tempest_use_custom_images', False)
- conf_utils.create_tempest_resources(use_custom_images=True)
- msg = 'Failed to create image'
- self.assertTrue(msg in context)
-
- def test_create_tempest_resources_missing_flavor(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_keystone_client',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_tenant',
- return_value='test_tenant_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_user',
- return_value='test_user_id'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.create_shared_network_full',
- return_value=mock.Mock()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_image',
- return_value=(mock.Mock(), 'image_id')), \
- mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
- 'os_utils.get_or_create_flavor',
- return_value=(mock.Mock(), None)), \
- self.assertRaises(Exception) as context:
- CONST.__setattr__('tempest_use_custom_images', True)
- CONST.__setattr__('tempest_use_custom_flavors', True)
- conf_utils.create_tempest_resources()
- msg = 'Failed to create flavor'
- self.assertTrue(msg in context)
-
- CONST.__setattr__('tempest_use_custom_images', True)
- CONST.__setattr__('tempest_use_custom_flavors', False)
- conf_utils.create_tempest_resources(use_custom_flavors=False)
- msg = 'Failed to create flavor'
- self.assertTrue(msg in context)
+ def setUp(self):
+ self.os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=None)
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=mock.Mock())
+ def test_create_tempest_resources_missing_network_dic(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(os_creds={})
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create private network'
+ self.assertTrue(msg in context.exception)
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=None)
+ def test_create_tempest_resources_missing_image(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(os_creds={})
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create image'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ CONST.__setattr__('tempest_use_custom_images', False)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create(use_custom_images=True)
+ msg = 'Failed to create image'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
+ return_value=mock.Mock())
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create',
+ return_value=None)
+ def test_create_tempest_resources_missing_flavor(self, *mock_args):
+ tempest_resources = tempest.TempestResourcesManager(
+ os_creds=self.os_creds)
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', True)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create()
+ msg = 'Failed to create flavor'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
+
+ CONST.__setattr__('tempest_use_custom_images', True)
+ CONST.__setattr__('tempest_use_custom_flavors', False)
+ with self.assertRaises(Exception) as context:
+ tempest_resources.create(use_custom_flavors=True)
+ msg = 'Failed to create flavor'
+ self.assertTrue(msg in context.exception, msg=str(context.exception))
def test_get_verifier_id_missing_verifier(self):
CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
@@ -176,51 +170,20 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
def test_backup_tempest_config_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.path.exists',
- return_value=False), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.makedirs') as m1, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile') as m2:
+ 'conf_utils.shutil.copyfile') as m1:
conf_utils.backup_tempest_config('test_conf_file')
self.assertTrue(m1.called)
- self.assertTrue(m2.called)
-
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os.path.exists',
- return_value=True), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile') as m2:
- conf_utils.backup_tempest_config('test_conf_file')
- self.assertTrue(m2.called)
def test_configure_tempest_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_verifier',
return_value='test_conf_file'), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_tempest_update_params') as m1, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_tempest_multisite_params') as m2:
- conf_utils.configure_tempest('test_dep_dir',
- MODE='feature_multisite')
- self.assertTrue(m1.called)
- self.assertTrue(m2.called)
-
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_verifier',
- return_value='test_conf_file'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_tempest_update_params') as m1:
conf_utils.configure_tempest('test_dep_dir')
self.assertTrue(m1.called)
- self.assertTrue(m2.called)
def test_configure_tempest_defcore_default(self):
- img_flavor_dict = {'image_id': 'test_image_id',
- 'flavor_id': 'test_flavor_id',
- 'image_id_alt': 'test_image_alt_id',
- 'flavor_id_alt': 'test_flavor_alt_id'}
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.configure_verifier',
return_value='test_conf_file'), \
@@ -237,9 +200,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'write') as mwrite, \
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
+ 'conf_utils.generate_test_accounts_file'), \
+ mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.shutil.copyfile'):
- conf_utils.configure_tempest_defcore('test_dep_dir',
- img_flavor_dict)
+ conf_utils.configure_tempest_defcore(
+ 'test_dep_dir', 'test_image_id', 'test_flavor_id',
+ 'test_image_alt_id', 'test_flavor_alt_id', 'test_tenant_id')
mset.assert_any_call('compute', 'image_ref', 'test_image_id')
mset.assert_any_call('compute', 'image_ref_alt',
'test_image_alt_id')
@@ -249,6 +215,13 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
self.assertTrue(mread.called)
self.assertTrue(mwrite.called)
+ def test_generate_test_accounts_file_default(self):
+ with mock.patch("__builtin__.open", mock.mock_open()), \
+ mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
+ 'yaml.dump') as mock_dump:
+ conf_utils.generate_test_accounts_file('test_tenant_id')
+ self.assertTrue(mock_dump.called)
+
def _test_missing_param(self, params, image_id, flavor_id):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.ConfigParser.RawConfigParser.'
@@ -261,12 +234,14 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'write') as mwrite, \
mock.patch('__builtin__.open', mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.backup_tempest_config'):
+ 'conf_utils.backup_tempest_config'), \
+ mock.patch('functest.utils.functest_utils.yaml.safe_load',
+ return_value={'validation': {'ssh_timeout': 300}}):
CONST.__setattr__('OS_ENDPOINT_TYPE', None)
conf_utils.\
configure_tempest_update_params('test_conf_file',
- IMAGE_ID=image_id,
- FLAVOR_ID=flavor_id)
+ image_id=image_id,
+ flavor_id=flavor_id)
mset.assert_any_call(params[0], params[1], params[2])
self.assertTrue(mread.called)
self.assertTrue(mwrite.called)
@@ -320,50 +295,6 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
mexe.assert_any_call("rally verify configure-verifier "
"--reconfigure")
- def test_configure_tempest_multisite_params_without_fuel(self):
- conf_utils.CI_INSTALLER_TYPE = 'not_fuel'
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.os_utils.get_endpoint',
- return_value='kingbird_endpoint_url'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'set') as mset, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'read') as mread, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'add_section') as msection, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'write') as mwrite, \
- mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.backup_tempest_config'):
-
- conf_utils.configure_tempest_multisite_params('test_conf_file')
- msection.assert_any_call("kingbird")
- mset.assert_any_call('service_available', 'kingbird', 'true')
- mset.assert_any_call('kingbird', 'endpoint_type', 'publicURL')
- mset.assert_any_call('kingbird', 'TIME_TO_SYNC', '120')
- mset.assert_any_call('kingbird', 'endpoint_url',
- 'kingbird_endpoint_url')
- self.assertTrue(mread.called)
- self.assertTrue(mwrite.called)
-
- def test_install_verifier_ext_default(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.get_repo_tag',
- return_value='test_tag'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ft_utils.'
- 'execute_command_raise') as mexe:
- conf_utils.install_verifier_ext('test_path')
- cmd = ("rally verify add-verifier-ext --source test_path "
- "--version test_tag")
- error_msg = ("Problem while adding verifier extension from"
- " test_path")
- mexe.assert_called_once_with(cmd, error_msg=error_msg)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index b8b258b3..54d7d49b 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -15,10 +15,16 @@ from functest.opnfv_tests.openstack.tempest import tempest
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
+from snaps.openstack.os_credentials import OSCreds
+
class OSTempestTesting(unittest.TestCase):
def setUp(self):
+ os_creds = OSCreds(
+ username='user', password='pass',
+ auth_url='http://foo.com:5000/v3', project_name='bar')
+
with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.get_verifier_id',
return_value='test_deploy_id'), \
@@ -30,14 +36,13 @@ class OSTempestTesting(unittest.TestCase):
return_value='test_verifier_repo_dir'), \
mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.get_verifier_deployment_dir',
- return_value='test_verifier_deploy_dir'):
+ return_value='test_verifier_deploy_dir'), \
+ mock.patch('snaps.openstack.tests.openstack_tests.get_credentials',
+ return_value=os_creds):
self.tempestcommon = tempest.TempestCommon()
self.tempestsmoke_serial = tempest.TempestSmokeSerial()
self.tempestsmoke_parallel = tempest.TempestSmokeParallel()
self.tempestfull_parallel = tempest.TempestFullParallel()
- with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.install_verifier_ext'):
- self.tempestmultisite = tempest.TempestMultisite()
self.tempestcustom = tempest.TempestCustom()
self.tempestdefcore = tempest.TempestDefcore()
@@ -75,8 +80,6 @@ class OSTempestTesting(unittest.TestCase):
self.tempestcommon.MODE = mode
if self.tempestcommon.MODE == 'smoke':
testr_mode = "smoke"
- elif self.tempestcommon.MODE == 'feature_multisite':
- testr_mode = "'[Kk]ingbird'"
elif self.tempestcommon.MODE == 'full':
testr_mode = ""
else:
@@ -96,9 +99,6 @@ class OSTempestTesting(unittest.TestCase):
def test_generate_test_list_smoke_mode(self):
self._test_generate_test_list_mode_default('smoke')
- def test_generate_test_list_feature_multisite_mode(self):
- self._test_generate_test_list_mode_default('feature_multisite')
-
def test_generate_test_list_full_mode(self):
self._test_generate_test_list_mode_default('full')
@@ -161,8 +161,8 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', side_effect=Exception)
- def test_run_create_tempest_resources_ko(self, *args):
+ 'TempestResourcesManager.create', side_effect=Exception)
+ def test_run_tempest_create_resources_ko(self, *args):
self.assertEqual(self.tempestcommon.run(),
testcase.TestCase.EX_RUN_ERROR)
@@ -170,7 +170,7 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', return_value={})
+ 'TempestResourcesManager.create', return_value={})
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.configure_tempest', side_effect=Exception)
def test_run_configure_tempest_ko(self, *args):
@@ -181,7 +181,7 @@ class OSTempestTesting(unittest.TestCase):
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'conf_utils.create_tempest_resources', return_value={})
+ 'TempestResourcesManager.create', return_value={})
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'conf_utils.configure_tempest')
def _test_run(self, status, *args):
diff --git a/functest/tests/unit/openstack/vping/test_vping.py b/functest/tests/unit/openstack/vping/test_vping.py
index b229c351..a28c61ae 100644
--- a/functest/tests/unit/openstack/vping/test_vping.py
+++ b/functest/tests/unit/openstack/vping/test_vping.py
@@ -50,8 +50,6 @@ class VPingUserdataTesting(unittest.TestCase):
'vm_active', return_value=True)
def test_vping_userdata(self, deploy_vm, path_exists, create_flavor,
get_port_ip, vm_active):
- os_vm_inst = mock.MagicMock(name='get_console_output')
- os_vm_inst.get_console_output.return_value = 'vPing OK'
with mock.patch('snaps.openstack.utils.deploy_utils.create_image',
return_value=OpenStackImage(self.os_creds, None)), \
mock.patch('snaps.openstack.utils.deploy_utils.create_network',
@@ -67,8 +65,8 @@ class VPingUserdataTesting(unittest.TestCase):
name='foo', network_name='bar')]),
None)), \
mock.patch('snaps.openstack.create_instance.'
- 'OpenStackVmInstance.get_os_vm_server_obj',
- return_value=os_vm_inst):
+ 'OpenStackVmInstance.get_console_output',
+ return_value='vPing OK'):
self.assertEquals(TestCase.EX_OK, self.vping_userdata.run())
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index 98c7d6e9..b4cc5b73 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -133,24 +133,20 @@ class FunctestUtilsTesting(unittest.TestCase):
self.assertEqual(functest_utils.get_scenario(),
self.scenario)
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_daily_job(self, mock_get_build_tag):
- mock_get_build_tag.return_value = self.build_tag
+ def test_get_version_daily_job(self):
+ CONST.__setattr__('BUILD_TAG', self.build_tag)
self.assertEqual(functest_utils.get_version(), self.version)
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_weekly_job(self, mock_get_build_tag):
- mock_get_build_tag.return_value = self.build_tag_week
+ def test_get_version_weekly_job(self):
+ CONST.__setattr__('BUILD_TAG', self.build_tag_week)
self.assertEqual(functest_utils.get_version(), self.version)
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_with_dummy_build_tag(self, mock_get_build_tag):
- mock_get_build_tag.return_value = 'whatever'
+ def test_get_version_with_dummy_build_tag(self):
+ CONST.__setattr__('BUILD_TAG', 'whatever')
self.assertEqual(functest_utils.get_version(), 'unknown')
- @mock.patch('functest.utils.functest_utils.get_build_tag')
- def test_get_version_unknown(self, mock_get_build_tag):
- mock_get_build_tag.return_value = "unknown_build_tag"
+ def test_get_version_unknown(self):
+ CONST.__setattr__('BUILD_TAG', 'unknown_build_tag')
self.assertEqual(functest_utils.get_version(), "unknown")
@mock.patch('functest.utils.functest_utils.logger.info')
@@ -173,33 +169,15 @@ class FunctestUtilsTesting(unittest.TestCase):
self.node_name)
@mock.patch('functest.utils.functest_utils.logger.info')
- def test_get_build_tag_failed(self, mock_logger_info):
- with mock.patch.dict(os.environ,
- {},
- clear=True):
- self.assertEqual(functest_utils.get_build_tag(),
- "none")
- mock_logger_info.assert_called_once_with("Impossible to retrieve"
- " the build tag")
-
- def test_get_build_tag_default(self):
- with mock.patch.dict(os.environ,
- {'BUILD_TAG': self.build_tag},
- clear=True):
- self.assertEqual(functest_utils.get_build_tag(),
- self.build_tag)
-
- @mock.patch('functest.utils.functest_utils.logger.info')
def test_logger_test_results(self, mock_logger_info):
CONST.__setattr__('results_test_db_url', self.db_url)
+ CONST.__setattr__('BUILD_TAG', self.build_tag)
with mock.patch('functest.utils.functest_utils.get_pod_name',
return_value=self.node_name), \
mock.patch('functest.utils.functest_utils.get_scenario',
return_value=self.scenario), \
mock.patch('functest.utils.functest_utils.get_version',
- return_value=self.version), \
- mock.patch('functest.utils.functest_utils.get_build_tag',
- return_value=self.build_tag):
+ return_value=self.version):
functest_utils.logger_test_results(self.project, self.case_name,
self.status, self.details)
mock_logger_info.assert_called_once_with(
diff --git a/functest/tests/unit/utils/test_openstack_utils.py b/functest/tests/unit/utils/test_openstack_utils.py
index 828fb3d4..3bd7e3dd 100644
--- a/functest/tests/unit/utils/test_openstack_utils.py
+++ b/functest/tests/unit/utils/test_openstack_utils.py
@@ -13,6 +13,7 @@ import unittest
import mock
from functest.utils import openstack_utils
+from functest.utils.constants import CONST
class OSUtilsTesting(unittest.TestCase):
@@ -187,11 +188,18 @@ class OSUtilsTesting(unittest.TestCase):
mock_obj.configure_mock(**attrs)
self.role = mock_obj
+ mock_obj = mock.Mock()
+ attrs = {'id': 'domain_id',
+ 'name': 'test_domain'}
+ mock_obj.configure_mock(**attrs)
+ self.domain = mock_obj
+
self.keystone_client = mock.Mock()
attrs = {'projects.list.return_value': [self.tenant],
'tenants.list.return_value': [self.tenant],
'users.list.return_value': [self.user],
'roles.list.return_value': [self.role],
+ 'domains.list.return_value': [self.domain],
'projects.create.return_value': self.tenant,
'tenants.create.return_value': self.tenant,
'users.create.return_value': self.user,
@@ -390,8 +398,6 @@ class OSUtilsTesting(unittest.TestCase):
self._test_source_credentials('export OS_TENANT_NAME =admin')
self._test_source_credentials('export OS_TENANT_NAME = admin')
self._test_source_credentials('export OS_TENANT_NAME = "admin"')
- self._test_source_credentials('OS_TENANT_NAME', value='')
- self._test_source_credentials('export OS_TENANT_NAME', value='')
# This test will fail as soon as rc_file is fixed
self._test_source_credentials(
'export "\'OS_TENANT_NAME\'" = "\'admin\'"')
@@ -1652,9 +1658,16 @@ class OSUtilsTesting(unittest.TestCase):
'test_role'),
'role_id')
+ def test_get_domain_id_default(self):
+ self.assertEqual(openstack_utils.
+ get_domain_id(self.keystone_client,
+ 'test_domain'),
+ 'domain_id')
+
def test_create_tenant_default(self):
with mock.patch('functest.utils.openstack_utils.'
'is_keystone_v3', return_value=True):
+ CONST.__setattr__('OS_PROJECT_DOMAIN_NAME', 'Default')
self.assertEqual(openstack_utils.
create_tenant(self.keystone_client,
'test_tenant',
diff --git a/functest/tests/unit/vnf/ims/test_cloudify_ims.py b/functest/tests/unit/vnf/ims/test_cloudify_ims.py
index 537c5146..f0483c69 100644
--- a/functest/tests/unit/vnf/ims/test_cloudify_ims.py
+++ b/functest/tests/unit/vnf/ims/test_cloudify_ims.py
@@ -13,6 +13,8 @@ import mock
from functest.core import vnf
from functest.opnfv_tests.vnf.ims import cloudify_ims
+from snaps.openstack.os_credentials import OSCreds
+
class CloudifyImsTesting(unittest.TestCase):
@@ -79,8 +81,11 @@ class CloudifyImsTesting(unittest.TestCase):
@mock.patch('snaps.openstack.create_image.OpenStackImage.create')
def test_prepare_bad_auth_url(self, *args):
with self.assertRaises(Exception):
- self.ims_vnf.prepare()
- args[0].assert_not_called()
+ self.ims_vnf.image_creator(
+ OSCreds(username='user', password='pass', auth_url='url',
+ project_name='project', identity_api_version=3),
+ mock.Mock())
+ args[0].assert_not_called()
def test_prepare_missing_param(self):
with self.assertRaises(vnf.VnfPreparationException):
diff --git a/functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py b/functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py
new file mode 100644
index 00000000..ef227ca4
--- /dev/null
+++ b/functest/tests/unit/vnf/ims/test_orchestra_clearwaterims.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Test module for orchestra_clearwaterims"""
+
+import logging
+import unittest
+
+import mock
+from snaps.openstack.os_credentials import OSCreds
+
+from functest.core import vnf
+from functest.opnfv_tests.vnf.ims import orchestra_clearwaterims
+
+
+class OrchestraClearwaterImsTesting(unittest.TestCase):
+ """Test class for orchestra_clearwaterims"""
+ def setUp(self):
+
+ self.tenant = 'orchestra_clearwaterims'
+ self.creds = {'username': 'mocked_username',
+ 'password': 'mocked_password'}
+ self.tenant_images = {
+ 'image1': 'mocked_image_url_1',
+ 'image2': 'mocked_image_url_2'
+ }
+ self.mano = {
+ 'name': 'openbaton',
+ 'version': '3.2.0',
+ 'object': 'foo',
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 4096,
+ 'disk': 5,
+ 'vcpus': 2
+ },
+ 'os_image': 'mocked_image'
+ },
+ 'bootstrap': {
+ 'url': 'mocked_bootstrap_url',
+ 'config': {
+ 'url': 'mocked_config_url'}
+ },
+ 'gvnfm': {
+ 'userdata': {
+ 'url': 'mocked_userdata_url'
+ }
+ },
+ 'credentials': {
+ 'username': 'mocked_username',
+ 'password': 'mocked_password'
+ }
+ }
+ self.vnf = {
+ 'name': 'openims',
+ 'descriptor': {
+ 'url': 'mocked_descriptor_url'
+ },
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 2048,
+ 'disk': 5,
+ 'vcpus': 2}
+ }
+ }
+ self.clearwaterims = {
+ 'scscf': {
+ 'ports': [3870, 6060]
+ },
+ 'pcscf': {
+ 'ports': [4060]
+ },
+ 'icscf': {
+ 'ports': [3869, 5060]
+ },
+ 'fhoss': {
+ 'ports': [3868]
+ },
+ 'bind9': {
+ 'ports': []
+ }
+ }
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestra_clearwaterims.'
+ 'os.makedirs'),\
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestra_clearwaterims.'
+ 'get_config', return_value={
+ 'orchestrator': self.mano,
+ 'name': self.mano['name'],
+ 'version': self.mano['version'],
+ 'requirements': self.mano['requirements'],
+ 'credentials': self.mano['credentials'],
+ 'bootstrap': self.mano['bootstrap'],
+ 'gvnfm': self.mano['gvnfm'],
+ 'os_image': self.mano['requirements']['os_image'],
+ 'flavor': self.mano['requirements']['flavor'],
+ 'url': self.mano['bootstrap']['url'],
+ 'config': self.mano['bootstrap']['config'],
+ 'tenant_images': self.tenant_images,
+ 'vnf': self.vnf,
+ 'orchestra_clearwaterims': self.clearwaterims}):
+ self.ims_vnf = orchestra_clearwaterims.ClearwaterImsVnf()
+
+ self.details = {'orchestrator': {'status': 'PASS', 'duration': 120},
+ 'vnf': {},
+ 'test_vnf': {}}
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/v1'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_tenant_id',
+ return_value={'mocked_tenant_id'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_floating_ips',
+ return_value=[])
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create')
+ @mock.patch(
+ 'snaps.openstack.create_security_group.OpenStackSecurityGroup.create')
+ @mock.patch('snaps.openstack.create_network.OpenStackNetwork.create')
+ @mock.patch('snaps.openstack.create_router.OpenStackRouter.create')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.snaps_utils.get_ext_net_name')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.'
+ 'snaps_utils.neutron_utils.create_floating_ip')
+ def test_prepare_default(self, *args):
+ """Testing prepare function without any exceptions expected"""
+ self.assertIsNone(self.ims_vnf.prepare())
+ args[4].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/no_v'})
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ def test_prepare_bad_auth_url(self, *args):
+ """Testing prepare function with bad auth url"""
+ with self.assertRaises(Exception):
+ self.ims_vnf.image_creator(
+ OSCreds(username='user', password='pass', auth_url='url',
+ project_name='project', identity_api_version=3),
+ mock.Mock())
+ args[0].assert_not_called()
+
+ def test_prepare_missing_param(self):
+ """Testing prepare function with missing param"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ side_effect=Exception)
+ def test_prepare_keystone_exception(self, *args):
+ """Testing prepare function with keystone exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ side_effect=Exception)
+ def test_prepare_tenant_exception(self, *args):
+ """Testing prepare function with tenant exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[1].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ side_effect=Exception)
+ def test_prepare_user_exception(self, *args):
+ """Testing prepare function with user exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[2].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ side_effect=Exception)
+ def test_prepare_credentials_exception(self, *args):
+ """Testing prepare function with credentials exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ # # @mock.patch('functest.opnfv_tests.vnf.
+ # ims.orchestra_clearwaterims.get_userdata')
+ # def test_deploy_orchestrator(self, *args):
+ # floating_ip = FloatingIp
+ # floating_ip.ip = 'mocked_ip'
+ # details = {'fip':floating_ip,'flavor':{'name':'mocked_name'}}
+ # self.mano['details'] = details
+ # with mock.patch.dict(self.mano, {'details':
+ # {'fip':floating_ip,'flavor':{'name':'mocked_name'}}}):
+ # # with mock.patch.dict(self.mano, details):
+ # orchestra_clearwaterims.get_userdata(self.mano)
+ # self.assertIsNone(self.ims_vnf.deploy_orchestrator())
+ # args[4].assert_called_once_with()
+
+
+if __name__ == "__main__":
+ logging.disable(logging.CRITICAL)
+ unittest.main(verbosity=2)
diff --git a/functest/tests/unit/vnf/ims/test_orchestra_openims.py b/functest/tests/unit/vnf/ims/test_orchestra_openims.py
new file mode 100644
index 00000000..5911cf77
--- /dev/null
+++ b/functest/tests/unit/vnf/ims/test_orchestra_openims.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Test module for orchestra_openims"""
+
+import logging
+import unittest
+
+import mock
+from snaps.openstack.os_credentials import OSCreds
+
+from functest.core import vnf
+from functest.opnfv_tests.vnf.ims import orchestra_openims
+
+
+class OrchestraOpenImsTesting(unittest.TestCase):
+ """Test class for orchestra_openims"""
+ def setUp(self):
+
+ self.tenant = 'orchestra_openims'
+ self.creds = {'username': 'mocked_username',
+ 'password': 'mocked_password'}
+ self.tenant_images = {
+ 'image1': 'mocked_image_url_1',
+ 'image2': 'mocked_image_url_2'
+ }
+ self.mano = {
+ 'name': 'openbaton',
+ 'version': '3.2.0',
+ 'object': 'foo',
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 4096,
+ 'disk': 5,
+ 'vcpus': 2
+ },
+ 'os_image': 'mocked_image'
+ },
+ 'bootstrap': {
+ 'url': 'mocked_bootstrap_url',
+ 'config': {
+ 'url': 'mocked_config_url'}
+ },
+ 'gvnfm': {
+ 'userdata': {
+ 'url': 'mocked_userdata_url'
+ }
+ },
+ 'credentials': {
+ 'username': 'mocked_username',
+ 'password': 'mocked_password'
+ }
+ }
+ self.vnf = {
+ 'name': 'openims',
+ 'descriptor': {
+ 'url': 'mocked_descriptor_url'
+ },
+ 'requirements': {
+ 'flavor': {
+ 'name': 'mocked_flavor',
+ 'ram_min': 2048,
+ 'disk': 5,
+ 'vcpus': 2}
+ }
+ }
+ self.openims = {
+ 'scscf': {
+ 'ports': [3870, 6060]
+ },
+ 'pcscf': {
+ 'ports': [4060]
+ },
+ 'icscf': {
+ 'ports': [3869, 5060]
+ },
+ 'fhoss': {
+ 'ports': [3868]
+ },
+ 'bind9': {
+ 'ports': []
+ }
+ }
+ with mock.patch('functest.opnfv_tests.vnf.ims.orchestra_openims.'
+ 'os.makedirs'),\
+ mock.patch('functest.opnfv_tests.vnf.ims.orchestra_openims.'
+ 'get_config', return_value={
+ 'orchestrator': self.mano,
+ 'name': self.mano['name'],
+ 'version': self.mano['version'],
+ 'requirements': self.mano['requirements'],
+ 'credentials': self.mano['credentials'],
+ 'bootstrap': self.mano['bootstrap'],
+ 'gvnfm': self.mano['gvnfm'],
+ 'os_image':
+ self.mano['requirements']['os_image'],
+ 'flavor':
+ self.mano['requirements']['flavor'],
+ 'url': self.mano['bootstrap']['url'],
+ 'config': self.mano['bootstrap']['config'],
+ 'tenant_images': self.tenant_images,
+ 'vnf': self.vnf,
+ 'orchestra_openims': self.openims}):
+ self.ims_vnf = orchestra_openims.OpenImsVnf()
+
+ self.details = {'orchestrator': {'status': 'PASS', 'duration': 120},
+ 'vnf': {},
+ 'test_vnf': {}}
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/v1'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_tenant_id',
+ return_value={'mocked_tenant_id'})
+ @mock.patch(
+ 'functest.utils.openstack_utils.get_floating_ips',
+ return_value=[])
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create')
+ @mock.patch(
+ 'snaps.openstack.create_security_group.OpenStackSecurityGroup.create')
+ @mock.patch('snaps.openstack.create_network.OpenStackNetwork.create')
+ @mock.patch('snaps.openstack.create_router.OpenStackRouter.create')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.snaps_utils.get_ext_net_name')
+ @mock.patch(
+ 'functest.opnfv_tests.openstack.snaps.snaps_utils.'
+ 'neutron_utils.create_floating_ip')
+ def test_prepare_default(self, *args):
+ """Testing prepare function without any exceptions expected"""
+ self.assertIsNone(self.ims_vnf.prepare())
+ args[4].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ return_value={'auth_url': 'test/no_v'})
+ @mock.patch('snaps.openstack.create_image.OpenStackImage.create')
+ def test_prepare_bad_auth_url(self, *args):
+ """Testing prepare function with bad auth url"""
+ with self.assertRaises(Exception):
+ self.ims_vnf.image_creator(
+ OSCreds(username='user', password='pass', auth_url='url',
+ project_name='project', identity_api_version=3),
+ mock.Mock())
+ args[0].assert_not_called()
+
+ def test_prepare_missing_param(self):
+ """Testing prepare function with missing param"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ side_effect=Exception)
+ def test_prepare_keystone_exception(self, *args):
+ """Testing prepare function with keystone exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ side_effect=Exception)
+ def test_prepare_tenant_exception(self, *args):
+ """Testing prepare function with tenant exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[1].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ side_effect=Exception)
+ def test_prepare_user_exception(self, *args):
+ """Testing prepare function with user exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[2].assert_called_once_with()
+
+ @mock.patch('functest.core.vnf.os_utils.get_keystone_client',
+ return_value='test')
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_tenant_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_or_create_user_for_vnf',
+ return_value=True)
+ @mock.patch('functest.core.vnf.os_utils.get_credentials',
+ side_effect=Exception)
+ def test_prepare_credentials_exception(self, *args):
+ """Testing prepare function with credentials exception"""
+ with self.assertRaises(vnf.VnfPreparationException):
+ self.ims_vnf.prepare()
+ args[0].assert_called_once_with()
+
+ # # @mock.patch('functest.opnfv_tests.
+ # vnf.ims.orchestra_openims.get_userdata')
+ # def test_deploy_orchestrator(self, *args):
+ # floating_ip = FloatingIp
+ # floating_ip.ip = 'mocked_ip'
+ # details = {'fip':floating_ip,'flavor':{'name':'mocked_name'}}
+ # self.mano['details'] = details
+ # with mock.patch.dict(self.mano, {'details':
+ # {'fip':floating_ip,'flavor':{'name':'mocked_name'}}}):
+ # # with mock.patch.dict(self.mano, details):
+ # orchestra_openims.get_userdata(self.mano)
+ # self.assertIsNone(self.ims_vnf.deploy_orchestrator())
+ # args[4].assert_called_once_with()
+
+
+if __name__ == "__main__":
+ logging.disable(logging.CRITICAL)
+ unittest.main(verbosity=2)
diff --git a/functest/utils/env.py b/functest/utils/env.py
index 2fb766d3..d7b396ea 100644
--- a/functest/utils/env.py
+++ b/functest/utils/env.py
@@ -32,7 +32,8 @@ class Environment(object):
if k not in os.environ:
self.__setattr__(k, v)
self._set_ci_run()
- self._set_ci_loop()
+ if 'CI_LOOP' not in os.environ:
+ self._set_ci_loop()
def _set_ci_run(self):
if self.BUILD_TAG:
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 91781bd2..a766ef95 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -107,7 +107,9 @@ def get_version():
# jenkins-functest-fuel-baremetal-weekly-master-8
# use regex to match branch info
rule = "(dai|week)ly-(.+?)-[0-9]*"
- build_tag = get_build_tag()
+ build_tag = CONST.__getattribute__('BUILD_TAG')
+ if not build_tag:
+ build_tag = 'none'
m = re.search(rule, build_tag)
if m:
return m.group(2)
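
Purely as an illustrative sketch (not part of the patch), the regex above maps a build tag to a version as follows; the first tag comes from the comment above, the second from the unit tests earlier in this diff:

    import re

    rule = "(dai|week)ly-(.+?)-[0-9]*"
    for tag in ("jenkins-functest-fuel-baremetal-weekly-master-8", "whatever"):
        match = re.search(rule, tag)
        # group(2) carries the branch when the tag matches the daily/weekly pattern
        print(match.group(2) if match else "unknown")
    # prints "master", then "unknown"
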
@@ -128,19 +130,6 @@ def get_pod_name():
return "unknown-pod"
-def get_build_tag():
- """
- Get build tag of jenkins jobs
- """
- try:
- build_tag = os.environ['BUILD_TAG']
- except KeyError:
- logger.info("Impossible to retrieve the build tag")
- build_tag = "none"
-
- return build_tag
-
-
def logger_test_results(project, case_name, status, details):
"""
Format test case results for the logger
@@ -148,7 +137,7 @@ def logger_test_results(project, case_name, status, details):
pod_name = get_pod_name()
scenario = get_scenario()
version = get_version()
- build_tag = get_build_tag()
+ build_tag = CONST.__getattribute__('BUILD_TAG')
db_url = CONST.__getattribute__("results_test_db_url")
logger.info(
@@ -278,14 +267,14 @@ def get_ci_envvars():
def execute_command_raise(cmd, info=False, error_msg="",
- verbose=True, output_file=None):
- ret = execute_command(cmd, info, error_msg, verbose, output_file)
+ verbose=True, output_file=None, env=None):
+ ret = execute_command(cmd, info, error_msg, verbose, output_file, env)
if ret != 0:
raise Exception(error_msg)
def execute_command(cmd, info=False, error_msg="",
- verbose=True, output_file=None):
+ verbose=True, output_file=None, env=None):
if not error_msg:
error_msg = ("The command '%s' failed." % cmd)
msg_exec = ("Executing command: '%s'" % cmd)
@@ -294,7 +283,7 @@ def execute_command(cmd, info=False, error_msg="",
logger.info(msg_exec)
else:
logger.debug(msg_exec)
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ p = subprocess.Popen(cmd, env=env, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if output_file:
f = open(output_file, "w")
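
A minimal sketch of how the new env parameter can be used (the variable names and paths below are assumptions, not taken from the patch): the caller copies the current environment, extends it, and execute_command hands it straight to subprocess.Popen:

    import os

    from functest.utils import functest_utils as ft_utils

    env = os.environ.copy()
    env['TEMPEST_CONFIG_DIR'] = '/etc/tempest'   # assumed example value
    env['TEMPEST_CONFIG'] = 'tempest.conf'       # assumed example value
    # execute_command returns the exit status that execute_command_raise checks
    ret = ft_utils.execute_command("env | grep TEMPEST", env=env,
                                   error_msg="environment inspection failed")
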
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
index 4f8d6c35..73d1cde4 100644
--- a/functest/utils/openstack_utils.py
+++ b/functest/utils/openstack_utils.py
@@ -23,6 +23,7 @@ from novaclient import client as novaclient
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
+from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
logger = logging.getLogger(__name__)
@@ -117,13 +118,15 @@ def get_credentials(other_creds={}):
def source_credentials(rc_file):
with open(rc_file, "r") as f:
for line in f:
- var = line.rstrip('"\n').replace('export ', '').split("=")
+ var = (line.rstrip('"\n').replace('export ', '').split("=")
+ if re.search(r'(.*)=(.*)', line) else None)
# The two next lines should be modified as soon as rc_file
# conforms with common rules. Be aware that it could induce
# issues if value starts with '
- key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
- value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
- os.environ[key] = value
+ if var:
+ key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
+ value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
+ os.environ[key] = value
def get_credentials_for_rally():
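
For illustration, a standalone sketch that mirrors only the per-line logic patched above: lines without an '=' now yield None and are skipped, consistent with the two bare OS_TENANT_NAME cases removed from the unit tests earlier in this diff.

    import re


    def parse_rc_line(line):
        # Mirror of the patched per-line handling in source_credentials()
        if not re.search(r'(.*)=(.*)', line):
            return None
        var = line.rstrip('"\n').replace('export ', '').split("=")
        key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
        value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
        return key, value


    print(parse_rc_line('export OS_TENANT_NAME="admin"\n'))  # ('OS_TENANT_NAME', 'admin')
    print(parse_rc_line('export OS_TENANT_NAME\n'))          # None, line is skipped
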
@@ -710,6 +713,8 @@ def get_private_net(neutron_client):
def get_external_net(neutron_client):
+ if hasattr(CONST, 'EXTERNAL_NETWORK'):
+ return CONST.__getattribute__('EXTERNAL_NETWORK')
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
return network['name']
@@ -717,6 +722,11 @@ def get_external_net(neutron_client):
def get_external_net_id(neutron_client):
+ if hasattr(CONST, 'EXTERNAL_NETWORK'):
+ networks = neutron_client.list_networks(
+ name=CONST.__getattribute__('EXTERNAL_NETWORK'))
+ net_id = networks['networks'][0]['id']
+ return net_id
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
return network['id']
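
A quick sketch of the new short-circuit (the network name is an assumed example): when CONST carries EXTERNAL_NETWORK, get_external_net returns it without querying Neutron, so even a bare mock client suffices here.

    import mock

    from functest.utils import openstack_utils
    from functest.utils.constants import CONST

    CONST.__setattr__('EXTERNAL_NETWORK', 'ext-net')  # assumed network name
    neutron_client = mock.Mock()                      # never queried on this path
    print(openstack_utils.get_external_net(neutron_client))  # -> 'ext-net'
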
@@ -1374,13 +1384,25 @@ def get_role_id(keystone_client, role_name):
return id
+def get_domain_id(keystone_client, domain_name):
+ domains = keystone_client.domains.list()
+ id = ''
+ for d in domains:
+ if d.name == domain_name:
+ id = d.id
+ break
+ return id
+
+
def create_tenant(keystone_client, tenant_name, tenant_description):
try:
if is_keystone_v3():
+ domain_name = CONST.__getattribute__('OS_PROJECT_DOMAIN_NAME')
+ domain_id = get_domain_id(keystone_client, domain_name)
tenant = keystone_client.projects.create(
name=tenant_name,
description=tenant_description,
- domain="default",
+ domain=domain_id,
enabled=True)
else:
tenant = keystone_client.tenants.create(tenant_name,
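
For illustration, a sketch mirroring the new get_domain_id unit test above, with a mocked keystone client (the domain values are mocked, not real deployment data):

    import mock

    from functest.utils import openstack_utils

    domain = mock.Mock()
    domain.configure_mock(**{'id': 'domain_id', 'name': 'Default'})
    keystone_client = mock.Mock()
    keystone_client.domains.list.return_value = [domain]

    # A matching name resolves to its id; an unknown name yields ''
    print(openstack_utils.get_domain_id(keystone_client, 'Default'))  # 'domain_id'
    print(openstack_utils.get_domain_id(keystone_client, 'missing'))  # ''
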
@@ -1539,3 +1561,62 @@ def get_resource(heat_client, stack_id, resource):
except Exception as e:
logger.error("Error [get_resource]: %s" % e)
return None
+
+
+# *********************************************
+# TEMPEST
+# *********************************************
+def init_tempest_cleanup(tempest_config_dir=None,
+ tempest_config_filename='tempest.conf',
+ output_file=None):
+ """
+ Initialize the Tempest Cleanup utility.
+ See https://docs.openstack.org/tempest/latest/cleanup.html for docs.
+
+ :param tempest_config_dir: The directory where the Tempest config file is
+ located. If not specified, Tempest picks both the directory and the
+ filename (i.e. the second parameter is ignored).
+ :param tempest_config_filename: The filename of the Tempest config file
+ :param output_file: Optional file where to save output
+ """
+ # The Tempest cleanup utility currently offers no cmd argument to specify
+ # the config file, therefore it has to be configured with env variables
+ env = None
+ if tempest_config_dir:
+ env = os.environ.copy()
+ env['TEMPEST_CONFIG_DIR'] = tempest_config_dir
+ env['TEMPEST_CONFIG'] = tempest_config_filename
+
+ # If this command fails, an exception must be raised to stop the script;
+ # otherwise the later cleanup would also destroy other resources.
+ cmd_line = "tempest cleanup --init-saved-state"
+ ft_utils.execute_command_raise(cmd_line, env=env, output_file=output_file,
+ error_msg="Tempest cleanup init failed")
+
+
+def perform_tempest_cleanup(tempest_config_dir=None,
+ tempest_config_filename='tempest.conf',
+ output_file=None):
+ """
+ Perform cleanup using the Tempest Cleanup utility.
+ See https://docs.openstack.org/tempest/latest/cleanup.html for docs.
+
+ :param tempest_config_dir: The directory where the Tempest config file is
+ located. If not specified, Tempest picks both the directory and the
+ filename (i.e. the second parameter is ignored).
+ :param tempest_config_filename: The filename of the Tempest config file
+ :param output_file: Optional file where to save output
+ """
+ # The Tempest cleanup utility currently offers no cmd argument to specify
+ # the config file, therefore it has to be configured with env variables
+ env = None
+ if tempest_config_dir:
+ env = os.environ.copy()
+ env['TEMPEST_CONFIG_DIR'] = tempest_config_dir
+ env['TEMPEST_CONFIG'] = tempest_config_filename
+
+ # Unlike the init step above, this uses execute_command, which does not
+ # raise on failure, so a partly failing cleanup does not abort the run.
+ cmd_line = "tempest cleanup"
+ ft_utils.execute_command(cmd_line, env=env, output_file=output_file,
+ error_msg="Tempest cleanup failed")