-rw-r--r--  INFO  1
-rwxr-xr-x  dashboard/backend/dovetail/__init__.py  8
-rwxr-xr-x  dashboard/backend/dovetail/api/__init__.py  29
-rwxr-xr-x  dashboard/backend/dovetail/api/api.py  183
-rwxr-xr-x  dashboard/backend/dovetail/api/exception_handler.py  93
-rwxr-xr-x  dashboard/backend/dovetail/api/utils.py  20
-rwxr-xr-x  dashboard/backend/dovetail/db/__init__.py  8
-rwxr-xr-x  dashboard/backend/dovetail/db/api.py  72
-rwxr-xr-x  dashboard/backend/dovetail/db/database.py  182
-rwxr-xr-x  dashboard/backend/dovetail/db/exception.py  121
-rwxr-xr-x  dashboard/backend/dovetail/db/models.py  105
-rwxr-xr-x  dashboard/backend/dovetail/db/utils.py  478
-rwxr-xr-x  dashboard/backend/dovetail/utils/__init__.py  8
-rwxr-xr-x  dashboard/backend/dovetail/utils/flags.py  82
-rwxr-xr-x  dashboard/backend/dovetail/utils/logsetting.py  98
-rwxr-xr-x  dashboard/backend/dovetail/utils/setting_wrapper.py  18
-rwxr-xr-x  dashboard/backend/dovetail/utils/util.py  71
-rwxr-xr-x  dashboard/backend/install_db.py  55
-rwxr-xr-x  dashboard/backend/wsgi.py  35
-rw-r--r--  docker/Dockerfile  25
-rw-r--r--  docs/images/dovetail_offline_mode.png  bin 0 -> 128341 bytes
-rw-r--r--  docs/images/dovetail_online_mode.png  bin 0 -> 128509 bytes
-rw-r--r--  docs/testing/developer/testscope/index.rst  613
-rw-r--r--  docs/testing/user/testspecification/highavailability/index.rst  743
-rw-r--r--  docs/testing/user/testspecification/ipv6/index.rst  1787
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/designspecification.rst  133
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/index.rst  19
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst  59
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst  54
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst  243
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/testplan.rst  34
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst  9
-rw-r--r--  docs/testing/user/testspecification/old_files/ipv6/testspecification.rst  57
-rw-r--r--  docs/testing/user/testspecification/vping/index.rst  279
-rw-r--r--  docs/testing/user/testspecification/vpn/index.rst  487
-rw-r--r--  docs/testing/user/userguide/cli_reference.rst  9
-rw-r--r--  docs/testing/user/userguide/index.rst  477
-rw-r--r--  docs/testing/user/userguide/testing_guide.rst  517
-rw-r--r--  dovetail/compliance/debug.yml  5
-rw-r--r--  dovetail/compliance/proposed_tests.yml  16
-rw-r--r--  dovetail/conf/bottlenecks_config.yml  20
-rw-r--r--  dovetail/conf/cmd_config.yml  13
-rw-r--r--  dovetail/conf/dovetail_config.yml  17
-rw-r--r--  dovetail/conf/functest_config.yml  2
-rw-r--r--  dovetail/conf/yardstick_config.yml  7
-rw-r--r--  dovetail/container.py  147
-rw-r--r--  dovetail/parser.py  9
-rw-r--r--  dovetail/report.py  144
-rwxr-xr-x  dovetail/run.py  95
-rw-r--r--  dovetail/test_runner.py  46
-rw-r--r--  dovetail/testcase.py  49
-rw-r--r--  dovetail/testcase/defcore.tc001.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc001.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc002.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc003.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc004.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc005.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc006.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc007.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc008.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc009.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc010.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc011.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc012.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc013.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc014.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc015.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc016.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc017.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc018.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc019.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc020.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc021.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc022.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc023.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc024.yml  4
-rw-r--r--  dovetail/testcase/ipv6.tc025.yml  4
-rw-r--r--  dovetail/testcase/nfvi.tc101.yml  9
-rw-r--r--  dovetail/testcase/nfvi.tc102.yml  9
-rw-r--r--  dovetail/testcase/resiliency.tc001.yml  11
-rw-r--r--  dovetail/testcase/sdnvpn.tc001.yml  2
-rw-r--r--  dovetail/testcase/sdnvpn.tc002.yml  2
-rw-r--r--  dovetail/testcase/sdnvpn.tc003.yml  2
-rw-r--r--  dovetail/testcase/sdnvpn.tc004.yml  2
-rw-r--r--  dovetail/testcase/sdnvpn.tc008.yml  2
-rw-r--r--  dovetail/testcase/tempest.tc001.yml  15
-rw-r--r--  dovetail/testcase/tempest.tc002.yml  20
-rw-r--r--  dovetail/testcase/tempest.tc003.yml  19
-rw-r--r--  dovetail/testcase/tempest.tc004.yml  38
-rw-r--r--  dovetail/testcase/tempest.tc005.yml  15
-rw-r--r--  dovetail/testcase/tempest.tc006.yml  16
-rw-r--r--  dovetail/testcase/vping.tc001.yml (renamed from dovetail/testcase/nfvi.tc002.yml)  4
-rw-r--r--  dovetail/testcase/vping.tc002.yml (renamed from dovetail/testcase/nfvi.tc001.yml)  4
-rw-r--r--  dovetail/userconfig/hosts.yaml (renamed from userconfig/hosts.yaml)  0
-rw-r--r--  dovetail/userconfig/pod.yaml.sample (renamed from userconfig/pod.yaml.sample)  0
-rw-r--r--  dovetail/userconfig/sdnvpn_config_testcase1.yaml (renamed from userconfig/sdnvpn_config_testcase1.yaml)  0
-rw-r--r--  dovetail/userconfig/sdnvpn_config_testcase2.yaml (renamed from userconfig/sdnvpn_config_testcase2.yaml)  0
-rw-r--r--  dovetail/userconfig/sdnvpn_config_testcase3.yaml (renamed from userconfig/sdnvpn_config_testcase3.yaml)  0
-rw-r--r--  dovetail/userconfig/sdnvpn_config_testcase4.yaml (renamed from userconfig/sdnvpn_config_testcase4.yaml)  0
-rw-r--r--  dovetail/userconfig/sdnvpn_config_testcase8.yaml (renamed from userconfig/sdnvpn_config_testcase8.yaml)  0
-rw-r--r--  dovetail/userconfig/tempest_conf.yaml  16
-rw-r--r--  dovetail/utils/dovetail_config.py  5
-rw-r--r--  dovetail/utils/dovetail_utils.py  82
-rw-r--r--  dovetail/utils/local_db/cases.json  36
-rw-r--r--  dovetail/utils/local_db/get_db_schema.py  61
-rw-r--r--  dovetail/utils/local_db/init_dovetail.py  59
-rwxr-xr-x  dovetail/utils/local_db/launch_db.sh  52
-rw-r--r--  dovetail/utils/local_db/pods.json  382
-rw-r--r--  dovetail/utils/local_db/projects.json  218
-rwxr-xr-x  dovetail/utils/local_db/restart_db.sh  2
-rw-r--r--  dovetail/utils/offline/config.yaml  14
-rw-r--r--  setup.cfg  2
112 files changed, 5731 insertions, 3254 deletions
diff --git a/INFO b/INFO
index ad996d4f..470b7ca8 100644
--- a/INFO
+++ b/INFO
@@ -11,7 +11,6 @@ IRC: Server:freenode.net Channel:#opnfv-meeting
Repository: dovetail
Committers:
-christopher.price@ericsson.com
wenjing.chu@huawei.com
hongbo.tianhongbo@huawei.com
dneary@redhat.com
diff --git a/dashboard/backend/dovetail/__init__.py b/dashboard/backend/dovetail/__init__.py
deleted file mode 100755
index 6dbd8d79..00000000
--- a/dashboard/backend/dovetail/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/dashboard/backend/dovetail/api/__init__.py b/dashboard/backend/dovetail/api/__init__.py
deleted file mode 100755
index f9c4e5a2..00000000
--- a/dashboard/backend/dovetail/api/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import datetime
-import logging
-
-from flask import Flask
-
-from dovetail.utils import util
-
-logging.info('flask app: begin to init')
-
-app = Flask(__name__)
-app.debug = True
-logging.info('flask app config:%s', app.config)
-
-app.config['REMEMBER_COOKIE_DURATION'] = (
- datetime.timedelta(
- seconds=util.parse_time_interval('2h')
- )
-)
-
-logging.info('flask app: finish init')
diff --git a/dashboard/backend/dovetail/api/api.py b/dashboard/backend/dovetail/api/api.py
deleted file mode 100755
index 7839b893..00000000
--- a/dashboard/backend/dovetail/api/api.py
+++ /dev/null
@@ -1,183 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-
-from dovetail.api import utils
-from dovetail.api import exception_handler
-from dovetail.db import api as db_api
-
-from flask import Flask
-from flask import request
-
-import json
-
-app = Flask(__name__)
-
-
-@app.after_request
-def after_request(response):
- response.headers.add('Access-Control-Allow-Origin', '*')
- response.headers.add(
- 'Access-Control-Allow-Headers',
- 'Content-Type, Authorization')
- response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,DELETE,POST')
- return response
-
-# test
-
-
-@app.route("/test", methods=['GET'])
-def test():
- """backend api test"""
- logging.info('test functest')
- resp = utils.make_json_response(
- 200, {'test': 20}
- )
- return resp
-
-
-# settings
-@app.route("/clear", methods=['POST'])
-def clear_settings():
- """ clear all settings data on backend server """
- logging.info('clear all settings')
-
- return utils.make_json_response(
- 200, {}
- )
-
-
-@app.route("/settings", methods=['GET'])
-def list_settings():
- """list settings"""
- logging.info('list settings')
- global settings
- return utils.make_json_response(200, settings)
-
-
-@app.route("/settings", methods=['POST'])
-def add_settings():
- pass
-
-
-@app.route("/settings", methods=['POST'])
-def remove_settings():
- pass
-
-
-@app.route("/testcases", methods=['GET'])
-def get_testcases():
- pass
-
-
-@app.route("/results/<test_id>", methods=['GET'])
-def show_result(test_id):
- data = _get_request_args()
- return utils.make_json_response(
- 200,
- db_api.get_result(
- test_id, **data
- )
- )
-
-
-@app.route("/results", methods=['GET'])
-def list_results():
- data = _get_request_args()
- return utils.make_json_response(
- 200,
- db_api.list_results(
- **data
- )
- )
-
-
-@app.route("/results", methods=['POST'])
-def add_result():
- data = _get_request_data()
- ret_code = 200
- json_object = json.loads(data)
- logging.debug('json_object:%s', (json_object))
- if not db_api.store_result(**json_object):
- ret_code = 500
- resp = utils.make_json_response(
- ret_code, data
- )
- return resp
-
-
-@app.route("/results/<test_id>", methods=['DELETE'])
-def remove_results(test_id):
- data = _get_request_data()
- logging.debug('data:%s', data)
- response = db_api.del_result(
- test_id, **data
- )
- return utils.make_json_response(
- 200, response
- )
-
-
-def _get_request_data():
- """Convert reqeust data from string to python dict.
-
- If the request data is not json formatted, raises
- exception_handler.BadRequest.
- If the request data is not json formatted dict, raises
- exception_handler.BadRequest
- If the request data is empty, return default as empty dict.
-
- Usage: It is used to add or update a single resource.
- """
- if request.data:
- try:
- data = json.loads(request.data)
- except Exception:
- raise exception_handler.BadRequest(
- 'request data is not json formatted: %s' % request.data
- )
- if not isinstance(data, dict):
- raise exception_handler.BadRequest(
- 'request data is not json formatted dict: %s' % request.data
- )
-
- return request.data
- else:
- return {}
-
-
-def _get_request_args(**kwargs):
- """Get request args as dict.
-
- The value in the dict is converted to expected type.
-
- Args:
- kwargs: for each key, the value is the type converter.
- """
- args = dict(request.args)
- for key, value in args.items():
- if key in kwargs:
- converter = kwargs[key]
- if isinstance(value, list):
- args[key] = [converter(item) for item in value]
- else:
- args[key] = converter(value)
- return args
-
-
-'''
-@app.teardown_appcontext
-def shutdown_session(exception=None):
- db_session.remove()
-'''
-# user login/logout
-
-if __name__ == '__main__':
- app.run(host='127.0.0.1')
diff --git a/dashboard/backend/dovetail/api/exception_handler.py b/dashboard/backend/dovetail/api/exception_handler.py
deleted file mode 100755
index b7ce592a..00000000
--- a/dashboard/backend/dovetail/api/exception_handler.py
+++ /dev/null
@@ -1,93 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-"""Exceptions for RESTful API."""
-import traceback
-
-from dovetail.api import app
-from dovetail.api import utils
-
-
-class HTTPException(Exception):
-
- def __init__(self, message, status_code):
- super(HTTPException, self).__init__(message)
- self.traceback = traceback.format_exc()
- self.status_code = status_code
-
- def to_dict(self):
- return {'message': str(self)}
-
-
-class ItemNotFound(HTTPException):
- """Define the exception for referring non-existing object."""
-
- def __init__(self, message):
- super(ItemNotFound, self).__init__(message, 410)
-
-
-class BadRequest(HTTPException):
- """Define the exception for invalid/missing parameters.
-
- User making a request in invalid state cannot be processed.
- """
-
- def __init__(self, message):
- super(BadRequest, self).__init__(message, 400)
-
-
-class Unauthorized(HTTPException):
- """Define the exception for invalid user login."""
-
- def __init__(self, message):
- super(Unauthorized, self).__init__(message, 401)
-
-
-class UserDisabled(HTTPException):
- """Define the exception for disabled users."""
-
- def __init__(self, message):
- super(UserDisabled, self).__init__(message, 403)
-
-
-class Forbidden(HTTPException):
- """Define the exception for invalid permissions."""
-
- def __init__(self, message):
- super(Forbidden, self).__init__(message, 403)
-
-
-class BadMethod(HTTPException):
- """Define the exception for invoking unsupported methods."""
-
- def __init__(self, message):
- super(BadMethod, self).__init__(message, 405)
-
-
-class ConflictObject(HTTPException):
- """Define the exception for creating an existing object."""
-
- def __init__(self, message):
- super(ConflictObject, self).__init__(message, 409)
-
-
-@app.errorhandler(Exception)
-def handle_exception(error):
- if hasattr(error, 'to_dict'):
- response = error.to_dict()
- else:
- response = {'message': str(error)}
- if app.debug and hasattr(error, 'traceback'):
- response['traceback'] = error.traceback
-
- status_code = 400
- if hasattr(error, 'status_code'):
- status_code = error.status_code
-
- return utils.make_json_response(status_code, response)
diff --git a/dashboard/backend/dovetail/api/utils.py b/dashboard/backend/dovetail/api/utils.py
deleted file mode 100755
index dbe8d082..00000000
--- a/dashboard/backend/dovetail/api/utils.py
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import json
-from flask import make_response
-
-
-def make_json_response(status_code, data):
- """Wrap json format to the reponse object."""
-
- result = json.dumps(data, indent=4, default=lambda x: None) + '\r\n'
- resp = make_response(result, status_code)
- resp.headers['Content-type'] = 'application/json'
- return resp
diff --git a/dashboard/backend/dovetail/db/__init__.py b/dashboard/backend/dovetail/db/__init__.py
deleted file mode 100755
index 6dbd8d79..00000000
--- a/dashboard/backend/dovetail/db/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/dashboard/backend/dovetail/db/api.py b/dashboard/backend/dovetail/db/api.py
deleted file mode 100755
index 631ed2a3..00000000
--- a/dashboard/backend/dovetail/db/api.py
+++ /dev/null
@@ -1,72 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-"""
-Defines interface for DB access.
-"""
-
-import logging
-
-from dovetail.db import database
-from dovetail.db import utils
-from dovetail.db import models
-
-
-@database.run_in_session()
-def store_result(exception_when_existing=True,
- session=None, **kwargs):
- """Storing results into database.
-
- :param data: Dict describes test results.
- """
- logging.debug('store_result:%s', kwargs)
- result = utils.add_db_object(
- session, models.Result, exception_when_existing,
- **kwargs)
-
- return result
-
-
-@database.run_in_session()
-@utils.wrap_to_dict()
-def list_results(session=None, **filters):
- """Get all results
- """
- logging.debug('session:%s', session)
- results = utils.list_db_objects(
- session, models.Result, **filters
- )
- return results
-
-
-@database.run_in_session()
-@utils.wrap_to_dict()
-def get_result(test_id, exception_when_missing=True,
- session=None, **kwargs):
- """Get specific result with the test_id
-
- :param test_id: the unique serial number for the test
- """
- return _get_result(test_id, session,
- exception_when_missing=exception_when_missing, **kwargs)
-
-
-def _get_result(test_id, session=None, **kwargs):
- return utils.get_db_object(
- session, models.Result, test_id=test_id, **kwargs)
-
-
-@database.run_in_session()
-def del_result(test_id, session=None, **kwargs):
- """Delete a results from database
-
- :param test_id: the unique serial number for the test
- """
- return utils.del_db_objects(session, models.Result,
- test_id=test_id, **kwargs)
diff --git a/dashboard/backend/dovetail/db/database.py b/dashboard/backend/dovetail/db/database.py
deleted file mode 100755
index bc09d3bd..00000000
--- a/dashboard/backend/dovetail/db/database.py
+++ /dev/null
@@ -1,182 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-import functools
-
-from threading import local
-
-from sqlalchemy import create_engine
-from sqlalchemy.exc import IntegrityError
-from sqlalchemy.exc import OperationalError
-from sqlalchemy.orm import scoped_session
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.pool import StaticPool
-
-from contextlib import contextmanager
-from dovetail.db import exception
-from dovetail.db import models
-
-ENGINE = None
-SESSION = sessionmaker(autocommit=False, autoflush=False)
-SCOPED_SESSION = None
-SESSION_HOLDER = local()
-
-SQLALCHEMY_DATABASE_URI = "mysql://root:%s@localhost:3306/dovetail" % ('root')
-
-
-def init(database_url=None):
- """Initialize database.
-
- :param database_url: string, database url.
- """
- global ENGINE
- global SCOPED_SESSION
- if not database_url:
- database_url = SQLALCHEMY_DATABASE_URI
- logging.info('init database %s', database_url)
- print("database init %s" % database_url)
- ENGINE = create_engine(
- database_url, convert_unicode=True,
- poolclass=StaticPool
- )
- SESSION.configure(bind=ENGINE)
- SCOPED_SESSION = scoped_session(SESSION)
- models.BASE.query = SCOPED_SESSION.query_property()
-
-
-def in_session():
- """check if in database session scope."""
- return bool(hasattr(SESSION_HOLDER, 'session'))
-
-
-@contextmanager
-def session(exception_when_in_session=True):
- """database session scope.
-
- To operate database, it should be called in database session.
- If not exception_when_in_session, the with session statement support
- nested session and only the out most session commit/rollback the
- transaction.
- """
- if not ENGINE:
- init()
-
- nested_session = False
- if hasattr(SESSION_HOLDER, 'session'):
- if exception_when_in_session:
- logging.error('we are already in session')
- raise exception.DatabaseException('session already exist')
- else:
- new_session = SESSION_HOLDER.session
- nested_session = True
- logging.log(
- logging.DEBUG,
- 'reuse session %s', nested_session
- )
- else:
- new_session = SCOPED_SESSION()
- setattr(SESSION_HOLDER, 'session', new_session)
- logging.log(
- logging.DEBUG,
- 'enter session %s', new_session
- )
- try:
- yield new_session
- if not nested_session:
- new_session.commit()
- except Exception as error:
- if not nested_session:
- new_session.rollback()
- logging.error('failed to commit session')
- logging.exception(error)
- if isinstance(error, IntegrityError):
- for item in error.statement.split():
- if item.islower():
- object = item
- break
- raise exception.DuplicatedRecord(
- '%s in %s' % (error.orig, object)
- )
- elif isinstance(error, OperationalError):
- raise exception.DatabaseException(
- 'operation error in database'
- )
- elif isinstance(error, exception.DatabaseException):
- raise error
- else:
- raise exception.DatabaseException(str(error))
- finally:
- if not nested_session:
- new_session.close()
- SCOPED_SESSION.remove()
- delattr(SESSION_HOLDER, 'session')
- logging.log(
- logging.DEBUG,
- 'exit session %s', new_session
- )
-
-
-def current_session():
- """Get the current session scope when it is called.
-
- :return: database session.
- :raises: DatabaseException when it is not in session.
- """
- try:
- return SESSION_HOLDER.session
- except Exception as error:
- logging.error('It is not in the session scope')
- logging.exception(error)
- if isinstance(error, exception.DatabaseException):
- raise error
- else:
- raise exception.DatabaseException(str(error))
-
-
-def run_in_session(exception_when_in_session=True):
- """Decorator to make sure the decorated function run in session.
-
- When not exception_when_in_session, the run_in_session can be
- decorated several times.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- try:
- my_session = kwargs.get('session')
- if my_session is not None:
- return func(*args, **kwargs)
- else:
- with session(
- exception_when_in_session=exception_when_in_session
- ) as my_session:
- kwargs['session'] = my_session
- return func(*args, **kwargs)
- except Exception as error:
- logging.error(
- 'got exception with func %s args %s kwargs %s',
- func, args, kwargs
- )
- logging.exception(error)
- raise error
- return wrapper
- return decorator
-
-
-@run_in_session()
-def create_db(session=None):
- """Create database."""
- models.BASE.metadata.create_all(bind=ENGINE)
- print('create_db')
-
-
-def drop_db():
- """Drop database."""
- models.BASE.metadata.drop_all(bind=ENGINE)
diff --git a/dashboard/backend/dovetail/db/exception.py b/dashboard/backend/dovetail/db/exception.py
deleted file mode 100755
index 4acc5fbd..00000000
--- a/dashboard/backend/dovetail/db/exception.py
+++ /dev/null
@@ -1,121 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-"""Custom exception"""
-import traceback
-
-
-class DatabaseException(Exception):
- """Base class for all database exceptions."""
-
- def __init__(self, message):
- super(DatabaseException, self).__init__(message)
- self.traceback = traceback.format_exc()
- self.status_code = 400
-
- def to_dict(self):
- return {'message': str(self)}
-
-
-class RecordNotExists(DatabaseException):
- """Define the exception for referring non-existing object in DB."""
-
- def __init__(self, message):
- super(RecordNotExists, self).__init__(message)
- self.status_code = 404
-
-
-class DuplicatedRecord(DatabaseException):
- """Define the exception for trying to insert an existing object in DB."""
-
- def __init__(self, message):
- super(DuplicatedRecord, self).__init__(message)
- self.status_code = 409
-
-
-class Unauthorized(DatabaseException):
- """Define the exception for invalid user login."""
-
- def __init__(self, message):
- super(Unauthorized, self).__init__(message)
- self.status_code = 401
-
-
-class UserDisabled(DatabaseException):
- """Define the exception that a disabled user tries to do some operations.
-
- """
-
- def __init__(self, message):
- super(UserDisabled, self).__init__(message)
- self.status_code = 403
-
-
-class Forbidden(DatabaseException):
- """Define the exception that a user is trying to make some action
-
- without the right permission.
-
- """
-
- def __init__(self, message):
- super(Forbidden, self).__init__(message)
- self.status_code = 403
-
-
-class NotAcceptable(DatabaseException):
- """The data is not acceptable."""
-
- def __init__(self, message):
- super(NotAcceptable, self).__init__(message)
- self.status_code = 406
-
-
-class InvalidParameter(DatabaseException):
- """Define the exception that the request has invalid or missing parameters.
-
- """
-
- def __init__(self, message):
- super(InvalidParameter, self).__init__(message)
- self.status_code = 400
-
-
-class InvalidResponse(DatabaseException):
- """Define the exception that the response is invalid.
-
- """
-
- def __init__(self, message):
- super(InvalidResponse, self).__init__(message)
- self.status_code = 400
-
-
-class MultiDatabaseException(DatabaseException):
- """Define the exception composites with multi exceptions."""
-
- def __init__(self, exceptions):
- super(MultiDatabaseException, self).__init__('multi exceptions')
- self.exceptions = exceptions
- self.status_code = 400
-
- @property
- def traceback(self):
- tracebacks = []
- for exception in self.exceptions:
- tracebacks.append(exception.traceback)
-
- def to_dict(self):
- dict_info = super(MultiDatabaseException, self).to_dict()
- dict_info.update({
- 'exceptions': [
- exception.to_dict() for exception in self.exceptions
- ]
- })
- return dict_info
diff --git a/dashboard/backend/dovetail/db/models.py b/dashboard/backend/dovetail/db/models.py
deleted file mode 100755
index e0f3ffa3..00000000
--- a/dashboard/backend/dovetail/db/models.py
+++ /dev/null
@@ -1,105 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import datetime
-
-from sqlalchemy import Column, Integer, String, DateTime
-from sqlalchemy.ext.declarative import declarative_base
-
-from dovetail.utils import util
-from dovetail.db import exception
-
-BASE = declarative_base()
-
-
-class MarkTimestamp(object):
- created = Column(DateTime, default=lambda: datetime.datetime.now())
- updated = Column(DateTime, default=lambda: datetime.datetime.now(),
- onupdate=lambda: datetime.datetime.now())
-
-
-class ModelHandler(object):
-
- def initialize(self):
- self.update()
-
- def update(self):
- pass
-
- @staticmethod
- def type_check(value, column_type):
- if value is None:
- return True
- if not hasattr(column_type, 'python_type'):
- return True
- column_python_type = column_type.python_type
- if isinstance(value, column_python_type):
- return True
- if issubclass(column_python_type, basestring):
- return isinstance(value, basestring)
- if column_python_type in [int, long]:
- return type(value) in [int, long]
- if column_python_type in [float]:
- return type(value) in [float]
- if column_python_type in [bool]:
- return type(value) in [bool]
- return False
-
- def validate(self):
- columns = self.__mapper__.columns
- for key, column in columns.items():
- value = getattr(self, key)
- if not self.type_check(value, column.type):
- raise exception.InvalidParameter(
- 'column %s value %r type is unexpected: %s' % (
- key, value, column.type
- )
- )
-
- def to_dict(self):
- """General function to convert record to dict.
-
- Convert all columns not starting with '_' to
- {<column_name>: <column_value>}
- """
- keys = self.__mapper__.columns.keys()
- dict_info = {}
- for key in keys:
- if key.startswith('_'):
- continue
- value = getattr(self, key)
- if value is not None:
- if isinstance(value, datetime.datetime):
- value = util.format_datetime(value)
- dict_info[key] = value
- return dict_info
-
-
-class Result(BASE, MarkTimestamp, ModelHandler):
- __tablename__ = 'result'
- id = Column(Integer, primary_key=True)
- test_id = Column(String(120), unique=True)
- name = Column(String(120))
- data = Column(String(64000))
-
- def __init__(self, **kwargs):
- super(Result, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<Result %r>' % (self.name)
-
- def __str__(self):
- return 'Result[%s:%s]' % (self.name, self.test_id)
-
- def to_dict(self):
- dict_info = super(Result, self).to_dict()
- dict_info['name'] = self.name
- dict_info['test_id'] = self.test_id
- dict_info['data'] = self.data
- return dict_info
diff --git a/dashboard/backend/dovetail/db/utils.py b/dashboard/backend/dovetail/db/utils.py
deleted file mode 100755
index 4bb0026d..00000000
--- a/dashboard/backend/dovetail/db/utils.py
+++ /dev/null
@@ -1,478 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-"""Utilities for database."""
-
-
-import functools
-import inspect
-import logging
-
-from sqlalchemy import and_
-from sqlalchemy import or_
-
-from dovetail.db import exception
-from dovetail.db import models
-
-
-def add_db_object(session, table, exception_when_existing=True,
- *args, **kwargs):
- """Create db object.
-
- If exception_when_existing is False and the db object exists,
- update the existing db object instead of raising an exception.
- """
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s add object %s attributes %s to table %s',
- id(session), args, kwargs, table.__name__)
- argspec = inspect.getargspec(table.__init__)
- arg_names = argspec.args[1:]
- arg_defaults = argspec.defaults
- if not arg_defaults:
- arg_defaults = []
- if not (
- len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names)
- ):
- raise exception.InvalidParameter(
- 'arg names %s does not match arg values %s' % (
- arg_names, args)
- )
- db_keys = dict(zip(arg_names, args))
- logging.debug('db_keys:%s', db_keys)
- if db_keys:
- db_object = session.query(table).filter_by(**db_keys).first()
- else:
- logging.debug('db object is None')
- db_object = None
-
- new_object = False
- if db_object:
- logging.debug(
- 'got db object %s: %s', db_keys, db_object
- )
- if exception_when_existing:
- raise exception.DuplicatedRecord(
- '%s exists in table %s' % (db_keys, table.__name__)
- )
- else:
- db_object = table(**db_keys)
- new_object = True
-
- for key, value in kwargs.items():
- setattr(db_object, key, value)
-
- logging.debug('db_object:%s', db_object)
- if new_object:
- session.add(db_object)
- session.flush()
- db_object.initialize()
- db_object.validate()
- logging.debug(
- 'session %s db object %s added', id(session), db_object
- )
- return db_object
-
-
-def list_db_objects(session, table, order_by=[], **filters):
- """List db objects.
-
- If order_by is given, the db objects are sorted by the ordered keys.
- """
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s list db objects by filters %s in table %s',
- id(session), filters, table.__name__
- )
- db_objects = model_order_by(
- model_filter(
- model_query(session, table),
- table,
- **filters
- ),
- table,
- order_by
- ).all()
- logging.debug(
- 'session %s got listed db objects: %s',
- id(session), db_objects
- )
- return db_objects
-
-
-def get_db_object(session, table, exception_when_missing=True, **kwargs):
- """Get db object.
-
- If exception_when_missing is False and the db object cannot be found,
- return None instead of raising an exception.
- """
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s get db object %s from table %s',
- id(session), kwargs, table.__name__)
- db_object = model_filter(
- model_query(session, table), table, **kwargs
- ).first()
- logging.debug(
- 'session %s got db object %s', id(session), db_object
- )
- if db_object:
- return db_object
-
- if not exception_when_missing:
- return None
-
- raise exception.RecordNotExists(
- 'Cannot find the record in table %s: %s' % (
- table.__name__, kwargs
- )
- )
-
-
-def del_db_objects(session, table, **filters):
- """delete db objects."""
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s delete db objects by filters %s in table %s',
- id(session), filters, table.__name__
- )
- query = model_filter(
- model_query(session, table), table, **filters
- )
- db_objects = query.all()
- query.delete(synchronize_session=False)
- logging.debug(
- 'session %s db objects %s deleted', id(session), db_objects
- )
- return db_objects
-
-
-def model_order_by(query, model, order_by):
- """append order by into sql query model."""
- if not order_by:
- return query
- order_by_cols = []
- for key in order_by:
- if isinstance(key, tuple):
- key, is_desc = key
- else:
- is_desc = False
- if isinstance(key, basestring):
- if hasattr(model, key):
- col_attr = getattr(model, key)
- else:
- continue
- else:
- col_attr = key
- if is_desc:
- order_by_cols.append(col_attr.desc())
- else:
- order_by_cols.append(col_attr)
- return query.order_by(*order_by_cols)
-
-
-def _model_condition(col_attr, value):
- """Generate condition for one column.
-
- Example for col_attr is name:
- value is 'a': name == 'a'
- value is ['a']: name == 'a'
- value is ['a', 'b']: name == 'a' or name == 'b'
- value is {'eq': 'a'}: name == 'a'
- value is {'lt': 'a'}: name < 'a'
- value is {'le': 'a'}: name <= 'a'
- value is {'gt': 'a'}: name > 'a'
- value is {'ge': 'a'}: name >= 'a'
- value is {'ne': 'a'}: name != 'a'
- value is {'in': ['a', 'b']}: name in ['a', 'b']
- value is {'notin': ['a', 'b']}: name not in ['a', 'b']
- value is {'startswith': 'abc'}: name like 'abc%'
- value is {'endswith': 'abc'}: name like '%abc'
- value is {'like': 'abc'}: name like '%abc%'
- value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c'
- value is [{'lt': 'a'}]: name < 'a'
- value is [{'lt': 'a'}, {'gt': 'c'}]: name < 'a' or name > 'c'
- value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c'
-
- If value is a list, the condition is the OR of the conditions
- of each item.
- If value is a dict with multiple keys, the condition is the AND
- of the conditions of each key.
- Otherwise the condition compares the column with the value.
- """
- if isinstance(value, list):
- basetype_values = []
- composite_values = []
- for item in value:
- if isinstance(item, (list, dict)):
- composite_values.append(item)
- else:
- basetype_values.append(item)
- conditions = []
- if basetype_values:
- if len(basetype_values) == 1:
- condition = (col_attr == basetype_values[0])
- else:
- condition = col_attr.in_(basetype_values)
- conditions.append(condition)
- for composite_value in composite_values:
- condition = _model_condition(col_attr, composite_value)
- if condition is not None:
- conditions.append(condition)
- if not conditions:
- return None
- if len(conditions) == 1:
- return conditions[0]
- return or_(*conditions)
- elif isinstance(value, dict):
- conditions = []
- if 'eq' in value:
- conditions.append(_model_condition_func(
- col_attr, value['eq'],
- lambda attr, data: attr == data,
- lambda attr, data, item_condition_func: attr.in_(data)
- ))
- if 'lt' in value:
- conditions.append(_model_condition_func(
- col_attr, value['lt'],
- lambda attr, data: attr < data,
- _one_item_list_condition_func
- ))
- if 'gt' in value:
- conditions.append(_model_condition_func(
- col_attr, value['gt'],
- lambda attr, data: attr > data,
- _one_item_list_condition_func
- ))
- if 'le' in value:
- conditions.append(_model_condition_func(
- col_attr, value['le'],
- lambda attr, data: attr <= data,
- _one_item_list_condition_func
- ))
- if 'ge' in value:
- conditions.append(_model_condition_func(
- col_attr, value['ge'],
- lambda attr, data: attr >= data,
- _one_item_list_condition_func
- ))
- if 'ne' in value:
- conditions.append(_model_condition_func(
- col_attr, value['ne'],
- lambda attr, data: attr != data,
- lambda attr, data, item_condition_func: attr.notin_(data)
- ))
- if 'in' in value:
- conditions.append(col_attr.in_(value['in']))
- if 'notin' in value:
- conditions.append(col_attr.notin_(value['notin']))
- if 'startswith' in value:
- conditions.append(_model_condition_func(
- col_attr, value['startswith'],
- lambda attr, data: attr.like('%s%%' % data)
- ))
- if 'endswith' in value:
- conditions.append(_model_condition_func(
- col_attr, value['endswith'],
- lambda attr, data: attr.like('%%%s' % data)
- ))
- if 'like' in value:
- conditions.append(_model_condition_func(
- col_attr, value['like'],
- lambda attr, data: attr.like('%%%s%%' % data)
- ))
- conditions = [
- condition
- for condition in conditions
- if condition is not None
- ]
- if not conditions:
- return None
- if len(conditions) == 1:
- return conditions[0]
- return and_(conditions)
- else:
- condition = (col_attr == value)
- return condition
-
-
-def _default_list_condition_func(col_attr, value, condition_func):
- """The default condition func for a list of data.
-
- Given the condition func for a single item of data, this function
- wraps the condition_func and returns another condition func that
- uses or_ to merge the conditions of each single item in order to
- deal with a list of data items.
-
- Args:
- col_attr: the column name
- value: the column value to be compared.
- condition_func: the sqlalchemy condition object like ==
-
- Examples:
- col_attr is name, value is ['a', 'b', 'c'] and
- condition_func is ==, the returned condition is
- name == 'a' or name == 'b' or name == 'c'
- """
- conditions = []
- for sub_value in value:
- condition = condition_func(col_attr, sub_value)
- if condition is not None:
- conditions.append(condition)
- if conditions:
- return or_(*conditions)
- else:
- return None
-
-
-def _one_item_list_condition_func(col_attr, value, condition_func):
- """The wrapper condition func to deal with one item data list.
-
- For simplification, it is used to reduce generating too complex
- sql conditions.
- """
- if value:
- return condition_func(col_attr, value[0])
- else:
- return None
-
-
-def _model_condition_func(
- col_attr, value,
- item_condition_func,
- list_condition_func=_default_list_condition_func
-):
- """Return sql condition based on value type."""
- if isinstance(value, list):
- if not value:
- return None
- if len(value) == 1:
- return item_condition_func(col_attr, value)
- return list_condition_func(
- col_attr, value, item_condition_func
- )
- else:
- return item_condition_func(col_attr, value)
-
-
-def model_filter(query, model, **filters):
- """Append conditons to query for each possible column."""
- for key, value in filters.items():
- if isinstance(key, basestring):
- if hasattr(model, key):
- col_attr = getattr(model, key)
- else:
- continue
- else:
- col_attr = key
-
- condition = _model_condition(col_attr, value)
- if condition is not None:
- query = query.filter(condition)
- return query
-
-
-def model_query(session, model):
- """model query.
-
- Return sqlalchemy query object.
- """
- if not issubclass(model, models.BASE):
- raise exception.DatabaseException("model should be sublass of BASE!")
-
- return session.query(model)
-
-
-def wrap_to_dict(support_keys=[], **filters):
- """Decrator to convert returned object to dict.
-
- The details is decribed in _wrapper_dict.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- return _wrapper_dict(
- func(*args, **kwargs), support_keys, **filters
- )
- return wrapper
- return decorator
-
-
-def _wrapper_dict(data, support_keys, **filters):
- """Helper for warpping db object into dictionary.
-
- If data is list, convert it to a list of dict
- If data is Base model, convert it to dict
- for the data as a dict, filter it with the supported keys.
- For each filter_key, filter_value in filters, also filter
- data[filter_key] by filter_value recursively if it exists.
-
- Example:
- data is models.Switch, it will be converted to
- {
- 'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456,
- 'credentials': {'version': 2, 'password': 'abc'}
- }
- Then if support_keys are ['id', 'ip', 'credentials'],
- it will be filtered to {
- 'id': 1, 'ip': '10.0.0.1',
- 'credentials': {'version': 2, 'password': 'abc'}
- }
- Then if filters is {'credentials': ['version']},
- it will be filtered to {
- 'id': 1, 'ip': '10.0.0.1',
- 'credentials': {'version': 2}
- }
- """
- logging.debug(
- 'wrap dict %s by support_keys=%s filters=%s',
- data, support_keys, filters
- )
- if isinstance(data, list):
- return [
- _wrapper_dict(item, support_keys, **filters)
- for item in data
- ]
- if isinstance(data, models.ModelHandler):
- data = data.to_dict()
- if not isinstance(data, dict):
- raise exception.InvalidResponse(
- 'response %s type is not dict' % data
- )
- info = {}
- try:
- if len(support_keys) == 0:
- support_keys = data.keys()
- for key in support_keys:
- if key in data and data[key] is not None:
- if key in filters:
- filter_keys = filters[key]
- if isinstance(filter_keys, dict):
- info[key] = _wrapper_dict(
- data[key], filter_keys.keys(),
- **filter_keys
- )
- else:
- info[key] = _wrapper_dict(
- data[key], filter_keys
- )
- else:
- info[key] = data[key]
- return info
- except Exception as error:
- logging.exception(error)
- raise error
diff --git a/dashboard/backend/dovetail/utils/__init__.py b/dashboard/backend/dovetail/utils/__init__.py
deleted file mode 100755
index 6dbd8d79..00000000
--- a/dashboard/backend/dovetail/utils/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/dashboard/backend/dovetail/utils/flags.py b/dashboard/backend/dovetail/utils/flags.py
deleted file mode 100755
index dd10670b..00000000
--- a/dashboard/backend/dovetail/utils/flags.py
+++ /dev/null
@@ -1,82 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import sys
-
-from optparse import OptionParser
-
-
-class Flags(object):
- """Class to store flags."""
-
- PARSER = OptionParser()
- PARSED_OPTIONS = None
-
- @classmethod
- def parse_args(cls):
- """parse args."""
- (options, argv) = Flags.PARSER.parse_args()
- sys.argv = [sys.argv[0]] + argv
- Flags.PARSED_OPTIONS = options
-
- def __getattr__(self, name):
- if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name):
- return getattr(Flags.PARSED_OPTIONS, name)
-
- for option in Flags.PARSER.option_list:
- if option.dest == name:
- return option.default
-
- raise AttributeError('Option instance has no attribute %s' % name)
-
- def __setattr__(self, name, value):
- if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name):
- setattr(Flags.PARSED_OPTIONS, name, value)
- return
-
- for option in Flags.PARSER.option_list:
- if option.dest == name:
- option.default = value
- return
-
- object.__setattr__(self, name, value)
-
-
-OPTIONS = Flags()
-
-
-def init():
- """Init flag parsing."""
- OPTIONS.parse_args()
-
-
-def add(flagname, **kwargs):
- """Add a flag name and its setting.
-
- :param flagname: flag name declared in cmd as --<flagname>=...
- :type flagname: str
- """
- Flags.PARSER.add_option('--%s' % flagname,
- dest=flagname, **kwargs)
-
-
-def add_bool(flagname, default=True, **kwargs):
- """Add a bool flag name and its setting.
-
- :param flagname: flag name declared in cmd as --[no]<flagname>.
- :type flagname: str
- :param default: default value
- :type default: bool
- """
- Flags.PARSER.add_option('--%s' % flagname,
- dest=flagname, default=default,
- action="store_true", **kwargs)
- Flags.PARSER.add_option('--no%s' % flagname,
- dest=flagname,
- action="store_false", **kwargs)
diff --git a/dashboard/backend/dovetail/utils/logsetting.py b/dashboard/backend/dovetail/utils/logsetting.py
deleted file mode 100755
index 27255688..00000000
--- a/dashboard/backend/dovetail/utils/logsetting.py
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-import logging.handlers
-import os
-import os.path
-import sys
-
-from dovetail.utils import flags
-from dovetail.utils import setting_wrapper as setting
-
-
-flags.add('loglevel',
- help='logging level', default=setting.DEFAULT_LOGLEVEL)
-flags.add('logdir',
- help='logging directory', default=setting.DEFAULT_LOGDIR)
-flags.add('logfile',
- help='logging filename', default=None)
-flags.add('log_interval', type='int',
- help='log interval', default=setting.DEFAULT_LOGINTERVAL)
-flags.add('log_interval_unit',
- help='log interval unit', default=setting.DEFAULT_LOGINTERVAL_UNIT)
-flags.add('log_format',
- help='log format', default=setting.DEFAULT_LOGFORMAT)
-flags.add('log_backup_count', type='int',
- help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT)
-
-
-# mapping str setting in flag --loglevel to logging level.
-LOGLEVEL_MAPPING = {
- 'finest': logging.DEBUG - 2, # more detailed log.
- 'fine': logging.DEBUG - 1, # detailed log.
- 'debug': logging.DEBUG,
- 'info': logging.INFO,
- 'warning': logging.WARNING,
- 'error': logging.ERROR,
- 'critical': logging.CRITICAL,
-}
-
-
-logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine')
-logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest')
-
-
-# disable logging when logsetting.init not called
-logging.getLogger().setLevel(logging.CRITICAL)
-
-
-def getLevelByName(level_name):
- """Get log level by level name."""
- return LOGLEVEL_MAPPING[level_name]
-
-
-def init():
- """Init loggsetting. It should be called after flags.init."""
- loglevel = flags.OPTIONS.loglevel.lower()
- logdir = flags.OPTIONS.logdir
- logfile = flags.OPTIONS.logfile
- logger = logging.getLogger()
- if logger.handlers:
- for handler in logger.handlers:
- logger.removeHandler(handler)
-
- if logdir:
- if not logfile:
- logfile = './%s.log' % os.path.basename(sys.argv[0])
-
- handler = logging.handlers.TimedRotatingFileHandler(
- os.path.join(logdir, logfile),
- when=flags.OPTIONS.log_interval_unit,
- interval=flags.OPTIONS.log_interval,
- backupCount=flags.OPTIONS.log_backup_count)
- else:
- if not logfile:
- handler = logging.StreamHandler(sys.stderr)
- else:
- handler = logging.handlers.TimedRotatingFileHandler(
- logfile,
- when=flags.OPTIONS.log_interval_unit,
- interval=flags.OPTIONS.log_interval,
- backupCount=flags.OPTIONS.log_backup_count)
-
- if loglevel in LOGLEVEL_MAPPING:
- logger.setLevel(LOGLEVEL_MAPPING[loglevel])
- handler.setLevel(LOGLEVEL_MAPPING[loglevel])
-
- formatter = logging.Formatter(
- flags.OPTIONS.log_format)
-
- handler.setFormatter(formatter)
- logger.addHandler(handler)
diff --git a/dashboard/backend/dovetail/utils/setting_wrapper.py b/dashboard/backend/dovetail/utils/setting_wrapper.py
deleted file mode 100755
index bb390ada..00000000
--- a/dashboard/backend/dovetail/utils/setting_wrapper.py
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-DEFAULT_LOGLEVEL = 'debug'
-DEFAULT_LOGDIR = '/var/log/dovetail/'
-DEFAULT_LOGINTERVAL = 30
-DEFAULT_LOGINTERVAL_UNIT = 'M'
-DEFAULT_LOGFORMAT = (
- '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s')
-DEFAULT_LOGBACKUPCOUNT = 10
-WEB_LOGFILE = 'dovetail_web.log'
diff --git a/dashboard/backend/dovetail/utils/util.py b/dashboard/backend/dovetail/utils/util.py
deleted file mode 100755
index bfd257d7..00000000
--- a/dashboard/backend/dovetail/utils/util.py
+++ /dev/null
@@ -1,71 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import datetime
-import re
-import sys
-
-
-def format_datetime(date_time):
- """Generate string from datetime object."""
- return date_time.strftime("%Y-%m-%d %H:%M:%S")
-
-
-def parse_time_interval(time_interval_str):
- """parse string of time interval to time interval.
-
- supported time interval unit: ['d', 'w', 'h', 'm', 's']
- Examples:
- time_interval_str: '3d 2h' time interval to 3 days and 2 hours.
- """
- if not time_interval_str:
- return 0
-
- time_interval_tuple = [
- time_interval_element
- for time_interval_element in time_interval_str.split(' ')
- if time_interval_element
- ]
- time_interval_dict = {}
- time_interval_unit_mapping = {
- 'd': 'days',
- 'w': 'weeks',
- 'h': 'hours',
- 'm': 'minutes',
- 's': 'seconds'
- }
- for time_interval_element in time_interval_tuple:
- mat = re.match(r'^([+-]?\d+)(w|d|h|m|s).*', time_interval_element)
- if not mat:
- continue
-
- time_interval_value = int(mat.group(1))
- time_interval_unit = time_interval_unit_mapping[mat.group(2)]
- time_interval_dict[time_interval_unit] = (
- time_interval_dict.get(time_interval_unit, 0) + time_interval_value
- )
-
- time_interval = datetime.timedelta(**time_interval_dict)
- if sys.version_info[0:2] > (2, 6):
- return time_interval.total_seconds()
- else:
- return (
- time_interval.microseconds + (
- time_interval.seconds + time_interval.days * 24 * 3600
- ) * 1e6
- ) / 1e6
-
-
-def pretty_print(*contents):
- """pretty print contents."""
- if len(contents) == 0:
- print ""
- else:
- print "\n".join(content for content in contents)
diff --git a/dashboard/backend/install_db.py b/dashboard/backend/install_db.py
deleted file mode 100755
index d37a4099..00000000
--- a/dashboard/backend/install_db.py
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# create db in new env
-from dovetail.utils import flags
-from dovetail.utils import logsetting
-from dovetail.utils import setting_wrapper as setting
-
-from flask_script import Manager
-
-from dovetail.db import database
-from dovetail.api.api import app
-
-import os
-
-app_manager = Manager(app, usage="Perform database operations")
-
-# flags.init()
-curr_path = os.path.dirname(os.path.abspath(__file__))
-logdir = os.path.join(curr_path, 'log')
-if not os.path.exists(logdir):
- os.makedirs(logdir)
-
-flags.OPTIONS.logdir = logdir
-flags.OPTIONS.logfile = setting.WEB_LOGFILE
-logsetting.init()
-
-
-@app_manager.command
-def createdb():
- """Creates database from sqlalchemy models."""
- database.init()
- try:
- database.drop_db()
- except Exception:
- pass
-
- database.create_db()
-
-
-@app_manager.command
-def dropdb():
- """Drops database from sqlalchemy models."""
- database.init()
- database.drop_db()
-
-
-if __name__ == "__main__":
- app_manager.run()
diff --git a/dashboard/backend/wsgi.py b/dashboard/backend/wsgi.py
deleted file mode 100755
index 088299d7..00000000
--- a/dashboard/backend/wsgi.py
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from dovetail.utils import flags
-from dovetail.utils import logsetting
-from dovetail.utils import setting_wrapper as setting
-
-from dovetail.api.api import app
-
-import os
-import logging
-
-gunicorn_error_logger = logging.getLogger('gunicorn.error')
-app.logger.handlers.extend(gunicorn_error_logger.handlers)
-app.logger.setLevel(logging.DEBUG)
-
-# flags.init()
-# logdir = setting.DEFAULT_LOGDIR
-curr_path = os.path.dirname(os.path.abspath(__file__))
-logdir = os.path.join(curr_path, 'log')
-if not os.path.exists(logdir):
- os.makedirs(logdir)
-
-flags.OPTIONS.logdir = logdir
-flags.OPTIONS.logfile = setting.WEB_LOGFILE
-logsetting.init()
-
-
-if __name__ == "__main__":
- app.run()
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 0401af71..c4b11213 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -2,12 +2,17 @@ FROM ubuntu:14.04
MAINTAINER Leo Wang <grakiss.wanglei@huawei.com>
LABEL version="0.1" description="OPNFV Dovetail Docker Container"
+ARG BRANCH=master
+
RUN \
apt-get update \
&& \
apt-get install -y \
+ build-essential \
gcc \
git \
+ libssl-dev \
+ libffi-dev \
vim \
python-dev \
python-mock \
@@ -18,24 +23,26 @@ RUN \
&& \
apt-get update
-RUN wget -qO- https://get.docker.com/ | sh
+RUN wget -qO- https://get.docker.com/ \
+| \
+ sed 's/-q docker-ce/-q docker-ce=17.03.0~ce-0~ubuntu-trusty/' \
+| \
+ sed 's/edge/stable/' \
+| \
+ sh
ENV HOME /home/opnfv
ENV REPOS_DIR ${HOME}/dovetail
WORKDIR /home/opnfv
RUN \
- git config --global http.sslVerify false \
-&& \
- git clone https://git.opnfv.org/dovetail ${REPOS_DIR} \
+ mkdir -p ${REPOS_DIR} \
&& \
- pip install -U pip \
-&& \
- pip install -r ${REPOS_DIR}/requirements.txt \
+ git config --global http.sslVerify false \
&& \
- cd ${REPOS_DIR} \
+ pip install git+https://git.opnfv.org/dovetail@$BRANCH#egg=dovetail \
&& \
- pip install -e .
+ ln -s /usr/local/lib/python2.7/dist-packages/dovetail ${REPOS_DIR}/dovetail
WORKDIR ${REPOS_DIR}/dovetail
diff --git a/docs/images/dovetail_offline_mode.png b/docs/images/dovetail_offline_mode.png
new file mode 100644
index 00000000..520108cc
--- /dev/null
+++ b/docs/images/dovetail_offline_mode.png
Binary files differ
diff --git a/docs/images/dovetail_online_mode.png b/docs/images/dovetail_online_mode.png
new file mode 100644
index 00000000..e150d9ff
--- /dev/null
+++ b/docs/images/dovetail_online_mode.png
Binary files differ
diff --git a/docs/testing/developer/testscope/index.rst b/docs/testing/developer/testscope/index.rst
index ffa91fd1..09901333 100644
--- a/docs/testing/developer/testscope/index.rst
+++ b/docs/testing/developer/testscope/index.rst
@@ -1,13 +1,13 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) Ericsson AB
+.. (c) OPNFV
=======================================================
Compliance and Verification program accepted test cases
=======================================================
-.. toctree::
- :maxdepth: 2
+ .. toctree::
+ :maxdepth: 2
Mandatory CVP Test Areas
@@ -19,105 +19,112 @@ Test Area VIM Operations - Compute
Image operations within the Compute API
---------------------------------------
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name
+
+| tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image
+| tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name
Basic support Compute API for server actions such as reboot, rebuild, resize
----------------------------------------------------------------------------
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions
+
+| tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action
+| tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions
Generate, import, and delete SSH keys within Compute services
-------------------------------------------------------------
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair
+
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair
List supported versions of the Compute API
------------------------------------------
-tempest.api.compute.test_versions.TestVersions.test_list_api_versions
+
+| tempest.api.compute.test_versions.TestVersions.test_list_api_versions
Quotas management in Compute API
--------------------------------
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas
+
+| tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas
+| tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas
Basic server operations in the Compute API
------------------------------------------
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details
+
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server
+| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank
+| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
+| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server
+| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details
Retrieve volume information through the Compute API
---------------------------------------------------
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments
+
+| tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume
+| tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments
@@ -127,15 +134,16 @@ Test Area VIM Operations - Identity
API discovery operations within the Identity v3 API
---------------------------------------------------
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses
+
+| tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types
+| tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources
+| tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses
Auth operations within the Identity API
---------------------------------------
-tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token
+| tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token
--------------------------------
@@ -144,42 +152,47 @@ Test Area VIM Operations - Image
Image deletion tests using the Glance v2 API
--------------------------------------------
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag
+
+| tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image
+| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id
+| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image
+| tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag
Image get tests using the Glance v2 API
---------------------------------------
-tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema
-tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image
+
+| tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema
+| tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema
+| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image
+| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id
+| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image
CRUD image operations in Images API v2
--------------------------------------
-tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params
+
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params
Image list tests using the Glance v2 API
----------------------------------------
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility
+
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status
+| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility
Image update tests using the Glance v2 API
------------------------------------------
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image
-tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image
+
+| tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image
+| tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image
+| tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image
----------------------------------
@@ -189,56 +202,57 @@ Test Area VIM Operations - Network
Basic CRUD operations on L2 networks and L2 network ports
---------------------------------------------------------
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway
-tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet
-tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet
-tempest.api.network.test_networks.NetworksTest.test_list_networks
-tempest.api.network.test_networks.NetworksTest.test_list_networks_fields
-tempest.api.network.test_networks.NetworksTest.test_list_subnets
-tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields
-tempest.api.network.test_networks.NetworksTest.test_show_network
-tempest.api.network.test_networks.NetworksTest.test_show_network_fields
-tempest.api.network.test_networks.NetworksTest.test_show_subnet
-tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields
-tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp
-tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port
-tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools
-tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields
-tempest.api.network.test_ports.PortsTestJSON.test_show_port
-tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields
-tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
-tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers
+| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway
+| tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet
+| tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet
+| tempest.api.network.test_networks.NetworksTest.test_list_networks
+| tempest.api.network.test_networks.NetworksTest.test_list_networks_fields
+| tempest.api.network.test_networks.NetworksTest.test_list_subnets
+| tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields
+| tempest.api.network.test_networks.NetworksTest.test_show_network
+| tempest.api.network.test_networks.NetworksTest.test_show_network_fields
+| tempest.api.network.test_networks.NetworksTest.test_show_subnet
+| tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields
+| tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp
+| tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port
+| tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools
+| tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port
+| tempest.api.network.test_ports.PortsTestJSON.test_list_ports
+| tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields
+| tempest.api.network.test_ports.PortsTestJSON.test_show_port
+| tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields
+| tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
+| tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
Basic CRUD operations on security groups
----------------------------------------
-tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix
-tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule
-tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule
+
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix
+| tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule
+| tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group
+| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule
---------------------------------
@@ -247,117 +261,300 @@ Test Area VIM Operations - Volume
Volume attach and detach operations with the Cinder v2 API
----------------------------------------------------------
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_get_volume_attachment
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_attach_volumes_with_nonexistent_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_detach_volumes_with_invalid_volume_id
+
+| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance
+| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_get_volume_attachment
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_attach_volumes_with_nonexistent_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_detach_volumes_with_invalid_volume_id
Volume service availability zone operations with the Cinder v2 API
------------------------------------------------------------------
-tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list
+
+| tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list
Volume cloning operations with the Cinder v2 API
------------------------------------------------
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone
+
+| tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone
Image copy-to-volume operations with the Cinder v2 API
------------------------------------------------------
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image
+
+| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable
+| tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image
Volume creation and deletion operations with the Cinder v2 API
--------------------------------------------------------------
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero
+
+| tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero
Volume service extension listing operations with the Cinder v2 API
------------------------------------------------------------------
-tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions
+
+| tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions
Volume GET operations with the Cinder v2 API
--------------------------------------------
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id
+
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id
+
Volume listing operations with the Cinder v2 API
------------------------------------------------
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination
+
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone
+| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name
+| tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination
+| tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params
+| tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination
Volume metadata operations with the Cinder v2 API
-------------------------------------------------
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item
+| tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata
+| tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item
Verification of read-only status on volumes with the Cinder v2 API
------------------------------------------------------------------
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update
+
+| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update
Volume reservation operations with the Cinder v2 API
----------------------------------------------------
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id
+
+| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id
Volume snapshot creation/deletion operations with the Cinder v2 API
-------------------------------------------------------------------
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id
+
+| tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata
+| tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id
+| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete
+| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot
+| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params
+| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params
+| tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id
+| tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id
Volume update operations with the Cinder v2 API
-----------------------------------------------
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id
+
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id
+| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id
+
+
+---------------------------
+Test Area High Availability
+---------------------------
+
+Verify high availability of OpenStack controller services
+----------------------------------------------------------
+
+| opnfv.ha.tc001.nova-api_service_down
+| opnfv.ha.tc003.neutron-server_service_down
+| opnfv.ha.tc004.keystone_service_down
+| opnfv.ha.tc005.glance-api_service_down
+| opnfv.ha.tc006.cinder-api_service_down
+| opnfv.ha.tc009.cpu_overload
+| opnfv.ha.tc010.disk_I/O_block
+| opnfv.ha.tc011.load_balance_service_down
+
+----------------------------------------
+Test Area vPing - Basic VNF Connectivity
+----------------------------------------
+
+| opnfv.vping.userdata
+| opnfv.vping.ssh
Optional CVP Test Areas
========================
+
+-----------------
+Test Area BGP VPN
+-----------------
+
+Verify association and disassociation of a node using route targets
+--------------------------------------------------------------------
+
+| opnfv.sdnvpn.subnet_connectivity
+| opnfv.sdnvpn.tenant_separation
+| opnfv.sdnvpn.router_association
+| opnfv.sdnvpn.router_association_floating_ip
+
+--------------------------------------------------
+IPv6 Compliance Testing Methodology and Test Cases
+--------------------------------------------------
+
+Test Case 1: Create and Delete an IPv6 Network, Port and Subnet
+---------------------------------------------------------------
+
+| tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
+| tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port
+| tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet
+
+Test Case 2: Create, Update and Delete an IPv6 Network and Subnet
+-----------------------------------------------------------------
+
+| tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet
+
+Test Case 3: Check External Network Visibility
+----------------------------------------------
+
+| tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility
+
+Test Case 4: List IPv6 Networks and Subnets of a Tenant
+-------------------------------------------------------
+
+| tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks
+| tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets
+
+Test Case 5: Show Information of an IPv6 Network and Subnet
+-----------------------------------------------------------
+
+| tempest.api.network.test_networks.NetworksIpV6Test.test_show_network
+| tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet
+
+Test Case 6: Create an IPv6 Port in Allowed Allocation Pools
+------------------------------------------------------------
+
+| tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools
+
+Test Case 7: Create an IPv6 Port without Security Groups
+--------------------------------------------------------
+
+| tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups
+
+Test Case 8: Create, Update and Delete an IPv6 Port
+---------------------------------------------------
+
+| tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port
+
+Test Case 9: List IPv6 Ports of a Tenant
+----------------------------------------
+
+| tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports
+
+Test Case 10: Show Information of an IPv6 Port
+----------------------------------------------
+
+| tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
+
+Test Case 11: Add Multiple Interfaces for an IPv6 Router
+--------------------------------------------------------
+
+| tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
+
+Test Case 12: Add and Remove an IPv6 Router Interface with port_id
+------------------------------------------------------------------
+
+| tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id
+
+Test Case 13: Add and Remove an IPv6 Router Interface with subnet_id
+--------------------------------------------------------------------
+
+| tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id
+
+Test Case 14: Create, Update, Delete, List and Show an IPv6 Router
+------------------------------------------------------------------
+
+| tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router
+
+Test Case 15: Create, Update, Delete, List and Show an IPv6 Security Group
+--------------------------------------------------------------------------
+
+| tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group
+
+Test Case 16: Create, Delete and Show Security Group Rules
+----------------------------------------------------------
+
+| tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule
+
+Test Case 17: List All Security Groups
+--------------------------------------
+
+| tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups
+
+Test Case 18: IPv6 Address Assignment - Dual Stack, SLAAC, DHCPv6 Stateless
+---------------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os
+
+Test Case 19: IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC, DHCPv6 Stateless
+-------------------------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os
+
+Test Case 20: IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless
+----------------------------------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless
+
+Test Case 21: IPv6 Address Assignment - Dual Net, Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless
+--------------------------------------------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless
+
+Test Case 22: IPv6 Address Assignment - Dual Stack, SLAAC
+---------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os
+
+Test Case 23: IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC
+-------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os
+
+Test Case 24: IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC
+----------------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac
+
+Test Case 25: IPv6 Address Assignment - Dual Net, Dual Stack, Multiple Prefixes, SLAAC
+--------------------------------------------------------------------------------------
+
+| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac
+
diff --git a/docs/testing/user/testspecification/highavailability/index.rst b/docs/testing/user/testspecification/highavailability/index.rst
index e69de29b..715f84d0 100644
--- a/docs/testing/user/testspecification/highavailability/index.rst
+++ b/docs/testing/user/testspecification/highavailability/index.rst
@@ -0,0 +1,743 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, China Mobile and others.
+
+==========================================
+OpenStack Services HA test specification
+==========================================
+
+.. toctree::
+   :maxdepth: 2
+
+Scope
+=====
+
+The HA test area evaluates the ability of the System Under Test to support
+service continuity and recovery from component failures of OpenStack
+controller services ("nova-api", "neutron-server", "keystone", "glance-api",
+"cinder-api") and of the "load balancer" service.
+
+The tests in this test area emulate component failures by killing the
+processes of the above target services, stressing the CPU load or blocking
+disk I/O on the selected controller node, and then check whether the impacted
+services are still available and whether the killed processes are recovered on
+the selected controller node within a given time interval.
+
+
+References
+================
+
+This test area references the following specifications:
+
+- ETSI GS NFV-REL 001
+
+ - http://www.etsi.org/deliver/etsi_gs/NFV-REL/001_099/001/01.01.01_60/gs_nfv-rel001v010101p.pdf
+
+- OpenStack High Availability Guide
+
+ - https://docs.openstack.org/ha-guide/
+
+
+Definitions and abbreviations
+=============================
+
+The following terms and abbreviations are used in conjunction with this test area:
+
+- SUT - system under test
+- Monitor - tools used to measure the service outage time and the process
+ outage time
+- Service outage time - the outage time (seconds) of the specific OpenStack
+ service
+- Process outage time - the outage time (seconds) from the specific processes
+ being killed to recovered
+
+
+System Under Test (SUT)
+=======================
+
+The system under test is assumed to be the NFVi and VIM in operation on a
+Pharos compliant infrastructure.
+
+The SUT is assumed to be in a high availability configuration, which typically means
+that more than one controller node is present in the System Under Test.
+
+Test Area Structure
+====================
+
+The HA test area is structured with the following test cases in a sequential
+manner.
+
+Each test case is able to run independently; the failure of a preceding test case
+does not affect the subsequent test cases.
+
+Preconditions of each test case will be described in the following test
+descriptions.
+
+
+Test Descriptions
+=================
+
+---------------------------------------------------------------
+Test Case 1 - Controller node OpenStack service down - nova-api
+---------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc001.nova-api_service_down
+
+Use case specification
+----------------------
+
+This test case verifies the service continuity capability in the face of a
+software process failure. It kills the processes of the OpenStack "nova-api"
+service on the selected controller node, then checks whether the "nova-api"
+service is still available during the failure, by creating a VM and then deleting
+it, and checks whether the killed processes are recovered within a given
+time interval.
+
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "nova-api" service
+for the API end-point.
+One of these controller nodes is denoted as Node1 in the following.
+
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying service continuity and recovery
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The service continuity and process recovery capabilities of the "nova-api" service
+are evaluated by monitoring service outage time, process outage time, and the results
+of nova operations.
+
+Service outage time is measured by continuously executing the "openstack server list"
+command in a loop and checking whether each response is returned
+without failure.
+When a response fails, the "nova-api" service is considered to be in outage.
+The time between the first response failure and the last response failure is
+taken as the service outage time.
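+
+A minimal sketch of such a service monitor, assuming the OpenStack CLI, "date" and
+"bc" are available and admin credentials are sourced (the polling interval and the
+stop file are illustrative choices, not part of this specification)::
+
+    #!/bin/bash
+    # Poll the nova-api endpoint; record the first and last failure timestamps.
+    first_fail=""
+    last_fail=""
+    while [ ! -f /tmp/stop_monitor ]; do
+        if ! openstack server list > /dev/null 2>&1; then
+            now=$(date +%s.%N)
+            [ -z "$first_fail" ] && first_fail="$now"
+            last_fail="$now"
+        fi
+        sleep 0.1
+    done
+    # Service outage time: seconds between the first and the last observed failure.
+    if [ -n "$first_fail" ]; then
+        echo "service outage time: $(echo "$last_fail - $first_fail" | bc) s"
+    fi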
+
+Process outage time is measured by checking the status of the "nova-api" processes on
+the selected controller node. The time from the "nova-api" processes being killed to
+the time they are recovered is the process outage time.
+Process recovery is verified by checking the existence of the "nova-api" processes.
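+
+A corresponding sketch of the process monitor, assuming SSH access to the selected
+controller node (the "node1" host alias, the polling interval and the stop file are
+placeholders)::
+
+    #!/bin/bash
+    # Watch the nova-api processes on Node1 and record kill/recovery timestamps.
+    killed_at=""
+    recovered_at=""
+    while [ ! -f /tmp/stop_monitor ]; do
+        if ssh node1 "pgrep -f nova-api > /dev/null"; then
+            if [ -n "$killed_at" ] && [ -z "$recovered_at" ]; then
+                recovered_at=$(date +%s)
+            fi
+        else
+            [ -z "$killed_at" ] && killed_at=$(date +%s)
+        fi
+        sleep 1
+    done
+    # Process outage time: recovered_at - killed_at (must stay below 30 seconds).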
+
+If all nova operations are carried out correctly within a given time interval, the
+"nova-api" service is considered to be continuously available.
+
+Test execution
+''''''''''''''
+* Test action 1: Connect to Node1 through SSH, and check that "nova-api"
+ processes are running on Node1
+* Test action 2: Create an image with "openstack image create test-cirros
+ --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare"
+* Test action 3: Execute"openstack flavor create m1.test --id auto --ram 512
+ --disk 1 --vcpus 1" to create flavor "m1.test".
+* Test action 4: Start two monitors: one for "nova-api" processes and the other
+ for "openstack server list" command.
+ Each monitor will run as an independent process
+* Test action 5: Connect to Node1 through SSH, and then kill the "nova-api"
+ processes
+* Test action 6: When "openstack server list" returns with no error, calculate
+ the service outage time, and execute command "openstack server create
+ --flavor m1.test --image test-cirros test-instance"
+* Test action 7: Continuously execute "openstack server show test-instance"
+ to check if the status of VM "test-instance" is "Active"
+* Test action 8: If VM "test-instance" is "Active", execute "openstack server
+ delete test-instance", then execute "openstack server list" to check if the
+ VM is not in the list
+* Test action 9: Continuously measure process outage time from the monitor until
+ the process outage time is more than 30s
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The process outage time is less than 30s.
+
+The service outage time is less than 5s.
+
+The nova operations are carried out in the above order and no errors occur.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Restart the "nova-api" processes if they are not running.
+
+Delete the image with "openstack image delete test-cirros".
+
+Delete the flavor with "openstack flavor delete m1.test".
+
+
+---------------------------------------------------------------------
+Test Case 2 - Controller node OpenStack service down - neutron-server
+---------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc002.neutron-server_service_down
+
+Use case specification
+----------------------
+
+This test verifies the high availability of the "neutron-server" service
+provided by OpenStack controller nodes. It kills the processes of OpenStack
+"neutron-server" service on the selected controller node, then checks whether
+the "neutron-server" service is still available, by creating a network and
+deleting the network, and checks whether the killed processes are recovered.
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "neutron-server" service
+for the API end-point.
+One of these controller nodes is denoted as Node1 in the following.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the "neutron-server" service is evaluated by monitoring
+service outage time, process outage time, and the results of neutron operations.
+
+Service outage time is measured by continuously executing the "openstack router list"
+command in a loop and checking whether each response is returned
+without failure.
+When a response fails, the "neutron-server" service is considered to be in outage.
+The time between the first response failure and the last response failure is
+taken as the service outage time.
+
+Process outage time is measured by checking the status of the "neutron-server"
+processes on the selected controller node. The time from the "neutron-server"
+processes being killed to the time they are
+recovered is the process outage time. Process recovery is verified by checking
+the existence of the "neutron-server" processes.
+
+Test execution
+''''''''''''''
+
+* Test action 1: Connect to Node1 through SSH, and check that "neutron-server"
+ processes are running on Node1
+* Test action 2: Start two monitors: one for "neutron-server" process and the
+ other for "openstack router list" command.
+ Each monitor will run as an independent process.
+* Test action 3: Connect to Node1 through SSH, and then kill the
+ "neutron-server" processes
+* Test action 4: When "openstack router list" returns with no error, calculate
+ the service outage time, and execute "openstack network create test-network"
+* Test action 5: Continuously execute "openstack network show test-network"
+  to check if the status of "test-network" is "Active"
+* Test action 6: If "test-network" is "Active", execute "openstack network
+ delete test-network", then execute "openstack network list" to check if the
+ "test-network" is not in the list
+* Test action 7: Continuously measure process outage time from the monitor until
+ the process outage time is more than 30s
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The process outage time is less than 30s.
+
+The service outage time is less than 5s.
+
+The neutron operations are carried out in the above order and no errors occur.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Restart the processes of "neutron-server" if they are not running.
+
+
+---------------------------------------------------------------
+Test Case 3 - Controller node OpenStack service down - keystone
+---------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc003.keystone_service_down
+
+Use case specification
+----------------------
+
+This test verifies the high availability of the "keystone" service provided by
+OpenStack controller nodes. It kills the processes of OpenStack "keystone"
+service on the selected controller node, then checks whether the "keystone"
+service is still available by executing command "openstack user list" and
+whether the killed processes are recovered.
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "keystone" service
+for the API end-point.
+One of these controller nodes is denoted as Node1 in the following.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the "keystone" service is evaluated by monitoring service
+outage time and process outage time.
+
+Service outage time is measured by continuously executing the "openstack user list"
+command in a loop and checking whether each response is returned
+without failure.
+When a response fails, the "keystone" service is considered to be in outage.
+The time between the first response failure and the last response failure is
+taken as the service outage time.
+
+Process outage time is measured by checking the status of the "keystone" processes on
+the selected controller node. The time from the "keystone" processes being killed to
+the time they are recovered is the process outage
+time. Process recovery is verified by checking the existence of the "keystone"
+processes.
+
+Test execution
+''''''''''''''
+
+* Test action 1: Connect to Node1 through SSH, and check that "keystone"
+ processes are running on Node1
+* Test action 2: Start two monitors: one for "keystone" process and the other
+ for "openstack user list" command.
+ Each monitor will run as an independent process.
+* Test action 3: Connect to Node1 through SSH, and then kill the "keystone"
+ processes
+* Test action 4: Calculate the service outage time and process outage time
+* Test action 5: The test passes if process outage time is less than 20s and
+ service outage time is less than 5s
+* Test action 6: Continuously measure process outage time from the monitor until
+ the process outage time is more than 30s
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The process outage time is less than 30s.
+
+The service outage time is less than 5s.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Restart the processes of "keystone" if they are not running.
+
+
+-----------------------------------------------------------------
+Test Case 4 - Controller node OpenStack service down - glance-api
+-----------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc004.glance-api_service_down
+
+Use case specification
+----------------------
+
+This test verifies the high availability of the "glance-api" service provided
+by the OpenStack controller nodes. It kills the processes of the OpenStack "glance-api"
+service on the selected controller node, then checks whether the "glance-api"
+service is still available by creating and deleting an image, and checks
+whether the killed processes are recovered.
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "glance-api" service
+for the API end-point.
+One of these controller nodes is denoted as Node1 in the following.
+
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the "glance-api" service is evaluated by monitoring
+service outage time, process outage time, and the results of glance operations.
+
+Service outage time is measured by continuously executing the "openstack image list"
+command in a loop and checking whether each response is returned
+without failure.
+When a response fails, the "glance-api" service is considered to be in outage.
+The time between the first response failure and the last response failure is
+taken as the service outage time.
+
+Process outage time is measured by checking the status of the "glance-api" processes
+on the selected controller node. The time from the "glance-api" processes being
+killed to the time they are recovered is the process
+outage time. Process recovery is verified by checking the existence of the
+"glance-api" processes.
+
+Test execution
+''''''''''''''
+
+* Test action 1: Connect to Node1 through SSH, and check that "glance-api"
+ processes are running on Node1
+* Test action 2: Start two monitors: one for "glance-api" process and the other
+ for "openstack image list" command.
+ Each monitor will run as an independent process.
+* Test action 3: Connect to Node1 through SSH, and then kill the "glance-api"
+ processes
+* Test action 4: When "openstack image list" returns with no error, calculate
+ the service outage time, and execute "openstack image create test-image
+ --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare"
+* Test action 5: Continuously execute "openstack image show test-image", check
+ if status of "test-image" is "active"
+* Test action 6: If "test-image" is "active", execute "openstack image delete
+ test-image". Then execute "openstack image list" to check if "test-image" is
+ not in the list
+* Test action 7: Continuously measure process outage time from the monitor until
+ the process outage time is more than 30s
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The process outage time is less than 30s.
+
+The service outage time is less than 5s.
+
+The glance operations are carried out in the above order and no errors occur.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Restart the processes of "glance-api" if they are not running.
+
+Delete image with "openstack image delete test-image".
+
+
+-----------------------------------------------------------------
+Test Case 5 - Controller node OpenStack service down - cinder-api
+-----------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc005.cinder-api_service_down
+
+Use case specification
+----------------------
+
+This test verifies the high availability of the "cinder-api" service provided
+by OpenStack controller nodes. It kills the processes of OpenStack "cinder-api"
+service on the selected controller node, then checks whether the "cinder-api"
+service is still available by executing command "openstack volume list" and
+whether the killed processes are recovered.
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "cinder-api" service
+for the API end-point.
+One of these controller nodes is denoted as Node1 in the following.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the "cinder-api" service is evaluated by monitoring
+service outage time and process outage time.
+
+Service outage time is measured by continuously executing the "openstack volume list"
+command in a loop and checking whether each response is returned
+without failure.
+When a response fails, the "cinder-api" service is considered to be in outage.
+The time between the first response failure and the last response failure is
+taken as the service outage time.
+
+Process outage time is measured by checking the status of the "cinder-api" processes
+on the selected controller node. The time from the "cinder-api" processes being
+killed to the time they are recovered is the process
+outage time. Process recovery is verified by checking the existence of the
+"cinder-api" processes.
+
+Test execution
+''''''''''''''
+
+* Test action 1: Connect to Node1 through SSH, and check that "cinder-api"
+ processes are running on Node1
+* Test action 2: Start two monitors: one for "cinder-api" process and the other
+ for "openstack volume list" command.
+ Each monitor will run as an independent process.
+* Test action 3: Connect to Node1 through SSH, and then kill the
+  "cinder-api" processes
+* Test action 4: Continuously measure service outage time from the monitor until
+ the service outage time is more than 5s
+* Test action 5: Continuously measure process outage time from the monitor until
+ the process outage time is more than 30s
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The process outage time is less than 30s.
+
+The service outage time is less than 5s.
+
+The cinder operations are carried out in the above order and no errors occur.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Restart the processes of "cinder-api" if they are not running.
+
+
+------------------------------------------------------------
+Test Case 6 - Controller Node CPU Overload High Availability
+------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc006.cpu_overload
+
+Use case specification
+----------------------
+
+This test verifies the availability of services when one of the controller nodes
+suffers from heavy CPU overload. When the CPU usage of the specified controller
+node reaches 100%, which may break down the OpenStack services on this node,
+the OpenStack services should continue to be available. This test case stresses
+the CPU usage of a specific controller node to 100%, then checks whether all
+services provided by the SUT are still available using the monitor tools.
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "cinder-api",
+"neutron-server", "glance-api" and "keystone" services for the API end-point.
+One of these controller nodes is denoted as Node1 in the following.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the related OpenStack services is evaluated by monitoring service
+outage time.
+
+Service outage time is measured by continuously executing the "openstack router list",
+"openstack stack list", "openstack volume list" and "openstack image list" commands
+in a loop and checking whether each response is returned without
+failure.
+When a response fails, the related service is considered to be in outage. The time
+between the first response failure and the last response failure is taken
+as the service outage time.
+
+
+Methodology for stressing CPU usage
+'''''''''''''''''''''''''''''''''''
+
+To evaluate the high availability of the target OpenStack services under heavy CPU
+load, the test case first gets the number of logical CPU cores on the
+target controller node with a shell command, then launches that number of 'dd'
+commands, each continuously copying from /dev/zero to /dev/null in a loop.
+The 'dd' operation uses only CPU and performs no disk I/O, which makes it
+suitable for stressing CPU usage.
+
+Since the 'dd' commands run continuously, the scheduler places each 'dd'
+process on a different logical CPU core, eventually driving the usage of all
+logical CPU cores to 100%.
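+
+A minimal sketch of this CPU stress, assuming shell access to the target node and
+the standard "nproc", "seq" and "dd" utilities (the commands below illustrate the
+described method, not a fixed implementation)::
+
+    #!/bin/bash
+    # Spawn one CPU-bound 'dd' per logical core to drive total CPU usage to 100%.
+    cores=$(nproc)
+    for i in $(seq 1 "$cores"); do
+        dd if=/dev/zero of=/dev/null &
+    done
+    echo "started $cores dd workers"
+    # To stop the stress afterwards:
+    #   pkill -f "dd if=/dev/zero of=/dev/null"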
+
+Test execution
+''''''''''''''
+
+* Test action 1: Start four monitors: one for "openstack image list" command,
+ one for "openstack router list" command, one for "openstack stack list"
+ command and the last one for "openstack volume list" command. Each monitor
+ will run as an independent process.
+* Test action 2: Connect to Node1 through SSH, and then stress all logical CPU
+ cores usage rate to 100%
+* Test action 3: Continuously measure all the service outage times until they are
+ more than 5s
+* Test action 4: Kill the process that stresses the CPU usage
+
+Pass / fail criteria
+''''''''''''''''''''
+
+All the service outage times are less than 5s.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+No impact on the SUT.
+
+
+-----------------------------------------------------------------
+Test Case 7 - Controller Node Disk I/O Overload High Availability
+-----------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc007.disk_I/O_overload
+
+Use case specification
+----------------------
+
+This test verifies the high availability of the controller node. When the disk I/O
+of the specific disk is overloaded, which may break down the OpenStack services on
+this node, the read and write services should continue to be available. This test
+case blocks the disk I/O of the specific controller node, then checks whether
+the services that need to read or write the disk of the controller node are still
+available using the monitor tools.
+
+Test preconditions
+------------------
+
+There is more than one controller node.
+One of these controller nodes is denoted as Node1 in the following.
+Node1 has at least 20GB of free disk space.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the nova service is evaluated by monitoring
+service outage time.
+
+Service availability is measured by continuously executing the
+"openstack flavor list" command in a loop and checking whether each
+response is returned without failure.
+When a response fails, the related service is considered to be in outage.
+
+
+Methodology for stressing disk I/O
+''''''''''''''''''''''''''''''''''
+
+To evaluate the high availability of the target OpenStack service under heavy I/O
+load, the test case executes a shell command on the selected controller node
+to continuously write 8kb blocks to /test.dbf.
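+
+A minimal sketch of this disk I/O stress, assuming shell access to the node (the
+block count is an illustrative bound that keeps each pass around 4GB, well within
+the 20GB free-space precondition; the stop file is a placeholder)::
+
+    #!/bin/bash
+    # Keep the disk busy by repeatedly writing 8kb blocks to /test.dbf.
+    while [ ! -f /tmp/stop_io_stress ]; do
+        dd if=/dev/zero of=/test.dbf bs=8k count=500000 conv=fsync
+    done
+    # Cleanup afterwards: rm -f /test.dbf /tmp/stop_io_stress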
+
+Test execution
+''''''''''''''
+
+* Test action 1: Connect to Node1 through SSH, and then stress disk I/O by
+ continuously writing 8kb blocks to /test.dbf
+* Test action 2: Start a monitor for the "openstack flavor list" command
+* Test action 3: Create a flavor called "test-001"
+* Test action 4: Check whether the flavor "test-001" is created
+* Test action 5: Continuously measure service outage time from the monitor
+ until the service outage time is more than 5s
+* Test action 6: Stop writing to /test.dbf and delete file /test.dbf
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The service outage time is less than 5s.
+
+The nova operations are carried out in the above order and no errors occur.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Delete flavor with "openstack flavor delete test-001".
+
+--------------------------------------------------------------------
+Test Case 8 - Controller Load Balance as a Service High Availability
+--------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ha.tc008.load_balance_service_down
+
+Use case specification
+----------------------
+
+This test verifies the high availability of the "load balancer" service. When
+the "load balancer" service on a specified controller node is killed, the test checks
+whether the "load balancer" service on the other controller nodes keeps working and
+whether the selected controller node restarts its "load balancer" service. This
+test case kills the processes of the "load balancer" service on the selected
+controller node, then checks whether the requests of the related OpenStack
+commands are processed with no failure and whether the killed processes are
+recovered.
+
+Test preconditions
+------------------
+
+There is more than one controller node providing the "load balancer" service
+for the REST API. One of these controller nodes is denoted as Node1 in the following.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for monitoring high availability
+''''''''''''''''''''''''''''''''''''''''''''
+
+The high availability of the "load balancer" service is evaluated by monitoring
+service outage time and process outage time.
+
+Service outage time is measured by continuously executing the "openstack image list"
+command in a loop and checking whether each response is returned
+without failure.
+When a response fails, the "load balancer" service is considered to be in outage.
+The time between the first response failure and the last response failure is
+taken as the service outage time.
+
+Process outage time is measured by checking the status of the processes of the
+"load balancer" service on the selected controller node. The time from those processes
+being killed to the time they are recovered is the process
+outage time.
+Process recovery is verified by checking the existence of the processes of the
+"load balancer" service.
+
+Test execution
+''''''''''''''
+
+* Test action 1: Connect to Node1 through SSH, and check that processes of
+ "load balancer" service are running on Node1
+* Test action 2: Start two monitors: one for processes of "load balancer"
+ service and the other for "openstack image list" command. Each monitor will
+ run as an independent process
+* Test action 3: Connect to Node1 through SSH, and then kill the processes of
+ "load balancer" service
+* Test action 4: Continuously measure service outage time from the monitor until
+ the service outage time is more than 5s
+* Test action 5: Continuously measure process outage time from the monitor until
+ the process outage time is more than 30s
+
+Pass / fail criteria
+''''''''''''''''''''
+
+The process outage time is less than 30s.
+
+The service outage time is less than 5s.
+
+A negative result will be generated if the above criteria are not met in full.
+
+Post conditions
+---------------
+
+Restart the processes of "load balancer" if they are not running.
+
+
+
diff --git a/docs/testing/user/testspecification/ipv6/index.rst b/docs/testing/user/testspecification/ipv6/index.rst
new file mode 100644
index 00000000..c3dc844b
--- /dev/null
+++ b/docs/testing/user/testspecification/ipv6/index.rst
@@ -0,0 +1,1787 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV
+
+========================
+IPv6 test specification
+========================
+
+.. toctree::
+ :maxdepth: 2
+
+Scope
+=====
+
+The IPv6 test area will evaluate the ability of the SUT to support IPv6
+Tenant Network features and functionality. The tests in this test area will
+evaluate:
+
+- network, subnet, port, router API CRUD operations
+- interface add and remove operations
+- security group and security group rule API CRUD operations
+- IPv6 address assignment with dual stack, dual net and multiple prefixes, in DHCPv6 stateless or SLAAC mode
+
+References
+================
+
+- Upstream OpenStack API reference
+
+ - http://developer.openstack.org/api-ref
+
+- Upstream OpenStack IPv6 reference
+
+ - https://docs.openstack.org/newton/networking-guide/config-ipv6.html
+
+Definitions and abbreviations
+=============================
+
+The following terms and abbreviations are used in conjunction with this test area
+
+- API - Application Programming Interface
+- CIDR - Classless Inter-Domain Routing
+- CRUD - Create, Read, Update, and Delete
+- DHCP - Dynamic Host Configuration Protocol
+- DHCPv6 - Dynamic Host Configuration Protocol version 6
+- ICMP - Internet Control Message Protocol
+- NFVI - Network Functions Virtualization Infrastructure
+- NIC - Network Interface Controller
+- RA - Router Advertisements
+- radvd - The Router Advertisement Daemon
+- SDN - Software Defined Network
+- SLAAC - Stateless Address Auto Configuration
+- TCP - Transmission Control Protocol
+- UDP - User Datagram Protocol
+- VM - Virtual Machine
+- vNIC - virtual Network Interface Card
+
+System Under Test (SUT)
+=======================
+
+The system under test is assumed to be the NFVI and VIM deployed with a Pharos compliant infrastructure.
+
+Test Area Structure
+====================
+
+The test area is structured based on network, port and subnet operations. Each test case
+is able to run independently, i.e., regardless of the state created by a previous test.
+
+Test Descriptions
+=================
+
+API Used and Reference
+----------------------
+
+Networks: https://developer.openstack.org/api-ref/networking/v2/index.html#networks
+
+- show network details
+- update network
+- delete network
+- list networks
+- create network
+- bulk create networks
+
+Subnets: https://developer.openstack.org/api-ref/networking/v2/index.html#subnets
+
+- list subnets
+- create subnet
+- bulk create subnet
+- show subnet details
+- update subnet
+- delete subnet
+
+Routers and interface: https://developer.openstack.org/api-ref/networking/v2/index.html#routers-routers
+
+- list routers
+- create router
+- show router details
+- update router
+- delete router
+- add interface to router
+- remove interface from router
+
+Ports: https://developer.openstack.org/api-ref/networking/v2/index.html#ports
+
+- show port details
+- update port
+- delete port
+- list port
+- create port
+- bulk create ports
+
+Security groups: https://developer.openstack.org/api-ref/networking/v2/index.html#security-groups-security-groups
+
+- list security groups
+- create security groups
+- show security group
+- update security group
+- delete security group
+
+Security groups rules: https://developer.openstack.org/api-ref/networking/v2/index.html#security-group-rules-security-group-rules
+
+- list security group rules
+- create security group rule
+- show security group rule
+- delete security group rule
+
+Servers: https://developer.openstack.org/api-ref/compute/
+
+- list servers
+- create server
+- create multiple servers
+- list servers detailed
+- show server details
+- update server
+- delete server
+
+------------------------------------------------------------------
+Test Case 1 - Create and Delete Bulk Network, IPv6 Subnet and Port
+------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.bulk_network_subnet_port_create_delete
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to create and delete multiple networks,
+IPv6 subnets and ports in a single request. The references are:
+
+tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
+tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet
+tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port
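+
+Bulk creation is a Networking v2 API feature rather than a single OpenStack CLI
+command; a minimal sketch of such a request, with the endpoint URL, token and
+network names as placeholders, could look like::
+
+    # Create two networks in one request (bulk create).
+    curl -s -X POST "http://neutron.example.com:9696/v2.0/networks" \
+         -H "X-Auth-Token: $TOKEN" \
+         -H "Content-Type: application/json" \
+         -d '{"networks": [{"name": "bulk-net-1"}, {"name": "bulk-net-2"}]}'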
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create 2 networks using bulk create, storing the "id" parameters returned in the response
+* Test action 2: List all networks, verifying the two network id's are found in the list
+* **Test assertion 1:** The two "id" parameters are found in the network list
+* Test action 3: Delete the 2 created networks using the stored network ids
+* Test action 4: List all networks, verifying the network ids are no longer present
+* **Test assertion 2:** The two "id" parameters are not present in the network list
+* Test action 5: Create 2 networks using bulk create, storing the "id" parameters returned in the response
+* Test action 6: Create an IPv6 subnet on each of the two networks using bulk create commands,
+  storing the associated "id" parameters
+* Test action 7: List all subnets, verify the IPv6 subnets are found in the list
+* **Test assertion 3:** The two IPv6 subnet "id" parameters are found in the subnet list
+* Test action 8: Delete the 2 IPv6 subnets using the stored "id" parameters
+* Test action 9: List all subnets, verify the IPv6 subnets are no longer present in the list
+* **Test assertion 4:** The two IPv6 subnet "id" parameters are not present in the list
+* Test action 10: Delete the 2 networks created in test action 5, using the stored network ids
+* Test action 11: List all networks, verifying the network ids are no longer present
+* **Test assertion 5:** The two "id" parameters are not present in the network list
+* Test action 12: Create 2 networks using bulk create, storing the "id" parameters returned in the response
+* Test action 13: Create a port on each of the two networks using bulk create commands,
+ storing the associated "port_id" parameters
+* Test action 14: List all ports, verify the port_ids are found in the list
+* **Test assertion 6:** The two "port_id" parameters are found in the ports list
+* Test action 15: Delete the 2 ports using the stored "port_id" parameters
+* Test action 16: List all ports, verify port_ids are no longer present in the list
+* **Test assertion 7:** The two "port_id" parameters are not present in the list
+* Test action 17: Delete the 2 networks created in test action 12, using the stored network ids
+* Test action 18: List all networks, verifying the network ids are no longer present
+* **Test assertion 8:** The two "id" parameters are not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use bulk create commands to create networks, IPv6 subnets and ports on
+the SUT API. Specifically it verifies that:
+
+* Bulk network create commands return valid "id" parameters which are reported in the list commands
+* Bulk IPv6 subnet commands return valid "id" parameters which are reported in the list commands
+* Bulk port commands return valid "port_id" parameters which are reported in the list commands
+* All items created using bulk create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+N/A
+
+-------------------------------------------------------------------
+Test Case 2 - Create, Update and Delete an IPv6 Network and Subnet
+-------------------------------------------------------------------
+
+Short name
+-----------
+
+opnfv.ipv6.network_subnet_create_update_delete
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to create, update and delete a
+network and an IPv6 subnet on that network. The reference is:
+
+tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet
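+
+For orientation, roughly equivalent operations can be sketched with the OpenStack CLI
+(the names and the subnet range are placeholders; the test itself exercises the API
+through the Tempest client)::
+
+    openstack network create ipv6-net
+    openstack network set --name ipv6-net-renamed ipv6-net
+    openstack subnet create --network ipv6-net-renamed --ip-version 6 \
+        --subnet-range 2001:db8::/64 ipv6-subnet
+    openstack subnet set --name ipv6-subnet-renamed ipv6-subnet
+    openstack subnet delete ipv6-subnet-renamed
+    openstack network delete ipv6-net-renamed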
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" and "status" parameters returned
+ in the response
+* Test action 2: Verify the value of the created network's "status" is ACTIVE
+* **Test assertion 1:** The created network's "status" is ACTIVE
+* Test action 3: Update this network with a new_name
+* Test action 4: Verify the network's name equals the new_name
+* **Test assertion 2:** The network's name equals the new_name after the name update
+* Test action 5: Create an IPv6 subnet within the network, storing the "id" parameters
+ returned in the response
+* Test action 6: Update this IPv6 subnet with a new_name
+* Test action 7: Verify the IPv6 subnet's name equals the new_name
+* **Test assertion 3:** The IPv6 subnet's name equals the new_name after the name update
+* Test action 8: Delete the IPv6 subnet created in test action 5, using the stored subnet id
+* Test action 9: List all subnets, verifying the subnet id is no longer present
+* **Test assertion 4:** The IPv6 subnet "id" is not present in the subnet list
+* Test action 10: Delete the network created in test action 1, using the stored network id
+* Test action 11: List all networks, verifying the network id is no longer present
+* **Test assertion 5:** The network "id" is not present in the network list
+
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to create, update, delete network, IPv6 subnet on the
+SUT API. Specifically it verifies that:
+
+* Create network commands return ACTIVE "status" parameters which are reported in the list commands
+* Update network commands return updated "name" parameters which equal the "name" used
+* Update subnet commands return updated "name" parameters which equal the "name" used
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------
+Test Case 3 - Check External Network Visibility
+-------------------------------------------------
+
+Short name
+-----------
+
+opnfv.ipv6.external_network_visibility
+
+Use case specification
+----------------------
+
+This test case verifies that a user can see external networks but not their subnets. The reference is:
+
+tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility
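+
+A rough CLI analogue of what this test checks, with the public network id as a
+placeholder::
+
+    # External networks should be visible to a regular user ...
+    openstack network list --external
+    # ... but the pre-configured public network should expose no subnets.
+    openstack subnet list --network <public-network-id>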
+
+Test preconditions
+------------------
+
+1. The SUT has at least one external network.
+2. In the external network list, there is no network without an external router,
+   i.e., all networks in this list have an external router.
+3. There is one external network with the configured public network id, and this
+   network has no subnet.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: List all networks with an external router, storing the "id" parameters returned in the response
+* Test action 2: Verify the list in test action 1 is not empty
+* **Test assertion 1:** The list of networks with an external router is not empty
+* Test action 3: From the list in test action 1, list the networks without an external router
+* Test action 4: Verify the list in test action 3 is empty
+* **Test assertion 2:** The list of networks without an external router within the external
+  network list is empty
+* Test action 5: Verify the configured public network id is found in test action 1 stored "id"s
+* **Test assertion 3:** the public network id is found in the external network "id"s
+* Test action 6: List the subnets of the external network with the configured
+ public network id
+* Test action 7: Verify list in test action 6 is empty
+* **Test assertion 4:** There is no subnet of the external network with the configured
+ public network id
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use list commands to list the external networks and the
+pre-configured public network. Specifically it verifies that:
+
+* Network list commands to find visible networks with external router
+* Network list commands to find visible network with pre-configured public network id
+* Subnet list commands to find no subnet on the pre-configured public network
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------
+Test Case 4 - List IPv6 Networks and Subnets
+---------------------------------------------
+
+Short name
+-----------
+
+opnfv.ipv6.network_subnet_list
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to list networks and
+subnets after creating a network and an IPv6 subnet. The references are:
+
+tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks
+tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: List all networks, verifying the network id is found in the list
+* **Test assertion 1:** The "id" parameter is found in the network list
+* Test action 3: Create an IPv6 subnet of the network created in test action 1,
+  storing the "id" parameter returned in the response
+* Test action 4: List all subnets of this network, verifying the IPv6 subnet id
+ is found in the list
+* **Test assertion 2:** The "id" parameter is found in the IPv6 subnet list
+* Test action 5: Delete the IPv6 subnet using the stored "id" parameters
+* Test action 6: List all subnets, verify subnet_id is no longer present in the list
+* **Test assertion 3:** The IPv6 subnet "id" parameter is not present in list
+* Test action 7: Delete the network created in test action 1, using the stored network id
+* Test action 8: List all networks, verifying the network id is no longer present
+* **Test assertion 4:** The network "id" parameter is not present in the network list
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the ability to use create commands to create network, IPv6 subnet, list
+commands to list the created networks, IPv6 subnet on the SUT API. Specifically it verifies that:
+
+* Create commands to create network, IPv6 subnet
+* List commands to find the created network and IPv6 subnet in the network and subnet lists
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------
+Test Case 5 - Show Details of an IPv6 Network and Subnet
+-------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.network_subnet_show
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to show network and subnet
+details. The references are:
+
+tempest.api.network.test_networks.NetworksIpV6Test.test_show_network
+tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" and "name" parameter returned in the response
+* Test action 2: Show the network id and name, verifying the network id and name equal to the
+ "id" and "name" stored in test action 1
+* **Test assertion 1:** The id and name equal to the "id" and "name" stored in test action 1
+* Test action 3: Create an IPv6 subnet of the network, storing the "id" and CIDR parameter
+ returned in the response
+* Test action 4: Show the details of the created IPv6 subnet, verifying the
+ id and CIDR in the details are equal to the stored id and CIDR in test action 3.
+* **Test assertion 2:** The "id" and CIDR in show details equal to "id" and CIDR stored in test action 3
+* Test action 5: Delete the IPv6 subnet using the stored "id" parameter
+* Test action 6: List all subnets on the network, verify the IPv6 subnet id is no longer present in the list
+* **Test assertion 3:** The IPv6 subnet "id" parameter is not present in list
+* Test action 7: Delete the network created in test action 1, using the stored network id
+* Test action 8: List all networks, verifying the network id is no longer present
+* **Test assertion 4:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use create commands to create network, IPv6 subnet and show
+commands to show network, IPv6 subnet details on the SUT API. Specifically it verifies that:
+
+* Network show commands return correct "id" and "name" parameter which equal to the returned response in the create commands
+* IPv6 subnet show commands return correct "id" and CIDR parameter which equal to the returned response in the create commands
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------
+Test Case 6 - Create an IPv6 Port in Allowed Allocation Pools
+-------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.port_create_in_allocation_pool
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to create
+an IPv6 subnet within an allowed IPv6 address allocation pool and to create
+a port whose address is in the range of the pool. The reference is:
+
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools
+
+Test preconditions
+------------------
+
+There should be an IPv6 CIDR configuration whose prefix length is less than 126.
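+
+For illustration, a subnet satisfying this precondition could be created as sketched
+below, where the network name, prefix and pool boundaries are placeholders (a /64
+prefix length is well below the 126 limit)::
+
+    openstack subnet create --network ipv6-net --ip-version 6 \
+        --subnet-range 2001:db8:0:1::/64 \
+        --allocation-pool start=2001:db8:0:1::10,end=2001:db8:0:1::ff \
+        ipv6-pool-subnet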
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Check the allocation pools configuration, verifying the prefixlen
+ of the IPv6 CIDR configuration is less than 126.
+* **Test assertion 1:** The prefixlen of the IPv6 CIDR configuration is less than 126
+* Test action 3: Get the allocation pool by setting the start_ip and end_ip
+ based on the IPv6 CIDR configuration.
+* Test action 4: Create an IPv6 subnet of the network within the allocation pools,
+ storing the "id" parameter returned in the response
+* Test action 5: Create a port of the network, storing the "id" parameter returned in the response
+* Test action 6: Verify the port's IP address is in the range of the allocation pool obtained in test action 3
+* **Test assertion 2:** The port's IP address is in the range of the allocation pool
+* Test action 7: Delete the port using the stored "id" parameter
+* Test action 8: List all ports, verify the port id is no longer present in the list
+* **Test assertion 3:** The port "id" parameter is not present in list
+* Test action 9: Delete the IPv6 subnet using the stored "id" parameter
+* Test action 10: List all subnets on the network, verify the IPv6 subnet id is no longer present in the list
+* **Test assertion 4:** The IPv6 subnet "id" parameter is not present in list
+* Test action 11: Delete the network created in test action 1, using the stored network id
+* Test action 12: List all networks, verifying the network id is no longer present
+* **Test assertion 5:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use create commands to create an IPv6 subnet within allowed
+IPv6 address allocation pool and create a port whose address is in the range of the pool. Specifically it verifies that:
+
+* IPv6 subnet create command to create an IPv6 subnet within allowed IPv6 address allocation pool
+* Port create command to create a port whose IP address is in the range of the allocation pool
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------
+Test Case 7 - Create an IPv6 Port with Empty Security Groups
+-------------------------------------------------------------
+
+Short name
+-----------
+
+opnfv.ipv6.port_create_empty_security_group
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to create a port with an empty
+security group. The reference is:
+
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups
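+
+A hedged CLI sketch of such a port, assuming a client version that supports the
+"--no-security-group" option (the network and port names are placeholders)::
+
+    # Create a port that is attached to no security group at all.
+    openstack port create --network ipv6-net --no-security-group empty-sg-port
+    # The port's security group field should be an empty list, not null:
+    openstack port show empty-sg-port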
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create an IPv6 subnet of the network, storing the "id" parameter returned in the response
+* Test action 3: Create a port of the network with an empty security group, storing the "id" parameter returned in the response
+* Test action 4: Verify the security group of the port is not none but is empty
+* **Test assertion 1:** the security group of the port is not none but is empty
+* Test action 5: Delete the port using the stored "id" parameter
+* Test action 6: List all ports, verify the port id is no longer present in the list
+* **Test assertion 2:** The port "id" parameter is not present in list
+* Test action 7: Delete the IPv6 subnet using the stored "id" parameter
+* Test action 8: List all subnets on the network, verify the IPv6 subnet id is no longer present in the list
+* **Test assertion 3:** The IPv6 subnet "id" parameter is not present in list
+* Test action 9: Delete the network created in test action 1, using the stored network id
+* Test action 10: List all networks, verifying the network id is no longer present
+* **Test assertion 4:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use create commands to create port with
+empty security group of the SUT API. Specifically it verifies that:
+
+* Port create commands to create a port with an empty security group
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-----------------------------------------------------
+Test Case 8 - Create, Update and Delete an IPv6 Port
+-----------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.port_create_update_delete
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability to create, update and
+delete an IPv6 port. The reference is:
+
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create a port of the network, storing the "id" and "admin_state_up" parameters
+ returned in the response
+* Test action 3: Verify the value of port's 'admin_state_up' is True
+* **Test assertion 1:** the value of port's 'admin_state_up' is True after creating
+* Test action 4: Update the port's name with a new_name and set port's admin_state_up to False,
+ storing the name and admin_state_up parameters returned in the response
+* Test action 5: Verify the stored port's name equals new_name and the port's admin_state_up is False
+* **Test assertion 2:** The stored port's name equals new_name and the port's admin_state_up is False
+* Test action 6: Delete the port using the stored "id" parameter
+* Test action 7: List all ports, verify the port is no longer present in the list
+* **Test assertion 3:** The port "id" parameter is not present in list
+* Test action 8: Delete the network created in test action 1, using the stored network id
+* Test action 9: List all networks, verifying the network id is no longer present
+* **Test assertion 4:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the ability to use create/update/delete commands to create/update/delete port
+of the SUT API. Specifically it verifies that:
+
+* Port create commands return 'admin_state_up' as True in the response
+* Port update commands update 'name' to new_name and 'admin_state_up' to False
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+------------------------------
+Test Case 9 - List IPv6 Ports
+------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.tc009.port_list
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT ability to create a port on a network and
+find the port in the list of all ports. The reference is:
+
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create a port of the network, storing the "id" parameter returned in the response
+* Test action 3: List all ports, verify the port id is found in the list
+* **Test assertion 1:** The "id" parameter is found in the port list
+* Test action 4: Delete the port using the stored "id" parameter
+* Test action 5: List all ports, verify the port is no longer present in the list
+* **Test assertion 2:** The port "id" parameter is not present in list
+* Test action 6: Delete the network created in test action 1, using the stored network id
+* Test action 7: List all networks, verifying the network id is no longer present
+* **Test assertion 3:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use list commands to list the networks and ports on
+the SUT API. Specifically it verifies that:
+
+* Port list command to list all ports, the created port is found in the list.
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------
+Test Case 10 - Show Key/Value Details of an IPv6 Port
+-------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.tc010.port_show_details
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT ability to show port details;
+the values in the details should be equal to the values used to create the port.
+The reference is:
+
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create a port of the network, storing the "id" parameter returned in the response
+* Test action 3: Show the details of the port, verify the stored port's id
+ in test action 2 exists in the details
+* **Test assertion 1:** The "id" parameter is found in the port shown details
+* Test action 4: Verify the values in the details of the port are the same as the values
+ to create the port
+* **Test assertion 2:** The values in the details of the port are the same as the values
+ to create the port
+* Test action 5: Delete the port using the stored "id" parameter
+* Test action 6: List all ports, verify the port is no longer present in the list
+* **Test assertion 3:** The port "id" parameter is not present in list
+* Test action 7: Delete the network created in test action 1, using the stored network id
+* Test action 8: List all networks, verifying the network id is no longer present
+* **Test assertion 4:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use show commands to show port details on the SUT API.
+Specifically it verifies that:
+
+* Port show commands to show the details of the port, whose id is in the details
+* Port show commands to show the details of the port, whose values are the same as the values
+ to create the port
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------------------
+Test Case 11 - Add Multiple Interfaces for an IPv6 Router
+---------------------------------------------------------
+
+Short name
+-----------
+
+opnfv.ipv6.router_add_multiple_interface
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT ability of adding multiple interfaces
+to a router, the reference is,
+
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create 2 networks named network01 and network02 sequentially,
+ storing the "id" parameters returned in the response
+* Test action 2: Create an IPv6 subnet01 in network01, an IPv6 subnet02 in network02 sequentially,
+ storing the "id" parameters returned in the response
+* Test action 3: Create a router, storing the "id" parameter returned in the response
+* Test action 4: Create interface01 with subnet01 and the router
+* Test action 5: Verify the router_id stored in test action 3 equals the interface01's 'device_id'
+  and subnet01_id stored in test action 2 equals the interface01's 'subnet_id'
+* **Test assertion 1:** The router_id equals the interface01's 'device_id'
+  and subnet01_id equals the interface01's 'subnet_id'
+* Test action 6: Create interface02 with subnet02 and the router
+* Test action 7: Verify the router_id stored in test action 3 equals the interface02's 'device_id'
+  and subnet02_id stored in test action 2 equals the interface02's 'subnet_id'
+* **Test assertion 2:** The router_id equals the interface02's 'device_id'
+  and subnet02_id equals the interface02's 'subnet_id'
+* Test action 8: Delete the interfaces, router, IPv6 subnets and networks, then list all
+  interfaces, ports, IPv6 subnets and networks; the test passes if the deleted ones
+  are not found in the lists
+* **Test assertion 3:** The interfaces, router, IPv6 subnets and networks ids are not present in the lists
+ after deleting
+
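+A minimal sketch of this flow, using the openstacksdk Python client, is shown below.
+The cloud name, network names and CIDRs are assumptions, and it is assumed that the
+SDK exposes the Neutron add_router_interface response as a dict::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    router = conn.network.create_router(name='router01')
+    for i, cidr in enumerate(['2001:db8:0:1::/64', '2001:db8:0:2::/64'], start=1):
+        net = conn.network.create_network(name='network%02d' % i)
+        subnet = conn.network.create_subnet(network_id=net.id, ip_version=6, cidr=cidr)
+        iface = conn.network.add_interface_to_router(router, subnet_id=subnet.id)
+        # The interface info reports the subnet, and the backing port is owned by the router
+        assert iface['subnet_id'] == subnet.id
+        assert conn.network.get_port(iface['port_id']).device_id == router.id
+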
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to add multiple interfaces to a router on
+the SUT API. Specifically it verifies that:
+
+* Interface create commands create an interface with an IPv6 subnet and the router; the interface's
+  'device_id' and 'subnet_id' should equal the router id and IPv6 subnet id, respectively
+* Interface create commands can create multiple interfaces with the same router and multiple IPv6 subnets
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------------
+Test Case 12 - Add and Remove an IPv6 Router Interface with port_id
+-------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.router_interface_add_remove_with_port
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT ability of adding and removing a router interface to
+a port. The subnet_id and port_id of the interface are checked, and the port's
+device_id is checked to verify whether it equals the router_id. The
+reference is,
+
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create an IPv6 subnet of the network, storing the "id" parameter returned in the response
+* Test action 3: Create a router, storing the "id" parameter returned in the response
+* Test action 4: Create a port of the network, storing the "id" parameter returned in the response
+* Test action 5: Add router interface to the port created, storing the "id" parameter returned in the response
+* Test action 6: Verify the interface's keys include 'subnet_id' and 'port_id'
+* **Test assertion 1:** the interface's keys include 'subnet_id' and 'port_id'
+* Test action 7: Show the port details, verify the 'device_id' in the port details equals the router id stored
+  in test action 3
+* **Test assertion 2:** 'device_id' in the port details equals the router id
+* Test action 8: Delete the interface, port, router, subnet and network, then list
+ all interfaces, ports, routers, subnets and networks, the test passes if the deleted
+ ones are not found in the list.
+* **Test assertion 3:** interfaces, ports, routers, subnets and networks are not found in the lists after deleting
+
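+The flow can be sketched with the openstacksdk Python client as follows; the names and
+CIDR are assumptions, and the interface returned by add_interface_to_router is assumed
+to carry the Neutron response keys::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    net = conn.network.create_network(name='net01')
+    subnet = conn.network.create_subnet(network_id=net.id, ip_version=6,
+                                        cidr='2001:db8:0:1::/64')
+    router = conn.network.create_router(name='router01')
+    port = conn.network.create_port(network_id=net.id)
+    iface = conn.network.add_interface_to_router(router, port_id=port.id)
+    assert 'subnet_id' in iface and 'port_id' in iface               # test assertion 1
+    assert conn.network.get_port(port.id).device_id == router.id     # test assertion 2
+    conn.network.remove_interface_from_router(router, port_id=port.id)
+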
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use add/remove commands to add a router interface to, and remove it
+from, a port, and show commands to show port details on the SUT API. Specifically it verifies that:
+
+* Router interface add commands add a router interface to a port; the interface's keys should include 'subnet_id' and 'port_id'
+* Port show commands show the 'device_id' in the port details, which should be equal to the router id
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------------------------------
+Test Case 13 - Add and Remove an IPv6 Router Interface with subnet_id
+---------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.router_interface_add_remove
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability of adding and removing a router interface with
+the IPv6 subnet id, the reference is
+
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create an IPv6 subnet with the network created, storing the "id" parameter
+ returned in the response
+* Test action 3: Create a router, storing the "id" parameter returned in the response
+* Test action 4: Add a router interface with the stored ids of the router and IPv6 subnet
+* **Test assertion 1:** Key 'subnet_id' is included in the added interface's keys
+* **Test assertion 2:** Key 'port_id' is included in the added interface's keys
+* Test action 5: Show the port info with the stored interface's port id
+* **Test assertion 3:** The stored router id is equal to the device id shown in the port info
+* Test action 6: Delete the router interface created in test action 4, using the stored subnet id
+* Test action 7: List all router interfaces, verifying the router interface is no longer present
+* **Test assertion 4:** The router interface with the stored subnet id is not present
+ in the router interface list
+* Test action 8: Delete the router created in test action 3, using the stored router id
+* Test action 9: List all routers, verifying the router id is no longer present
+* **Test assertion 5:** The router "id" parameter is not present in the router list
+* Test action 10: Delete the subnet created in test action 2, using the stored subnet id
+* Test action 11: List all subnets, verifying the subnet id is no longer present
+* **Test assertion 6:** The subnet "id" parameter is not present in the subnet list
+* Test action 12: Delete the network created in test action 1, using the stored network id
+* Test action 13: List all networks, verifying the network id is no longer present
+* **Test assertion 7:** The network "id" parameter is not present in the network list
+
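+A minimal openstacksdk sketch of this flow is shown below (cloud name, resource names
+and CIDR are assumptions, and the interface info is assumed to be the Neutron response
+dict; the referenced tempest test is the authoritative check)::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    net = conn.network.create_network(name='net01')
+    subnet = conn.network.create_subnet(network_id=net.id, ip_version=6,
+                                        cidr='2001:db8:0:1::/64')
+    router = conn.network.create_router(name='router01')
+    iface = conn.network.add_interface_to_router(router, subnet_id=subnet.id)
+    assert 'subnet_id' in iface and 'port_id' in iface                      # assertions 1 and 2
+    assert conn.network.get_port(iface['port_id']).device_id == router.id   # assertion 3
+    conn.network.remove_interface_from_router(router, subnet_id=subnet.id)
+    conn.network.delete_router(router)
+    conn.network.delete_subnet(subnet)
+    conn.network.delete_network(net)
+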
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the ability to add and remove router interface with the subnet id on the
+SUT API. Specifically it verifies that:
+
+* Router interface add command returns valid 'subnet_id' parameter which is reported
+ in the interface's keys
+* Router interface add command returns valid 'port_id' parameter which is reported
+ in the interface's keys
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------------
+Test Case 14 - Create, Show, List, Update and Delete an IPv6 router
+-------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.router_create_show_list_update_delete
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability of creating, showing, listing, updating
+and deleting routers, the reference is
+
+tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router
+
+Test preconditions
+------------------
+
+There should exist an OpenStack external network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a router, setting the admin_state_up to False and the external_network_id
+  to the public network id, storing the "id" parameter returned in the response
+* **Test assertion 1:** The created router's admin_state_up is False
+* **Test assertion 2:** The created router's external network id equals the public network id
+* Test action 2: Show details of the router created in test action 1, using the stored router id
+* **Test assertion 3:** The router's name shown is the same as the router created
+* **Test assertion 4:** The router's external network id shown is the same as the public network id
+* Test action 3: List all routers and verify if created router is in response message
+* **Test assertion 5:** The stored router id is in the router list
+* Test action 4: Update the name of the router and verify it is updated
+* **Test assertion 6:** The name of the router equals the name used in the update in test action 4
+* Test action 5: Show the details of the router, using the stored router id
+* **Test assertion 7:** The router's name shown equals the name used in the update in test action 4
+* Test action 6: Delete the router created in test action 1, using the stored router id
+* Test action 7: List all routers, verifying the router id is no longer present
+* **Test assertion 8:** The "id" parameter is not present in the router list
+
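+For illustration, the router lifecycle above can be sketched with the openstacksdk
+Python client; the external network name 'public' and the router names are assumptions::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    public = conn.network.find_network('public')               # assumed external network name
+    router = conn.network.create_router(
+        name='router01', is_admin_state_up=False,
+        external_gateway_info={'network_id': public.id})
+    assert router.is_admin_state_up is False                               # assertion 1
+    assert router.external_gateway_info['network_id'] == public.id         # assertion 2
+    shown = conn.network.get_router(router.id)
+    assert shown.name == 'router01'                                        # assertions 3 and 4
+    assert router.id in [r.id for r in conn.network.routers()]             # assertion 5
+    updated = conn.network.update_router(router, name='router01-renamed')
+    assert updated.name == 'router01-renamed'                              # assertions 6 and 7
+    conn.network.delete_router(router)                                     # assertion 8 follows
+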
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to create, show, list, update and delete router on
+the SUT API. Specifically it verifies that:
+
+* Router create command returns valid "admin_state_up" and external network id parameters which equal
+  the values used to create the router
+* Router show command returns a valid "name" parameter which equals the name of the router created
+* Router show command returns a valid external network id which equals the public network id
+* Router list command returns a valid "id" parameter which equals the stored router "id"
+* Router update command returns an updated "name" parameter which equals the "name" used to update
+* Router created using create command is able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------------------------------------
+Test Case 15 - Create, List, Update, Show and Delete an IPv6 security group
+---------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.security_group_create_list_update_show_delete
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability of creating, listing, updating, showing
+and deleting security groups, the reference is
+
+tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a security group, storing the "id" parameter returned in the response
+* Test action 2: List all security groups and verify the created security group is in the response
+* **Test assertion 1:** The created security group's "id" is found in the list
+* Test action 3: Update the name and description of this security group, using the stored id
+* Test action 4: Verify if the security group's name and description are updated
+* **Test assertion 2:** The security group's name equals the name used in test action 3
+* **Test assertion 3:** The security group's description equals the description used in test action 3
+* Test action 5: Show details of the updated security group, using the stored id
+* **Test assertion 4:** The security group's name shown equals the name used in test action 3
+* **Test assertion 5:** The security group's description shown equals the description used in test action 3
+* Test action 6: Delete the security group created in test action 1, using the stored id
+* Test action 7: List all security groups, verifying the security group's id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the security group list
+
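+A minimal sketch of this flow with the openstacksdk Python client (the cloud name,
+group names and descriptions are assumptions)::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    sg = conn.network.create_security_group(name='sg01', description='initial')
+    assert sg.id in [g.id for g in conn.network.security_groups()]       # assertion 1
+    conn.network.update_security_group(sg, name='sg01-new', description='updated')
+    shown = conn.network.get_security_group(sg.id)
+    assert shown.name == 'sg01-new'                                       # assertions 2 and 4
+    assert shown.description == 'updated'                                 # assertions 3 and 5
+    conn.network.delete_security_group(sg)
+    assert sg.id not in [g.id for g in conn.network.security_groups()]    # assertion 6
+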
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the ability to create, list, update, show and delete security groups on
+the SUT API. Specifically it verifies that:
+
+* Security group create commands return valid "id" parameter which is reported in the list commands
+* Security group update commands return valid "name" and "description" parameters which are
+ reported in the show commands
+* Security group created using create command is able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------------------------
+Test Case 16 - Create, Show and Delete IPv6 security group rule
+---------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.security_group_rule_create_show_delete
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability of creating, showing, listing and deleting
+security group rules, the reference is
+
+tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a security group, storing the "id" parameter returned in the response
+* Test action 2: Create a rule of the security group with protocol tcp, udp and icmp, respectively,
+ using the stored security group's id, storing the "id" parameter returned in the response
+* Test action 3: Show details of the created security group rule, using the stored id of the
+ security group rule
+* **Test assertion 1:** All the created security group rule's values equal the rule values
+  shown in test action 3
+* Test action 4: List all security group rules
+* **Test assertion 2:** The stored security group rule's id is found in the list
+* Test action 5: Delete the security group rule, using the stored security group rule's id
+* Test action 6: List all security group rules, verifying the security group rule's id is no longer present
+* **Test assertion 3:** The security group rule "id" parameter is not present in the list
+* Test action 7: Delete the security group, using the stored security group's id
+* Test action 8: List all security groups, verifying the security group's id is no longer present
+* **Test assertion 4:** The security group "id" parameter is not present in the list
+
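+The flow can be sketched with the openstacksdk Python client as follows; the group name
+is an assumption and only the tcp rule is shown (udp and icmp follow the same pattern)::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    sg = conn.network.create_security_group(name='sg01')
+    rule = conn.network.create_security_group_rule(
+        security_group_id=sg.id, direction='ingress',
+        ether_type='IPv6', protocol='tcp')                   # API body field is 'ethertype'
+    shown = conn.network.get_security_group_rule(rule.id)
+    assert shown.protocol == 'tcp' and shown.ether_type == 'IPv6'              # assertion 1
+    assert rule.id in [r.id for r in conn.network.security_group_rules()]      # assertion 2
+    conn.network.delete_security_group_rule(rule)
+    conn.network.delete_security_group(sg)
+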
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to create, show, list and delete security group rules on
+the SUT API. Specifically it verifies that:
+
+* Security group rule create command returns valid values which are reported in the show command
+* Security group rule created using create command is able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+----------------------------------------
+Test Case 17 - List IPv6 Security Groups
+----------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.security_group_list
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT API ability of listing security groups, the reference is
+
+tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups
+
+Test preconditions
+------------------
+
+There should exist a default security group.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: List all security groups
+* Test action 2: Verify the default security group exists in the list, the test passes
+ if the default security group exists
+* **Test assertion 1:** The default security group is in the list
+
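+A minimal openstacksdk sketch of this check (assuming the default security group is
+named 'default', as in a stock deployment)::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    names = [sg.name for sg in conn.network.security_groups()]   # test action 1
+    assert 'default' in names                                     # test assertion 1
+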
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to list security groups on the SUT API.
+Specifically it verifies that:
+
+* Security group list command returns valid security groups which include the default security group
+
+Post conditions
+---------------
+
+None
+
+----------------------------------------------------------------------------
+Test Case 18 - IPv6 Address Assignment - Dual Stack, SLAAC, DHCPv6 Stateless
+----------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dhcpv6_stateless
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless'.
+In this case, guest instance obtains IPv6 address from OpenStack managed radvd
+using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then
+verifies the ping6 available VM can ping the other VM's v4 and v6 addresses
+as well as the v6 subnet's gateway ip in the same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create one IPv6 subnet of the network created in test action 1 in
+ ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless',
+ storing the "id" parameter returned in the response
+* Test action 6: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and one v6 address actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 address
+ as well as the v6 subnet's gateway ip
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete the IPv6 subnet created in test action 5, using the stored id
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
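+The network-side setup of this scenario can be sketched with the openstacksdk Python
+client as shown below; the cloud name, router/network names and CIDRs are assumptions,
+and note that the Neutron API spells the modes with hyphens ('dhcpv6-stateless'). The
+VM boot and the in-guest ping checks are driven by the referenced tempest scenario test::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    net = conn.network.create_network(name='net01')
+    v4 = conn.network.create_subnet(network_id=net.id, ip_version=4,
+                                    cidr='10.0.0.0/24')
+    v6 = conn.network.create_subnet(network_id=net.id, ip_version=6,
+                                    cidr='2001:db8:0:1::/64',
+                                    ipv6_ra_mode='dhcpv6-stateless',
+                                    ipv6_address_mode='dhcpv6-stateless')
+    router = conn.network.find_router('router01')       # assumed pre-existing public router
+    if router is None:
+        public = conn.network.find_network('public')     # assumed external network name
+        router = conn.network.create_router(
+            name='router01', external_gateway_info={'network_id': public.id})
+    conn.network.add_interface_to_router(router, subnet_id=v4.id)
+    conn.network.add_interface_to_router(router, subnet_id=v6.id)
+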
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode
+'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless',
+and verify the ping6 available VM can ping the other VM's v4 and v6 addresses as well as
+the v6 subnet's gateway ip in the same network. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+--------------------------------------------------------------------------------------
+Test Case 19 - IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC, DHCPv6 Stateless
+--------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_dhcpv6_stateless
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless'.
+In this case, guest instance obtains IPv6 address from OpenStack managed radvd
+using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then
+verifies the ping6 available VM can ping the other VM's v4 address in one network
+and v6 address in another network as well as the v6 subnet's gateway ip, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create another network, storing the "id" parameter returned in the response
+* Test action 6: Create one IPv6 subnet of network created in test action 5 in
+ ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless',
+ storing the "id" parameter returned in the response
+* Test action 7: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id
+* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response
+* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5
+* **Test assertion 1:** The 1st vNIC of each VM gets one v4 address assigned and
+ the 2nd vNIC of each VM gets one v6 address actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 address
+ as well as the v6 subnet's gateway ip
+* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids
+* Test action 11: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 13: Delete the IPv6 subnet created in test action 6, using the stored id
+* Test action 14: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids
+* Test action 16: List all networks, verifying the ids are no longer present
+* **Test assertion 6:** The two "id" parameters are not present in the network list
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless', and verify the ping6 available VM can ping
+the other VM's v4 address in one network and v6 address in another network as well as
+the v6 subnet's gateway ip. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 address in one network and IPv6 address in another
+ network as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-----------------------------------------------------------------------------------------------
+Test Case 20 - IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless
+-----------------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.multiple_prefixes_dhcpv6_stateless
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then
+verifies the ping6 available VM can ping the other VM's one v4 address and two v6
+addresses with different prefixes as well as the v6 subnets' gateway ips in the
+same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create two IPv6 subnets of the network created in test action 1 in
+ ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless',
+ storing the "id" parameters returned in the response
+* Test action 6: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses with
+ different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete two IPv6 subnets created in test action 5, using the stored ids
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless',
+and verify the ping6 available VM can ping the other VM's v4 address and two
+v6 addresses with different prefixes as well as the v6 subnets' gateway ips in the same network.
+Specifically it verifies that:
+
+* The IPv6 addresses with different prefixes in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------------------------------------------------------------------
+Test Case 21 - IPv6 Address Assignment - Dual Net, Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless
+---------------------------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_multiple_prefixes_dhcpv6_stateless
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then
+verifies the ping6 available VM can ping the other VM's v4 address in one network
+and two v6 addresses with different prefixes in another network as well as the
+v6 subnets' gateway ips, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create another network, storing the "id" parameter returned in the response
+* Test action 6: Create two IPv6 subnets of network created in test action 5 in
+ ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless',
+ storing the "id" parameters returned in the response
+* Test action 7: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response
+* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses
+ with different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids
+* Test action 11: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 13: Delete two IPv6 subnets created in test action 6, using the stored ids
+* Test action 14: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids
+* Test action 16: List all networks, verifying the ids are no longer present
+* **Test assertion 6:** The two "id" parameters are not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless',
+and verify the ping6 available VM can ping the other VM's v4 address in one network and two
+v6 addresses with different prefixes in another network as well as the v6 subnets'
+gateway ips. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+----------------------------------------------------------
+Test Case 22 - IPv6 Address Assignment - Dual Stack, SLAAC
+----------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 address from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's v4 and v6 addresses as well as the v6 subnet's gateway ip in the
+same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create one IPv6 subnet of the network created in test action 1 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameter returned in the response
+* Test action 6: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and one v6 address actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 address
+ as well as the v6 subnet's gateway ip
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete the IPv6 subnet created in test action 5, using the stored id
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
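+As a sketch only, the dual-stack subnets and the two guests can be set up with the
+openstacksdk Python client as below; the image name 'cirros', flavor 'm1.tiny' and the
+other names are assumptions, and the address and ping6 checks themselves run inside the
+guests under the referenced tempest scenario test::
+
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')
+    net = conn.network.create_network(name='net01')
+    conn.network.create_subnet(network_id=net.id, ip_version=4, cidr='10.0.0.0/24')
+    conn.network.create_subnet(network_id=net.id, ip_version=6,
+                               cidr='2001:db8:0:1::/64',
+                               ipv6_ra_mode='slaac',
+                               ipv6_address_mode='slaac')
+    image = conn.image.find_image('cirros')            # assumed guest image name
+    flavor = conn.compute.find_flavor('m1.tiny')       # assumed flavor name
+    for name in ('vm01', 'vm02'):
+        server = conn.compute.create_server(name=name, image_id=image.id,
+                                            flavor_id=flavor.id,
+                                            networks=[{'uuid': net.id}])
+        conn.compute.wait_for_server(server)
+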
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 and v6 addresses as well as
+the v6 subnet's gateway ip in the same network. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+--------------------------------------------------------------------
+Test Case 23 - IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC
+--------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 address from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's v4 address in one network and v6 address in another network as well as the
+v6 subnet's gateway ip, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create another network, storing the "id" parameter returned in the response
+* Test action 6: Create one IPv6 subnet of network created in test action 5 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameter returned in the response
+* Test action 7: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id
+* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response
+* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5
+* **Test assertion 1:** The 1st vNIC of each VM gets one v4 address assigned and
+ the 2nd vNIC of each VM gets one v6 address actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 address
+ as well as the v6 subnet's gateway ip
+* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids
+* Test action 11: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 13: Delete the IPv6 subnet created in test action 6, using the stored id
+* Test action 14: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids
+* Test action 16: List all networks, verifying the ids are no longer present
+* **Test assertion 6:** The two "id" parameters are not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 address in one network and
+v6 address in another network as well as the v6 subnet's gateway ip. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 address in one network and IPv6 address
+ in another network as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-----------------------------------------------------------------------------
+Test Case 24 - IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC
+-----------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.multiple_prefixes_slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's one v4 address and two v6 addresses with different prefixes as well as the v6
+subnets' gateway ips in the same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create two IPv6 subnets of the network created in test action 1 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameters returned in the response
+* Test action 6: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses with
+ different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete two IPv6 subnets created in test action 5, using the stored ids
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 address and two
+v6 addresses with different prefixes as well as the v6 subnets' gateway ips in the same network.
+Specifically it verifies that:
+
+* The IPv6 addresses with different prefixes in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+---------------------------------------------------------------------------------------
+Test Case 25 - IPv6 Address Assignment - Dual Net, Dual Stack, Multiple Prefixes, SLAAC
+---------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_multiple_prefixes_slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's v4 address in one network and two v6 addresses with different prefixes in another
+network as well as the v6 subnets' gateway ips, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create another network, storing the "id" parameter returned in the response
+* Test action 6: Create two IPv6 subnets of network created in test action 5 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameters returned in the response
+* Test action 7: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response
+* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses
+ with different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids
+* Test action 11: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 13: Delete two IPv6 subnets created in test action 6, using the stored ids
+* Test action 14: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids
+* Test action 16: List all networks, verifying the ids are no longer present
+* **Test assertion 6:** The two "id" parameters are not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 address in one network and two
+v6 addresses with different prefixes in another network as well as the v6 subnets' gateway ips.
+Specifically it verifies that:
+
+* The IPv6 addresses in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+
+
diff --git a/docs/testing/user/testspecification/old_files/ipv6/designspecification.rst b/docs/testing/user/testspecification/old_files/ipv6/designspecification.rst
deleted file mode 100644
index 9e403472..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/designspecification.rst
+++ /dev/null
@@ -1,133 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Christopher Price (Ericsson AB) and others
-
-==============================
-IPv6 test design specification
-==============================
-
-This document outlines the approach and method for testing IPv6 in the OPNFV compliance test
-suite. Providing a brief outline of the features to be tested, the methodology for testing,
-schema's and criteria.
-
-Features to be tested
-=====================
-
-The IPv6 compliance test plan outlines the method for testing IPv6 compliance to the OPNFV
-platform behaviours and features of IPv6 enabled VNFi platforms. The specific features to
-be tested by the IPv6 compliance test suite is outlined in the following table.
-
-.. table::
- :class: longtable
-
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Features / Requirements |Tests available | Test Cases |
-+===========================================================+===================+====================================================================+
-|All topologies work in a multi-tenant environment |No | |
-| | | |
-| | | |
-| | | |
-| | | |
-| | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|IPv6 VM to VM only |No | |
-| | | |
-| | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|IPv6 external L2 VLAN directly attached to a VM |No | |
-| | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|IPv6 subnet routed via L3 agent to an external IPv6 network|No | |
-| | | |
-|1. Both VLAN and overlay (e.g. GRE, VXLAN) subnet attached | | |
-| to VMs; | | |
-|2. Must be able to support multiple L3 agents for a given | | |
-| external network to support scaling (neutron scheduler | | |
-| to assign vRouters to the L3 agents) | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Ability for a NIC to support both IPv4 and IPv6 (dual |No | |
-|stack) address. | | |
-| | | |
-|1. VM with a single interface associated with a network, | | |
-| which is then associated with two subnets. | | |
-|2. VM with two different interfaces associated with two | | |
-| different networks and two different subnets. | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Support IPv6 Address assignment modes. |No | |
-| | | |
-|1. SLAAC | | |
-|2. DHCPv6 Stateless | | |
-|3. DHCPv6 Stateful | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Ability to create a port on an IPv6 DHCPv6 Stateful subnet |No | |
-|and assign a specific IPv6 address to the port and have it | | |
-|taken out of the DHCP address pool. | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Full support for IPv6 matching (i.e., IPv6, ICMPv6, TCP, |No | |
-|UDP) in security groups. Ability to control and manage all | | |
-|IPv6 security group capabilities via Neutron/Nova API (REST| | |
-|and CLI) as well as via Horizon. | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|During network/subnet/router create, there should be an |No | |
-|option to allow user to specify the type of address | | |
-|management they would like. This includes all options | | |
-|including those low priority if implemented (e.g., toggle | | |
-|on/off router and address prefix advertisements); It must | | |
-|be supported via Neutron API (REST and CLI) as well as via | | |
-|Horizon | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Security groups anti-spoofing: Prevent VM from using a |No | |
-|source IPv6/MAC address which is not assigned to the VM | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Protect tenant and provider network from rogue RAs |No | |
-| | | |
-| | | |
-| | | |
-| | | |
-| | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Support the ability to assign multiple IPv6 addresses to |No | |
-|an interface; both for Neutron router interfaces and VM | | |
-|interfaces. | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Ability for a VM to support a mix of multiple IPv4 and IPv6|No | |
-|networks, including multiples of the same type. | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|Support for IPv6 Prefix Delegation. |No | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|IPv6 First-Hop Security, IPv6 ND spoofing |No | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-|IPv6 support in Neutron Layer3 High Availability |No | |
-|(keepalived+VRRP). | | |
-+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+
-
-
-Test approach for IPv6
-======================
-
-The most common approach for testing IPv6 capabilities in the test suite is through interaction with the SUT control plane.
-In this instance the test framework will exercise the NBI provided by the VIM to configure and leverage IPv6 related features
-in the platform, instantiate workloads, and invoke behaviours in the platform. The suite may also interact directly with the
-data plane to exercise platform capabilities and further invoke helper functions on the platform for the same purpose.
-
-Test result analysis
---------------------
-
-All functional tests in the IPv6 test suite will provide a pass/fail result on completion of the test. In addition test logs
-and relevant additional information will be provided as part of the test log, available on test suite completion.
-
-Some tests in the compliance suite measure such metrics as latency and performance. At this time these tests are intended to
-provide a feature based pass/fail metric not related to system performance.
-These tests may however provide detailed results of performance and latency in the 'test report'_ document.
-
-Test identification
-===================
-
-TBD: WE need to identify the test naming scheme we will use in DoveTail in order that we can cross reference to the test
-projects and maintain our suite effectively. This naming scheme needs to be externally relevant to non-OPNFV consumers and as
-such some consideration is required on the selection.
-
-Pass Fail Criteria
-==================
-
-This section requires some further work with the test teams to identify how and where we generate, store and provide results.
diff --git a/docs/testing/user/testspecification/old_files/ipv6/index.rst b/docs/testing/user/testspecification/old_files/ipv6/index.rst
deleted file mode 100644
index a806d644..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/index.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV
-
-*******************************
-OPNFV IPv6 Compliance Test Plan
-*******************************
-
-.. toctree::
- :maxdepth: 2
-
- ./testplan.rst
- ./testprocedure.rst
- ./testspecification.rst
- ./designspecification.rst
- ./ipv6.tc001.specification.rst
- ./ipv6.tc026.specification.rst
- ./ipv6_all_testcases.rst
-
diff --git a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst b/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst
deleted file mode 100644
index 5afb2095..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV
-
-==================================================================================================
-Dovetail IPv6 tc001 specification - Bulk Creation and Deletion of IPv6 Networks, Ports and Subnets
-==================================================================================================
-
-
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|test case name |Bulk creation and deletion of IPv6 networks, ports and subnets |
-| | |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|id |dovetail.ipv6.tc001 |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|objective |To verify that platform is able to create/delete networks, ports and subnets in bulk operation |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|test items |tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network |
-| |{idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')} |
-| |tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port |
-| |{idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')} |
-| |tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet |
-| |{idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')} |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|environmental | |
-|requirements & | environment can be deployed on bare metal of virtualized infrastructure |
-|preconditions | deployment can be HA or non-HA |
-| | |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|scenario dependencies | NA |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|procedural |Step 1: create/delete network: |
-|requirements | create 2 networks in one request |
-| | asserting that the networks are found in the list after creation |
-| | |
-| |Step 2: create/delete subnet: |
-| | create 2 subnets in one request |
-| | asserting that the subnets are found in the list after creation |
-| | |
-| |Step 3: create/delete port: |
-| | create 2 ports in one request |
-| | asserting that the ports are found in the list after creation |
-| | |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|input specifications |The parameters needed to execute Neutron network APIs. |
-| |Refer to Neutron Networking API v2.0 `[1]`_ `[2]`_ |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|output specifications |The responses after executing Network network APIs. |
-| |Refer to Neutron Networking API v2.0 `[1]`_ `[2]`_ |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|pass/fail criteria |If normal response code 200 is returned, the test passes. |
-| |Otherwise, the test fails with various error codes. |
-| |Refer to Neutron Networking API v2.0 `[1]`_ `[2]`_ |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-|test report |TBD |
-+-----------------------+----------------------------------------------------------------------------------------------------+
-
-.. _`[1]`: http://developer.openstack.org/api-ref/networking/v2/
-.. _`[2]`: http://wiki.openstack.org/wiki/Neutron/APIv2-specification
diff --git a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst b/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst
deleted file mode 100644
index e7fd82e7..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV
-
-==============================================================
-Dovetail IPv6 tc026 specification - Service VM as IPv6 vRouter
-==============================================================
-
-
-+-----------------------+--------------------------------------------------------------------------+
-|test case name |Service VM as IPv6 vRouter |
-| | |
-+-----------------------+--------------------------------------------------------------------------+
-|id |dovetail.ipv6.tc026 |
-+-----------------------+--------------------------------------------------------------------------+
-|objective |IPv6 connnectivity, service VM as IPv6 vRouter |
-+-----------------------+--------------------------------------------------------------------------+
-|modules under test |neutron, nova, etc |
-+-----------------------+--------------------------------------------------------------------------+
-|dependent test project |yardstick |
-+-----------------------+--------------------------------------------------------------------------+
-|test items |yardstick_tc027 |
-+-----------------------+--------------------------------------------------------------------------+
-|environmental | OpenStack-only environment |
-|requirements & | environment can be deplyed on bare metal of virtualized infrastructure |
-|preconditions | deployment can be HA or non-HA |
-| | test case image needs to be installed into Glance with ping6 included |
-+-----------------------+--------------------------------------------------------------------------+
-|scenario dependencies | nosdn |
-+-----------------------+--------------------------------------------------------------------------+
-|procedural |step 1: to setup IPv6 testing environment |
-|requirements | 1.1 disable security group |
-| | 1.2 create (ipv6, ipv4) router, network and subnet |
-| | 1.3 create vRouter, VM1, VM2 |
-| |step 2: to run ping6 to verify IPv6 connectivity |
-| | 2.1 ssh to VM1 |
-| | 2.2 ping6 to ipv6 router from VM1 |
-| | 2.3 get the result and store the logs |
-| |step 3: to teardown IPv6 testing environment |
-| | 3.1 delete vRouter, VM1, VM2 |
-| | 3.2 delete (ipv6, ipv4) router, network and subnet |
-| | 3.3 enable security group |
-+-----------------------+--------------------------------------------------------------------------+
-|input specifications |packetsize: 56 |
-| |ping_count: 5 |
-| | |
-+-----------------------+--------------------------------------------------------------------------+
-|output specifications |output includes max_rtt, min_rtt, average_rtt |
-+-----------------------+--------------------------------------------------------------------------+
-|pass/fail criteria |ping6 connectivity success, no SLA |
-+-----------------------+--------------------------------------------------------------------------+
-|test report | dovetail dashboard DB here |
-+-----------------------+--------------------------------------------------------------------------+
-
diff --git a/docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst b/docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst
deleted file mode 100644
index 02115ec3..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst
+++ /dev/null
@@ -1,243 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV
-
-==================================================
-IPv6 Compliance Testing Methodology and Test Cases
-==================================================
-
-IPv6 Compliance Testing focuses on overlay IPv6 capabilities, i.e. to validate that
-IPv6 capability is supported in tenant networks, subnets and routers. Both Tempest API
-testing and Tempest Scenario testing are reused as much as we can in IPv6 Compliance
-Testing. In addition, Yardstick Test Case 027 is also used to validate a specific use case
-of using a Service VM as an IPv6 vRouter.
-
-IPv6 Compliance Testing test cases are described as follows:
-
----------------------------------------------------------------
-Test Case 1: Create and Delete an IPv6 Network, Port and Subnet
----------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet
-
------------------------------------------------------------------
-Test Case 2: Create, Update and Delete an IPv6 Network and Subnet
------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet
-
-----------------------------------------------
-Test Case 3: Check External Network Visibility
-----------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility
-
--------------------------------------------------------
-Test Case 4: List IPv6 Networks and Subnets of a Tenant
--------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks
- tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets
-
------------------------------------------------------------
-Test Case 5: Show Information of an IPv6 Network and Subnet
------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_networks.NetworksIpV6Test.test_show_network
- tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet
-
-------------------------------------------------------------
-Test Case 6: Create an IPv6 Port in Allowed Allocation Pools
-------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools
-
---------------------------------------------------------
-Test Case 7: Create an IPv6 Port without Security Groups
---------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups
-
----------------------------------------------------
-Test Case 8: Create, Update and Delete an IPv6 Port
----------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port
-
-----------------------------------------
-Test Case 9: List IPv6 Ports of a Tenant
-----------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports
-
-----------------------------------------------
-Test Case 10: Show Information of an IPv6 Port
-----------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
-
---------------------------------------------------------
-Test Case 11: Add Multiple Interfaces for an IPv6 Router
---------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
-
-------------------------------------------------------------------
-Test Case 12: Add and Remove an IPv6 Router Interface with port_id
-------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id
-
---------------------------------------------------------------------
-Test Case 13: Add and Remove an IPv6 Router Interface with subnet_id
---------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id
-
-------------------------------------------------------------------
-Test Case 14: Create, Update, Delete, List and Show an IPv6 Router
-------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router
-
---------------------------------------------------------------------------
-Test Case 15: Create, Update, Delete, List and Show an IPv6 Security Group
---------------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group
-
-----------------------------------------------------------
-Test Case 16: Create, Delete and Show Security Group Rules
-----------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule
-
---------------------------------------
-Test Case 17: List All Security Groups
---------------------------------------
-
-.. code-block:: bash
-
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups
-
---------------------------------------------------------
-Test Case 18: IPv6 Address Assignment - DHCPv6 Stateless
---------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os
-
---------------------------------------------------------------------
-Test Case 19: IPv6 Address Assignment - Dual Stack, DHCPv6 Stateless
---------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os
-
----------------------------------------------------------------------------
-Test Case 20: IPv6 Address Assignment - Multiple Prefixes, DHCPv6 Stateless
----------------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless
-
----------------------------------------------------------------------------------------
-Test Case 21: IPv6 Address Assignment - Dual Stack, Multiple Prefixes, DHCPv6 Stateless
----------------------------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless
-
----------------------------------------------
-Test Case 22: IPv6 Address Assignment - SLAAC
----------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os
-
----------------------------------------------------------
-Test Case 23: IPv6 Address Assignment - Dual Stack, SLAAC
----------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os
-
-----------------------------------------------------------------
-Test Case 24: IPv6 Address Assignment - Multiple Prefixes, SLAAC
-----------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac
-
-----------------------------------------------------------------------------
-Test Case 25: IPv6 Address Assignment - Dual Stack, Multiple Prefixes, SLAAC
-----------------------------------------------------------------------------
-
-.. code-block:: bash
-
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac
-
--------------------------------------------
-Test Case 26: Service VM as an IPv6 vRouter
--------------------------------------------
-
-.. code-block:: bash
-
- # Refer to Yardstick Test Case 027
- # Instruction: http://artifacts.opnfv.org/ipv6/docs/configurationguide/index.html
- # Step 1: Set up Service VM as an IPv6 vRouter
- # 1.1: Install OPNFV and Preparation
- # 1.2: Disable Security Groups in OpenStack ML2 Setup
- # 1.3: Create IPv4 and IPv6 Neutron routers, networks and subnets
- # 1.4: Boot vRouter VM, and Guest VM1 and Guest VM2
- # Step 2: Verify IPv6 Connectivity
- # 2.1: ssh to Guest VM1
- # 2.2: Ping6 from Guest VM1 to Guest VM2
- # 2.3: Ping6 from Guest VM1 to vRouter VM
- # 2.4: Ping6 from Guest VM1 to Neutron IPv6 Router Namespace
- # Step 3: Tear down Setup
- # 3.1: Delete Guest VM1, Guest VM2 and vRouter VM
- # 3.2: Delete IPv4 and IPv6 Neutron routers, networks and subnets
- # 3.3: Enable Security Groups
-
diff --git a/docs/testing/user/testspecification/old_files/ipv6/testplan.rst b/docs/testing/user/testspecification/old_files/ipv6/testplan.rst
deleted file mode 100644
index 3470e7a6..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/testplan.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV
-
-===============================
-OPNFV IPv6 Compliance Test Plan
-===============================
-
-Introduction
-============
-
-The IPv6 compliance test plan outlines the method for testing IPv6 Tenant Network feature
-compliance with the OPNFV platform.
-
-Scope
------
-
-This test, and other tests in the test suite, are designed to verify an entire SUT,
-and not any individual component of the system.
-
-Test suite scope and procedures
-===============================
-
-The IPv6 compliance test suite will evaluate the ability for a SUT to support IPv6
-Tenant Network features and functionality provided by OPNFV platform.
-
-Please refer to the complete list of the test cases for details.
-
-Test suite execution
-====================
-
-Please refer to each test case for specific setup and execution procedure.
-
-.._[1]: http://www.opnfv.org
diff --git a/docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst b/docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst
deleted file mode 100644
index 2119ed61..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Christopher Price (Ericsson AB) and others
-
-===================
-IPv6 test procedure
-===================
-
-Draft to be patched this week, someone feel free to work on this in parallel.
diff --git a/docs/testing/user/testspecification/old_files/ipv6/testspecification.rst b/docs/testing/user/testspecification/old_files/ipv6/testspecification.rst
deleted file mode 100644
index e51f2a5b..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/testspecification.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Christopher Price (Ericsson AB) and others
-
-===============================================
-Test specification - Service VM as IPv6 vRouter
-===============================================
-
-Draft to be worked on, this represents the YardStick test but I would suggest we need to break
-this into a set of tests which provide more details per action with boundary validation.
-
-Test Item
-=========
-
-TBD -> IPv6 Ping...
-
-Identify the items or features to be tested by this test case. The item description and
-definition can be referenced from any one of several sources, depending on the level of the
-test case specification. It may be a good idea to reference the source documents as well.
-
-Environmental requirements
-==========================
-
-For ipv6 Test Case 18-25, those test cases are scenario tests, they need to boot virtual
-machines and ping6 in addition to test APIs, ping6 to vRouter is not supported by SDN controller
-yet, such as Opendaylight (Boron and previous releases), so they are scenario dependent,
-i.e., currently ipv6 Test Case 18-25 can only run on scenario os-nosdn-nofeature.
-
-Preconditions and procedural requirements
-=========================================
-
-TBD
-
-.. <Start>
-.. this section may be iterated over for a set of simillar test cases that would be run as one.
-
-Input Specifications
-====================
-
-TBD
-
-Output Specifications
-=====================
-
-TBD
-
-.. <End>
-
-Test Reporting
-==============
-
-The test report for this test case will be generated with links to relevant data sources.
-This section can be updated once we have a template for the report in place.
-
-http://testresults.opnfv.org/grafana/dashboard/db/yardstick-tc027
-
-
diff --git a/docs/testing/user/testspecification/vping/index.rst b/docs/testing/user/testspecification/vping/index.rst
new file mode 100644
index 00000000..d7a207c0
--- /dev/null
+++ b/docs/testing/user/testspecification/vping/index.rst
@@ -0,0 +1,279 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Ericsson AB
+
+========================
+Vping test specification
+========================
+
+.. toctree::
+ :maxdepth: 2
+
+Scope
+=====
+
+The vping test area evaluates basic NFVi capabilities of the system under test.
+These capabilities include creating a small number of virtual machines,
+establishing basic L3 connectivity between them and verifying connectivity by
+means of ICMP packets.
+
+
+References
+==========
+
+- Neutron Client
+
+ - https://docs.openstack.org/developer/python-neutronclient/usage/library.html
+
+- Nova Client
+
+ - https://docs.openstack.org/developer/python-novaclient/ref/v2/servers.html
+
+- SSHClient
+
+ - http://docs.paramiko.org/en/2.2/
+
+- SCPClient
+
+ - https://pypi.python.org/pypi/scp
+
+
+Definitions and abbreviations
+=============================
+
+The following terms and abbreviations are used in conjunction with this test
+area
+
+- ICMP - Internet Control Message Protocol
+- L3 - Layer 3
+- NFVi - Network functions virtualization infrastructure
+- SCP - Secure Copy
+- SSH - Secure Shell
+- VM - Virtual machine
+
+
+System Under Test (SUT)
+=======================
+
+The system under test is assumed to be the NFVi and VIM in operation on a
+Pharos compliant infrastructure.
+
+
+Test Area Structure
+===================
+
+The test area is structured in two separate tests which are executed
+sequentially. The order of the tests is arbitrary as there are no dependencies
+across the tests.
+
+
+Test Descriptions
+=================
+
+--------------------------------------------------------------------
+Test Case 1 - vPing using userdata provided by nova metadata service
+--------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.vping.userdata
+
+
+Use case specification
+----------------------
+
+This test evaluates the use case where an NFVi tenant boots up two VMs and
+requires L3 connectivity between those VMs. The target IP is passed to the VM
+that will initiate pings by using a custom userdata script provided by the nova
+metadata service.
+
+
+Test preconditions
+------------------
+
+At least one compute node is available. No further pre-configuration needed.
+
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by sending ICMP ping packets between
+selected VMs. The target IP is passed to the VM sending pings by using a
+custom userdata script by means of the config drive mechanism provided by the
+Nova metadata service. Whether or not a ping was successful is determined by
+checking the console output of the source VMs.
+
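+The following minimal sketch illustrates this methodology with the python-novaclient
+and keystoneauth1 libraries listed in the references: a userdata script carrying the
+target IP is passed at boot time and the console log is polled for a success marker.
+All credentials, names and IDs are illustrative placeholders, not values mandated by
+this specification.
+
+.. code-block:: python
+
+    import time
+
+    from keystoneauth1 import identity, session
+    from novaclient import client as nova_client
+
+    # Placeholders: adapt to the SUT under test.
+    VM1_IP = '192.168.130.11'
+    IMAGE_ID = 'IMAGE_UUID'
+    FLAVOR_ID = 'FLAVOR_UUID'
+    NET_ID = 'PRIVATE_NET_UUID'
+
+    auth = identity.Password(auth_url='http://controller:5000/v3',
+                             username='admin', password='secret',
+                             project_name='admin',
+                             user_domain_name='Default',
+                             project_domain_name='Default')
+    nova = nova_client.Client('2', session=session.Session(auth=auth))
+
+    # Userdata executed by VM2 at boot: ping VM1 until it answers.
+    userdata = ('#!/bin/sh\n'
+                'while true; do\n'
+                '  ping -c 1 %s && echo "vPing OK" && break\n'
+                '  echo "vPing KO"\n'
+                '  sleep 1\n'
+                'done\n' % VM1_IP)
+
+    vm2 = nova.servers.create(name='opnfv-vping-2', image=IMAGE_ID, flavor=FLAVOR_ID,
+                              nics=[{'net-id': NET_ID}], userdata=userdata)
+
+    # Poll the console log of VM2 until the success marker appears or we give up.
+    for _ in range(60):
+        if 'vPing OK' in (nova.servers.get(vm2.id).get_console_output() or ''):
+            break
+        time.sleep(10)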
+
+Test execution
+''''''''''''''
+
+* Test action 1 (see the sketch after this list):
+ * Create a private tenant network by using neutron client
+ * Create one subnet and one router in the network by neutron client
+ * Add one interface between the subnet and router
+ * Add one gateway route to the router by neutron client
+ * Store the network id in the response
+* **Test assertion 1:** The network id, subnet id and router id can be found in the response
+* Test action 2:
+  * Create a security group by using the neutron client
+ * Store the security group id parameter in the response
+* **Test assertion 2:** The security group id can be found in the response
+* Test action 3: boot VM1 by using nova client with configured name, image, flavor, private tenant
+ network created in test action 1, security group created in test action 2
+* **Test assertion 3:** The VM1 object can be found in the response
+* Test action 4: Generate a ping script with the IP of VM1, to be passed as userdata provided
+  by the **nova metadata service**.
+* Test action 5: Boot VM2 by using nova client with configured name, image, flavor, private tenant
+ network created in test action 1, security group created in test action 2, userdata created
+ in test action 4
+* **Test assertion 4:** The VM2 object can be found in the response
+* Test action 6: Inside VM2, the ping script is executed automatically at boot time. It loops,
+  pinging the IP of VM1 until the return code is 0 or a timeout is reached. For each ping,
+  "vPing OK" is printed in the VM2 console-log when the return code is 0, otherwise "vPing KO"
+  is printed. The console-log of VM2 is monitored to detect the response generated by the script.
+* **Test assertion 5:** "vPing OK" is detected, when monitoring the console-log in VM2
+* Test action 7: delete VM1, VM2
+* **Test assertion 6:** VM1 and VM2 are not present in the VM list
+* Test action 8: delete security group, gateway, interface, router, subnet and network
+* **Test assertion 7:** The security group, gateway, interface, router, subnet and network are
+ no longer present in the lists after deleting
+
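+A minimal sketch of test actions 1 and 2, based on the python-neutronclient library
+referenced above, is given below. Resource names, the CIDR and the external network ID
+are illustrative placeholders rather than values fixed by this specification.
+
+.. code-block:: python
+
+    from keystoneauth1 import identity, session
+    from neutronclient.v2_0 import client as neutron_client
+
+    EXTERNAL_NET_ID = 'EXTERNAL_NET_UUID'   # placeholder
+
+    auth = identity.Password(auth_url='http://controller:5000/v3',
+                             username='admin', password='secret',
+                             project_name='admin',
+                             user_domain_name='Default',
+                             project_domain_name='Default')
+    neutron = neutron_client.Client(session=session.Session(auth=auth))
+
+    # Test action 1: private network, subnet, router, interface and gateway.
+    net = neutron.create_network({'network': {'name': 'vping-net'}})
+    net_id = net['network']['id']
+    subnet = neutron.create_subnet({'subnet': {'network_id': net_id,
+                                               'ip_version': 4,
+                                               'cidr': '192.168.130.0/24',
+                                               'name': 'vping-subnet'}})
+    router = neutron.create_router({'router': {'name': 'vping-router'}})
+    router_id = router['router']['id']
+    neutron.add_interface_router(router_id, {'subnet_id': subnet['subnet']['id']})
+    neutron.add_gateway_router(router_id, {'network_id': EXTERNAL_NET_ID})
+
+    # Test action 2: security group allowing ICMP and SSH ingress.
+    sg = neutron.create_security_group(
+        {'security_group': {'name': 'vping-sg', 'description': 'vping security group'}})
+    for proto, port in (('icmp', None), ('tcp', 22)):
+        neutron.create_security_group_rule(
+            {'security_group_rule': {'security_group_id': sg['security_group']['id'],
+                                     'direction': 'ingress', 'protocol': proto,
+                                     'port_range_min': port, 'port_range_max': port}})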
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates basic NFVi capabilities of the system under test.
+Specifically, the test verifies that:
+
+* Neutron client network, subnet, router, interface create commands return valid "id" parameters
+ which are shown in the create response message
+* Neutron client interface add command, adding an interface between the subnet and the router,
+  returns a success code
+* Neutron client gateway add command, adding a gateway to the router, returns a success code
+* Neutron client security group create command returns valid "id" parameter which is shown in
+  the response message
+* Nova client VM create command returns valid VM attributes response message
+* Nova metadata service can deliver the userdata configuration to the VM at boot time
+* Ping command from one VM to the other in the same private tenant network returns a valid
+  return code
+* All items created using neutron client or nova client create commands can be removed by
+  using the returned identifiers
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
+
+
+Post conditions
+---------------
+
+None
+
+
+----------------------------------------------
+Test Case 2 - vPing using SSH to a floating IP
+----------------------------------------------
+
+Short name
+----------
+
+opnfv.vping.ssh
+
+
+Use case specification
+----------------------
+
+This test evaluates the use case where an NFVi tenant boots up two VMs and requires
+L3 connectivity between those VMs. An SSH connection is established from the host to
+a floating IP associated with VM2 and ``ping`` is executed on VM2 with the IP of VM1 as target.
+
+
+Test preconditions
+------------------
+
+At least one compute node is available. An OpenStack external network, from which
+floating IPs can be allocated, must exist.
+
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by sending ICMP ping packets between
+selected VMs. To this end, the test establishes an SSH connection from the host
+running the test suite to a floating IP associated with VM2 and executes ``ping``
+on VM2 with the IP of VM1 as target.
+
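+A minimal sketch of this connectivity check, using the paramiko SSHClient and SCPClient
+libraries listed in the references, is given below. The floating IP, guest credentials
+and script name are illustrative placeholders.
+
+.. code-block:: python
+
+    import paramiko
+    from scp import SCPClient
+
+    FLOATING_IP = '172.30.9.5'       # placeholder floating IP of VM2
+    VM1_IP = '192.168.130.11'        # placeholder private IP of VM1
+
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh.connect(FLOATING_IP, username='cirros', password='gocubsgo', timeout=10)
+
+    # Copy the ping script to VM2 and run it against the IP of VM1.
+    SCPClient(ssh.get_transport()).put('ping.sh', '~/ping.sh')
+    stdin, stdout, stderr = ssh.exec_command('sh ~/ping.sh %s' % VM1_IP)
+    print(stdout.read().decode())
+    ssh.close()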
+
+Test execution
+''''''''''''''
+
+
+* Test action 1:
+ * Create a private tenant network by neutron client
+  * Create one subnet and one router in the network by using the neutron client
+ * Create one interface between the subnet and router
+ * Add one gateway route to the router by neutron client
+ * Store the network id in the response
+* **Test assertion 1:** The network id, subnet id and router id can be found in the response
+* Test action 2:
+  * Create a security group by using the neutron client
+ * Store the security group id parameter in the response
+* **Test assertion 2:** The security group id can be found in the response
+* Test action 3: Boot VM1 by using nova client with configured name, image, flavor, private tenant
+ network created in test action 1, security group created in test action 2
+* **Test assertion 3:** The VM1 object can be found in the response
+* Test action 4: Boot VM2 by using nova client with configured name, image, flavor, private tenant
+ network created in test action 1, security group created in test action 2
+* **Test assertion 4:** The VM2 object can be found in the response
+* Test action 5: Create one floating IP by using the neutron client, storing the floating IP
+  address returned in the response (see the sketch after this list)
+* **Test assertion 5:** Floating IP address can be found in the response
+* Test action 6: Assign the floating IP address created in test action 5 to VM2 by using nova client
+* **Test assertion 6:** The assigned floating IP can be found in the VM2 console log file
+* Test action 7: Establish SSH connection between the test host and VM2 through the floating IP
+* **Test assertion 7:** SSH connection between the test host and VM2 is established within
+ 300 seconds
+* Test action 8: Copy the Ping script from the test host to VM2 by using SCPClient
+* **Test assertion 8:** The Ping script can be found inside VM2
+* Test action 9: Inside VM2, execute the Ping script to ping VM1. The Ping script loops, pinging
+  the IP of VM1 until the return code is 0 or a timeout is reached. For each ping, "vPing OK" is
+  printed in the VM2 console-log when the return code is 0, otherwise "vPing KO" is printed.
+  The console-log of VM2 is monitored to detect the response generated by the script.
+* **Test assertion 9:** "vPing OK" is detected, when monitoring the console-log in VM2
+* Test action 10: delete VM1, VM2
+* **Test assertion 10:** VM1 and VM2 are not present in the VM list
+* Test action 11: delete floating IP, security group, gateway, interface, router, subnet and network
+* **Test assertion 11:** The security group, gateway, interface, router, subnet and network are
+ no longer present in the lists after deleting
+
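+The floating IP handling of test actions 5 and 6 could be driven as in the sketch below,
+which sets up a neutron client as in test case 1. The external network ID and the VM2
+server ID are placeholders, and the association is shown via the Neutron port of VM2,
+which is an alternative to the nova client call mentioned in test action 6.
+
+.. code-block:: python
+
+    from keystoneauth1 import identity, session
+    from neutronclient.v2_0 import client as neutron_client
+
+    EXTERNAL_NET_ID = 'EXTERNAL_NET_UUID'   # placeholder
+    VM2_ID = 'VM2_SERVER_UUID'              # placeholder
+
+    sess = session.Session(auth=identity.Password(
+        auth_url='http://controller:5000/v3', username='admin', password='secret',
+        project_name='admin', user_domain_name='Default', project_domain_name='Default'))
+    neutron = neutron_client.Client(session=sess)
+
+    # Test action 5: create a floating IP on the external network.
+    fip = neutron.create_floatingip(
+        {'floatingip': {'floating_network_id': EXTERNAL_NET_ID}})
+    floating_ip = fip['floatingip']['floating_ip_address']
+
+    # Test action 6: associate the floating IP with the Neutron port of VM2.
+    port_id = neutron.list_ports(device_id=VM2_ID)['ports'][0]['id']
+    neutron.update_floatingip(fip['floatingip']['id'],
+                              {'floatingip': {'port_id': port_id}})
+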
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates basic NFVi capabilities of the system under test.
+Specifically, the test verifies that:
+
+* Neutron client network, subnet, router, interface create commands return valid "id" parameters
+ which are shown in the create response message
+* Neutron client interface add command, adding an interface between the subnet and the router,
+  returns a success code
+* Neutron client gateway add command, adding a gateway to the router, returns a success code
+* Neutron client security group create command returns valid "id" parameter which is shown in the
+  response message
+* Nova client VM create command returns valid VM attributes response message
+* Neutron client floating IP create command returns a valid floating IP address
+* Nova client add floating IP command returns valid response message
+* SSH connection can be established using a floating IP
+* Ping command from one VM to another in the same private tenant network returns a valid
+  return code
+* All items created using neutron client or nova client create commands can be removed by
+  using the returned identifiers
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
+
+
+Post conditions
+---------------
+
+None
diff --git a/docs/testing/user/testspecification/vpn/index.rst b/docs/testing/user/testspecification/vpn/index.rst
index 1b5fe439..0a8a8d17 100644
--- a/docs/testing/user/testspecification/vpn/index.rst
+++ b/docs/testing/user/testspecification/vpn/index.rst
@@ -12,14 +12,17 @@ VPN test specification
Scope
=====
-The VPN test area evaluates the ability of the system under test to support VPN networking
-for virtual workdloads. The tests in this suite will evaluate establishing VPN networks,
-publishing and communication between endpoints using BGP and tear down of the networks.
+The VPN test area evaluates the ability of the system under test to support VPN
+networking for virtual workloads. The tests in this test area will evaluate
+establishing VPN networks, publishing and communication between endpoints using
+BGP and tear down of the networks.
References
-================
+==========
-This test suite assumes support for the following specifications:
+This test area evaluates the ability of the system to perform selected actions
+defined in the following specifications. Details of specific features evaluated
+are described in the test descriptions.
- RFC 4364 - BGP/MPLS IP Virtual Private Networks
@@ -33,10 +36,12 @@ This test suite assumes support for the following specifications:
- https://tools.ietf.org/html/rfc2547
+
Definitions and abbreviations
=============================
-The following terms and abreviations are used in conunction with this test suite
+The following terms and abbreviations are used in conjunction with this test
+area
- BGP - Border gateway protocol
- eRT - Export route target
@@ -48,15 +53,27 @@ The following terms and abreviations are used in conunction with this test suite
- VPN - Virtual private network
- VLAN - Virtual local area network
+
System Under Test (SUT)
=======================
-The system under test is assumed to be the NFVi in operation on an Pharos compliant infrastructure.
+The system under test is assumed to be the NFVi and VIM in operation on a
+Pharos compliant infrastructure.
+
-Test Suite Structure
-====================
+Test Area Structure
+===================
+
+The test area is structured in four separate tests which are executed
+sequentially. The order of the tests is arbitrary as there are no dependencies
+across the tests. Specifically, every test performs clean-up operations which
+return the system to the same state as before the test.
+
+The test area evaluates the ability of the SUT to establish connectivity
+between Virtual Machines using an appropriate route target configuration,
+reconfigure the route targets to remove connectivity between the VMs, then
+reestablish connectivity by re-association.
-The test suite is structured in some way that I am unable to articulate at this time.
Test Descriptions
=================
@@ -65,43 +82,451 @@ Test Descriptions
Test Case 1 - VPN provides connectivity between Neutron subnets
----------------------------------------------------------------
+Short name
+----------
+
+opnfv.sdnvpn.subnet_connectivity
+
+
Use case specification
----------------------
-This test evaluate the instance where an NFVi tenant wants to use a BGPVPN to provide
-connectivity between VMs on different Neutron networks and Subnets that reside on different hosts.
+This test evaluates the use case where an NFVi tenant uses a BGPVPN to provide
+connectivity between VMs on different Neutron networks and subnets that reside
+on different hosts.
+
Test preconditions
------------------
-2 compute nodes are available, denoted Node1 and Node 2 in the following.
+2 compute nodes are available, denoted Node1 and Node2 in the following.
+
Basic test flow execution description and pass/fail criteria
------------------------------------------------------------
-Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1
-and all having 10.10.10/24 addresses (this subnet is denoted SN1 in the following).
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by sending ICMP ping packets between
+selected VMs. The target IPs are passed to the VMs sending pings by means of a
+custom user data script. Whether or not a ping was successful is determined by
+checking the console output of the source VMs.
+
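+A sketch of how such a console-output based check could look is given below, using the
+python-novaclient API. The ``nova`` client instance, the success/failure markers and the
+timeouts are assumptions of this sketch, not values mandated by the test framework.
+
+.. code-block:: python
+
+    import re
+    import time
+
+    def ping_result_from_console(nova, server, target_ip, timeout=300, interval=10):
+        """Poll the console log of 'server' until it reports a ping result for 'target_ip'.
+
+        The userdata script of the source VM is assumed to print a line such as
+        'ping <target_ip> OK' or 'ping <target_ip> KO'.
+        """
+        pattern = re.compile(r'ping %s (OK|KO)' % re.escape(target_ip))
+        deadline = time.time() + timeout
+        while time.time() < deadline:
+            console = nova.servers.get(server.id).get_console_output() or ''
+            match = pattern.search(console)
+            if match:
+                return match.group(1) == 'OK'
+            time.sleep(interval)
+        return False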
+
+Test execution
+''''''''''''''
+
+* Create Neutron network N1 and subnet SN1 with IP range 10.10.10.0/24
+* Create Neutron network N2 and subnet SN2 with IP range 10.10.11.0/24
+
+* Create VM1 on Node1 with a port in network N1
+* Create VM2 on Node1 with a port in network N1
+* Create VM3 on Node2 with a port in network N1
+* Create VM4 on Node1 with a port in network N2
+* Create VM5 on Node2 with a port in network N2
+
+* Create VPN1 with eRT<>iRT
+* Create network association between network N1 and VPN1
+
+* VM1 sends ICMP packets to VM2 using ``ping``
+
+* **Test assertion 1:** Ping from VM1 to VM2 succeeds: ``ping`` exits with return code 0
+
+* VM1 sends ICMP packets to VM3 using ``ping``
+
+* **Test assertion 2:** Ping from VM1 to VM3 succeeds: ``ping`` exits with return code 0
+
+* VM1 sends ICMP packets to VM4 using ``ping``
+
+* **Test assertion 3:** Ping from VM1 to VM4 fails: ``ping`` exits with a non-zero return code
+
+* Create network association between network N2 and VPN1
+
+* VM4 sends ICMP packets to VM5 using ``ping``
-Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
-and having 10.10.11/24 addresses (this subnet is denoted SN2 in the following).
+* **Test assertion 4:** Ping from VM4 to VM5 succeeds: ``ping`` exits with return code 0
-* Create VPN1 with eRT<>iRT and associate SN1 to it
-* Test action 1: SSH into VM1 and ping VM2, test passes if ping works
-* Test action 2: SSH into VM1 and ping VM3, test passes is ping works
-* Test action 3: SSH into VM1 and ping VM4, test passes if ping does not work
-* Associate SN2 to VPN1
-* Test action 4: Ping from VM4 to VM5 should work
-* Test action 5: Ping from VM1 to VM4 should not work
-* Test action 6: Ping from VM1 to VM5 should not work
* Configure iRT=eRT in VPN1
-* Test action 7: Ping from VM1 to VM4 should work
-* Test action 8: Ping from VM1 to VM5 should work
-The pass criteria for this test case is that all instructions are able to be carried out
-according to the described behaviour without deviation.
-A negative result will be generated if the above is not met in completion.
+* VM1 sends ICMP packets to VM4 using ``ping``
+
+* **Test assertion 5:** Ping from VM1 to VM4 succeeds: ``ping`` exits with return code 0
+
+* VM1 sends ICMP packets to VM5 using ``ping``
+
+* **Test assertion 6:** Ping from VM1 to VM5 succeeds: ``ping`` exits with return code 0
+
+* Delete all instances: VM1, VM2, VM3, VM4 and VM5
+
+* Delete all networks and subnets: networks N1 and N2 including subnets SN1 and SN2
+
+* Delete all network associations and VPN1
+
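+The sequence of assertions above can be summarised as a connectivity matrix, as in the
+sketch below. The helper ``ping_ok(src, dst)`` stands for any of the console-log based
+checks described in the methodology and is an assumption of this sketch.
+
+.. code-block:: python
+
+    # Expected connectivity while VPN1 is configured with eRT<>iRT.
+    EXPECTED_BEFORE_RT_CHANGE = {
+        ('VM1', 'VM2'): True,    # assertion 1: same subnet, same host
+        ('VM1', 'VM3'): True,    # assertion 2: same subnet, different host
+        ('VM1', 'VM4'): False,   # assertion 3: different subnets, eRT<>iRT
+        ('VM4', 'VM5'): True,    # assertion 4: same subnet after associating N2
+    }
+
+    # Expected connectivity after reconfiguring VPN1 with iRT=eRT.
+    EXPECTED_AFTER_RT_CHANGE = {
+        ('VM1', 'VM4'): True,    # assertion 5
+        ('VM1', 'VM5'): True,    # assertion 6
+    }
+
+    def check_matrix(expected, ping_ok):
+        failures = [pair for pair, ok in expected.items() if ping_ok(*pair) != ok]
+        assert not failures, 'unexpected connectivity result for %s' % failures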
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the capability of the NFVi and VIM to provide routed IP
+connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test
+verifies that:
+
+* VMs in the same Neutron subnet have IP connectivity regardless of BGP/MPLS
+ VPNs (test assertion 1, 2, 4)
+
+* VMs in different Neutron subnets do not have IP connectivity by default - in
+ this case without associating VPNs with the same import and export route
+ targets to the Neutron networks (test assertion 3)
+
+* VMs in different Neutron subnets have routed IP connectivity after
+ associating both networks with BGP/MPLS VPNs which have been configured with
+ the same import and export route targets (test assertion 5, 6). Hence,
+  adjusting the import and export route targets can both enable and prohibit
+  routing.
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
+
+
+Post conditions
+---------------
+
+N/A
+
+------------------------------------------------------------
+Test Case 2 - VPNs ensure traffic separation between tenants
+------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.sdnvpn.tenant_separation
+
+
+Use case specification
+----------------------
+
+This test evaluates if VPNs provide separation of traffic such that overlapping
+IP ranges can be used.
+
+
+Test preconditions
+------------------
+
+2 compute nodes are available, denoted Node1 and Node2 in the following.
+
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by establishing an SSH connection. Moreover,
+the command ``hostname`` is executed on the remote VM in order to retrieve its
+hostname. The retrieved hostname is then compared against an expected value.
+This is used to verify tenant traffic separation, i.e., despite overlapping IPs,
+a connection is made to the correct VM as determined by means of the hostname
+of the target VM.
+
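+A minimal sketch of the hostname retrieval and comparison is given below, using the
+paramiko library. In the actual test the SSH connection originates from within the
+tenant network (e.g. from VM1 or VM4), so the credentials, IPs and expected hostname
+shown here are only illustrative placeholders.
+
+.. code-block:: python
+
+    import paramiko
+
+    def remote_hostname(ip, username, password, timeout=10):
+        """Connect to 'ip' via SSH and return the hostname reported by the remote VM."""
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        ssh.connect(ip, username=username, password=password, timeout=timeout)
+        try:
+            _, stdout, _ = ssh.exec_command('hostname')
+            return stdout.read().decode().strip()
+        finally:
+            ssh.close()
+
+    # Test assertion 1: despite the overlapping IP ranges, the VM reached at
+    # 10.10.10.12 from within network N1 must be VM2 (placeholder hostname).
+    # assert remote_hostname('10.10.10.12', 'cirros', 'gocubsgo') == 'vm2-hostname'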
+
+
+Test execution
+''''''''''''''
+
+* Create Neutron network N1
+* Create subnet SN1a of network N1 with IP range 10.10.10.0/24
+* Create subnet SN1b of network N1 with IP range 10.10.11.0/24
+
+* Create Neutron network N2
+* Create subnet SN2a of network N2 with IP range 10.10.10.0/24
+* Create subnet SN2b of network N2 with IP range 10.10.11.0/24
+
+* Create VM1 on Node1 with a port in network N1 and IP 10.10.10.11.
+* Create VM2 on Node1 with a port in network N1 and IP 10.10.10.12.
+* Create VM3 on Node2 with a port in network N1 and IP 10.10.11.13.
+* Create VM4 on Node1 with a port in network N2 and IP 10.10.10.12.
+* Create VM5 on Node2 with a port in network N2 and IP 10.10.11.13.
+
+* Create VPN1 with iRT=eRT=RT1
+* Create network association between network N1 and VPN1
+
+* VM1 attempts to execute the command ``hostname`` on the VM with IP 10.10.10.12 via SSH.
+
+* **Test assertion 1:** VM1 can successfully connect to the VM with IP
+  10.10.10.12 via SSH and execute the remote command ``hostname``. The
+ retrieved hostname equals the hostname of VM2.
+
+* VM1 attempts to execute the command ``hostname`` on the VM with IP 10.10.11.13 via SSH.
+
+* **Test assertion 2:** VM1 can successfully connect to the VM with IP
+ 10.10.11.13 via SSH and execute the remote command ``hostname``. The
+ retrieved hostname equals the hostname of VM3.
+
+* Create VPN2 with iRT=eRT=RT2
+* Create network association between network N2 and VPN2
+
+* VM4 attempts to execute the command ``hostname`` on the VM with IP 10.10.11.13 via SSH.
+
+* **Test assertion 3:** VM4 can successfully connect to the VM with IP
+ 10.10.11.13 via SSH and execute the remote command ``hostname``. The
+ retrieved hostname equals the hostname of VM5.
+
+* VM4 attempts to execute the command ``hostname`` on the VM with IP 10.10.11.11 via SSH.
+
+* **Test assertion 4:** VM4 cannot connect to the VM with IP 10.10.11.11 via SSH.
+
+* Delete all instances: VM1, VM2, VM3, VM4 and VM5
+
+* Delete all networks and subnets: networks N1 and N2 including subnets SN1a, SN1b, SN2a and SN2b
+
+* Delete all network associations, VPN1 and VPN2
+
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the capability of the NFVi and VIM to provide routed IP
+connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test
+verifies that:
+
+* VMs in the same Neutron subnet (still) have IP connectivity between each
+ other when a BGP/MPLS VPN is associated with the network (test assertion 1).
+
+* VMs in different Neutron subnets have routed IP connectivity between each
+  other when BGP/MPLS VPNs with the same import and export route targets are
+ associated with both networks (assertion 2).
+
+* VMs in different Neutron networks and BGP/MPLS VPNs with different import and
+ export route targets can have overlapping IP ranges. The BGP/MPLS VPNs
+ provide traffic separation (assertion 3 and 4).
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
+
+
+Post conditions
+---------------
+
+N/A
+
+--------------------------------------------------------------------------------
+Test Case 3 - VPN provides connectivity between subnets using router association
+--------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.sdnvpn.router_association
+
+
+Use case specification
+----------------------
+
+This test evaluates if a VPN provides connectivity between two subnets by
+utilizing two different VPN association mechanisms: a router association and a
+network association.
+
+Specifically, the test network topology comprises two networks N1 and N2 with
+corresponding subnets. Additionally, network N1 is connected to a router R1.
+This test verifies that a VPN V1 provides connectivity between both networks
+when applying a router association to router R1 and a network association to
+network N2.
+
+
+Test preconditions
+------------------
+
+2 compute nodes are available, denoted Node1 and Node2 in the following.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by sending ICMP ping packets between
+selected VMs. The target IPs are passed to the VMs sending pings by means of a
+custom user data script. Whether or not a ping was successful is determined by
+checking the console output of the source VMs.
+
+
+Test execution
+''''''''''''''
+
+* Create a network N1, a subnet SN1 with IP range 10.10.10.0/24 and a connected router R1
+* Create a network N2, a subnet SN2 with IP range 10.10.11.0/24
+
+* Create VM1 on Node1 with a port in network N1
+* Create VM2 on Node1 with a port in network N1
+* Create VM3 on Node2 with a port in network N1
+* Create VM4 on Node1 with a port in network N2
+* Create VM5 on Node2 with a port in network N2
+
+* Create VPN1 with eRT<>iRT so that connected subnets should not reach each other
+
+* Create a router association between router R1 and VPN1
+
+* VM1 sends ICMP packets to VM2 using ``ping``
+
+* **Test assertion 1:** Ping from VM1 to VM2 succeeds: ``ping`` exits with return code 0
+
+* VM1 sends ICMP packets to VM3 using ``ping``
+
+* **Test assertion 2:** Ping from VM1 to VM3 succeeds: ``ping`` exits with return code 0
+
+* VM1 sends ICMP packets to VM4 using ``ping``
+
+* **Test assertion 3:** Ping from VM1 to VM4 fails: ``ping`` exits with a non-zero return code
+
+* Create network association between network N2 and VPN1
+
+* VM4 sends ICMP packets to VM5 using ``ping``
+
+* **Test assertion 4:** Ping from VM4 to VM5 succeeds: ``ping`` exits with return code 0
+
+* Change VPN1 so that iRT=eRT
+
+* VM1 sends ICMP packets to VM4 using ``ping``
+
+* **Test assertion 5:** Ping from VM1 to VM4 succeeds: ``ping`` exits with return code 0
+
+* VM1 sends ICMP packets to VM5 using ``ping``
+
+* **Test assertion 6:** Ping from VM1 to VM5 succeeds: ``ping`` exits with return code 0
+
+* Delete all instances: VM1, VM2, VM3, VM4 and VM5
+
+* Delete all networks, subnets and routers: networks N1 and N2 including subnets SN1 and SN2, router R1
+
+* Delete all network and router associations and VPN1
+
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the capability of the NFVi and VIM to provide routed IP
+connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test
+verifies that:
+
+* VMs in the same Neutron subnet have IP connectivity regardless of the import
+ and export route target configuration of BGP/MPLS VPNs (test assertion 1, 2, 4)
+
+* VMs in different Neutron subnets do not have IP connectivity by default - in
+ this case without associating VPNs with the same import and export route
+ targets to the Neutron networks or connected Neutron routers (test assertion 3).
+
+* VMs in two different Neutron subnets have routed IP connectivity after
+ associating the first network and a router connected to the second network
+ with BGP/MPLS VPNs which have been configured with the same import and export
+  route targets (test assertion 5, 6). Hence, adjusting the import and export
+  route targets can both enable and prohibit routing.
+
+* Network and router associations are equivalent methods for binding Neutron networks
+  to a VPN.
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
+
+
+Post conditions
+---------------
+
+N/A
+
+---------------------------------------------------------------------------------------------------
+Test Case 4 - Verify interworking of router and network associations with floating IP functionality
+---------------------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.sdnvpn.router_association_floating_ip
+
+
+Use case specification
+----------------------
+
+This test evaluates if both the router association and network association
+mechanisms interwork with floating IP functionality.
+
+Specifically, the test network topology comprises two networks N1 and N2 with
+corresponding subnets. Additionally, network N1 is connected to a router R1.
+This test verifies that i) a VPN V1 provides connectivity between both networks
+when applying a router association to router R1 and a network association to
+network N2 and ii) a VM in network N1 is reachable externally by means of a
+floating IP.
+
+
+Test preconditions
+------------------
+
+At least one compute node is available.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by sending ICMP ping packets between
+selected VMs. The target IPs are passed to the VMs sending pings by means of a
+custom user data script. Whether or not a ping was successful is determined by
+checking the console output of the source VMs.
+
+
+Test execution
+''''''''''''''
+
+* Create a network N1, a subnet SN1 with IP range 10.10.10.0/24 and a connected router R1
+* Create a network N2 and a subnet SN2 with IP range 10.10.20.0/24
+
+* Create VM1 with a port in network N1
+* Create VM2 with a port in network N2
+
+* Create VPN1
+* Create a router association between router R1 and VPN1
+* Create a network association between network N2 and VPN1
+
+
+* VM1 sends ICMP packets to VM2 using ``ping``
+
+* **Test assertion 1:** Ping from VM1 to VM2 succeeds: ``ping`` exits with return code 0
+
+* Assign a floating IP to VM1
+
+* The host running the test framework sends ICMP packets to VM1 using ``ping`` (see the
+  sketch after this list)
+
+* **Test assertion 2:** Ping from the host running the test framework to the
+ floating IP of VM1 succeeds: ``ping`` exits with return code 0
+
+* Delete floating IP assigned to VM1
+
+* Delete all instances: VM1, VM2
+
+* Delete all networks, subnets and routers: networks N1 and N2 including subnets SN1 and SN2, router R1
+
+* Delete all network and router associations as well as VPN1
+
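+Test assertion 2 could be checked from the host running the test framework with a
+sketch like the following; the floating IP is a placeholder.
+
+.. code-block:: python
+
+    import subprocess
+
+    FLOATING_IP = '172.30.9.15'   # placeholder floating IP assigned to VM1
+
+    # 'ping' exits with return code 0 only if at least one reply was received.
+    result = subprocess.call(['ping', '-c', '5', FLOATING_IP])
+    assert result == 0, 'floating IP %s not reachable from the test host' % FLOATING_IP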
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the capability of the NFVi and VIM to provide routed IP
+connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test
+verifies that:
+
+* VMs attached to two different Neutron networks, which are bound to the same
+  BGP/MPLS VPN by means of a router association and a network association
+  respectively, have routed IP connectivity (test assertion 1)
+
+* VMs connected to a network which has been associated with a BGP/MPLS VPN are
+  reachable through floating IPs (test assertion 2).
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
+
Post conditions
---------------
-TBD - should there be any other than the system is in the same state it started out as?
+N/A
diff --git a/docs/testing/user/userguide/cli_reference.rst b/docs/testing/user/userguide/cli_reference.rst
new file mode 100644
index 00000000..719a991f
--- /dev/null
+++ b/docs/testing/user/userguide/cli_reference.rst
@@ -0,0 +1,9 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV
+
+=========================================
+Dovetail Command Line Interface Reference
+=========================================
+
+
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index d8eb124b..aec3e861 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -1,479 +1,12 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) Ericsson AB
+.. (c) OPNFV
-==============================================
-Compliance and Verification program user guide
-==============================================
+********************************************************
+Compliance Verification Program Testing User Guide
+********************************************************
.. toctree::
:maxdepth: 2
-Version history
-===============
-
-+------------+----------+------------------+----------------------------------+
-| **Date** | **Ver.** | **Author** | **Comment** |
-| | | | |
-+------------+----------+------------------+----------------------------------+
-| 2017-03-15 | 0.0.1 | Chris Price | Draft version |
-| | | | |
-+------------+----------+------------------+----------------------------------+
-
-
-Dovetail CVP Testing Overview
-=============================
-
-The Dovetail testing framework consists of two major parts: the testing client that executes
-all test cases in a vendor lab (self-testing) or a third party lab, and the server system that
-is under the OPNFV's administration to store and view test results based on OPNFV Test API. The
-following diagram illustrates this overall framework.
-
-/* here is a draft diagram that needs to be revised when exact information is known and fixed */
-
-This section mainly focuses on helping the testers in the vendor's domain attempting to run the
-CVP tests.
-
-Dovetail client tool (or just Dovetail tool or Dovetail for short) can be installed in the
-jumphost either directly as Python software, or as a Docker(r) container. Comments of pros
-and cons of the two options TBD.
-
-The section 'Installing the test tool'_ describes the steps the tester needs to take to install
-Dovetail directly from the source. In 2.3, we describe steps needed for installing Dovetail
-Docker(r) container. Once installed, and properly configured, the remaining test process is mostly
-identical for the two options. In 2.4, we go over the steps of actually running the test suite.
-In 2.5, we discuss how to view test results and make sense of them, for example, what the tester
-may do in case of unexpected test failures. Section 2.6 describes additional Dovetail features
-that are not absolutely necessary in CVP testing but users may find useful for other purposes.
-One example is to run Dovetail for in-house testing as preparation before official CVP testing;
-another example is to run Dovetail experimental test suites other than the CVP test suite.
-Experimental tests may be made available by the community for experimenting less mature test
-cases or functionalities for the purpose of getting feedbacks for improvement.
-
-Installing the test tool
-========================
-
-Before taking this step, testers should check the hardware and networking requirements of
-the POD, and the jumphost in particular, to make sure they are compliant.
-
-In this section, we describe the procedure to install Dovetail client tool that runs the CVP
-test suite from the jumphost. The jumphost must have network access to both the public Internet
-and to the O&M (Operation and Management) network with access rights to all VIM APIs being tested.
-
--------------------------------
-Checking the Jumphost Readiness
--------------------------------
-
-While Dovetail does not have hard requirement on a specific operating system type or version,
-these have been validated by the community through some level of exercise in OPNFV labs or PlugFests.
-
-Ubuntu 16.04.2 LTS (Xenial) for x86_64
-Ubuntu 14.04 LTS (Trusty) for x86_64
-CentOS-7-1611 for x86_64
-Red Hat Enterprise Linux 7.3 for x86_64
-Fedora 24 Server for x86_64
-Fedora 25 Server for x86_64
-
-------------------------------------
-Configuring the Jumphost Environment
-------------------------------------
-
-/* First, openstack env variables to be passed to Functest */
-
-The jumphost needs to have the right environmental variable setting to enable access to the
-Openstack API. This is usually done through the Openstack credential file.
-
-Sample Openstack credential file environment_config.sh:
-
-/*Project-level authentication scope (name or ID), recommend admin project.*/
-
-export OS_PROJECT_NAME=admin
-
-/* Authentication username, belongs to the project above, recommend admin user.*/
-
-export OS_USERNAME=admin
-
-
-/* Authentication password.*/
-
-export OS_PASSWORD=secret
-
-
-/* Authentication URL, one of the endpoints of keystone service. If this is v3 version, there need some extra variables as follows.*/
-
-export OS_AUTH_URL='http://xxx.xxx.xxx.xxx:5000/v3'
-
-
-/* Default is 2.0. If use keystone v3 API, this should be set as 3.*/
-
-export OS_IDENTITY_API_VERSION=3
-
-
-/* Domain name or ID containing the user above. Command to check the domain: openstack
-user show <OS_USERNAME>*/
-
-export OS_USER_DOMAIN_NAME=default
-
-
-/* Domain name or ID containing the project above. Command to check the domain: openstack
-project show <OS_PROJECT_NAME>*/
-
-export OS_PROJECT_DOMAIN_NAME=default
-
-
-/* home directory for dovetail, if install Dovetail Docker container, DOVETAIL_HOME can
-just be /home/opnfv*/
-
-export DOVETAIL_HOME=$HOME/cvp
-
-Export all these variables into environment by,
-
-% source <OpenStack-credential-file-path>
-
-
-The tester should validate that the Openstack environmental settings are correct by,
-% openstack service list
-
------------------------------------
-Installing Prerequisite on Jumphost
------------------------------------
-
-1. Dovetail requires Python 2.7 and later
-
-Use the following steps to check if the right version of python is already installed,
-and if not, install it.
-
-% python --version
-
-2. Dovetail requires Docker 1.8.0 and later
-
-Use the following steps to check if the right version of Docker is already installed,
-and if not, install it.
-
-% docker --version
-
-As the docker installation process is much complex, you can refer to the official
-document: https://docs.docker.com/engine/installation/linux/
-
--------------------------------------
-2.2.4 Installing Dovetail on Jumphost
--------------------------------------
-
-A tester can choose one of the following two methods for installing and running Dovetail.
-In part1, we explain the steps to install Dovetail from the source. In part2, an alternative
-using a Docker image with preinstalled Dovetail is introduced. part1. Installing Dovetail directly
-
-Update and install packages
-
-a) Ubuntu
-
-sudo apt-get update
-
-sudo apt-get -y install gcc git vim python-dev python-pip --no-install-recommends
-
-b) centos and redhat
-
-sudo yum -y update
-
-sudo yum -y install epel-release
-
-sudo yum -y install gcc git vim-enhanced python-devel python-pip
-
-c) fedora
-
-sudo dnf -y update
-
-sudo dnf -y install gcc git vim-enhanced python-devel python-pip redhat-rpm-config
-
-p.s When testing SUT's https service, there need some extra packages, such as
-apt-transport-https. This still remains to be verified.
-
-
-Installing Dovetail
-
-Now we are ready to install Dovetail.
-
-/* Version of dovetail is not specified yet? we are still using the latest in the master
-- this needs to be fixed before launch. */
-
-First change directory to $DOVETAIL_HOME,
-
-% cd $DOVETAIL_HOME
-
-% sudo git clone https://git.opnfv.org/dovetail
-
-% cd $DOVETAIL_HOME/dovetail
-
-% sudo pip install -e ./
-
-/* test dovetail install is successful */
-
-% dovetail -h
-part2. Installing Dovetail Docker Container
-
-The Dovetail project also maintains a Docker image that has Dovetail test tools preinstalled.
-
-Running CVP Test Suite
-======================
-
-------------------
-Running Test Suite
-------------------
-
-The Dovetail client CLI allows the tester to specify which test suite to run.
-By default the results are stored in a local file $DOVETAIL_HOME/dovetail/results.
-
-% dovetail run --testsuite <test suite name> --openrc <path-to-openrc-file> /*?? */
-
-Multiple test suites may be available, testsuites named "debug" and "proposed_tests" are just provided for testing. But for the purpose of running CVP test suite, the test suite name follows the following format,
-
-CVP.<major>.<minor>.<patch> /* test if this format works */
-
-For example, CVP_1_0_0
-
-% dovetail run --testsuite CVP_1_0_0
-
-When the SUT's VIM (Virtual Infrastructure Manager) is Openstack, its configuration is commonly defined in the openrc file. In that case, you can specify the openrc file in the command line,
-
-% dovetail run --testsuite CVP_1_0_0 --openrc <path-to-openrc-file>
-
-In order to report official results to OPNFV, run the CVP test suite and report to OPNFV official URL,
-
-% dovetail run --testsuite <test suite name> --openrc <path-to-openrc-file> --report https://www.opnfv.org/cvp
-
-The official server https://www.opnfv.org/cvp is still under development, there is a temporal server to use http://205.177.226.237:9997/api/v1/results
-
---------------------------------
-Making Sense of CVP Test Results
---------------------------------
-
-When a tester is performing trial runs, Dovetail stores results in a local file by default.
-
-% cd $DOVETAIL_HOME/dovetail/results
-
-
-
-1. local file
-
-a) Log file: dovetail.log
-
-/* review the dovetail.log to see if all important information has been captured - in default mode without DEBUG */
-
-/* the end of the log file has a summary of all test case test results */
-
-Additional log files may be of interests: refstack.log, opnfv_yardstick_tcXXX.out ...
-
-b) Example: Openstack refstack test case example
-
-can see the log details in refstack.log, which has the passed/skipped/failed test cases result, the failed test cases have rich debug information
-
-for the users to see why this test case fails.
-
-c) Example: OPNFV Yardstick test case example
-
-for yardstick tool, its log is stored in yardstick.log
-
-for each test case result in Yardstick, the logs are stored in opnfv_yardstick_tcXXX.out, respectively.
-
-
-
-2. OPNFV web interface
-
-wait for the complement of LF, test community, etc.
-2.3.3 Updating Dovetail or Test Suite
-
-% cd $DOVETAIL_HOME/dovetail
-
-% sudo git pull
-
-% sudo pip install -e ./
-
-This step is necessary if dovetail software or the CVP test suite have updates.
-
-
-Other Dovetail Usage
-====================
-
-------------------------
-Running Dovetail Locally
-------------------------
-
-/*DB*/
-
----------------------------------------------
-Running Dovetail with Experimental Test Cases
----------------------------------------------
-
-
---------------------------------------------------
-Running Individual Test Cases or for Special Cases
---------------------------------------------------
-
-1. Refstack client to run Defcore testcases
-
-a) By default, for Defcore test cases run by Refstack-client, which are consumed by
-DoveTail, are run followed with automatically generated configuration file, i.e.,
-refstack_tempest.conf.
-
-In some circumstances, the automatic configuration file may not quite satisfied with
-the SUT, DoveTail provide a way for users to set its configuration file according
-to its own SUT manually,
-
-besides, the users should define Defcore testcase file, i.e., defcore.txt, at the
-same time. The steps are shown as,
-
-when "Installing Dovetail Docker Container" method is used,
-
-
-% sudo mkdir /home/opnfv/dovetail/userconfig
-
-% cd /home/opnfv/dovetail/userconfig
-
-% touch refstack_tempest.conf defcore.txt
-
-% vim refstack_tempest.conf
-
-% vim defcore.txt
-
-
-the recommend way to set refstack_tempest.conf is shown in
-https://aptira.com/testing-openstack-tempest-part-1/
-
-the recommended way to edit defcore.txt is to open
-https://refstack.openstack.org/api/v1/guidelines/2016.08/tests?target=compute&type=required&alias=true&flag=false
-and copy all the test cases into defcore.txt.
-
-Then use “docker run” to create a container,
-
-
-% sudo docker run --privileged=true -it -v <openrc_path>:<openrc_path> \
-
--v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results \
-
--v /home/opnfv/dovetail/userconfig:/home/opnfv/dovetail/userconfig \
-
--v /var/run/docker.sock:/var/run/docker.sock \
-
---name <DoveTail_Container_Name> (optional) \
-
-opnfv/dovetail:<Tag> /bin/bash
-
-
-
-there is a need to adjust the CVP_1_0_0 testsuite, for dovetail,
-defcore.tc001.yml and defcore.tc002.yml are used for automatic and
-manual running method, respectively.
-
-Inside the dovetail container,
-
-
-% cd /home/opnfv/dovetail/compliance
-
-% vim CVP_1_0_0.yml
-
-
-to add defcore.tc002 and annotate defcore.tc001.
-
-
-b) when "Installing Dovetail Directly" method is used, before to run
-the dovetail commands, there is a need to set configuration file and
-defcore test cases file
-
-
-% cd $DOVETAIL_HOME/dovetail
-
-% mkdir userconfig
-
-% cd userconfig
-
-% touch refstack_tempest.conf defcore.txt
-
-% vim refstack_tempest.conf
-
-% vim defcore.txt
-
-recommended way to set refstack_tempest.conf and defcore.txt is
-same as above in "Installing Dovetail Docker Container" method section.
-
-
-
-For Defcore test cases manually running method, there is a need to adjust
-the compliance_set test suite,
-
-for dovetail, defcore.tc001.yml and defcore.tc002.yml are used for automatic
-and manual running method, respectively.
-
-
-
-% cd $DOVETAIL_HOME/dovetail/compliance
-
-% vim CVP_1_0_0.yml
-
-
-to add defcore.tc002 and annotate defcore.tc001
-
-3 Dovetail Client CLI Manual
-
-This section contains a brief manual for all the features available through the Dovetail client command line interface (CLI).
-3.1 Check dovetail commands
-
-% dovetail -h
-
-dovetail.PNG
-
-Dovetail has three commands: list, run and show.
-6.2 List
-6.2.1 List help
-
-% dovetail list -h
-
-list-help.PNG
-6.2.2 List a test suite
-
-List command will list all test cases belong to the given test suite.
-
-% dovetail list compliance_set
-
-list-compliance.PNG
-
-% dovetail list debug
-
-list-debug.PNG
-
-The ipv6, example and nfvi are test areas. If no <TESTSUITE> is given, it will list all testsuites.
-6.3 Show
-
-Show command will give the detailed info of one certain test case.
-6.3.1 Show help
-
-% dovetail show -h
-
-show-help.PNG
-6.3.2 Show test case
-
-show-ipv6.PNG
-6.4 Run
-
-Dovetail supports running a named test suite, or one named test area of a test suite.
-6.4.1 Run help
-
-% dovetail run -h
-
-run-help.PNGThere are some options:
-
-func_tag: set FuncTest’s Docker tag, for example stable,latest and danube.1.0
-
-openrc: give the path of OpenStack credential file
-
-yard_tag: set Yardstick’s Docker tag
-
-testarea: set a certain testarea within a certain testsuite
-
-offline: run without pull the docker images, and it requires the jumphost to have these images locally. This will ensure DoveTail run in an offline environment.
-
-report: push results to DB or store with files
-
-testsuite: set the testsuite to be tested
-
-debug: flag to show the debug log messages
-
+ testing_guide.rst
diff --git a/docs/testing/user/userguide/testing_guide.rst b/docs/testing/user/userguide/testing_guide.rst
new file mode 100644
index 00000000..08fd8acf
--- /dev/null
+++ b/docs/testing/user/userguide/testing_guide.rst
@@ -0,0 +1,517 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
+
+==========================================
+Conducting CVP Testing with Dovetail
+==========================================
+
+Overview
+------------------------------
+
+The Dovetail testing framework for CVP consists of two major parts: the testing client that
+executes all test cases in a lab (vendor self-testing or a third party lab),
+and the server system that is hosted by the CVP administrator to store and
+view test results based on a web API. The following diagram illustrates
+this overall framework.
+
+.. image:: ../../../images/dovetail_online_mode.png
+ :align: center
+ :scale: 50%
+
+Within the tester's lab, the Test Host is the machine where Dovetail executes all
+automated test cases. As it hosts the test harness, the Test Host must not be part of
+the System Under Test (SUT) itself.
+The above diagram assumes that the tester's Test Host is situated in a DMZ which
+has internal network access to the SUT and external access to the OPNFV server
+via the public Internet.
+This arrangement may not be supported in some labs.
+Dovetail also supports an offline mode of testing that is
+illustrated in the next diagram.
+
+.. image:: ../../../images/dovetail_offline_mode.png
+ :align: center
+ :scale: 50%
+
+In the offline mode, the Test Host only needs to have access to the SUT
+via the internal network, but does not need to connect to the public Internet. This
+user guide will highlight differences between the online and offline modes of
+the Test Host. While it is possible to run the Test Host as a virtual machine,
+this user guide assumes it is a physical machine for simplicity.
+
+The rest of this guide will describe how to install the Dovetail tool as a
+Docker container image, go over the steps of running the CVP test suite, and
+then discuss how to view test results and make sense of them.
+
+Readers interested in using Dovetail beyond CVP testing, e.g. for in-house or
+extended testing, should consult the Dovetail developer's guide for additional
+information.
+
+Installing Dovetail
+--------------------
+
+In this section, we describe the procedure to install the Dovetail client tool on the Test Host.
+The Test Host must have network access to the management network with access rights to
+the Virtual Infrastructure Manager's API.
+
+Checking the Test Host Readiness
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Test Host must have network access to the Virtual Infrastructure Manager's API
+hosted in the SUT so that the Dovetail tool can exercise the API from the Test Host.
+It must also have ``ssh`` access to the Linux operating system
+of the compute nodes in the SUT. The ``ssh`` mechanism is used by some test cases
+to generate test events in the compute nodes. You can find out which test cases
+use this mechanism in the test specification document.
+
+We have tested the Dovetail tool on the following host operating systems. Other versions
+or distributions of Linux may also work, but community support is more likely to be
+available for these versions.
+
+- Ubuntu 16.04.2 LTS (Xenial) or 14.04 LTS (Trusty)
+- CentOS-7-1611
+- Red Hat Enterprise Linux 7.3
+- Fedora 24 or 25 Server
+
+Non-Linux operating systems, such as Windows and Mac OS, have not been tested
+and are not supported.
+
+If online mode is used, the tester should also validate that the Test Host can reach
+the public Internet. For example,
+
+.. code-block:: bash
+
+ $ ping www.opnfv.org
+ PING www.opnfv.org (50.56.49.117): 56 data bytes
+ 64 bytes from 50.56.49.117: icmp_seq=0 ttl=48 time=52.952 ms
+ 64 bytes from 50.56.49.117: icmp_seq=1 ttl=48 time=53.805 ms
+ 64 bytes from 50.56.49.117: icmp_seq=2 ttl=48 time=53.349 ms
+ ...
+
+
+Or, if the lab environment does not allow ping, try validating connectivity using HTTPS instead.
+
+.. code-block:: bash
+
+ $ curl https://www.opnfv.org
+ <!doctype html>
+
+
+ <html lang="en-US" class="no-js">
+ <head>
+ ...
+
+
+Configuring the Test Host Environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Test Host needs a few environment variables set correctly in order to access the
+OpenStack API required to drive the Dovetail tests. For convenience and as a convention,
+we will also create a home directory for storing all Dovetail related config files and
+result files:
+
+.. code-block:: bash
+
+ $ mkdir -p /home/dovetail
+ $ export DOVETAIL_HOME=/home/dovetail
+
+Here we set the Dovetail home directory to ``/home/dovetail`` as an example.
+Then create a directory named ``pre_config`` in this directory to store all
+Dovetail related config files:
+
+.. code-block:: bash
+
+ $ mkdir -p ${DOVETAIL_HOME}/pre_config
+
+At this point, you will need to consult your SUT (Openstack) administrator to correctly set
+the configurations in a file named ``env_config.sh``.
+The Openstack settings need to be configured such that the Dovetail client has all the necessary
+credentials and privileges to execute all test operations. If the SUT uses terms
+somewhat differently from the standard Openstack naming, you will need to adjust
+this file accordingly.
+
+In our example, we will use the file ``${DOVETAIL_HOME}/pre_config/env_config.sh``. Create and edit
+the file so that all parameters are set correctly to match your SUT. Here is an example of what
+this file should contain.
+
+.. code-block:: bash
+
+ $ cat ${DOVETAIL_HOME}/pre_config/env_config.sh
+
+ # Project-level authentication scope (name or ID), recommend admin project.
+ export OS_PROJECT_NAME=admin
+
+ # For identity v2, it uses OS_TENANT_NAME rather than OS_PROJECT_NAME.
+ export OS_TENANT_NAME=admin
+
+ # Authentication username, belongs to the project above, recommend admin user.
+ export OS_USERNAME=admin
+
+ # Authentication password. Use your own password
+ export OS_PASSWORD=xxxxxxxx
+
+ # Authentication URL, one of the endpoints of the keystone service. If the v3
+ # identity API is used, some extra variables need to be set, as follows.
+ export OS_AUTH_URL='http://xxx.xxx.xxx.xxx:5000/v3'
+
+ # Default is 2.0. If the keystone v3 API is used, this should be set to 3.
+ export OS_IDENTITY_API_VERSION=3
+
+ # Domain name or ID containing the user above.
+ # Command to check the domain: openstack user show <OS_USERNAME>
+ export OS_USER_DOMAIN_NAME=default
+
+ # Domain name or ID containing the project above.
+ # Command to check the domain: openstack project show <OS_PROJECT_NAME>
+ export OS_PROJECT_DOMAIN_NAME=default
+
+ # Home directory for dovetail that you have created before.
+ export DOVETAIL_HOME=/home/dovetail
+
+Export all of these variables into the environment by running,
+
+.. code-block:: bash
+
+ $ source ${DOVETAIL_HOME}/pre_config/env_config.sh
+
+If the OpenStack client is installed, you can validate that the OpenStack environment
+settings are correct by running,
+
+.. code-block:: bash
+
+ $ openstack service list
+
+
+Installing Prerequisites on the Test Host
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The main prerequisites for Dovetail are Python and Docker.
+
+In the CVP test suite for the Danube release, Dovetail requires Python 2.7. Python 3.x
+is not supported at this time.
+
+Use the following steps to check whether the right version of Python is already installed,
+and if not, install it.
+
+.. code-block:: bash
+
+ $ python --version
+ Python 2.7.6
+
+If your Test Host does not have Python installed, or the version is not 2.7, you
+should consult the Python installation guide for your Test Host's operating
+system on how to install Python 2.7.
+
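+For instance, on an Ubuntu-based Test Host, the installation is typically a single
+package install (a rough sketch; package names and commands vary by distribution):
+
+.. code-block:: bash
+
+ # Ubuntu/Debian example; use yum or dnf on CentOS/Red Hat/Fedora instead.
+ $ sudo apt-get update
+ $ sudo apt-get install -y python2.7 python-pip
+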
+Dovetail does not work with Docker versions prior to 1.12.3. We have validated
+Dovetail with Docker 17.03 CE. Other versions of Docker later than 1.12.3 may
+also work, but community support may be more available on Docker 17.03 CE.
+
+.. code-block:: bash
+
+ $ sudo docker version
+ Client:
+ Version: 17.03.1-ce
+ API version: 1.27
+ Go version: go1.7.5
+ Git commit: c6d412e
+ Built: Mon Mar 27 17:10:36 2017
+ OS/Arch: linux/amd64
+
+ Server:
+ Version: 17.03.1-ce
+ API version: 1.27 (minimum version 1.12)
+ Go version: go1.7.5
+ Git commit: c6d412e
+ Built: Mon Mar 27 17:10:36 2017
+ OS/Arch: linux/amd64
+ Experimental: false
+
+If your Test Host does not have Docker installed, or Docker is older than 1.12.3,
+or you have a Docker version other than 17.03 CE and wish to change,
+you will need to install, upgrade, or re-install Docker in order to run Dovetail.
+The Docker installation process can be fairly complex; refer to the official
+Docker installation guide that is relevant to your Test Host's operating system.
+
+The above installation steps assume that the Test Host is in the online mode. For offline
+testing, use the following offline installation steps instead.
+
+In order to install or upgrade Python offline, you may download packaged Python 2.7
+for your Test Host's operating system on a connected host, copy the package to
+the Test Host, then install it from that local copy.
+
+In order to install Docker offline, download the Docker static binaries and copy the
+tar file to the Test Host. For Ubuntu 14.04, for example, you may follow the steps at the
+link below,
+
+.. code-block:: bash
+
+ https://github.com/meetyg/docker-offline-install
+
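+As a rough sketch of the static-binary approach (the version and file name below are
+illustrative; pick the ones matching your Test Host):
+
+.. code-block:: bash
+
+ # On a connected host: download the static binaries (version is illustrative).
+ $ wget https://download.docker.com/linux/static/stable/x86_64/docker-17.03.1-ce.tgz
+
+ # On the Test Host, after copying the tarball over: unpack and install the binaries.
+ $ tar xzvf docker-17.03.1-ce.tgz
+ $ sudo cp docker/* /usr/bin/
+ $ sudo dockerd &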
+
+Installing Dovetail on the Test Host
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Dovetail project maintains a Docker image that has Dovetail test tools preinstalled.
+This Docker image is tagged with versions. Before pulling the Dovetail image, check the
+OPNFV CVP web page to determine the right tag for CVP testing.
+
+If the Test Host is online, you can pull the image directly.
+
+.. code-block:: bash
+
+ $ sudo docker pull opnfv/dovetail:cvp.0.5.0
+ cvp.0.5.0: Pulling from opnfv/dovetail
+ 30d541b48fc0: Pull complete
+ 8ecd7f80d390: Pull complete
+ 46ec9927bb81: Pull complete
+ 2e67a4d67b44: Pull complete
+ 7d9dd9155488: Pull complete
+ cc79be29f08e: Pull complete
+ e102eed9bf6a: Pull complete
+ 952b8a9d2150: Pull complete
+ bfbb639d1f38: Pull complete
+ bf7c644692de: Pull complete
+ cdc345e3f363: Pull complete
+ Digest: sha256:d571b1073b2fdada79562e8cc67f63018e8d89268ff7faabee3380202c05edee
+ Status: Downloaded newer image for opnfv/dovetail:cvp.0.5.0
+
+An example of the ``<tag>`` is *cvp.0.5.0*.
+
+If the Test Host is offline, you will need to first pull the Dovetail Docker image, and all the
+dependent images that Dovetail uses, to a host that is online. You need to pull all dependent
+images because Dovetail normally checks dependencies at run time and automatically pulls
+images as needed when the Test Host is online. If the Test Host is offline, all of these
+dependencies also need to be copied over manually.
+
+.. code-block:: bash
+
+ $ sudo docker pull opnfv/dovetail:cvp.0.5.0
+ $ sudo docker pull opnfv/functest:cvp.0.5.0
+ $ sudo docker pull opnfv/yardstick:danube.3.2
+ $ sudo docker pull opnfv/bottlenecks:cvp.0.4.0
+ $ sudo wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P {ANY_DIR}
+
+Once all these images are pulled, save them, copy them to the Test Host, and then load
+the Dovetail image and all dependent images on the Test Host.
+
+On the online host, save the images.
+
+.. code-block:: bash
+
+ $ sudo docker save -o dovetail.tar opnfv/dovetail:cvp.0.5.0 opnfv/functest:cvp.0.5.0 \
+ opnfv/yardstick:danube.3.2 opnfv/bottlenecks:cvp.0.4.0
+
+Copy the dovetail.tar file to the Test Host, and then load the images on the Test Host.
+
+.. code-block:: bash
+
+ $ sudo docker load --input dovetail.tar
+
+Copy the sdnvpn test area image ``ubuntu-16.04-server-cloudimg-amd64-disk1.img`` to ``${DOVETAIL_HOME}/pre_config/``.
+
+Now check to see that the Dovetail image has been pulled or loaded properly.
+
+.. code-block:: bash
+
+ $ sudo docker images
+ REPOSITORY TAG IMAGE ID CREATED SIZE
+ opnfv/functest cvp.0.5.0 9eaeaea5f203 8 days ago 1.53GB
+ opnfv/dovetail cvp.0.5.0 5d25b289451c 8 days ago 516MB
+ opnfv/yardstick danube.3.2 574596b6ea12 8 days ago 1.2GB
+ opnfv/bottlenecks cvp.0.4.0 00450688bcae 3 hours ago 622 MB
+
+Regardless of whether you pulled the Dovetail image directly while online, or loaded
+it from a static image tar file, you are now ready to run Dovetail.
+
+.. code-block:: bash
+
+ $ sudo docker run --privileged=true -it \
+ -e DOVETAIL_HOME=$DOVETAIL_HOME \
+ -v $DOVETAIL_HOME:$DOVETAIL_HOME \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ opnfv/dovetail:<tag> /bin/bash
+
+The ``-e`` option sets an environment variable in the container and the ``-v`` options map
+files and directories on the host into the container.
+
+Running the CVP Test Suite
+----------------------------
+
+Now you should be in the Dovetail container's prompt and ready to execute
+test suites.
+
+The Dovetail client CLI allows the tester to specify which test suite to run.
+By default the results are stored in local files under
+``$DOVETAIL_HOME/results``.
+
+.. code-block:: bash
+
+ $ dovetail run --testsuite <test-suite-name>
+
+Multiple test suites may be available. For the purpose of running the
+CVP test suite, the test suite name follows the format
+``CVP_<major>_<minor>_<patch>``,
+for example, ``CVP_1_0_0``.
+
+.. code-block:: bash
+
+ $ dovetail run --testsuite CVP_1_0_0
+
+If you are not running the entire test suite, you can choose to run an
+individual test area instead.
+
+.. code-block:: bash
+
+ $ dovetail run --testsuite CVP_1_0_0 --testarea ipv6
+
+Until the official test suite is approved and released, you can use
+the *proposed_tests* suite for your trial runs, like this.
+
+.. code-block:: bash
+
+ $ dovetail run --testsuite proposed_tests --testarea ipv6
+ 2017-05-23 05:01:49,488 - run - INFO - ================================================
+ 2017-05-23 05:01:49,488 - run - INFO - Dovetail compliance: proposed_tests!
+ 2017-05-23 05:01:49,488 - run - INFO - ================================================
+ 2017-05-23 05:01:49,488 - run - INFO - Build tag: daily-master-4bdde6b8-afa6-40bb-8fc9-5d568d74c8d7
+ 2017-05-23 05:01:49,536 - run - INFO -
+ 2017-05-23 05:01:49,710 - run - INFO - >>[testcase]: dovetail.ipv6.tc001
+ 2017-05-23 05:08:22,532 - run - INFO - Results have been stored with file /home/dovetail/results/functest_results.txt.
+ 2017-05-23 05:08:22,538 - run - INFO - >>[testcase]: dovetail.ipv6.tc002
+ ...
+
+Special Configuration for Running HA Test Cases
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+HA test cases need the info of a controller node of the OpenStack deployment.
+This includes the node's name, role and IP address, as well as the user and either the
+key_filename or the password used to log in to the node. Users should create the file
+``${DOVETAIL_HOME}/pre_config/pod.yaml`` to store this info.
+
+Here is a sample file for users.
+
+.. code-block:: yaml
+
+ nodes:
+ -
+ # This cannot be changed and must be node1.
+ name: node1
+
+ # This must be controller.
+ role: Controller
+
+ # This is the install IP of a controller node.
+ ip: xx.xx.xx.xx
+
+ # User name of this node. This user must have sudo privileges.
+ user: root
+
+ # Password of the user.
+ password: root
+
+Besides the 'password', users can also provide a 'key_filename' to log in to the node.
+In that case, create the file ``$DOVETAIL_HOME/pre_config/id_rsa`` to store the private key.
+
+.. code-block:: yaml
+
+ name: node1
+ role: Controller
+ ip: 10.1.0.50
+ user: root
+
+ # Private key of this node. It must be /root/.ssh/id_rsa
+ # Dovetail will move the key file from $DOVETAIL_HOME/pre_config/id_rsa
+ # to /root/.ssh/id_rsa of Yardstick container
+ key_filename: /root/.ssh/id_rsa
+
+
+Making Sense of CVP Test Results
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When a tester is performing trial runs, Dovetail stores results in local files by default.
+
+.. code-block:: bash
+
+ cd $DOVETAIL_HOME/results
+
+#. Local file
+
+ * Log file: dovetail.log
+
+ * Review the dovetail.log to see if all important information has been captured
+ - in the default mode, without DEBUG messages.
+
+ * The end of the log file has a summary of all test case results
+ (see the example after this list).
+
+ * Additional log files may be of interest: refstack.log, dovetail_ha_tcXXX.out ...
+
+ * Example: OpenStack Refstack test cases
+
+ * You can see the log details in refstack.log, which has the passed/skipped/failed
+ test case results; the failed test cases have rich debug information for the
+ users to see why each test case fails.
+
+ * Example: OPNFV Functest test cases
+
+ * For the Functest tool, the log is stored in functest.log
+
+ * The results for each Functest test case are stored in functest_results.txt.
+
+ * Example: OPNFV Yardstick test cases
+
+ * For the Yardstick tool, the log is stored in yardstick.log
+
+ * The logs for each Yardstick test case are stored in dovetail_ha_tcXXX.out, one file per test case.
+
+#. OPNFV web interface
+ CVP will host a web site to collect test results. Users can upload their results to this web site,
+ so they can review these results in the future.
+
+ * Web site URL
+
+ * Pending completion by the Linux Foundation, the test community, etc.
+
+ * Sign in / Sign up
+
+ * You need to sign in to your account; then you can upload results and check your private results.
+ CVP currently uses OpenStack ID as the account provider, but will soon support Linux Foundation ID
+ as well.
+
+ * If you already have an OpenStack ID, you can sign in directly with it.
+
+ * If you do not have an OpenStack ID, you can sign up for a new one on the sign-up page.
+
+ * If you do not sign in, you can only check the community results.
+
+ * My results
+
+ * This page lists all results uploaded by you after you signed in.
+
+ * You can also upload your results on this page.
+
+ * There is a *choose file* button; once you click it, you can choose the result file on your hard disk,
+ then click the *upload* button, and you will see a result ID once your upload succeeds.
+
+ * Check the *review* box to submit your result to OPNFV. Uncheck the box to withdraw your result.
+
+ * Profile
+
+ * This page shows your account info after you signed in.
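+
+As a quick local check (a minimal sketch; the exact file names may vary between
+releases), the summary at the end of the main log and the per-project result files
+can be inspected directly on the Test Host:
+
+.. code-block:: bash
+
+ $ cd ${DOVETAIL_HOME}/results
+ $ tail -n 40 dovetail.log       # summary of all test case results
+ $ ls *.log *.out *.txt          # per-project logs and result files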
+
+Updating Dovetail or a Test Suite
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Follow the instructions in sections `Installing Dovetail on the Test Host`_ and
+`Running the CVP Test Suite`_, replacing the Docker images with the new tags,
+
+.. code-block:: bash
+
+ sudo docker pull opnfv/dovetail:<dovetail_new_tag>
+ sudo docker pull opnfv/functest:<functest_new_tag>
+ sudo docker pull opnfv/yardstick:<yardstick_new_tag>
+
+This step is necessary if the Dovetail software or the CVP test suite has updates.
+
+
diff --git a/dovetail/compliance/debug.yml b/dovetail/compliance/debug.yml
index 0ae734ea..9f6d2389 100644
--- a/dovetail/compliance/debug.yml
+++ b/dovetail/compliance/debug.yml
@@ -12,6 +12,5 @@ debug:
# - dovetail.ipv6.tc009
# - dovetail.ipv6.tc018
# - dovetail.ipv6.tc019
- - dovetail.nfvi.tc001
- - dovetail.nfvi.tc002
- - dovetail.nfvi.tc101
+ - dovetail.vping.tc001
+ - dovetail.vping.tc002
diff --git a/dovetail/compliance/proposed_tests.yml b/dovetail/compliance/proposed_tests.yml
index de892e5d..13fcf719 100644
--- a/dovetail/compliance/proposed_tests.yml
+++ b/dovetail/compliance/proposed_tests.yml
@@ -5,6 +5,9 @@ proposed_tests:
# defcore,defcore.tc001 used for auto-method, defcore.tc002 used for manually method
- dovetail.defcore.tc001
# - dovetail.defcore.tc002
+ # vping
+ - dovetail.vping.tc001
+ - dovetail.vping.tc002
# ipv6
- dovetail.ipv6.tc001
- dovetail.ipv6.tc002
@@ -31,9 +34,6 @@ proposed_tests:
- dovetail.ipv6.tc023
- dovetail.ipv6.tc024
- dovetail.ipv6.tc025
- # nfvi, vping_ssh, vping_userdata
- - dovetail.nfvi.tc001
- - dovetail.nfvi.tc002
# HA
- dovetail.ha.tc001
- dovetail.ha.tc002
@@ -46,6 +46,14 @@ proposed_tests:
# sdnvpn
- dovetail.sdnvpn.tc001
- dovetail.sdnvpn.tc002
- - dovetail.sdnvpn.tc003
- dovetail.sdnvpn.tc004
- dovetail.sdnvpn.tc008
+ # resiliency
+ - dovetail.resiliency.tc001
+ # tempest
+ - dovetail.tempest.tc001
+ - dovetail.tempest.tc002
+ - dovetail.tempest.tc003
+ - dovetail.tempest.tc004
+ - dovetail.tempest.tc005
+ - dovetail.tempest.tc006
diff --git a/dovetail/conf/bottlenecks_config.yml b/dovetail/conf/bottlenecks_config.yml
new file mode 100644
index 00000000..43df8c55
--- /dev/null
+++ b/dovetail/conf/bottlenecks_config.yml
@@ -0,0 +1,20 @@
+---
+bottlenecks:
+ image_name: opnfv/bottlenecks
+ docker_tag: cvp.0.4.0
+ opts: '-id --privileged=true'
+ config:
+ dir: '/home/opnfv/userconfig'
+ pre_condition:
+ - 'echo test for precondition in bottlenecks'
+ cmds:
+ - '/home/opnfv/bottlenecks/run_tests.sh -c {{validate_testcase}}'
+ post_condition:
+ - 'echo test for postcondition in bottlenecks'
+ result:
+ dir: '/tmp'
+ file_path: 'bottlenecks.log'
+ openrc: '/tmp/admin_rc.sh'
+ extra_container:
+ - 'Bottlenecks-Yardstick'
+ - 'Bottlenecks-ELK'
diff --git a/dovetail/conf/cmd_config.yml b/dovetail/conf/cmd_config.yml
index aa27f293..58409f2b 100644
--- a/dovetail/conf/cmd_config.yml
+++ b/dovetail/conf/cmd_config.yml
@@ -21,14 +21,21 @@ cli:
- '-y'
path:
- 'yardstick/docker_tag'
- help: 'Overwrite tag for yardstick docker container (e.g. stable or latest)'
+ help: 'Overwrite tag for yardstick docker container (e.g. danube.3.2)'
func_tag:
flags:
- '--func_tag'
- '-f'
path:
- 'functest/docker_tag'
- help: 'Overwrite tag for functest docker container (e.g. stable or latest)'
+ help: 'Overwrite tag for functest docker container (e.g. cvp.0.5.0)'
+ bott_tag:
+ flags:
+ - '--bott_tag'
+ - '-b'
+ path:
+ - 'bottlenecks/docker_tag'
+ help: 'Overwrite tag for bottlenecks docker container (e.g. cvp.0.4.0)'
control:
testsuite:
flags:
@@ -50,7 +57,7 @@ cli:
flags:
- '--report'
- '-r'
- help: 'push results to DB (e.g. --report http://192.168.135.2:8000/api/v1)'
+ help: 'push results to DB (e.g. --report http://192.168.135.2:8000/api/v1/results)'
offline:
flags:
- '--offline'
diff --git a/dovetail/conf/dovetail_config.yml b/dovetail/conf/dovetail_config.yml
index 36b31401..738b892f 100644
--- a/dovetail/conf/dovetail_config.yml
+++ b/dovetail/conf/dovetail_config.yml
@@ -17,6 +17,9 @@ pri_key: 'id_rsa'
# SDNVPN offline image
sdnvpn_image: 'ubuntu-16.04-server-cloudimg-amd64-disk1.img'
+# Tempest image
+cirros_image: 'cirros-0.3.5-x86_64-disk.img'
+
COMPLIANCE_PATH: compliance/
TESTCASE_PATH: testcase/
# testsuite supported, should adjust accordingly
@@ -30,9 +33,10 @@ testarea_supported:
- example
- ha
- ipv6
- - nfvi
- sdnvpn
- - vimops
+ - vping
+ - resiliency
+ - tempest
functest_testsuite:
- refstack_defcore
@@ -67,10 +71,12 @@ parameters:
include_config:
- functest_config.yml
- yardstick_config.yml
+ - bottlenecks_config.yml
test_project:
- 'yardstick'
- 'functest'
+ - 'bottlenecks'
validate_input:
valid_docker_tag:
@@ -79,3 +85,10 @@ validate_input:
- 'danube.1.0'
- 'danube.2.0'
- 'danube.3.0'
+ - 'danube.3.1'
+ - 'danube.3.2'
+ - 'cvp.0.1.0'
+ - 'cvp.0.2.0'
+ - 'cvp.0.3.0'
+ - 'cvp.0.4.0'
+ - 'cvp.0.5.0'
diff --git a/dovetail/conf/functest_config.yml b/dovetail/conf/functest_config.yml
index 460506a6..11c49e32 100644
--- a/dovetail/conf/functest_config.yml
+++ b/dovetail/conf/functest_config.yml
@@ -1,7 +1,7 @@
---
functest:
image_name: opnfv/functest
- docker_tag: latest
+ docker_tag: cvp.0.5.0
opts: '-id --privileged=true'
config:
dir: '/home/opnfv/userconfig'
diff --git a/dovetail/conf/yardstick_config.yml b/dovetail/conf/yardstick_config.yml
index bc207d7f..62a4abf9 100644
--- a/dovetail/conf/yardstick_config.yml
+++ b/dovetail/conf/yardstick_config.yml
@@ -1,7 +1,7 @@
---
yardstick:
image_name: opnfv/yardstick
- docker_tag: latest
+ docker_tag: danube.3.2
opts: '-id --privileged=true'
config:
dir: '/home/opnfv/userconfig'
@@ -15,7 +15,7 @@ yardstick:
- 'mkdir -p /home/opnfv/yardstick/results/'
- "cd /home/opnfv/repos/yardstick && source tests/ci/prepare_env.sh &&
yardstick -d task start tests/opnfv/test_cases/{{validate_testcase}}.yaml
- --output-file /home/opnfv/yardstick/results/{{validate_testcase}}.out
+ --output-file /home/opnfv/yardstick/results/{{testcase}}.out
--task-args '{'file': '/home/opnfv/userconfig/pre_config/pod.yaml'}'"
post_condition:
- ''
@@ -25,3 +25,6 @@ yardstick:
file_path: 'yardstick.log'
key_path: '/root/.ssh/id_rsa'
openrc: '/etc/yardstick/openstack.creds'
+ yard_conf:
+ src_file: '/home/opnfv/repos/yardstick/etc/yardstick/yardstick.conf.sample'
+ dest_file: '/etc/yardstick/yardstick.conf'
diff --git a/dovetail/container.py b/dovetail/container.py
index 2d8a9e20..f3d08373 100644
--- a/dovetail/container.py
+++ b/dovetail/container.py
@@ -18,7 +18,8 @@ from utils.dovetail_config import DovetailConfig as dt_cfg
class Container(object):
container_list = {}
- has_pull_latest_image = {'yardstick': False, 'functest': False}
+ has_pull_latest_image = {'yardstick': False, 'functest': False,
+ 'bottlenecks': False}
logger = None
@@ -42,7 +43,8 @@ class Container(object):
return '%s:%s' % (dt_cfg.dovetail_config[type]['image_name'],
dt_cfg.dovetail_config[type]['docker_tag'])
except KeyError as e:
- cls.logger.error('There is no %s in %s config file.', e, type)
+ cls.logger.exception(
+ 'There is no key {} in {} config file.'.format(e, type))
return None
# get the openrc_volume for creating the container
@@ -56,7 +58,8 @@ class Container(object):
dovetail_config[type]['openrc'])
return openrc
else:
- cls.logger.error("File %s is not exist", dovetail_config['openrc'])
+ cls.logger.error(
+ "File {} doesn't exist.".format(dovetail_config['openrc']))
return None
# set functest envs and TEST_DB_URL for creating functest container
@@ -74,8 +77,7 @@ class Container(object):
if 'sdnvpn' in testcase_name:
ins_type = "-e INSTALLER_TYPE=netvirt"
scenario = " -e DEPLOY_SCENARIO=bgpvpn"
- node = " -e NODE_NAME=master"
- envs = "%s %s %s" % (ins_type, scenario, node)
+ envs = "%s %s" % (ins_type, scenario)
dovetail_config = dt_cfg.dovetail_config
if dovetail_config['report_dest'].startswith("http"):
@@ -101,10 +103,6 @@ class Container(object):
cls.logger.error("Can't find any external network.")
return None
- if dovetail_config['report_dest'].startswith("http"):
- cls.logger.info("Yardstick can't push results to DB.")
- cls.logger.info("Results will be stored with files.")
-
log_vol = '-v %s:%s ' % (dovetail_config['result_dir'],
dovetail_config["yardstick"]['result']['log'])
@@ -112,20 +110,33 @@ class Container(object):
pod_file = os.path.join(dovetail_config['config_dir'],
dovetail_config['pod_file'])
if not os.path.isfile(pod_file):
- cls.logger.error("File %s doesn't exist.", pod_file)
+ cls.logger.error("File {} doesn't exist.".format(pod_file))
return None
key_file = os.path.join(dovetail_config['config_dir'],
dovetail_config['pri_key'])
key_container_path = dovetail_config["yardstick"]['result']['key_path']
if not os.path.isfile(key_file):
- cls.logger.debug("Key file %s is not found, maybe can use passwd "
- "method in %s to do HA test.", key_file, pod_file)
+ cls.logger.debug("Key file {} is not found, must use password in "
+ "{} to do HA test.".format(key_file, pod_file))
key_vol = ''
else:
key_vol = '-v %s:%s ' % (key_file, key_container_path)
return "%s %s %s" % (envs, log_vol, key_vol)
@classmethod
+ def set_bottlenecks_config(cls, testcase_name):
+ dovetail_config = dt_cfg.dovetail_config
+ yard_tag = dovetail_config['yardstick']['docker_tag']
+ docker_vol = '-v /var/run/docker.sock:/var/run/docker.sock'
+ env = ('-e Yardstick_TAG={} -e OUTPUT_FILE={}.out'
+ .format(yard_tag, testcase_name))
+ report = ""
+ if dovetail_config['report_dest'].startswith("http"):
+ report = ("-e BOTTLENECKS_DB_TARGET={}"
+ .format(dovetail_config['report_dest']))
+ return "{} {} {}".format(docker_vol, env, report)
+
+ @classmethod
def create(cls, type, testcase_name):
dovetail_config = dt_cfg.dovetail_config
docker_image = cls.get_docker_image(type)
@@ -138,7 +149,7 @@ class Container(object):
# CI_DEBUG is used for showing the debug logs of the upstream projects
# BUILD_TAG is the unique id for this test
- envs = ' -e CI_DEBUG=true'
+ envs = ' -e CI_DEBUG=true -e NODE_NAME=master'
envs = envs + ' -e BUILD_TAG=%s-%s' % (dovetail_config['build_tag'],
testcase_name)
@@ -147,6 +158,8 @@ class Container(object):
config = cls.set_functest_config(testcase_name)
if type.lower() == "yardstick":
config = cls.set_yardstick_config()
+ if type.lower() == "bottlenecks":
+ config = cls.set_bottlenecks_config(testcase_name)
if not config:
return None
@@ -157,7 +170,7 @@ class Container(object):
hosts_config = ""
hosts_config_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), os.pardir, 'userconfig'))
+ os.path.join(os.path.dirname(__file__), 'userconfig'))
try:
with open(os.path.join(hosts_config_path, 'hosts.yaml')) as f:
hosts_info = yaml.safe_load(f)
@@ -165,17 +178,38 @@ class Container(object):
for host in hosts_info['hosts_info']:
hosts_config += " --add-host "
hosts_config += str(host)
- cls.logger.info('get hosts info %s', hosts_config)
+ cls.logger.info('Get hosts info {}.'.format(hosts_config))
except Exception:
- cls.logger.warn('fail to get hosts info in %s/hosts.yaml, \
- maybe some issue with domain name resolution',
- hosts_config_path)
+ cls.logger.warn('Failed to get hosts info in {}/hosts.yaml, '
+ 'maybe some issues with domain name resolution.'
+ .format(hosts_config_path))
+
+ cacert_volume = ""
+ https_enabled = dt_utils.check_https_enabled(cls.logger)
+ cacert = os.getenv('OS_CACERT',)
+ if https_enabled:
+ cls.logger.info("https enabled...")
+ if cacert is not None:
+ if not os.path.isfile(cacert):
+ cls.logger.error("Env variable 'OS_CACERT' is set to {}"
+ "but the file does not exist."
+ .format(cacert))
+ return None
+ elif not dovetail_config['config_dir'] in cacert:
+ cls.logger.error("Credential file has to be put in {},"
+ "which can be mount into container."
+ .format(dovetail_config['config_dir']))
+ return None
+ cacert_volume = ' -v %s:%s ' % (cacert, cacert)
+ else:
+ cls.logger.warn("https enabled, OS_CACERT not set, insecure"
+ "connection used or OS_CACERT missed")
result_volume = ' -v %s:%s ' % (dovetail_config['result_dir'],
dovetail_config[type]['result']['dir'])
- cmd = 'sudo docker run %s %s %s %s %s %s %s %s /bin/bash' % \
- (opts, envs, config, hosts_config, openrc, config_volume,
- result_volume, docker_image)
+ cmd = 'sudo docker run %s %s %s %s %s %s %s %s %s /bin/bash' % \
+ (opts, envs, config, hosts_config, openrc, cacert_volume,
+ config_volume, result_volume, docker_image)
dt_utils.exec_cmd(cmd, cls.logger)
ret, container_id = \
dt_utils.exec_cmd("sudo docker ps | grep " + docker_image +
@@ -188,6 +222,17 @@ class Container(object):
src_path = os.path.join(prefix_path, 'pre_config', file_name)
dest_path = '/home/opnfv/functest/images'
Container.pre_copy(container_id, src_path, dest_path)
+
+ if type.lower() == 'functest':
+ prefix_path = dt_cfg.dovetail_config[type]['config']['dir']
+ file_name = dt_cfg.dovetail_config['cirros_image']
+ src_path = os.path.join(prefix_path, 'pre_config', file_name)
+ dest_path = '/home/opnfv/functest/images'
+ Container.pre_copy(container_id, src_path, dest_path)
+
+ if type.lower() == 'yardstick':
+ cls.set_yardstick_conf_file(container_id)
+
return container_id
@classmethod
@@ -206,15 +251,15 @@ class Container(object):
cmd = "sudo docker ps -aq -f 'ancestor=%s'" % (image_id)
ret, msg = dt_utils.exec_cmd(cmd, cls.logger)
if msg and ret == 0:
- cls.logger.debug('image %s has containers, skip.', image_id)
+ cls.logger.debug('Image {} has containers, skip.'.format(image_id))
return True
cmd = 'sudo docker rmi %s' % (image_id)
- cls.logger.debug('remove image %s', image_id)
+ cls.logger.debug('Remove image {}.'.format(image_id))
ret, msg = dt_utils.exec_cmd(cmd, cls.logger)
if ret == 0:
- cls.logger.debug('remove image %s successfully', image_id)
+ cls.logger.debug('Remove image {} successfully.'.format(image_id))
return True
- cls.logger.error('fail to remove image %s.', image_id)
+ cls.logger.error('Failed to remove image {}.'.format(image_id))
return False
@classmethod
@@ -222,9 +267,10 @@ class Container(object):
cmd = 'sudo docker pull %s' % (image_name)
ret, msg = dt_utils.exec_cmd(cmd, cls.logger)
if ret != 0:
- cls.logger.error('fail to pull docker image %s!', image_name)
+ cls.logger.error(
+ 'Failed to pull docker image {}!'.format(image_name))
return False
- cls.logger.debug('success to pull docker image %s!', image_name)
+ cls.logger.debug('Success to pull docker image {}!'.format(image_name))
return True
@classmethod
@@ -233,7 +279,8 @@ class Container(object):
if not docker_image:
return None
if cls.has_pull_latest_image[validate_type] is True:
- cls.logger.debug('%s is already the newest version.', docker_image)
+ cls.logger.debug(
+ '{} is already the newest version.'.format(docker_image))
return docker_image
old_image_id = cls.get_image_id(docker_image)
if not cls.pull_image_only(docker_image):
@@ -241,13 +288,14 @@ class Container(object):
cls.has_pull_latest_image[validate_type] = True
new_image_id = cls.get_image_id(docker_image)
if not new_image_id:
- cls.logger.error("fail to get the new image's id %s", docker_image)
+ cls.logger.error(
+ "Failed to get the id of image {}.".format(docker_image))
return None
if not old_image_id:
return docker_image
if new_image_id == old_image_id:
- cls.logger.debug('image %s has no changes, no need to remove.',
- docker_image)
+ cls.logger.debug('Image {} has no changes, no need to remove.'
+ .format(docker_image))
else:
cls.remove_image(old_image_id)
return docker_image
@@ -259,11 +307,23 @@ class Container(object):
return image_id
@classmethod
- def clean(cls, container_id):
- cmd1 = 'sudo docker stop %s' % (container_id)
- dt_utils.exec_cmd(cmd1, cls.logger)
- cmd2 = 'sudo docker rm %s' % (container_id)
- dt_utils.exec_cmd(cmd2, cls.logger)
+ def check_container_exist(cls, container_name):
+ cmd = ('sudo docker ps -aq -f name={}'.format(container_name))
+ ret, msg = dt_utils.exec_cmd(cmd, cls.logger)
+ if ret == 0 and msg:
+ return True
+ return False
+
+ @classmethod
+ def clean(cls, container_id, valid_type):
+ cmd = ('sudo docker rm -f {}'.format(container_id))
+ dt_utils.exec_cmd(cmd, cls.logger)
+ if valid_type.lower() == 'bottlenecks':
+ containers = dt_cfg.dovetail_config[valid_type]['extra_container']
+ for container in containers:
+ if cls.check_container_exist(container):
+ cmd = ('sudo docker rm -f {}'.format(container))
+ dt_utils.exec_cmd(cmd, cls.logger)
@classmethod
def exec_cmd(cls, container_id, sub_cmd, exit_on_error=False):
@@ -279,3 +339,18 @@ class Container(object):
return (1, 'src_path or dest_path is empty')
cmd = 'cp %s %s' % (src_path, dest_path)
return cls.exec_cmd(container_id, cmd, exit_on_error)
+
+ @classmethod
+ def set_yardstick_conf_file(cls, container_id):
+ valid_type = 'yardstick'
+ src = dt_cfg.dovetail_config[valid_type]['yard_conf']['src_file']
+ dest = dt_cfg.dovetail_config[valid_type]['yard_conf']['dest_file']
+ cls.pre_copy(container_id, src, dest)
+ url = dt_cfg.dovetail_config['report_dest']
+ if url.startswith("http"):
+ cmd = ("sed -i '16s#http://127.0.0.1:8000/results#{}#g' {}"
+ .format(url, dest))
+ cls.exec_cmd(container_id, cmd)
+ if url.lower() == 'file':
+ cmd = ("sed -i '12s/http/file/g' {}".format(dest))
+ cls.exec_cmd(container_id, cmd)
diff --git a/dovetail/parser.py b/dovetail/parser.py
index fdde4f9e..1b539c85 100644
--- a/dovetail/parser.py
+++ b/dovetail/parser.py
@@ -32,15 +32,16 @@ class Parser(object):
kwargs = {}
for arg in dt_cfg.dovetail_config['parameters']:
path = eval(arg['path'])
- cls.logger.debug('name: %s, eval path: %s ',
- arg['name'], path)
+ cls.logger.debug(
+ 'name: {}, eval path: {}'.format(arg['name'], path))
kwargs[arg['name']] = \
dt_utils.get_obj_by_path(testcase.testcase, path)
- cls.logger.debug('kwargs: %s', kwargs)
+ cls.logger.debug('kwargs: {}'.format(kwargs))
cmd_lines = template.render(**kwargs)
except Exception as e:
- cls.logger.error('failed to parse cmd %s, exception:%s', cmd, e)
+ cls.logger.exception(
+ 'Failed to parse cmd {}, exception: {}'.format(cmd, e))
return None
return cmd_lines
diff --git a/dovetail/report.py b/dovetail/report.py
index 0d83831d..fa6a0ba4 100644
--- a/dovetail/report.py
+++ b/dovetail/report.py
@@ -13,6 +13,7 @@ import urllib2
import re
import os
import datetime
+import tarfile
from pbr import version
@@ -25,7 +26,7 @@ from testcase import Testcase
class Report(object):
- results = {'functest': {}, 'yardstick': {}, 'shell': {}}
+ results = {'functest': {}, 'yardstick': {}, 'bottlenecks': {}, 'shell': {}}
logger = None
@@ -112,8 +113,8 @@ class Report(object):
'|'.join(dt_cfg.dovetail_config['testarea_supported']))
area = pattern.findall(testcase['name'])
if not area:
- cls.logger.error("testcase %s not in supported testarea",
- testcase['name'])
+ cls.logger.error("Test case {} not in supported testarea."
+ .format(testcase['name']))
return None
area = area[0]
testarea_scope.append(area)
@@ -162,6 +163,17 @@ class Report(object):
# cls.save(report_txt)
return report_txt
+ @classmethod
+ def save_logs(cls):
+ logs_gz = "logs.tar.gz"
+ result_dir = dt_cfg.dovetail_config['result_dir']
+
+ with tarfile.open(os.path.join(result_dir, logs_gz), "w:gz") as f_out:
+ files = os.listdir(result_dir)
+ for f in files:
+ if f not in ['workspace']:
+ f_out.add(os.path.join(result_dir, f))
+
# save to disk as default
@classmethod
def save(cls, report):
@@ -170,9 +182,9 @@ class Report(object):
with open(os.path.join(dt_cfg.dovetail_config['result_dir'],
report_file_name), 'w') as report_file:
report_file.write(report)
- cls.logger.info('save report to %s', report_file_name)
+ cls.logger.info('Save report to {}'.format(report_file_name))
except Exception:
- cls.logger.error('Failed to save: %s', report_file_name)
+ cls.logger.exception('Failed to save: {}'.format(report_file_name))
@classmethod
def get_result(cls, testcase):
@@ -180,7 +192,7 @@ class Report(object):
type = testcase.validate_type()
crawler = CrawlerFactory.create(type)
if crawler is None:
- cls.logger.error('crawler is None:%s', testcase.name())
+ cls.logger.error('Crawler is None: {}'.format(testcase.name()))
return None
# if validate_testcase in cls.results[type]:
@@ -191,12 +203,12 @@ class Report(object):
if result is not None:
cls.results[type][validate_testcase] = result
# testcase.script_result_acquired(True)
- cls.logger.debug('testcase: %s -> result acquired',
- validate_testcase)
+ cls.logger.debug(
+ 'Test case: {} -> result acquired'.format(validate_testcase))
else:
retry = testcase.increase_retry()
- cls.logger.debug('testcase: %s -> result acquired retry:%d',
- validate_testcase, retry)
+ cls.logger.debug('Test case: {} -> result acquired retry: {}'
+ .format(validate_testcase, retry))
return result
@@ -206,7 +218,7 @@ class FunctestCrawler(object):
def __init__(self):
self.type = 'functest'
- self.logger.debug('create crawler:%s', self.type)
+ self.logger.debug('Create crawler: {}'.format(self.type))
@classmethod
def create_log(cls):
@@ -234,14 +246,15 @@ class FunctestCrawler(object):
os.path.join(dovetail_config['result_dir'],
dovetail_config[self.type]['result']['file_path'])
if not os.path.exists(file_path):
- self.logger.info('result file not found: %s', file_path)
+ self.logger.error('Result file not found: {}'.format(file_path))
return None
if testcase_name in dt_cfg.dovetail_config['functest_testcase']:
complex_testcase = False
elif testcase_name in dt_cfg.dovetail_config['functest_testsuite']:
complex_testcase = True
else:
- self.logger.error("Wrong Functest test case %s.", testcase_name)
+ self.logger.error(
+ "Wrong Functest test case {}.".format(testcase_name))
return None
with open(file_path, 'r') as f:
for jsonfile in f:
@@ -264,7 +277,8 @@ class FunctestCrawler(object):
"errors": error_case,
"skipped": skipped_case}
except KeyError as e:
- self.logger.error("Key error, exception: %s", e)
+ self.logger.exception(
+ "Result data don't have key {}.".format(e))
return None
except ValueError:
continue
@@ -273,20 +287,20 @@ class FunctestCrawler(object):
'timestop': timestop, 'duration': duration,
'details': details}
- self.logger.debug('Results: %s', str(json_results))
+ self.logger.debug('Results: {}'.format(str(json_results)))
return json_results
def crawl_from_url(self, testcase=None):
url = "%s?case=%s&last=1" % \
(dt_cfg.dovetail_config['report_dest'],
testcase.validate_testcase())
- self.logger.debug("Query to rest api: %s", url)
+ self.logger.debug("Query to rest api: {}".format(url))
try:
data = json.load(urllib2.urlopen(url))
return data['results'][0]
except Exception as e:
- self.logger.error("Cannot read content from the url: %s, "
- "exception: %s", url, e)
+ self.logger.exception("Cannot read content from the url: {}, "
+ "exception: {}".format(url, e))
return None
@@ -296,7 +310,7 @@ class YardstickCrawler(object):
def __init__(self):
self.type = 'yardstick'
- self.logger.debug('create crawler:%s', self.type)
+ self.logger.debug('Create crawler: {}'.format(self.type))
@classmethod
def create_log(cls):
@@ -313,9 +327,9 @@ class YardstickCrawler(object):
def crawl_from_file(self, testcase=None):
file_path = os.path.join(dt_cfg.dovetail_config['result_dir'],
- testcase.validate_testcase() + '.out')
+ testcase.name() + '.out')
if not os.path.exists(file_path):
- self.logger.info('result file not found: %s', file_path)
+ self.logger.error('Result file not found: {}'.format(file_path))
return None
criteria = 'FAIL'
with open(file_path, 'r') as f:
@@ -327,9 +341,57 @@ class YardstickCrawler(object):
if 1 == v:
criteria = 'PASS'
except KeyError as e:
- self.logger.error('pass flag not found %s', e)
+ self.logger.exception(
+ 'Pass flag not found {}'.format(e))
json_results = {'criteria': criteria}
- self.logger.debug('Results: %s', str(json_results))
+ self.logger.debug('Results: {}'.format(str(json_results)))
+ return json_results
+
+ def crawl_from_url(self, testcase=None):
+ return None
+
+
+class BottlenecksCrawler(object):
+
+ logger = None
+
+ def __init__(self):
+ self.type = 'bottlenecks'
+ self.logger.debug('Create crawler: {}'.format(self.type))
+
+ @classmethod
+ def create_log(cls):
+ cls.logger = \
+ dt_logger.Logger(__name__ + '.BottlenecksCrawler').getLogger()
+
+ def crawl(self, testcase=None):
+ report_dest = dt_cfg.dovetail_config['report_dest']
+ if report_dest.lower() == 'file':
+ return self.crawl_from_file(testcase)
+
+ if report_dest.lower().startswith('http'):
+ return self.crawl_from_url(testcase)
+
+ def crawl_from_file(self, testcase=None):
+ file_path = os.path.join(dt_cfg.dovetail_config['result_dir'],
+ testcase.name() + '.out')
+ if not os.path.exists(file_path):
+ self.logger.error('Result file not found: {}'.format(file_path))
+ return None
+ criteria = 'FAIL'
+ with open(file_path, 'r') as f:
+ for jsonfile in f:
+ data = json.loads(jsonfile)
+ try:
+ if 'PASS' == data["data_body"]["result"]:
+ criteria = 'PASS'
+ else:
+ criteria = 'FAIL'
+ break
+ except KeyError as e:
+ self.logger.exception('Pass flag not found {}'.format(e))
+ json_results = {'criteria': criteria}
+ self.logger.debug('Results: {}'.format(str(json_results)))
return json_results
def crawl_from_url(self, testcase=None):
@@ -361,6 +423,7 @@ class CrawlerFactory(object):
CRAWLER_MAP = {'functest': FunctestCrawler,
'yardstick': YardstickCrawler,
+ 'bottlenecks': BottlenecksCrawler,
'shell': ShellCrawler}
@classmethod
@@ -394,14 +457,16 @@ class FunctestChecker(object):
sub_testcase = re.sub("\[.*?\]", "", sub_testcase)
reg = sub_testcase + '[\s+\d+]'
find_reg = re.compile(reg)
- match = find_reg.findall(result)
- if match:
- return True
+ for tc in result:
+ match = find_reg.findall(tc)
+ if match:
+ return True
reg = sub_testcase + '$'
find_reg = re.compile(reg)
- match = find_reg.findall(result)
- if match:
- return True
+ for tc in result:
+ match = find_reg.findall(tc)
+ if match:
+ return True
return False
def check(self, testcase, db_result):
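The checker change above switches get_sub_testcase from matching a single result string to scanning a list of executed test names; a condensed, illustrative equivalent (the test names are hypothetical):

import re

# Illustrative condensation of the reworked FunctestChecker.get_sub_testcase:
# strip the bracketed test id, then try both regex forms against every entry
# in the list of executed tests.
def found(sub_testcase, result):
    sub_testcase = re.sub(r"\[.*?\]", "", sub_testcase)
    for reg in (sub_testcase + r'[\s+\d+]', sub_testcase + '$'):
        find_reg = re.compile(reg)
        if any(find_reg.findall(tc) for tc in result):
            return True
    return False

executed = ['tempest.api.network.test_networks.NetworksIpV6Test.test_show_network 0.512']
print(found('tempest.api.network.test_networks.NetworksIpV6Test.test_show_network'
            '[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]', executed))  # True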
@@ -420,7 +485,7 @@ class FunctestChecker(object):
testcase_passed = 'SKIP'
for sub_testcase in sub_testcase_list:
- self.logger.debug('check sub_testcase:%s', sub_testcase)
+ self.logger.debug('Check sub_testcase: {}'.format(sub_testcase))
try:
if self.get_sub_testcase(sub_testcase,
db_result['details']['errors']):
@@ -463,6 +528,24 @@ class YardstickChecker(object):
return
+class BottlenecksChecker(object):
+
+ logger = None
+
+ @classmethod
+ def create_log(cls):
+ cls.logger = \
+ dt_logger.Logger(__name__ + '.BottlenecksChecker').getLogger()
+
+ @staticmethod
+ def check(testcase, result):
+ if not result:
+ testcase.passed('FAIL')
+ else:
+ testcase.passed(result['criteria'])
+ return
+
+
class ShellChecker(object):
@staticmethod
@@ -477,6 +560,7 @@ class CheckerFactory(object):
CHECKER_MAP = {'functest': FunctestChecker,
'yardstick': YardstickChecker,
+ 'bottlenecks': BottlenecksChecker,
'shell': ShellChecker}
@classmethod
diff --git a/dovetail/run.py b/dovetail/run.py
index 521379d7..01111dd3 100755
--- a/dovetail/run.py
+++ b/dovetail/run.py
@@ -22,8 +22,8 @@ from container import Container
from testcase import Testcase
from testcase import Testsuite
from report import Report
-from report import FunctestCrawler, YardstickCrawler
-from report import FunctestChecker, YardstickChecker
+from report import FunctestCrawler, YardstickCrawler, BottlenecksCrawler
+from report import FunctestChecker, YardstickChecker, BottlenecksChecker
from utils.dovetail_config import DovetailConfig as dt_cfg
from test_runner import DockerRunner, ShellRunner
@@ -46,11 +46,11 @@ def run_test(testsuite, testarea, logger):
duration = 0
start_time = time.time()
for testcase_name in testarea_list:
- logger.info('>>[testcase]: %s', testcase_name)
+ logger.info('>>[testcase]: {}'.format(testcase_name))
testcase = Testcase.get(testcase_name)
if testcase is None:
- logger.error('test case %s is not defined in testcase folder, \
- skipping', testcase_name)
+ logger.error('Test case {} is not defined in testcase folder, '
+ 'skipping.'.format(testcase_name))
continue
run_testcase = True
@@ -77,27 +77,30 @@ def check_tc_result(testcase, logger):
dovetail_result = os.path.join(result_dir,
dt_cfg.dovetail_config['result_file'])
if dt_cfg.dovetail_config['report_dest'].startswith("http"):
- if validate_type.lower() == 'yardstick':
- logger.info("Results have been stored with file %s.",
- os.path.join(result_dir,
- testcase.validate_testcase() + '.out'))
+ if dt_utils.store_db_results(dt_cfg.dovetail_config['report_dest'],
+ dt_cfg.dovetail_config['build_tag'],
+ testcase.name(), dovetail_result,
+ logger):
+ logger.info("Results have been pushed to database and stored "
+ "with local file {}.".format(dovetail_result))
else:
- if dt_utils.store_db_results(dt_cfg.dovetail_config['report_dest'],
- dt_cfg.dovetail_config['build_tag'],
- testcase.name(), dovetail_result,
- logger):
- logger.info("Results have been pushed to database and stored "
- "with local file %s.", dovetail_result)
- else:
- logger.error("Fail to push results to database.")
+ logger.error("Failed to push results to database.")
if dt_cfg.dovetail_config['report_dest'] == "file":
if validate_type.lower() == 'yardstick':
- logger.info("Results have been stored with file %s.",
- os.path.join(result_dir,
- testcase.validate_testcase() + '.out'))
- if validate_type.lower() == 'functest':
- logger.info("Results have been stored with file %s.",
- os.path.join(result_dir, functest_result))
+ result_file = os.path.join(result_dir, testcase.name() + '.out')
+ elif validate_type.lower() == 'functest':
+ result_file = os.path.join(result_dir, functest_result)
+ elif validate_type.lower() == 'bottlenecks':
+ result_file = os.path.join(result_dir, testcase.name() + '.out')
+ else:
+            logger.error("Unsupported type {}.".format(validate_type))
+ return
+ if os.path.isfile(result_file):
+ logger.info(
+ "Results have been stored with file {}.".format(result_file))
+ else:
+ logger.error(
+ "Failed to store results with file {}.".format(result_file))
result = Report.get_result(testcase)
Report.check_result(testcase, result)
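With the rework above, check_tc_result picks the local result file per validate type before reporting whether it was stored; a sketch of that selection, with illustrative directory and file names:

import os

# Sketch of the result-file selection for the "file" report destination;
# result_dir and functest_result are illustrative placeholders.
result_dir = '/home/opnfv/dovetail/results'
functest_result = 'functest_results.txt'

def result_file_for(validate_type, testcase_name):
    if validate_type in ('yardstick', 'bottlenecks'):
        return os.path.join(result_dir, testcase_name + '.out')
    if validate_type == 'functest':
        return os.path.join(result_dir, functest_result)
    return None  # any other type is reported as unsupported

print(result_file_for('bottlenecks', 'dovetail.resiliency.tc001'))
# /home/opnfv/dovetail/results/dovetail.resiliency.tc001.out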
@@ -106,20 +109,27 @@ def validate_input(input_dict, check_dict, logger):
# for 'func_tag' and 'yard_tag' options
func_tag = input_dict['func_tag']
yard_tag = input_dict['yard_tag']
+ bott_tag = input_dict['bott_tag']
valid_tag = check_dict['valid_docker_tag']
if func_tag is not None and func_tag not in valid_tag:
- logger.error("func_tag can't be %s, valid in %s", func_tag, valid_tag)
+ logger.error("The input option 'func_tag' can't be {}, "
+ "valid values are {}.".format(func_tag, valid_tag))
raise SystemExit(1)
if yard_tag is not None and yard_tag not in valid_tag:
- logger.error("yard_tag can't be %s, valid in %s", yard_tag, valid_tag)
+ logger.error("The input option 'yard_tag' can't be {}, "
+ "valid values are {}.".format(yard_tag, valid_tag))
+ raise SystemExit(1)
+ if bott_tag is not None and bott_tag not in valid_tag:
+ logger.error("The input option 'bott_tag' can't be {}, "
+ "valid values are {}.".format(bott_tag, valid_tag))
raise SystemExit(1)
# for 'report' option
report = input_dict['report']
if report:
if not (report.startswith("http") or report == "file"):
- logger.error("report can't be %s", input_dict['report'])
- logger.info("valid report types are 'file' and 'http'")
+ logger.error("Report type can't be {}, valid types are 'file' "
+ "and 'http'.".format(input_dict['report']))
raise SystemExit(1)
@@ -148,7 +158,7 @@ def filter_config(input_dict, logger):
configs[key.upper()] = value_dict
break
except KeyError as e:
- logger.exception('%s lacks subsection %s', config_key, e)
+ logger.exception('KeyError {}.'.format(e))
raise SystemExit(1)
if not configs:
return None
@@ -161,8 +171,10 @@ def create_logs():
Report.create_log()
FunctestCrawler.create_log()
YardstickCrawler.create_log()
+ BottlenecksCrawler.create_log()
FunctestChecker.create_log()
YardstickChecker.create_log()
+ BottlenecksChecker.create_log()
Testcase.create_log()
Testsuite.create_log()
DockerRunner.create_log()
@@ -174,7 +186,7 @@ def clean_results_dir():
if os.path.exists(result_path):
if os.path.isdir(result_path):
cmd = 'sudo rm -rf %s/*' % (result_path)
- dt_utils.exec_cmd(cmd, exit_on_error=False)
+ dt_utils.exec_cmd(cmd, exit_on_error=False, exec_msg_on=False)
else:
print "result_dir in dovetail_config.yml is not a directory."
raise SystemExit(1)
@@ -196,7 +208,7 @@ def get_result_path():
def copy_userconfig_files(logger):
- dovetail_home = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ dovetail_home = os.path.dirname(os.path.abspath(__file__))
userconfig_path = os.path.join(dovetail_home, 'userconfig')
pre_config_path = dt_cfg.dovetail_config['config_dir']
if not os.path.isdir(pre_config_path):
@@ -205,9 +217,19 @@ def copy_userconfig_files(logger):
dt_utils.exec_cmd(cmd, logger, exit_on_error=False)
+# env_init sources environment variables used by dovetail, such as
+# OS_CACERT when https with credentials is used.
+def env_init(logger):
+ openrc = os.path.join(dt_cfg.dovetail_config['config_dir'],
+ dt_cfg.dovetail_config['env_file'])
+ if not os.path.isfile(openrc):
+ logger.error("File {} does not exist.".format(openrc))
+ dt_utils.source_env(openrc)
+
+
def main(*args, **kwargs):
"""Dovetail compliance test entry!"""
- build_tag = "daily-master-%s" % str(uuid.uuid4())
+ build_tag = "daily-master-%s" % str(uuid.uuid1())
dt_cfg.dovetail_config['build_tag'] = build_tag
if not get_result_path():
return
@@ -217,9 +239,10 @@ def main(*args, **kwargs):
create_logs()
logger = dt_logger.Logger('run').getLogger()
logger.info('================================================')
- logger.info('Dovetail compliance: %s!', (kwargs['testsuite']))
+ logger.info('Dovetail compliance: {}!'.format(kwargs['testsuite']))
logger.info('================================================')
- logger.info('Build tag: %s', dt_cfg.dovetail_config['build_tag'])
+ logger.info('Build tag: {}'.format(dt_cfg.dovetail_config['build_tag']))
+ env_init(logger)
copy_userconfig_files(logger)
dt_utils.check_docker_version(logger)
validate_input(kwargs, dt_cfg.dovetail_config['validate_input'], logger)
@@ -232,6 +255,7 @@ def main(*args, **kwargs):
if(kwargs['report'].endswith('/')):
kwargs['report'] = kwargs['report'][0:kwargs['report'].rfind('/')]
dt_cfg.dovetail_config['report_dest'] = kwargs['report']
+ dt_cfg.update_cmds()
if kwargs['offline']:
dt_cfg.dovetail_config['offline'] = True
@@ -252,9 +276,10 @@ def main(*args, **kwargs):
duration = run_test(testsuite_yaml, testarea, logger)
if dt_cfg.dovetail_config['report_dest'] == "file":
Report.generate(testsuite_yaml, testarea, duration)
+ Report.save_logs()
else:
- logger.error('invalid input commands, testsuite %s testarea %s',
- kwargs['testsuite'], testarea)
+ logger.error('Invalid input commands, testsuite {} testarea {}'
+ .format(kwargs['testsuite'], testarea))
dt_cfg.load_config_files()
diff --git a/dovetail/test_runner.py b/dovetail/test_runner.py
index 6ae410c1..603156fe 100644
--- a/dovetail/test_runner.py
+++ b/dovetail/test_runner.py
@@ -22,7 +22,7 @@ class DockerRunner(object):
def __init__(self, testcase):
self.testcase = testcase
- self.logger.debug('create runner: %s', self.type)
+ self.logger.debug('Create runner: {}'.format(self.type))
@classmethod
def create_log(cls):
@@ -31,8 +31,8 @@ class DockerRunner(object):
def pre_copy(self, container_id=None, dest_path=None,
src_file=None, exist_file=None):
if not dest_path:
- self.logger.error("There has no dest_path in %s config file.",
- self.testcase.name())
+            self.logger.error("There is no dest_path in {} config file."
+ .format(self.testcase.name()))
return None
if src_file:
self.testcase.mk_src_file()
@@ -49,8 +49,8 @@ class DockerRunner(object):
if dt_cfg.dovetail_config['offline']:
exist = Container.check_image_exist(self.testcase.validate_type())
if not exist:
- self.logger.error('%s image not exist when running offline',
- self.testcase.validate_type())
+ self.logger.error("{} image doesn't exist, can't run offline."
+ .format(self.testcase.validate_type()))
return
else:
if not Container.pull_image(self.testcase.validate_type()):
@@ -63,15 +63,15 @@ class DockerRunner(object):
img_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
img_name)
if not os.path.isfile(img_file):
- self.logger.error('image %s not found', img_name)
+ self.logger.error('Image {} not found.'.format(img_name))
return
container_id = Container.create(self.testcase.validate_type(),
self.testcase.name())
if not container_id:
- self.logger.error('failed to create container')
+ self.logger.error('Failed to create container.')
return
- self.logger.debug('container id:%s', container_id)
+ self.logger.debug('container id: {}'.format(container_id))
dest_path = self.testcase.pre_copy_path("dest_path")
src_file_name = self.testcase.pre_copy_path("src_file")
@@ -95,14 +95,14 @@ class DockerRunner(object):
self.testcase.prepared(True)
if not self.testcase.prepare_cmd(self.type):
- self.logger.error('failed to prepare testcase:%s',
- self.testcase.name())
+ self.logger.error(
+ 'Failed to prepare test case: {}'.format(self.testcase.name()))
else:
for cmd in self.testcase.cmds:
ret, msg = Container.exec_cmd(container_id, cmd)
if ret != 0:
- self.logger.error('Failed to exec %s, ret:%d, msg:%s',
- cmd, ret, msg)
+ self.logger.error('Failed to exec {}, ret: {}, msg: {}'
+ .format(cmd, ret, msg))
break
cmds = self.testcase.post_condition()
@@ -111,7 +111,7 @@ class DockerRunner(object):
ret, msg = Container.exec_cmd(container_id, cmd)
self.testcase.cleaned(True)
- Container.clean(container_id)
+ Container.clean(container_id, self.type)
class FunctestRunner(DockerRunner):
@@ -128,6 +128,13 @@ class YardstickRunner(DockerRunner):
super(YardstickRunner, self).__init__(testcase)
+class BottlenecksRunner(DockerRunner):
+
+ def __init__(self, testcase):
+ self.type = 'bottlenecks'
+ super(BottlenecksRunner, self).__init__(testcase)
+
+
class ShellRunner(object):
logger = None
@@ -140,7 +147,7 @@ class ShellRunner(object):
super(ShellRunner, self).__init__()
self.testcase = testcase
self.type = 'shell'
- self.logger.debug('create runner:%s', self.type)
+ self.logger.debug('Create runner: {}'.format(self.type))
def run(self):
testcase_passed = 'PASS'
@@ -158,8 +165,8 @@ class ShellRunner(object):
self.testcase.prepared(True)
if not self.testcase.prepare_cmd(self.type):
- self.logger.error('failed to prepare cmd:%s',
- self.testcase.name())
+ self.logger.error(
+ 'Failed to prepare cmd: {}'.format(self.testcase.name()))
else:
for cmd in self.testcase.cmds:
ret, msg = dt_utils.exec_cmd(cmd, self.logger)
@@ -176,13 +183,13 @@ class ShellRunner(object):
result_filename = os.path.join(dt_cfg.dovetail_config['result_dir'],
self.testcase.name()) + '.out'
- self.logger.debug('save result:%s', result_filename)
+ self.logger.debug('Save result: {}'.format(result_filename))
try:
with open(result_filename, 'w') as f:
f.write(json.dumps(result))
except Exception as e:
- self.logger.exception('Failed to write result into file:%s, \
- except:%s', result_filename, e)
+ self.logger.exception('Failed to write result into file: {}, '
+ 'exception: {}'.format(result_filename, e))
class TestRunnerFactory(object):
@@ -190,6 +197,7 @@ class TestRunnerFactory(object):
TEST_RUNNER_MAP = {
"functest": FunctestRunner,
"yardstick": YardstickRunner,
+ "bottlenecks": BottlenecksRunner,
"shell": ShellRunner,
}
diff --git a/dovetail/testcase.py b/dovetail/testcase.py
index 2ceb8f20..bdfd3d35 100644
--- a/dovetail/testcase.py
+++ b/dovetail/testcase.py
@@ -39,7 +39,7 @@ class Testcase(object):
return False
# self.logger.debug('cmd_lines:%s', cmd_lines)
self.cmds.append(cmd_lines)
- self.logger.debug('cmds:%s', self.cmds)
+ self.logger.debug('cmds: {}'.format(self.cmds))
return True
def prepare_cmd(self, test_type):
@@ -55,7 +55,7 @@ class Testcase(object):
return self.parse_cmd(testcase_cmds)
if config_cmds:
return self.parse_cmd(config_cmds)
- self.logger.error('testcase %s has no cmds', self.name())
+ self.logger.error('Test case {} has no cmds.'.format(self.name()))
return False
def __str__(self):
@@ -75,7 +75,8 @@ class Testcase(object):
def sub_testcase_passed(self, name, passed=None):
if passed is not None:
- self.logger.debug('sub_testcase_passed:%s %s', name, passed)
+ self.logger.debug(
+ 'sub_testcase_passed: {} {}'.format(name, passed))
self.sub_testcase_status[name] = passed
return self.sub_testcase_status[name]
@@ -111,8 +112,8 @@ class Testcase(object):
return pre_condition
pre_condition = self.pre_condition_cls(self.validate_type())
if not pre_condition:
- self.logger.debug('testcase:%s pre_condition is empty',
- self.name())
+ self.logger.debug(
+ 'Test case: {} pre_condition is empty.'.format(self.name()))
return pre_condition
def pre_copy_path(self, key_name):
@@ -131,8 +132,8 @@ class Testcase(object):
return post_condition
post_condition = self.post_condition_cls(self.validate_type())
if not post_condition:
- self.logger.debug('testcae:%s post_condition is empty',
- self.name())
+ self.logger.debug(
+ 'Test case: {} post_condition is empty.'.format(self.name()))
return post_condition
def mk_src_file(self):
@@ -143,12 +144,13 @@ class Testcase(object):
with open(file_path, 'w+') as src_file:
if self.sub_testcase() is not None:
for sub_test in self.sub_testcase():
- self.logger.debug('save testcases %s', sub_test)
+ self.logger.debug(
+ 'Save test cases {}'.format(sub_test))
src_file.write(sub_test + '\n')
- self.logger.debug('save testcases to %s', file_path)
+ self.logger.debug('Save test cases to {}'.format(file_path))
return file_path
except Exception:
- self.logger.error('Failed to save: %s', file_path)
+ self.logger.exception('Failed to save: {}'.format(file_path))
return None
def run(self):
@@ -156,7 +158,8 @@ class Testcase(object):
try:
runner.run()
except AttributeError as e:
- self.logger.exception('testcase:%s except:%s', self.name, e)
+ self.logger.exception(
+ 'Test case: {} Exception: {}'.format(self.name, e))
# testcase in upstream testing project
# validate_testcase_list = {'functest': {}, 'yardstick': {}, 'shell': {}}
@@ -230,8 +233,8 @@ class Testcase(object):
cls.testcase_list[next(testcase_yaml.iterkeys())] = \
testcase
else:
- cls.logger.error('failed to create testcase: %s',
- testcase_file)
+ cls.logger.error('Failed to create test case: {}'
+ .format(testcase_file))
@classmethod
def get(cls, testcase_name):
@@ -251,13 +254,30 @@ class FunctestTestcase(Testcase):
class YardstickTestcase(Testcase):
- validate_testcae_list = {}
+ validate_testcase_list = {}
def __init__(self, testcase_yaml):
super(YardstickTestcase, self).__init__(testcase_yaml)
self.type = 'yardstick'
+class BottlenecksTestcase(Testcase):
+
+ validate_testcase_list = {}
+
+ def __init__(self, testcase_yaml):
+ super(BottlenecksTestcase, self).__init__(testcase_yaml)
+ self.type = 'bottlenecks'
+ self._update_cmds()
+
+ def _update_cmds(self):
+ if dt_cfg.dovetail_config['report_dest'].startswith("http"):
+ try:
+ self.testcase['validate']['cmds'][0] += ' --report'
+ except KeyError:
+ return
+
+
class ShellTestcase(Testcase):
validate_testcase_list = {}
@@ -271,6 +291,7 @@ class TestcaseFactory(object):
TESTCASE_TYPE_MAP = {
'functest': FunctestTestcase,
'yardstick': YardstickTestcase,
+ 'bottlenecks': BottlenecksTestcase,
'shell': ShellTestcase,
}
diff --git a/dovetail/testcase/defcore.tc001.yml b/dovetail/testcase/defcore.tc001.yml
index 39be7471..19092c09 100644
--- a/dovetail/testcase/defcore.tc001.yml
+++ b/dovetail/testcase/defcore.tc001.yml
@@ -9,10 +9,10 @@ dovetail.defcore.tc001:
type: functest
testcase: refstack_defcore
pre_condition:
- - 'echo test for precondition in testcase'
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: defcore.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/refstack_client/defcore.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/refstack_client/defcore.txt
cmds:
- 'functest env prepare'
- 'functest testcase run refstack_defcore -r'
diff --git a/dovetail/testcase/ipv6.tc001.yml b/dovetail/testcase/ipv6.tc001.yml
index a70d41eb..6a2f2543 100644
--- a/dovetail/testcase/ipv6.tc001.yml
+++ b/dovetail/testcase/ipv6.tc001.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc001:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
diff --git a/dovetail/testcase/ipv6.tc002.yml b/dovetail/testcase/ipv6.tc002.yml
index c8254bb8..77720581 100644
--- a/dovetail/testcase/ipv6.tc002.yml
+++ b/dovetail/testcase/ipv6.tc002.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc002:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
diff --git a/dovetail/testcase/ipv6.tc003.yml b/dovetail/testcase/ipv6.tc003.yml
index 339d405b..a3b0b9d1 100644
--- a/dovetail/testcase/ipv6.tc003.yml
+++ b/dovetail/testcase/ipv6.tc003.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc003:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
diff --git a/dovetail/testcase/ipv6.tc004.yml b/dovetail/testcase/ipv6.tc004.yml
index 514a846e..41d703c3 100644
--- a/dovetail/testcase/ipv6.tc004.yml
+++ b/dovetail/testcase/ipv6.tc004.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc004:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
diff --git a/dovetail/testcase/ipv6.tc005.yml b/dovetail/testcase/ipv6.tc005.yml
index 3dcca9b2..7b197ec1 100644
--- a/dovetail/testcase/ipv6.tc005.yml
+++ b/dovetail/testcase/ipv6.tc005.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc005:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_networks.NetworksIpV6Test.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
diff --git a/dovetail/testcase/ipv6.tc006.yml b/dovetail/testcase/ipv6.tc006.yml
index dc6b2da6..0f9fd286 100644
--- a/dovetail/testcase/ipv6.tc006.yml
+++ b/dovetail/testcase/ipv6.tc006.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc006:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
diff --git a/dovetail/testcase/ipv6.tc007.yml b/dovetail/testcase/ipv6.tc007.yml
index b2658ec9..92ba2fba 100644
--- a/dovetail/testcase/ipv6.tc007.yml
+++ b/dovetail/testcase/ipv6.tc007.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc007:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
diff --git a/dovetail/testcase/ipv6.tc008.yml b/dovetail/testcase/ipv6.tc008.yml
index 597a4716..b1b60049 100644
--- a/dovetail/testcase/ipv6.tc008.yml
+++ b/dovetail/testcase/ipv6.tc008.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc008:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
diff --git a/dovetail/testcase/ipv6.tc009.yml b/dovetail/testcase/ipv6.tc009.yml
index 013a76ea..579cee54 100644
--- a/dovetail/testcase/ipv6.tc009.yml
+++ b/dovetail/testcase/ipv6.tc009.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc009:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
diff --git a/dovetail/testcase/ipv6.tc010.yml b/dovetail/testcase/ipv6.tc010.yml
index 48191f0b..a44ab44a 100644
--- a/dovetail/testcase/ipv6.tc010.yml
+++ b/dovetail/testcase/ipv6.tc010.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc010:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
diff --git a/dovetail/testcase/ipv6.tc011.yml b/dovetail/testcase/ipv6.tc011.yml
index 18373ef5..16a214fd 100644
--- a/dovetail/testcase/ipv6.tc011.yml
+++ b/dovetail/testcase/ipv6.tc011.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc011:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
diff --git a/dovetail/testcase/ipv6.tc012.yml b/dovetail/testcase/ipv6.tc012.yml
index 620fdd6b..a987ccb6 100644
--- a/dovetail/testcase/ipv6.tc012.yml
+++ b/dovetail/testcase/ipv6.tc012.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc012:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
diff --git a/dovetail/testcase/ipv6.tc013.yml b/dovetail/testcase/ipv6.tc013.yml
index 8f739bc7..738c8207 100644
--- a/dovetail/testcase/ipv6.tc013.yml
+++ b/dovetail/testcase/ipv6.tc013.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc013:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
diff --git a/dovetail/testcase/ipv6.tc014.yml b/dovetail/testcase/ipv6.tc014.yml
index 8d3ed342..bf89464a 100644
--- a/dovetail/testcase/ipv6.tc014.yml
+++ b/dovetail/testcase/ipv6.tc014.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc014:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
diff --git a/dovetail/testcase/ipv6.tc015.yml b/dovetail/testcase/ipv6.tc015.yml
index 19a9806d..225dd719 100644
--- a/dovetail/testcase/ipv6.tc015.yml
+++ b/dovetail/testcase/ipv6.tc015.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc015:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
diff --git a/dovetail/testcase/ipv6.tc016.yml b/dovetail/testcase/ipv6.tc016.yml
index f5899058..3489aa57 100644
--- a/dovetail/testcase/ipv6.tc016.yml
+++ b/dovetail/testcase/ipv6.tc016.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc016:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
diff --git a/dovetail/testcase/ipv6.tc017.yml b/dovetail/testcase/ipv6.tc017.yml
index 8bef6036..5642ba38 100644
--- a/dovetail/testcase/ipv6.tc017.yml
+++ b/dovetail/testcase/ipv6.tc017.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc017:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
diff --git a/dovetail/testcase/ipv6.tc018.yml b/dovetail/testcase/ipv6.tc018.yml
index 406e02af..0345d071 100644
--- a/dovetail/testcase/ipv6.tc018.yml
+++ b/dovetail/testcase/ipv6.tc018.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc018:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os[compute,id-d7e1f858-187c-45a6-89c9-bdafde619a9f,network,slow]
diff --git a/dovetail/testcase/ipv6.tc019.yml b/dovetail/testcase/ipv6.tc019.yml
index 1e3c171b..afa1d06b 100644
--- a/dovetail/testcase/ipv6.tc019.yml
+++ b/dovetail/testcase/ipv6.tc019.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc019:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os[compute,id-76f26acd-9688-42b4-bc3e-cd134c4cb09e,network,slow]
diff --git a/dovetail/testcase/ipv6.tc020.yml b/dovetail/testcase/ipv6.tc020.yml
index f7ab8b47..caa423f4 100644
--- a/dovetail/testcase/ipv6.tc020.yml
+++ b/dovetail/testcase/ipv6.tc020.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc020:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless[compute,id-7ab23f41-833b-4a16-a7c9-5b42fe6d4123,network,slow]
diff --git a/dovetail/testcase/ipv6.tc021.yml b/dovetail/testcase/ipv6.tc021.yml
index 466b75ca..c5578184 100644
--- a/dovetail/testcase/ipv6.tc021.yml
+++ b/dovetail/testcase/ipv6.tc021.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc021:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless[compute,id-cf1c4425-766b-45b8-be35-e2959728eb00,network]
diff --git a/dovetail/testcase/ipv6.tc022.yml b/dovetail/testcase/ipv6.tc022.yml
index c1d371d6..f8b7e262 100644
--- a/dovetail/testcase/ipv6.tc022.yml
+++ b/dovetail/testcase/ipv6.tc022.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc022:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os[compute,id-2c92df61-29f0-4eaa-bee3-7c65bef62a43,network,slow]
diff --git a/dovetail/testcase/ipv6.tc023.yml b/dovetail/testcase/ipv6.tc023.yml
index 8b816252..f97ec799 100644
--- a/dovetail/testcase/ipv6.tc023.yml
+++ b/dovetail/testcase/ipv6.tc023.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc023:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os[compute,id-b6399d76-4438-4658-bcf5-0d6c8584fde2,network,slow]
diff --git a/dovetail/testcase/ipv6.tc024.yml b/dovetail/testcase/ipv6.tc024.yml
index 8d248901..705e3be7 100644
--- a/dovetail/testcase/ipv6.tc024.yml
+++ b/dovetail/testcase/ipv6.tc024.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc024:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac[compute,id-dec222b1-180c-4098-b8c5-cc1b8342d611,network,slow]
diff --git a/dovetail/testcase/ipv6.tc025.yml b/dovetail/testcase/ipv6.tc025.yml
index 35ef78c7..a4013530 100644
--- a/dovetail/testcase/ipv6.tc025.yml
+++ b/dovetail/testcase/ipv6.tc025.yml
@@ -5,9 +5,11 @@ dovetail.ipv6.tc025:
validate:
type: functest
testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
pre_copy:
src_file: tempest_custom.txt
- dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
report:
sub_testcase_list:
- tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac[compute,id-9178ad42-10e4-47e9-8987-e02b170cc5cd,network]
diff --git a/dovetail/testcase/nfvi.tc101.yml b/dovetail/testcase/nfvi.tc101.yml
deleted file mode 100644
index 2fa47dee..00000000
--- a/dovetail/testcase/nfvi.tc101.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-dovetail.nfvi.tc101:
- name: dovetail.nfvi.tc101
- objective: measure number of cores and threads, available memory size and cache size
- validate:
- type: yardstick
- testcase: opnfv_yardstick_tc001
- report:
- sub_testcase_list:
diff --git a/dovetail/testcase/nfvi.tc102.yml b/dovetail/testcase/nfvi.tc102.yml
deleted file mode 100644
index 6da52243..00000000
--- a/dovetail/testcase/nfvi.tc102.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-dovetail.nfvi.tc102:
- name: dovetail.nfvi.tc102
- objective: measure number of cores and threads, available memory size and cache size
- validate:
- type: yardstick
- testcase: opnfv_yardstick_tc002
- report:
- sub_testcase_list:
diff --git a/dovetail/testcase/resiliency.tc001.yml b/dovetail/testcase/resiliency.tc001.yml
new file mode 100644
index 00000000..86399849
--- /dev/null
+++ b/dovetail/testcase/resiliency.tc001.yml
@@ -0,0 +1,11 @@
+---
+dovetail.resiliency.tc001:
+ name: dovetail.resiliency.tc001
+  objective: >
+    This test case verifies the ability of the SUT to set up VM pairs for different
+    tenants and provide acceptable capacity after the number of VM pairs reaches a certain quantity.
+ validate:
+ type: bottlenecks
+ testcase: posca_factor_ping
+ report:
+ sub_testcase_list:
diff --git a/dovetail/testcase/sdnvpn.tc001.yml b/dovetail/testcase/sdnvpn.tc001.yml
index 25a636d3..9ab3d445 100644
--- a/dovetail/testcase/sdnvpn.tc001.yml
+++ b/dovetail/testcase/sdnvpn.tc001.yml
@@ -7,6 +7,6 @@ dovetail.sdnvpn.tc001:
testcase: bgpvpn
pre_copy:
exist_src_file: sdnvpn_config_testcase1.yaml
- dest_path: /home/opnfv/repos/sdnvpn/sdnvpn/test/functest/config.yaml
+ dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml
report:
sub_testcase_list:
diff --git a/dovetail/testcase/sdnvpn.tc002.yml b/dovetail/testcase/sdnvpn.tc002.yml
index 79846513..a5c70ba9 100644
--- a/dovetail/testcase/sdnvpn.tc002.yml
+++ b/dovetail/testcase/sdnvpn.tc002.yml
@@ -7,6 +7,6 @@ dovetail.sdnvpn.tc002:
testcase: bgpvpn
pre_copy:
exist_src_file: sdnvpn_config_testcase2.yaml
- dest_path: /home/opnfv/repos/sdnvpn/sdnvpn/test/functest/config.yaml
+ dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml
report:
sub_testcase_list:
diff --git a/dovetail/testcase/sdnvpn.tc003.yml b/dovetail/testcase/sdnvpn.tc003.yml
index 4ae3926d..c8c8b2be 100644
--- a/dovetail/testcase/sdnvpn.tc003.yml
+++ b/dovetail/testcase/sdnvpn.tc003.yml
@@ -7,6 +7,6 @@ dovetail.sdnvpn.tc003:
testcase: bgpvpn
pre_copy:
exist_src_file: sdnvpn_config_testcase3.yaml
- dest_path: /home/opnfv/repos/sdnvpn/sdnvpn/test/functest/config.yaml
+ dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml
report:
sub_testcase_list:
diff --git a/dovetail/testcase/sdnvpn.tc004.yml b/dovetail/testcase/sdnvpn.tc004.yml
index 4c7d8d46..f6a4a6ff 100644
--- a/dovetail/testcase/sdnvpn.tc004.yml
+++ b/dovetail/testcase/sdnvpn.tc004.yml
@@ -7,6 +7,6 @@ dovetail.sdnvpn.tc004:
testcase: bgpvpn
pre_copy:
exist_src_file: sdnvpn_config_testcase4.yaml
- dest_path: /home/opnfv/repos/sdnvpn/sdnvpn/test/functest/config.yaml
+ dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml
report:
sub_testcase_list:
diff --git a/dovetail/testcase/sdnvpn.tc008.yml b/dovetail/testcase/sdnvpn.tc008.yml
index e8050977..5713e102 100644
--- a/dovetail/testcase/sdnvpn.tc008.yml
+++ b/dovetail/testcase/sdnvpn.tc008.yml
@@ -7,6 +7,6 @@ dovetail.sdnvpn.tc008:
testcase: bgpvpn
pre_copy:
exist_src_file: sdnvpn_config_testcase8.yaml
- dest_path: /home/opnfv/repos/sdnvpn/sdnvpn/test/functest/config.yaml
+ dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml
report:
sub_testcase_list:
diff --git a/dovetail/testcase/tempest.tc001.yml b/dovetail/testcase/tempest.tc001.yml
new file mode 100644
index 00000000..be02e67a
--- /dev/null
+++ b/dovetail/testcase/tempest.tc001.yml
@@ -0,0 +1,15 @@
+---
+dovetail.tempest.tc001:
+ name: dovetail.tempest.tc001
+ objective: forwarding packets through virtual networks in data path
+ validate:
+ type: functest
+ testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
+ pre_copy:
+ src_file: tempest_custom.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ report:
+ sub_testcase_list:
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_mtu_sized_frames[compute,id-b158ea55-472e-4086-8fa9-c64ac0c6c1d0,network]
diff --git a/dovetail/testcase/tempest.tc002.yml b/dovetail/testcase/tempest.tc002.yml
new file mode 100644
index 00000000..54a56f54
--- /dev/null
+++ b/dovetail/testcase/tempest.tc002.yml
@@ -0,0 +1,20 @@
+---
+dovetail.tempest.tc002:
+ name: dovetail.tempest.tc002
+ objective: filtering packets based on security rules and port security in data path
+ validate:
+ type: functest
+ testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
+ pre_copy:
+ src_file: tempest_custom.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ report:
+ sub_testcase_list:
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port[compute,id-7c0bb1a2-d053-49a4-98f9-ca1a1d849f63,network]
+ - tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_cross_tenant_traffic[compute,id-e79f879e-debb-440c-a7e4-efeda05b6848,network]
+ - tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_in_tenant_traffic[compute,id-63163892-bbf6-4249-aa12-d5ea1f8f421b,network]
+ - tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_multiple_security_groups[compute,id-d2f77418-fcc4-439d-b935-72eca704e293,network]
+ - tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_port_security_disable_security_group[compute,id-7c811dcc-263b-49a3-92d2-1b4d8405f50c,network]
+ - tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_port_update_new_security_group[compute,id-f4d556d7-1526-42ad-bafb-6bebf48568f6,network]
diff --git a/dovetail/testcase/tempest.tc003.yml b/dovetail/testcase/tempest.tc003.yml
new file mode 100644
index 00000000..07d7097f
--- /dev/null
+++ b/dovetail/testcase/tempest.tc003.yml
@@ -0,0 +1,19 @@
+---
+dovetail.tempest.tc003:
+ name: dovetail.tempest.tc003
+ objective: dynamic network runtime operations through the life of a VNF
+ validate:
+ type: functest
+ testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
+ pre_copy:
+ src_file: tempest_custom.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ report:
+ sub_testcase_list:
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_hotplug_nic[compute,id-c5adff73-e961-41f1-b4a9-343614f18cfa,network]
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_subnet_details[compute,id-d8bb918e-e2df-48b2-97cd-b73c95450980,network]
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_instance_port_admin_state[compute,id-f5dfcc22-45fd-409f-954c-5bd500d7890b,network]
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state[compute,id-04b9fe4e-85e8-4aea-b937-ea93885ac59f,network]
diff --git a/dovetail/testcase/tempest.tc004.yml b/dovetail/testcase/tempest.tc004.yml
new file mode 100644
index 00000000..0f5fdd89
--- /dev/null
+++ b/dovetail/testcase/tempest.tc004.yml
@@ -0,0 +1,38 @@
+---
+dovetail.tempest.tc004:
+ name: dovetail.tempest.tc004
+ objective: correct behavior after common virtual machine life cycles events
+ validate:
+ type: functest
+ testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
+ pre_copy:
+ src_file: tempest_custom.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ report:
+ sub_testcase_list:
+ - tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario[compute,id-bdbb5441-9204-419d-a225-b4fdbfb1a1a8,image,network,volume]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_cold_migration[compute,id-a4858f6c-401e-4155-9a49-d5cd053d1a2f,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_cold_migration_revert[compute,id-25b188d7-0183-4b1e-a11d-15840c8e2fd6,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_pause_unpause[compute,id-2b2642db-6568-4b35-b812-eceed3fa20ce,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_reboot[compute,id-7b6860c2-afa3-4846-9522-adeb38dfbe08,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_rebuild[compute,id-88a529c2-1daa-4c85-9aec-d541ba3eb699,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_resize[compute,id-719eb59d-2f42-4b66-b8b1-bb1254473967,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_stop_start[compute,id-61f1aa9a-1573-410e-9054-afa557cab021,network]
+ - tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume[compute,id-5cdf9499-541d-4923-804e-b9a60620a7f0,network]
+ - tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_resize_volume_backed_server_confirm[compute,id-e6c28180-7454-4b59-b188-0257af08a63b,volume]
+ - tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_server_sequence_suspend_resume[compute,id-949da7d5-72c8-4808-8802-e3d70df98e2c]
+ - tempest.scenario.test_shelve_instance.TestShelveInstance.test_shelve_instance[compute,id-1164e700-0af0-4a4c-8792-35909a88743c,image,network]
+ - tempest.scenario.test_shelve_instance.TestShelveInstance.test_shelve_volume_backed_instance[compute,id-c1b6318c-b9da-490b-9c67-9339b627271f,image,network,volume]
+ - tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225TestJSON.test_iscsi_volume[id-e19c0cc6-6720-4ed8-be83-b6603ed5c812]
+ - tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225TestJSON.test_live_block_migration[id-1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b]
+ - tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225TestJSON.test_live_block_migration_paused[id-1e107f21-61b2-4988-8f22-b196e938ab88]
+ - tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_iscsi_volume[id-e19c0cc6-6720-4ed8-be83-b6603ed5c812]
+ - tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_live_block_migration[id-1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b]
+ - tempest.api.compute.admin.test_live_migration.LiveBlockMigrationTestJSON.test_live_block_migration_paused[id-1e107f21-61b2-4988-8f22-b196e938ab88]
+ - tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_cold_migration[id-4bf0be52-3b6f-4746-9a27-3143636fe30d]
+ - tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_list_migrations[id-75c0b83d-72a0-4cf8-a153-631e83e7d53f]
+ - tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_list_migrations_in_flavor_resize_situation[id-1b512062-8093-438e-b47a-37d2f597cd64]
+ - tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_resize_server_revert_deleted_flavor[id-33f1fec3-ba18-4470-8e4e-1d888e7c3593]
+ - tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_revert_cold_migration[id-caa1aa8b-f4ef-4374-be0d-95f001c2ac2d]
diff --git a/dovetail/testcase/tempest.tc005.yml b/dovetail/testcase/tempest.tc005.yml
new file mode 100644
index 00000000..1698e093
--- /dev/null
+++ b/dovetail/testcase/tempest.tc005.yml
@@ -0,0 +1,15 @@
+---
+dovetail.tempest.tc005:
+ name: dovetail.tempest.tc005
+ objective: simple virtual machine resource scheduling on multiple nodes
+ validate:
+ type: functest
+ testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
+ pre_copy:
+ src_file: tempest_custom.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ report:
+ sub_testcase_list:
+ - tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes[compute,id-9cecbe35-b9d4-48da-a37e-7ce70aa43d30,network,smoke]
diff --git a/dovetail/testcase/tempest.tc006.yml b/dovetail/testcase/tempest.tc006.yml
new file mode 100644
index 00000000..f0f57090
--- /dev/null
+++ b/dovetail/testcase/tempest.tc006.yml
@@ -0,0 +1,16 @@
+---
+dovetail.tempest.tc006:
+ name: dovetail.tempest.tc006
+ objective: storage security
+ validate:
+ type: functest
+ testcase: tempest_custom
+ pre_condition:
+ - 'cp /home/opnfv/userconfig/pre_config/tempest_conf.yaml /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
+ pre_copy:
+ src_file: tempest_custom.txt
+ dest_path: /usr/local/lib/python2.7/dist-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
+ report:
+ sub_testcase_list:
+ - tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup[compute,id-cbc752ed-b716-4717-910f-956cce965722,image,volume]
+ - tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks[compute,id-79165fb4-5534-4b9d-8429-97ccffb8f86e,image,volume]
diff --git a/dovetail/testcase/nfvi.tc002.yml b/dovetail/testcase/vping.tc001.yml
index 28a7adfd..e3b501be 100644
--- a/dovetail/testcase/nfvi.tc002.yml
+++ b/dovetail/testcase/vping.tc001.yml
@@ -1,6 +1,6 @@
---
-dovetail.nfvi.tc002:
- name: dovetail.nfvi.tc002
+dovetail.vping.tc001:
+ name: dovetail.vping.tc001
objective: testing for vping using userdata
validate:
type: functest
diff --git a/dovetail/testcase/nfvi.tc001.yml b/dovetail/testcase/vping.tc002.yml
index 157fc8da..a800a4db 100644
--- a/dovetail/testcase/nfvi.tc001.yml
+++ b/dovetail/testcase/vping.tc002.yml
@@ -1,6 +1,6 @@
---
-dovetail.nfvi.tc001:
- name: dovetail.nfvi.tc001
+dovetail.vping.tc002:
+ name: dovetail.vping.tc002
objective: testing for vping using ssh
validate:
type: functest
diff --git a/userconfig/hosts.yaml b/dovetail/userconfig/hosts.yaml
index e4687dfb..e4687dfb 100644
--- a/userconfig/hosts.yaml
+++ b/dovetail/userconfig/hosts.yaml
diff --git a/userconfig/pod.yaml.sample b/dovetail/userconfig/pod.yaml.sample
index 26636a6b..26636a6b 100644
--- a/userconfig/pod.yaml.sample
+++ b/dovetail/userconfig/pod.yaml.sample
diff --git a/userconfig/sdnvpn_config_testcase1.yaml b/dovetail/userconfig/sdnvpn_config_testcase1.yaml
index d9e4a1b4..d9e4a1b4 100644
--- a/userconfig/sdnvpn_config_testcase1.yaml
+++ b/dovetail/userconfig/sdnvpn_config_testcase1.yaml
diff --git a/userconfig/sdnvpn_config_testcase2.yaml b/dovetail/userconfig/sdnvpn_config_testcase2.yaml
index 07b0adfa..07b0adfa 100644
--- a/userconfig/sdnvpn_config_testcase2.yaml
+++ b/dovetail/userconfig/sdnvpn_config_testcase2.yaml
diff --git a/userconfig/sdnvpn_config_testcase3.yaml b/dovetail/userconfig/sdnvpn_config_testcase3.yaml
index 60592cbb..60592cbb 100644
--- a/userconfig/sdnvpn_config_testcase3.yaml
+++ b/dovetail/userconfig/sdnvpn_config_testcase3.yaml
diff --git a/userconfig/sdnvpn_config_testcase4.yaml b/dovetail/userconfig/sdnvpn_config_testcase4.yaml
index 1e221354..1e221354 100644
--- a/userconfig/sdnvpn_config_testcase4.yaml
+++ b/dovetail/userconfig/sdnvpn_config_testcase4.yaml
diff --git a/userconfig/sdnvpn_config_testcase8.yaml b/dovetail/userconfig/sdnvpn_config_testcase8.yaml
index c825997b..c825997b 100644
--- a/userconfig/sdnvpn_config_testcase8.yaml
+++ b/dovetail/userconfig/sdnvpn_config_testcase8.yaml
diff --git a/dovetail/userconfig/tempest_conf.yaml b/dovetail/userconfig/tempest_conf.yaml
new file mode 100644
index 00000000..944e3a9f
--- /dev/null
+++ b/dovetail/userconfig/tempest_conf.yaml
@@ -0,0 +1,16 @@
+# This is an empty configuration file to be filled up with the desired options
+# to generate a custom tempest.conf
+# Examples:
+# network-feature-enabled:
+# port_security: True
+
+# volume-feature-enabled:
+# api_v1: False
+
+# validation:
+# image_ssh_user: root
+# ssh_timeout: 300
+
+# compute:
+# min_compute_nodes: 2
+# volume_device_name: vdb
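The new tempest_conf.yaml maps tempest.conf section names to option overrides. A hedged sketch of folding such a mapping into an existing tempest.conf; functest carries its own tooling for this, so the helper below is only illustrative:

import yaml
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2, as used by dovetail

def merge_tempest_conf(yaml_path, conf_path):
    with open(yaml_path) as f:
        overrides = yaml.safe_load(f) or {}  # the shipped file is all comments
    conf = configparser.ConfigParser()
    conf.read(conf_path)
    for section, options in overrides.items():
        if not conf.has_section(section):
            conf.add_section(section)
        for key, value in options.items():
            conf.set(section, key, str(value))
    with open(conf_path, 'w') as f:
        conf.write(f)

# e.g. merge_tempest_conf('tempest_conf.yaml', '/etc/tempest/tempest.conf')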
diff --git a/dovetail/utils/dovetail_config.py b/dovetail/utils/dovetail_config.py
index f8193e5d..5e100d69 100644
--- a/dovetail/utils/dovetail_config.py
+++ b/dovetail/utils/dovetail_config.py
@@ -54,3 +54,8 @@ class DovetailConfig(object):
def update_non_envs(cls, path, value):
if value:
cls.set_leaf_dict(cls.dovetail_config, path, value)
+
+ @classmethod
+ def update_cmds(cls):
+ if cls.dovetail_config['report_dest'].startswith("http"):
+ cls.dovetail_config['bottlenecks']['cmds'][0] += ' --report'
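For illustration, the effect of the new update_cmds() classmethod with a stand-in config dict; the bottlenecks command string and report URL below are assumed values, not taken from the repo:

# Stand-in for DovetailConfig.dovetail_config
dovetail_config = {
    'report_dest': 'http://testresults.example.org/api/results',
    'bottlenecks': {'cmds': ['bottlenecks testcase run posca_factor_ping']},
}

if dovetail_config['report_dest'].startswith("http"):
    dovetail_config['bottlenecks']['cmds'][0] += ' --report'

# cmds[0] is now '... posca_factor_ping --report'; with report_dest set to a
# local path the command is left unchanged.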
diff --git a/dovetail/utils/dovetail_utils.py b/dovetail/utils/dovetail_utils.py
index e6a775f1..2c7ac317 100644
--- a/dovetail/utils/dovetail_utils.py
+++ b/dovetail/utils/dovetail_utils.py
@@ -111,21 +111,43 @@ def source_env(env_file):
with open(env_file, 'r') as f:
lines = f.readlines()
for line in lines:
- for match in re.findall(r"export (.*)=(.*)", line):
- match = (match[0].strip('\"'), match[1].strip('\"'))
- match = (match[0].strip('\''), match[1].strip('\''))
- os.environ.update({match[0]: match[1]})
+ if line.lstrip().startswith('export'):
+ for match in re.findall(r"export (.*)=(.*)", line):
+ match = (match[0].strip('\"'), match[1].strip('\"'))
+ match = (match[0].strip('\''), match[1].strip('\''))
+ os.environ.update({match[0]: match[1]})
+
+
+def check_https_enabled(logger=None):
+ logger.debug("Checking if https enabled or not...")
+ os_auth_url = os.getenv('OS_AUTH_URL')
+ if os_auth_url.startswith('https'):
+ logger.debug("https is enabled")
+ return True
+ logger.debug("https is not enabled")
+ return False
def get_ext_net_name(env_file, logger=None):
- source_env(env_file)
- cmd_check = "openstack network list"
+ https_enabled = check_https_enabled(logger)
+ insecure_option = ''
+ insecure = os.getenv('OS_INSECURE', '')
+ if https_enabled:
+ logger.info("https enabled...")
+ if insecure.lower() == "true":
+ insecure_option = ' --insecure '
+ else:
+ logger.warn("Env variable OS_INSECURE is {}, if https + no "
+ "credential used, should be set as True."
+ .format(insecure))
+
+ cmd_check = "openstack %s network list" % insecure_option
ret, msg = exec_cmd(cmd_check, logger)
if ret:
- logger.error("The credentials info in %s is invalid." % env_file)
+ logger.error("The credentials info in {} is invalid.".format(env_file))
return None
- cmd = "openstack network list --long | grep 'External' | head -1 | \
- awk '{print $4}'"
+ cmd = "openstack %s network list --long | grep 'External' | head -1 | \
+ awk '{print $4}'" % insecure_option
ret, msg = exec_cmd(cmd, logger)
if not ret:
return msg
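The hunk above only passes --insecure to the openstack CLI when OS_AUTH_URL uses https and OS_INSECURE is set to true. The same decision, factored into a small stand-alone helper for clarity (the function name is made up; it is not part of dovetail_utils):

import os

def openstack_insecure_option():
    # Mirror the new logic: --insecure only for https plus OS_INSECURE=true.
    https_enabled = os.getenv('OS_AUTH_URL', '').startswith('https')
    insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
    return ' --insecure ' if (https_enabled and insecure) else ''

# "openstack %s network list" % openstack_insecure_option()
# -> 'openstack  --insecure  network list'  or  'openstack  network list'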
@@ -134,7 +156,7 @@ def get_ext_net_name(env_file, logger=None):
def store_db_results(db_url, build_tag, testcase, dest_file, logger):
url = "%s?build_tag=%s-%s" % (db_url, build_tag, testcase)
- logger.debug("Query to rest api: %s", url)
+ logger.debug("Query to rest api: {}".format(url))
try:
data = json.load(urllib2.urlopen(url))
if data['results']:
@@ -144,7 +166,8 @@ def store_db_results(db_url, build_tag, testcase, dest_file, logger):
else:
return False
except Exception as e:
- logger.error("Cannot read content from %s, exception: %s", url, e)
+ logger.exception(
+ "Cannot read content from {}, exception: {}".format(url, e))
return False
@@ -157,7 +180,7 @@ def get_duration(start_date, stop_date, logger):
res = "%sm%ss" % (delta / 60, delta % 60)
return res
except ValueError as e:
- logger.error("ValueError: %s", e)
+ logger.exception("ValueError: {}".format(e))
return None
@@ -171,24 +194,17 @@ def show_progress_bar(length):
def check_docker_version(logger=None):
- ret, server_ver = exec_cmd("sudo docker version -f'{{.Server.Version}}'",
- logger=logger)
- ret, client_ver = exec_cmd("sudo docker version -f'{{.Client.Version}}'",
- logger=logger)
- logger.info("\ndocker version: \nclient:%s\nserver:%s", client_ver,
- server_ver)
- if(LooseVersion(client_ver) <= LooseVersion('1.8.0') or
- LooseVersion(server_ver) <= LooseVersion('1.8.0')):
- logger.warn("\n\nDocker version is too old, may cause unpredictable "
- "errors, you can update or install the lastest docker "
- "for both host and container as below:\nwget -qO- "
- "https://get.docker.com/ | sh\n\nClient:%s\nServer:%s",
- client_ver, server_ver)
- exit(-1)
-
- if(client_ver != server_ver):
- logger.warn("\n\nVersion mismatch, may cause unpredictable "
- "errors, you can update or install the lastest "
- "docker for both host and container as below:\nwget "
- "-qO- https://get.docker.com/ | "
- "sh\n\nClient:%s\nServer:%s", client_ver, server_ver)
+ server_ret, server_ver = \
+ exec_cmd("sudo docker version -f'{{.Server.Version}}'", logger=logger)
+ client_ret, client_ver = \
+ exec_cmd("sudo docker version -f'{{.Client.Version}}'", logger=logger)
+ if server_ret == 0:
+ logger.debug("docker server version: {}".format(server_ver))
+ if server_ret != 0 or (LooseVersion(server_ver) < LooseVersion('1.12.3')):
+ logger.error("Don't support this Docker server version. "
+ "Docker server should be updated to at least 1.12.3.")
+ if client_ret == 0:
+ logger.debug("docker client version: {}".format(client_ver))
+ if client_ret != 0 or (LooseVersion(client_ver) < LooseVersion('1.12.3')):
+ logger.error("Don't support this Docker client version. "
+ "Docker client should be updated to at least 1.12.3.")
diff --git a/dovetail/utils/local_db/cases.json b/dovetail/utils/local_db/cases.json
new file mode 100644
index 00000000..118f740b
--- /dev/null
+++ b/dovetail/utils/local_db/cases.json
@@ -0,0 +1,36 @@
+{"testcases": [{"project_name": "functest", "run": {"class": "VPingSSH", "module": "functest.opnfv_tests.openstack.vping.vping_ssh"}, "description": "NFV \u201cHello World\u201d using SSH connection and floatting IP", "ci_loop": "daily", "tags": "ping", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "55e05dba514bc52914149f2e", "catalog_description": "vPing using SSH", "creation_date": "2015-08-28 13:10:18.478986", "domains": null, "dependencies": {"installer": "", "scenario": "^((?!bgpvpn|odl_l3).)*$"}, "version": ">arno.1.0", "criteria": null, "tier": "healthcheck", "trust": "silver", "blocking": true, "name": "vping_ssh"}, {"project_name": "functest", "run": {"class": "", "module": ""}, "description": "This test consists in running OpenStack Tempest fonctional suite.Initially based on the smoke suite, functest aims to define its own subset of tests", "ci_loop": "daily", "tags": "tempest,api,interfaces,openstack,vim", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "55effe6f514bc52b1791f966", "catalog_description": "OpenStack Tempest (smoke)", "creation_date": "2015-09-09 09:39:59.661046", "domains": "vim", "dependencies": {"installer": "", "scenario": ""}, "version": ">arno.1.0", "criteria": null, "tier": "smoke", "trust": "silver", "blocking": true, "name": "tempest_smoke_serial"}, {"project_name": "functest", "run": {"class": "ODLTests", "module": "functest.opnfv_tests.sdn.odl.odl"}, "description": "This test consists in creating, updating and deleting, networks, ports, subnets, routers in ODL and Neutron", "ci_loop": "daily", "tags": "odl,sdn", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "56169c4e514bc52b17f4adaa", "catalog_description": "OpenDaylight", "creation_date": "2015-10-08 16:39:42.727060", "domains": "networking", "dependencies": {"installer": "", "scenario": "odl"}, "version": ">arno.1.0", "criteria": null, "tier": "smoke", "trust": "silver", "blocking": true, "name": "odl"}, {"project_name": "functest", "run": {"class": "", "module": ""}, "description": "Tests of OpenStack performed with Rally framework. 
These tests aim to qualify OpenStack modules and their associated APIs", "ci_loop": "daily", "tags": "rally,api,interfaces,openstack,vim", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "561e504c514bc5355ba8b3a8", "catalog_description": "OpenStack Rally (sanity)", "creation_date": "2015-10-14 12:53:32.126750", "domains": "vim", "dependencies": {"installer": "", "scenario": "^((?!bgpvpn).)*$"}, "version": ">arno.1.0", "criteria": null, "tier": "smoke", "trust": "silver", "blocking": true, "name": "rally_sanity"}, {"project_name": "functest", "run": {"class": "", "module": ""}, "description": "Tests of the ONOS Virtual Network Northbound API", "ci_loop": "daily", "tags": "onos,sdn", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "565d6579514bc5087f2ddd0b", "catalog_description": "ONOS", "creation_date": "2015-12-01 09:16:41.343670", "domains": "networking", "dependencies": {"installer": "", "scenario": "onos"}, "version": ">brahmaputra.1.0", "criteria": null, "tier": "smoke", "trust": "silver", "blocking": true, "name": "onos"}, {"project_name": "functest", "run": {"class": "VPingUserdata", "module": "functest.opnfv_tests.openstack.vping.vping_userdata"}, "description": "vPing using userdata and cloudinit mechanisms", "ci_loop": "daily", "tags": "ping", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "569f60cf514bc541f885b206", "catalog_description": "vPing using userdata", "creation_date": "2016-01-20 10:26:23.160020", "domains": "", "dependencies": {"installer": "", "scenario": "^((?!lxd).)*$"}, "version": ">brahmaputra.1.0", "criteria": null, "tier": "smoke", "trust": "silver", "blocking": true, "name": "vping_userdata"}, {"project_name": "functest", "run": {"class": "", "module": ""}, "description": "Rally full extended suite", "ci_loop": "weekly", "tags": "openstack,vim", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "573c83ef9377c5287772dc81", "catalog_description": "OpenStack Rally (full)", "creation_date": "2016-05-18 15:02:07.153735", "domains": "vim", "dependencies": {"installer": "", "scenario": ""}, "version": ">colorado.1.0", "criteria": {"step1": "success_rate >=90"}, "tier": "components", "trust": "silver", "blocking": false, "name": "rally_full"}, {"project_name": "functest", "run": {"class": "", "module": ""}, "description": "Tempest full using parallel option", "ci_loop": "weekly", "tags": "openstack,vim,tempest", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "573c840d9377c5287772dc82", "catalog_description": "OpenStack Tempest (full)", "creation_date": "2016-05-18 15:02:37.517083", "domains": "vim", "dependencies": {"installer": "", "scenario": ""}, "version": ">colorado.1.0", "criteria": {"step1": "success_rate >=80"}, "tier": "components", "trust": "silver", "blocking": false, "name": "tempest_full_parallel"}, {"project_name": "functest", "run": {"class": "", "module": ""}, "description": "SFC tests for ONOS based scenarios", "ci_loop": "daily", "tags": "sfc,service", "url": "http://artifacts.opnfv.org/functest/colorado/docs/userguide/index.html", "_id": "578502b89377c54b278bbfe3", "catalog_description": "ONOS Service Function Chaining", "creation_date": "2016-07-12 14:46:16.149806", "domains": "networking", "dependencies": {"installer": "", "scenario": "onos-sfc"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": true, 
"name": "onos_sfc"}, {"project_name": "functest", "run": {"class": "ConnectionCheck", "module": "functest.opnfv_tests.openstack.snaps.connection_check"}, "description": "This test case verifies the retrieval of OpenStack clients: Keystone, Glance, Neutron and Nova and may perform some simple queries. When the config value of snaps. use_keystone is True, functest must have access to the cloud's private network.", "ci_loop": "daily", "tags": "snaps,openstack,vim", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "584a5d4c1d2c6e000ab2d7e4", "catalog_description": "Healthcheck OpenStack connectivity", "creation_date": "2016-12-09 07:29:16.270583", "domains": "vim", "dependencies": {"installer": "", "scenario": ""}, "version": ">danube.1.0", "criteria": null, "tier": "healthcheck", "trust": "silver", "blocking": true, "name": "connection_check"}, {"project_name": "functest", "run": {"class": "ApiCheck", "module": "functest.opnfv_tests.openstack.snaps.api_check"}, "description": "This test case verifies the retrieval of OpenStack clients:Keystone, Glance, Neutron and Nova and may perform some simple queries. When the config value of snaps. use_keystone is True, functest must have access to the cloud's private network.", "ci_loop": "daily", "tags": "snaps,api,interfaces,openstack,vim", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "584a5e1d1d2c6e000ab2d7e5", "catalog_description": "Healthcheck OpenStack API", "creation_date": "2016-12-09 07:32:45.435360", "domains": "vim", "dependencies": {"installer": "", "scenario": ""}, "version": ">danube.1.0", "criteria": null, "tier": "healthcheck", "trust": "silver", "blocking": true, "name": "api_check"}, {"project_name": "functest", "run": {"class": "SnapsSmoke", "module": "functest.opnfv_tests.openstack.snaps.smoke"}, "description": "This test case contains tests that setup and destroy environments with VMs with and without Floating IPs with a newly created user and project. Set the config value snaps.use_floating_ips (True|False) to toggle this functionality. When the config value of snaps. 
use_keystone is True, functest must have access to the cloud's private network.", "ci_loop": "daily", "tags": "snaps,openstack,vim", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "584a5e731d2c6e000ab2d7e6", "catalog_description": "SNAPS (smoke)", "creation_date": "2016-12-09 07:34:11.982435", "domains": "vim", "dependencies": {"installer": "", "scenario": ""}, "version": ">danube.1.0", "criteria": null, "tier": "smoke", "trust": "silver", "blocking": true, "name": "snaps_smoke"}, {"project_name": "functest", "run": "daily", "description": "This test case creates executes the SimpleHealthCheck Python test class which creates an, image, flavor, network, and Cirros VM instance and observes the console output to validate the single port obtains the correct IP address.", "ci_loop": "daily", "tags": "dhcp", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "589c8f248cf551000c78211e", "catalog_description": "Healthcheck OpenStack (network)", "creation_date": "2017-02-09 15:47:48.739222", "domains": "vim", "dependencies": "", "version": ">danube", "criteria": "", "tier": "healthcheck", "trust": null, "blocking": true, "name": "snaps_health_check"}, {"project_name": "functest", "run": {"class": "AaaVnf", "module": "functest.opnfv_tests.vnf.aaa.aaa"}, "description": "AAA VNF sample", "ci_loop": "weekly", "tags": "aaa, freeradius, vnf", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "58a324e6584d06000b0157f2", "catalog_description": "VNF AAA", "creation_date": "2017-02-14 15:40:22.150387", "domains": "vnf", "dependencies": "", "version": ">Danube", "criteria": "", "tier": "vnf", "trust": null, "blocking": false, "name": "aaa"}, {"project_name": "functest", "run": {"class": "ImsVnf", "module": "functest.opnfv_tests.vnf.ims.cloudify_ims"}, "description": "Clearwater vIMS deployed with cloudify orchestrator", "ci_loop": "weekly", "tags": "vnf, ims", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "58a325e8584d06000b0157f5", "catalog_description": "VNF clearwater IMS with Cloudify orchestrator", "creation_date": "2017-02-14 15:44:40.235672", "domains": "vnf", "dependencies": "", "version": ">Danube", "criteria": "", "tier": "vnf", "trust": null, "blocking": false, "name": "cloudify_ims"}, {"project_name": "functest", "run": {"class": "ImsVnf", "module": "functest.opnfv_tests.vnf.ims.orchestra_ims"}, "description": "openIMS deployed with openBaton orchestrator", "ci_loop": "weekly", "tags": "vnf, ims", "url": "http://artifacts.opnfv.org/functest/danube/docs/userguide/index.html", "_id": "58a3263b584d06000b0157f6", "catalog_description": "VNF openIMS with Openbaton orchestrator", "creation_date": "2017-02-14 15:46:03.398028", "domains": "vnf", "dependencies": "", "version": ">Danube", "criteria": "", "tier": "vnf", "trust": null, "blocking": false, "name": "orchestra_openims"}, {"project_name": "functest", "run": "", "description": "Defcore List of tempest testcases", "ci_loop": "daily", "tags": "", "url": "", "_id": "58b438daf5a101000ac478ea", "catalog_description": "OpenStack Tempest (defcore)", "creation_date": "2017-02-27 14:34:02.219544", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "components", "trust": null, "blocking": "false", "name": "tempest_defcore"}, {"project_name": "functest", "run": null, "description": "A sub group of tests of the OpenStack Defcore testcases by using refstack client", "ci_loop": "daily", 
"tags": null, "url": null, "_id": "58c91c64b91f23000a62ba0a", "catalog_description": "OpenStack RefStack", "creation_date": "2017-03-15 10:50:12.764115", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": "smoke", "trust": null, "blocking": "false", "name": "refstack_defcore"}, {"project_name": "functest", "run": {"class": "ImsVnf", "module": "functest.opnfv_tests.vnf.ims.opera_ims"}, "description": "Exemple of use of Open-O to deploy a VNF", "ci_loop": "weekly", "tags": "ims, open-o, onap, orchestration", "url": "", "_id": "58ca69abb91f23000a62baae", "catalog_description": "VNF clearwater IMS with Open-O orchestrator", "creation_date": "2017-03-16 10:32:11.203581", "domains": "orchestration, vnf", "dependencies": {"installer": "compass", "scenario": "os-nosdn-openo"}, "version": ">danube", "criteria": "", "tier": "vnf", "trust": null, "blocking": false, "name": "opera_ims"}, {"project_name": "functest", "run": {"class": "VrouterVnf", "module": "functest.opnfv_tests.vnf.router.vyos_vrouter"}, "description": "This test case is vRouter testing", "ci_loop": "", "tags": "vrouter, vnf", "url": "", "_id": "58ca6adeb91f23000a62baaf", "catalog_description": "VNF Vyatta vRouter with Cloudify orchestrator", "creation_date": "2017-03-16 10:37:18.293020", "domains": "network, vnf", "dependencies": {"installer": "fuel", "scenario": "nosdn-nofeature"}, "version": "", "criteria": "", "tier": "vnf", "trust": null, "blocking": "false", "name": "vyos_vrouter"}, {"project_name": "functest", "run": {"class": "ImsVnf", "module": "functest.opnfv_tests.vnf.ims.orchestra_ims"}, "description": "Clearwater vIMS deployed with Openbaton Mano orchestrator.", "ci_loop": "weekly", "tags": "vnf,ims", "url": "http://artifacts.opnfv.org/functest/euphrates/docs/userguide/index.html", "_id": "5922859a356833000a195187", "catalog_description": "VNF Clearwater IMS with OpenBaton orchestrator", "creation_date": "2017-05-22 06:30:50.512246", "domains": "mano,vnf", "dependencies": "", "version": ">euphrates", "criteria": 100, "tier": "vnf", "trust": null, "blocking": "false", "name": "orchestra_ims"}]}
+{"testcases": [{"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovsdpdk", "_id": "565feb6b514bc5087f3cfe2e", "catalog_description": "Packet Loss Ratio for DPDK OVS (RFC2544)", "creation_date": "2015-12-03 07:12:43.925943", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005d9514bc5087f3cfe30", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:29.686136", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005ed514bc5087f3cfe31", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:49.363961", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovsdpdk", "_id": "566006c8514bc5087f3cfe32", "catalog_description": "Back To Back Frames for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:09:28.927130", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "b2b_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovs", "_id": "5660071e514bc5087f3cfe33", "catalog_description": "Back To Back Frames for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:10:54.473180", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "b2b_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovsdpdk", "_id": "566007a9514bc5087f3cfe34", "catalog_description": "Packet Loss Ratio Frame Modification for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:13:13.600168", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_mod_vlan_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovs", "_id": "566007ec514bc5087f3cfe35", "catalog_description": "Packet Loss Ratio Frame Modification for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:14:20.594501", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_mod_vlan_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovsdpdk", 
"_id": "56600870514bc5087f3cfe36", "catalog_description": ".Scalability Packet Loss for DPDK Ovs", "creation_date": "2015-12-03 09:16:32.491960", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "scalability_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovs", "_id": "566008b3514bc5087f3cfe37", "catalog_description": "Scalability Packet Loss for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:17:39.501079", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "scalability_ovs"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsdpdkuser", "_id": "5660095a514bc5087f3cfe38", "catalog_description": "PVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:20:26.244843", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_tput_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsvirtio", "_id": "566009ae514bc5087f3cfe39", "catalog_description": "PVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:21:50.251212", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_tput_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsdpdkuser", "_id": "56600a1a514bc5087f3cfe3a", "catalog_description": "PVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:23:38.269821", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_b2b_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsvirtio", "_id": "56600a5f514bc5087f3cfe3b", "catalog_description": "PVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:24:47.990062", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_b2b_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsdpdkuser", "_id": "56600ab3514bc5087f3cfe3c", "catalog_description": "PVVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:26:11.657515", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_tput_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsvirtio", "_id": "56600ae9514bc5087f3cfe3d", "catalog_description": "PVVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:27:05.466374", 
"domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_tput_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsdpdkuser", "_id": "56600b2a514bc5087f3cfe3e", "catalog_description": "PVVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:28:10.150217", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_b2b_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsvirtio", "_id": "56600b4f514bc5087f3cfe3f", "catalog_description": "PVVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:28:47.108529", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_b2b_ovsvirtio"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs,dpdk", "url": "", "_id": "591e8a8f41b755000a68c831", "catalog_description": "Phy2Phy Continuous Stream DPDK", "creation_date": "2017-05-19 06:02:55.177254", "domains": "compute", "dependencies": "", "version": ">euphrates", "criteria": "", "tier": "performance", "trust": null, "blocking": "", "name": "cont_ovsdpdk"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs", "url": "", "_id": "5980d1b073ce050010c339ca", "catalog_description": "Phy2Phy Continuous Stream", "creation_date": "2017-08-01 19:08:32.518983", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "cont_ovs"}]}
+{"testcases": [{"project_name": "promise", "run": null, "description": "Reservation of a VM for immediate use followed by allocation", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "56583055514bc5087f2ddcea", "catalog_description": null, "creation_date": "2015-11-27 10:28:37.379982", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "VM-reservation-for-immediate-use-followed-by-allocation"}, {"project_name": "promise", "run": null, "description": "Reservation of a VM followed by denial of service to another user and by allocation of reserved VM", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565830a1514bc5087f2ddceb", "catalog_description": null, "creation_date": "2015-11-27 10:29:53.195016", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "VM-reservation-followed-by-denial-of-service-to-another-user-and-by-allocation-of-reserved-VM"}, {"project_name": "promise", "run": null, "description": "Reservation of a VM for future use", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565830cb514bc5087f2ddcec", "catalog_description": null, "creation_date": "2015-11-27 10:30:35.622990", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "VM-reservation-for-future-use"}, {"project_name": "promise", "run": null, "description": "Update of an outstanding reservation, increase capacity", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565830f0514bc5087f2ddced", "catalog_description": null, "creation_date": "2015-11-27 10:31:12.251753", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Update-of-an-outstanding-reservation-increase-capacity"}, {"project_name": "promise", "run": null, "description": "Update of an outstanding reservation, decrease capacity", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "5658310e514bc5087f2ddcee", "catalog_description": null, "creation_date": "2015-11-27 10:31:42.492243", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Update-of-an-outstanding-reservation-decrease-capacity"}, {"project_name": "promise", "run": null, "description": "Notification of reservation change", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "56583126514bc5087f2ddcef", "catalog_description": null, "creation_date": "2015-11-27 10:32:06.281109", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Notification-of-reservation-change"}, {"project_name": "promise", "run": null, "description": "Cancellation of a reservation", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "5658313f514bc5087f2ddcf0", "catalog_description": null, "creation_date": "2015-11-27 10:32:31.819144", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Cancellation-of-a-reservation"}, {"project_name": 
"promise", "run": null, "description": "Query of a reservation", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "56583162514bc5087f2ddcf1", "catalog_description": null, "creation_date": "2015-11-27 10:33:06.651788", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Query-of-a-reservation"}, {"project_name": "promise", "run": null, "description": "Create a bulk reservation of compute capacity", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "56583180514bc5087f2ddcf2", "catalog_description": null, "creation_date": "2015-11-27 10:33:36.633730", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Create-a-bulk-reservation-of-compute-capacity"}, {"project_name": "promise", "run": null, "description": "Rejection of a reservation due to lack of resources", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565831a1514bc5087f2ddcf3", "catalog_description": null, "creation_date": "2015-11-27 10:34:09.196279", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Rejection-of-a-reservation-due-to-lack-of-resources"}, {"project_name": "promise", "run": null, "description": "Reservation of block storage for future use", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565831c0514bc5087f2ddcf4", "catalog_description": null, "creation_date": "2015-11-27 10:34:40.449825", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Reservation-of-block-storage-for-future-use"}, {"project_name": "promise", "run": null, "description": "Capacity Management, query", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565831db514bc5087f2ddcf5", "catalog_description": null, "creation_date": "2015-11-27 10:35:07.912239", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Capacity-Management-query"}, {"project_name": "promise", "run": null, "description": "Capacity Management, notification", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/_media/promise/test_cases.docx", "_id": "565831eb514bc5087f2ddcf6", "catalog_description": null, "creation_date": "2015-11-27 10:35:23.003655", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Capacity-Management-notification"}, {"project_name": "promise", "run": {"class": "", "module": ""}, "description": "Global promise suite", "ci_loop": "daily", "tags": "promise,resource reservation, resource management", "url": "https://wiki.opnfv.org/promise", "_id": "56a9d74a851d7e6a0f74930c", "catalog_description": null, "creation_date": "2016-01-28 08:54:34.857669", "domains": "mano", "dependencies": {"installer": "(fuel)|(joid)", "scenario": ""}, "version": ">brahmaputra.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "promise"}]}
+{"testcases": [{"project_name": "sdnvpn", "run": null, "description": "RF tests for ODL VPN service using ODL REST API.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/sdnvpn/main#functest", "_id": "565c7341514bc5087f2ddcfc", "catalog_description": null, "creation_date": "2015-11-30 16:03:13.149596", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "odl_vpn"}, {"project_name": "sdnvpn", "run": null, "description": "RF tests for BGPVPN Neutron API extensions using Tempest framework.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/sdnvpn/main#Main-OpenStackNeutronBGPVPNAPItests", "_id": "565c7348514bc5087f2ddcfd", "catalog_description": null, "creation_date": "2015-11-30 16:03:20.997647", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "functest_tempest"}, {"project_name": "sdnvpn", "run": null, "description": "VPNs provide connectivity across Neutron networks and subnets if configured accordingly.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/sdnvpn/main#Main-TestCase1-VPNprovidesconnectivitybetweensubnets", "_id": "565d5e45514bc5087f2ddd03", "catalog_description": null, "creation_date": "2015-12-01 08:45:57.900254", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "functest_testcase_1"}, {"project_name": "sdnvpn", "run": null, "description": "Using VPNs to isolate tenants so that overlapping IP address ranges can be used.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/sdnvpn/main#Main-TestCase2-tenantseparation", "_id": "565d5e60514bc5087f2ddd04", "catalog_description": null, "creation_date": "2015-12-01 08:46:24.194775", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "functest_testcase_2"}, {"project_name": "sdnvpn", "run": {"class": "SdnVpnTests", "module": "functest.opnfv_tests.features.sdnvpn"}, "description": "This test case is to report the overall result of all the test cases executed in a specific CI run", "ci_loop": "daily", "tags": "bgpvpn", "url": "https://wiki.opnfv.org/display/sdnvpn/main", "_id": "578f743d9377c54b278bc466", "catalog_description": null, "creation_date": "2016-07-20 12:53:17.290365", "domains": "networking", "dependencies": {"installer": "(fuel)|(apex)", "scenario": "bgpvpn"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "bgpvpn"}]}
+{"testcases": [{"project_name": "sfc", "run": {"class": "OpenDaylightSFC", "module": "functest.opnfv_tests.features.odl_sfc"}, "description": "Test case written in Functest.", "ci_loop": "daily", "tags": "sfc,odl", "url": "", "_id": "57d6d3d41d2c6e000ab1d7a8", "catalog_description": null, "creation_date": "2016-09-12 16:12:04.638903", "domains": "networking", "dependencies": {"installer": "(apex)|(fuel)", "scenario": "odl_l2-sfc"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "functest-odl-sfc"}]}
+{"testcases": [{"project_name": "ha", "run": null, "description": "Sample test case for the HA of controller node Openstack service.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc019.html", "_id": "5923c338356833000a1951d1", "catalog_description": "HA_nova-api_service_down", "creation_date": "2017-05-23 05:06:00.729799", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc019"}, {"project_name": "ha", "run": null, "description": "Test case for TC045 :Control node Openstack service down - neutron server.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc045.html", "_id": "5923c339356833000a1951d2", "catalog_description": "HA_neutron-server_service_down", "creation_date": "2017-05-23 05:06:01.464749", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc045"}, {"project_name": "ha", "run": null, "description": "Test case for TC046 :Control node Openstack service down - keystone.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc046.html", "_id": "5923c33a356833000a1951d3", "catalog_description": "HA_keystone_service_down", "creation_date": "2017-05-23 05:06:02.029462", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc046"}, {"project_name": "ha", "run": null, "description": "Test case for TC047 :Control node Openstack service down - glance api.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc047.html", "_id": "5923c33a356833000a1951d4", "catalog_description": "HA_glance-api_service_down", "creation_date": "2017-05-23 05:06:02.675239", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc047"}, {"project_name": "ha", "run": null, "description": "Test case for TC048 :Control node Openstack service down - cinder api.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc048.html", "_id": "5923c33b356833000a1951d5", "catalog_description": "HA_cinder-api_service_down", "creation_date": "2017-05-23 05:06:03.215756", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc048"}, {"project_name": "ha", "run": null, "description": "Test case for TC051 :OpenStack Controller Node CPU Overload High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc051.html", "_id": "5923c33b356833000a1951d6", "catalog_description": "HA_CPU_overload", "creation_date": "2017-05-23 05:06:03.900311", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc051"}, {"project_name": "ha", "run": null, "description": "Test case for TC052 :OpenStack Controller Node 
Disk I/O Block High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc052.html", "_id": "5923c33c356833000a1951d7", "catalog_description": "HA_disk_I/O_block", "creation_date": "2017-05-23 05:06:04.452133", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc052"}, {"project_name": "ha", "run": null, "description": "Test case for TC053 :Openstack Controller Load Balance Service High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc053.html", "_id": "5923c33d356833000a1951d8", "catalog_description": "HA_load_balancer_service_down", "creation_date": "2017-05-23 05:06:05.031012", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc053"}]}
+{"testcases": [{"project_name": "ipvsix", "run": null, "description": "Test IPv6 connectivity between nodes on the tenant network", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/YARDSTICK-186", "_id": "565da626514bc5087f2ddd21", "catalog_description": null, "creation_date": "2015-12-01 13:52:38.704783", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OPNFV_YARDSTICK_TC027_IPVSIX: IPv6 connectivity between nodes on the tenant network"}]}
+{"testcases": [{"project_name": "vnfgraph", "run": null, "description": "Verify HTTP is blocked, OpenStack Networking-SFC", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/YARDSTICK-194", "_id": "565dabcd514bc5087f2ddd23", "catalog_description": null, "creation_date": "2015-12-01 14:16:45.424034", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OPNFV_YARDSTICK_TC032_VNFGRAPH: Block HTTP (OpenStack Networking-SFC)"}, {"project_name": "vnfgraph", "run": null, "description": "Verify HTTP is blocked, ONOS-SFC", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/YARDSTICK-195", "_id": "565dac1a514bc5087f2ddd24", "catalog_description": null, "creation_date": "2015-12-01 14:18:02.138468", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OPNFV_YARDSTICK_TC034_VNFGRAPH: Block HTTP (ONOS-SFC)"}]}
+{"testcases": [{"project_name": "kvmfornfv", "run": null, "description": "Latency measurements with cyclictest", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/YARDSTICK-188?jql=project%20%3D%20YARDSTICK", "_id": "565d68f4514bc5087f2ddd0d", "catalog_description": null, "creation_date": "2015-12-01 09:31:32.920841", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OPNFV_YARDSTICK_TC028_KVM: Latency measurements with cyclictest"}, {"project_name": "kvmfornfv", "run": null, "description": "packet forwarding latency with vsperf", "ci_loop": "daily", "tags": null, "url": "https://jira.opnfv.org/browse/KVMFORNFV-58", "_id": "59249e6578a2ad000ae6a15d", "catalog_description": null, "creation_date": "2017-05-23 20:41:09.969403", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "packet forwarding latency with vsperf"}, {"project_name": "kvmfornfv", "run": null, "description": "Fast Live Migration", "ci_loop": "daily", "tags": null, "url": "https://jira.opnfv.org/browse/KVMFORNFV-83", "_id": "5924a0af78a2ad000ae6a15f", "catalog_description": null, "creation_date": "2017-05-23 20:50:55.839838", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Fast Live Migration"}, {"project_name": "kvmfornfv", "run": null, "description": "DPDK interrupt mode", "ci_loop": "daily", "tags": null, "url": "https://jira.opnfv.org/browse/KVMFORNFV-81", "_id": "5924a11578a2ad000ae6a160", "catalog_description": null, "creation_date": "2017-05-23 20:52:37.016807", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "DPDK interrupt mode"}]}
+{"testcases": [{"project_name": "ovsnfv", "run": null, "description": "Yardstick NFVI generic test cases run on OVSNFV environment", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/YARDSTICK-196", "_id": "565dae87514bc5087f2ddd25", "catalog_description": null, "creation_date": "2015-12-01 14:28:23.946511", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OPNFV_YARDSTICK_TC035_OVSNFV: Yardstick Test suite for OVSNFV"}]}
+{"testcases": [{"project_name": "armband", "run": null, "description": "Yardstick NFVI generic test cases run on ARM-based hardware", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/YARDSTICK-197", "_id": "565db01d514bc5087f2ddd26", "catalog_description": null, "creation_date": "2015-12-01 14:35:09.108065", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OPNFV_YARDSTICK_TC036_ARMBAND: Yardstick Test suite for ARMBAND"}]}
+{"testcases": [{"project_name": "copper", "run": null, "description": "Set of Tempest test cases to verify the Copper component.", "ci_loop": null, "tags": null, "url": "https://github.com/openstack/congress/tree/master/congress_tempest_tests", "_id": "565d99cb514bc5087f2ddd19", "catalog_description": null, "creation_date": "2015-12-01 12:59:55.741216", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "Tempest-copper"}, {"project_name": "copper", "run": null, "description": "An OpenStack Congress policy test. Sets up and validates policy creation and execution for: 1) Identifying VMs connected to a DMZ (currently identified through a specifically-named security group); 2) Identifying VMs connected per (1), which are by policy not allowed to be (currently implemented through an image tag intended to identify images that are 'authorized' i.e. tested and secure, to be DMZ-connected); 3) Reactively enforce the dmz placement rule by pausing VMs found to be in violation of the policy.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/cgit/copper/plain/tests/dmz.sh", "_id": "577d49739377c54b278bbc92", "catalog_description": null, "creation_date": "2016-07-06 18:09:55.391869", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "dmz"}, {"project_name": "copper", "run": null, "description": "An OpenStack Congress policy test. Sets up and validates policy creation and execution for: 1) Detecting that a reserved subnet has been created, by mistake. 'Reserved' in this example means e.g. not intended for use by VMs.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/cgit/copper/plain/tests/reserved_subnet.sh", "_id": "577d49819377c54b278bbc93", "catalog_description": null, "creation_date": "2016-07-06 18:10:09.205388", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "reserved_subnet"}, {"project_name": "copper", "run": null, "description": "An OpenStack Congress policy test. Sets up and validates policy creation and execution for: 1) Identifying VMs that have STMP (TCP port 25) open for ingress.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/cgit/copper/plain/tests/smtp_ingress.sh", "_id": "577d498e9377c54b278bbc94", "catalog_description": null, "creation_date": "2016-07-06 18:10:22.310104", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "smtp_ingress"}, {"project_name": "copper", "run": {"class": "Copper", "module": "functest.opnfv_tests.feature.copper"}, "description": "", "ci_loop": "daily", "tags": "copper,policy management,congress", "url": "https://wiki.opnfv.org/display/copper", "_id": "57adcbce9377c54b278bd67a", "catalog_description": null, "creation_date": "2016-08-12 13:14:54.469155", "domains": "mano", "dependencies": {"installer": "(apex)|(joid)", "scenario": "^((?!fdio|lxd).)*$"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "copper-notification"}, {"project_name": "copper", "run": null, "description": "An OpenStack Congress policy test. Sets up and validates policy creation and execution for: 1) Detecting that a VM is connected to two networks of different 'security levels' by mistake. 
'Security levels' in this example means that the service provider assigns distinct sensitivity/risk to connections over those networks, e.g. a public network (e.g. DMZ) and an internal/private network (e.g. service provider admin network).", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/cgit/copper/plain/tests/network_bridging.sh", "_id": "5846e6341d2c6e000ab2d403", "catalog_description": null, "creation_date": "2016-12-06 16:24:20.749582", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "network_bridging"}, {"project_name": "copper", "run": null, "description": "An OpenStack Congress test. Verifies that if one instance of an HA-deployed Congress service fails, the Congress service continues to function during the failure period and after HA is service is restored.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/cgit/copper/plain/tests/congress_ha.sh", "_id": "5846e6611d2c6e000ab2d404", "catalog_description": null, "creation_date": "2016-12-06 16:25:05.027643", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "congress_ha"}]}
+{"testcases": [{"project_name": "doctor", "run": {"class": "", "module": ""}, "description": "immediate notification for fault management.", "ci_loop": "daily", "tags": "doctor,fault monitoring", "url": "https://etherpad.opnfv.org/p/doctor_use_case_for_b_release", "_id": "565d9f35514bc5087f2ddd1e", "catalog_description": null, "creation_date": "2015-12-01 13:23:01.972178", "domains": "mano", "dependencies": {"installer": "apex", "scenario": "^((?!fdio).)*$"}, "version": ">brahmaputra.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "doctor-notification"}, {"project_name": "doctor", "run": null, "description": "resource state correction for fault management.", "ci_loop": null, "tags": null, "url": "https://etherpad.opnfv.org/p/doctor_use_case_for_b_release", "_id": "565d9f43514bc5087f2ddd1f", "catalog_description": null, "creation_date": "2015-12-01 13:23:15.994153", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "doctor-mark-down"}]}
+{"testcases": [{"project_name": "qtip", "run": "", "description": "Overall score for system compute performance.", "ci_loop": "", "tags": "", "url": "", "_id": "59530da6bdd637000acb9f02", "catalog_description": "compute performance index", "creation_date": "2017-06-28 02:00:06.539932", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "compute"}]}
+{"testcases": [{"project_name": "ovno", "run": null, "description": "This test suite will check the basic operation such as network creation, noetwork policy, connectivity between VMs on an IPv4 network", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/ovno/project_proposal", "_id": "565ec2cc514bc5087f3cfe27", "catalog_description": null, "creation_date": "2015-12-02 10:07:08.008647", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "opencontrail"}, {"project_name": "ovno", "run": null, "description": "This test suite will check the support of IPv6 for basic operations in opencontrail.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/ovno/project_proposal", "_id": "565ec2fb514bc5087f3cfe28", "catalog_description": null, "creation_date": "2015-12-02 10:07:55.729717", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "opencontrail-ipvsix"}, {"project_name": "ovno", "run": null, "description": "This test suite will check the support of Network policy features in opencontrail.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/ovno/project_proposal", "_id": "565ec31a514bc5087f3cfe29", "catalog_description": null, "creation_date": "2015-12-02 10:08:26.150838", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "opencontrail-policy"}, {"project_name": "ovno", "run": null, "description": "This test suite will check the creation of service template, service instance, network policy related to service instance or policy using opencontrail.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/ovno/project_proposal", "_id": "565ec351514bc5087f3cfe2a", "catalog_description": null, "creation_date": "2015-12-02 10:09:21.915762", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "opencontrail-service"}, {"project_name": "ovno", "run": null, "description": "This test suite requires an gateway router (probably not possible in all labs in first step). Tests deal with creation of external router, floating IP address pool creation, floating IP allocation, connectivity check with external IP addresses.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/ovno/project_proposal", "_id": "565ec3b2514bc5087f3cfe2b", "catalog_description": null, "creation_date": "2015-12-02 10:10:58.745032", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "opencontrail-ext"}]}
+{"testcases": [{"project_name": "bottlenecks", "run": null, "description": "Stress tess on dataplane traffic using netperf.", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/BOTTLENECK-94", "_id": "58cf436a32c829000a11507e", "catalog_description": "Bandwidth Baseline", "creation_date": "2017-03-20 02:50:18.066854", "domains": null, "dependencies": null, "version": ">=danube.1.0", "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "posca_factor_system_bandwidth"}, {"project_name": "bottlenecks", "run": null, "description": "Stress tess on performance life-cycle using ping.", "ci_loop": null, "tags": null, "url": "https://jira.opnfv.org/browse/BOTTLENECK-136", "_id": "58cf486332c829000a11508c", "catalog_description": "Ping Life-cycle Event", "creation_date": "2017-03-20 03:11:31.347177", "domains": null, "dependencies": null, "version": ">=danube.1.0", "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "posca_factor_ping"}]}
+{"testcases": [{"project_name": "policy-test", "run": null, "description": "groupe based policy testing on ODL", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/policytest", "_id": "5669a939514bc5068a345d30", "catalog_description": null, "creation_date": "2015-12-10 16:32:57.643318", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "odl-gbp"}]}
+{"testcases": [{"project_name": "moon", "run": {"class": "", "module": ""}, "description": "This case deals with moon authentication and moon authorization test suites.", "ci_loop": "daily", "tags": "moon,security,keystone,odl,multisite", "url": "To be completed", "_id": "571740a19377c5332042a5e2", "catalog_description": "Moon security module", "creation_date": "2016-04-20 08:41:05.453036", "domains": "security", "dependencies": {"installer": "compass", "scenario": "(odl)*(moon)"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "moon"}]}
+{"testcases": [{"project_name": "storperf", "run": {"class": "", "module": ""}, "description": "Following SNIA guidelines, this test consists of creating as many VMs and Cinder volumes as there are Cinder storage nodes, and then running a series of workloads against that environment.", "ci_loop": "daily", "tags": "openstack,cinder,storage", "url": "https://wiki.opnfv.org/display/storperf/SNIA+Targets", "_id": "588bb3d58cf551000c781d6f", "catalog_description": "Cinder volume testing (SNIA)", "creation_date": "2017-01-27 20:55:49.593736", "domains": "storage", "dependencies": {"installer": "", "scenario": ""}, "version": ">danube.1.0", "criteria": "", "tier": "performance", "trust": null, "blocking": true, "name": "snia_steady_state"}]}
+{"testcases": [{"project_name": "domino", "run": {"class": "Domino", "module": "functest.opnfv_tests.features.domino"}, "description": null, "ci_loop": "daily", "tags": "domino,policy", "url": null, "_id": "578ec6449377c54b278bc3f0", "catalog_description": null, "creation_date": "2016-07-20 00:31:00.307041", "domains": "mano", "dependencies": {"installer": "joid", "scenario": ""}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "domino-multinode"}, {"project_name": "domino", "run": null, "description": null, "ci_loop": null, "tags": null, "url": null, "_id": "578ec7779377c54b278bc3f1", "catalog_description": null, "creation_date": "2016-07-20 00:36:07.772175", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "domino-singlenode"}]}
+{"testcases": [{"project_name": "fastdatastacks", "run": {"args:": {"suites": "/home/opnfv/repos/fds/testing/robot"}, "class": "ODLTests", "module": "functest.opnfv_tests.sdn.odl.odl"}, "description": "Security groups test for http traffic in a fast data stack", "ci_loop": "daily", "tags": "networking, fdio, gbp, odl, openstack, vpp, honeycomb, apex ", "url": "http://artifacts.opnfv.org/fds/colorado/2.0/docs/index.html", "_id": "58c15e568fd6ac000aa241b1", "catalog_description": null, "creation_date": "2017-03-09 13:53:26.886856", "domains": "networking", "dependencies": {"installer": "apex", "scenario": "odl.*fdio"}, "version": ">danube", "criteria": "", "tier": "smoke", "trust": "silver", "blocking": false, "name": "fds"}, {"project_name": "fastdatastacks", "run": null, "description": "Verify that networking resources are created and deleted correctly across the whole stack - ODL GBP, ODL VBD, ODL LISP, Honeycomb and VPP", "ci_loop": "daily", "tags": "networking, fdio, gbp, vbd, odl, lisp, openstack, vpp, honeycomb, apex", "url": null, "_id": "591eeba9e93345000afa3841", "catalog_description": null, "creation_date": "2017-05-19 12:57:13.983864", "domains": "networking", "dependencies": {"installer": "apex", "scenario": "odl-fdio"}, "version": ">euphrates", "criteria": null, "tier": "smoke", "trust": null, "blocking": false, "name": "fds_stack_integrity"}, {"project_name": "fastdatastacks", "run": null, "description": "Distributed Virtual Routing (DVR) specific testcases focused on traffic inspection", "ci_loop": "daily", "tags": "networking, fdio, gbp, odl, lisp, openstack, vpp, honeycomb, apex", "url": null, "_id": "591eed2ee93345000afa3843", "catalog_description": null, "creation_date": "2017-05-19 13:03:42.350775", "domains": "networking", "dependencies": {"installer": "apex", "scenario": "odl-fdio-dvr"}, "version": ">euphrates", "criteria": null, "tier": "features", "trust": null, "blocking": false, "name": "fds_dvr"}, {"project_name": "fastdatastacks", "run": null, "description": "ODL split-rejoin test. In case any of the ODL controllers becomes unreachable the operability of the whole stack is not affected.", "ci_loop": "daily", "tags": "networking, fdio, gbp, odl, lisp, openstack, vpp, honeycomb, apex", "url": null, "_id": "591eeed2e93345000afa3849", "catalog_description": null, "creation_date": "2017-05-19 13:10:42.437167", "domains": "networking", "dependencies": {"installer": "apex", "scenario": "odl-fdio.*-ha"}, "version": ">euphrates", "criteria": null, "tier": "sdn_suites", "trust": null, "blocking": false, "name": "fds_odl_split_rejoin"}, {"project_name": "fastdatastacks", "run": null, "description": "ODL cluster stress test. Create a very large amount of networks and ports (~20 000) and verify that all of those were created properly.", "ci_loop": null, "tags": "networking, fdio, gbp, odl, lisp, openstack, vpp, honeycomb, apex", "url": null, "_id": "591ef15ce93345000afa384e", "catalog_description": null, "creation_date": "2017-05-19 13:21:32.020394", "domains": "networking", "dependencies": {"installer": "apex", "scenario": "odl-fdio.*-ha"}, "version": ">euphrates", "criteria": null, "tier": "sdn_suites", "trust": null, "blocking": false, "name": "fds_odl_stress"}]}
+{"testcases": [{"project_name": "models", "run": null, "description": "A basic TOSCA blueprint based test using Cloudify as VNFM. Based upon the vIMS Functest test, this test uses the same Clearwater IMS blueprint but simplifies it to deploy the Cloudify HelloWorld example, a simple web server. The test will include basic verification that the web server is deployed and operational.", "ci_loop": null, "tags": null, "url": "https://github.com/blsaws/vHello", "_id": "577d4d339377c54b278bbc95", "catalog_description": null, "creation_date": "2016-07-06 18:25:55.058124", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "vHello"}, {"project_name": "models", "run": null, "description": "A basic TOSCA blueprint based test using Cloudify as VNFM. A single-node simple python web server, connected to two internal networks (private and admin), and accessible via a floating IP. Based upon the OpenStack Tacker project's 'tosca-vnfd-hello-world' blueprint, modified/extended for testing of Cloudify-supported features as of OpenStack Newton.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/models/plain/tests/vHello_Cloudify.sh\ufeff", "_id": "5846edbc1d2c6e000ab2d407", "catalog_description": null, "creation_date": "2016-12-06 16:56:28.523336", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "vHello_Cloudify"}, {"project_name": "models", "run": null, "description": "A basic TOSCA blueprint based test using Tacker as VNFM. A single-node simple python web server, connected to two internal networks (private and admin), and accessible via a floating IP. Based upon the OpenStack Tacker project's 'tosca-vnfd-hello-world' blueprint, as modified/extended for testing of Tacker-supported features as of OpenStack Newton.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/models/plain/tests/vHello_Tacker.sh", "_id": "5846ee7a1d2c6e000ab2d408", "catalog_description": null, "creation_date": "2016-12-06 16:59:38.631893", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "vHello_Tacker"}, {"project_name": "models", "run": null, "description": "A basic TOSCA blueprint based test using OpenBaton as VNFM. A single-node simple python web server, connected to two internal networks (private and admin), and accessible via a floating IP. Based upon the OpenStack Tacker project's 'tosca-vnfd-hello-world' blueprint, as modified/extended for testing of OpenBaton-supported features as of OpenStack Newton.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/models/plain/tests/vHello_OpenBaton.sh\ufeff", "_id": "5846eeed1d2c6e000ab2d409", "catalog_description": null, "creation_date": "2016-12-06 17:01:33.215412", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "vHello_OpenBaton"}, {"project_name": "models", "run": null, "description": "A basic TOSCA blueprint based test using OpenBaton as VNFM. A single-node simple python web server, connected to two internal networks (private and admin), and accessible via a floating IP. 
Based upon the OpenStack Tacker project's 'tosca-vnfd-hello-world' blueprint, as modified/extended for testing of OpenBaton-supported features as of OpenStack Newton.", "ci_loop": null, "tags": null, "url": "https://git.opnfv.org/models/plain/tests/vHello_JuJu.sh\ufeff", "_id": "5846ef011d2c6e000ab2d40a", "catalog_description": null, "creation_date": "2016-12-06 17:01:53.074430", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "vHello_JuJu"}]}
+{"testcases": [{"project_name": "multisite", "run": {"class": "", "module": ""}, "description": "", "ci_loop": "daily", "tags": "multisite", "url": "", "_id": "57add11d9377c54b278bd67c", "catalog_description": null, "creation_date": "2016-08-12 13:37:33.326414", "domains": "vim", "dependencies": {"installer": "(fuel)|(compass)", "scenario": "multisite"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "multisite"}]}
+{"testcases": [{"project_name": "parser", "run": {"class": "", "module": ""}, "description": "OPNFV parser Project basic test case", "ci_loop": "daily", "tags": "parser,policy", "url": "", "_id": "57b2cd509377c54b278bdb91", "catalog_description": null, "creation_date": "2016-08-16 08:22:40.579465", "domains": "mano", "dependencies": {"installer": "fuel", "scenario": "^((?!bgpvpn|noha).)*$"}, "version": ">colorado.1.0", "criteria": null, "tier": "feature", "trust": "silver", "blocking": false, "name": "parser-basics"}]}
+{"testcases": [{"project_name": "netready", "run": null, "description": "This test case will exercise a new API exposed by a L3VPN Proton to configure a L3VPN network service. The API is addressed through a new protonclient.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/netready/Test+Strategy", "_id": "582ef66d1d2c6e000ab2b227", "catalog_description": null, "creation_date": "2016-11-18 12:39:09.904884", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "gluon_vping"}, {"project_name": "netready", "run": null, "description": "This test case exercises the model-driven API creation capabilities of Gluon.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/netready/Test+Strategy", "_id": "5922ca9c356833000a1951b2", "catalog_description": null, "creation_date": "2017-05-22 11:25:16.951468", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "gluon_api_generation"}]}
+{"testcases": [{"project_name": "securityscanning", "run": null, "description": "OpenSCAP security scan of OPNFV system", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/security/Security+Scanning", "_id": "582f29f51d2c6e000ab2b27e", "catalog_description": null, "creation_date": "2016-11-18 16:19:01.360172", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "security_scan"}]}
+{"testcases": [{"project_name": "opera", "run": null, "description": "vIMS is used as initial use case, based on which test cases will be created and aligned with Open-O first release for OPNFV D release.", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/PROJ/Opera+Project", "_id": "584888a41d2c6e000ab2d619", "catalog_description": null, "creation_date": "2016-12-07 22:09:40.481402", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "opera-vims"}]}
+{"testcases": []}
+null
+{"testcases": []}
+{"testcases": [{"project_name": "yardstick", "run": null, "description": "This is for ci to store test suites status", "ci_loop": "daily", "tags": null, "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/scenario_status.html", "_id": "592292b7356833000a195188", "catalog_description": "Not applicable (used for reporting)", "creation_date": "2017-05-22 07:26:47.523411", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "scenario_status"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC001 config file; Measure network throughput using pktgen; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run twice. First twice with the least amount of ports and further on.\n", "ci_loop": "daily", "tags": "pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc001.html", "_id": "592292b8356833000a195189", "catalog_description": "network: throughput (pktgen)", "creation_date": "2017-05-22 07:26:48.714560", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc001"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC002 config file; measure network latency using ping;\n", "ci_loop": "daily", "tags": "ping", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc002.html", "_id": "592292b9356833000a19518a", "catalog_description": "network: latency (ping)", "creation_date": "2017-05-22 07:26:49.820133", "domains": "network", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc002"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC005 config file; Measure Storage IOPS, throughput and latency using fio.\n", "ci_loop": "daily", "tags": "fio", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc005.html", "_id": "592292ba356833000a19518b", "catalog_description": "storage: disk IOPS, latency & throughput (fio)", "creation_date": "2017-05-22 07:26:50.927211", "domains": "storage", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc005"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC006 config file.\n", "ci_loop": "daily", "tags": "vtc_throughput", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc006.html", "_id": "592292bc356833000a19518c", "catalog_description": "vtc: data plane throughput", "creation_date": "2017-05-22 07:26:52.058010", "domains": "vtc", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc006"}, {"project_name": "yardstick", "run": null, "description": "Sample benchmark task config file; vTC.\n", "ci_loop": "daily", "tags": "vtc_throughput_noisy", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc007.html", "_id": "592292bd356833000a19518d", "catalog_description": "vtc: data plane throughput(Noisy neighbours)", "creation_date": "2017-05-22 07:26:53.143162", "domains": "vtc", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc007"}, 
{"project_name": "yardstick", "run": null, "description": "Yardstick TC008 config file; Measure network throughput and packet loss using Pktgen; Different amount of flows, from 2 up to 1001000, in combination with different packet sizes are run in each test. Each combination of packet size and flow amount is run 10 times. First 10 times with the smallest packet size, starting with the least amount of ports/flows, then next amount of ports with same packet size, and so on. The test sequence continues with the next packet size, with same ports/flows sequence as before.\n", "ci_loop": "daily", "tags": "pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc008.html", "_id": "592292be356833000a19518e", "catalog_description": "network: throughput and packet loss extended(pktgen)", "creation_date": "2017-05-22 07:26:54.255469", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc008"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC009 config file; Measure network throughput and packet loss using pktgen; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run 10 times each. First 10 times with the least amount of ports, then 10 times with the next amount of ports, and so on until all packet sizes have been run with;\n", "ci_loop": "daily", "tags": "pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc009.html", "_id": "592292bf356833000a19518f", "catalog_description": "network: throughput and packet loss (pktgen)", "creation_date": "2017-05-22 07:26:55.406119", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc009"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC010 config file; measure memory read latency using lmbench.\n", "ci_loop": "daily", "tags": "lmbench", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc010.html", "_id": "592292c0356833000a195190", "catalog_description": "compute: memory read latency (lmbench)", "creation_date": "2017-05-22 07:26:56.562861", "domains": "compute", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc010"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC011 config file; Measure packet delay variation (jitter) using iperf3.\n", "ci_loop": "daily", "tags": "iperf3", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc011.html", "_id": "592292c1356833000a195191", "catalog_description": "network: performance latency (iperf)", "creation_date": "2017-05-22 07:26:57.673084", "domains": "network", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc011"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC012 config file; Measure memory read and write bandwidth using lmbench.\n", "ci_loop": "daily", "tags": "lmbench", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc012.html", "_id": "592292c2356833000a195192", "catalog_description": "compute: memory bandwith (lmbench)", "creation_date": "2017-05-22 07:26:58.871685", "domains": "compute", "dependencies": null, "version": 
">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc012"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC014 config file; Measure Processing speed using unixbench.\n", "ci_loop": "daily", "tags": "unixbench", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc014.html", "_id": "592292c4356833000a195193", "catalog_description": "compute: processing speed (unixbench)", "creation_date": "2017-05-22 07:27:00.007018", "domains": "compute", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc014"}, {"project_name": "yardstick", "run": null, "description": "Sample test case for the HA of controller node Openstack service.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc019.html", "_id": "592292c5356833000a195194", "catalog_description": "HA: nova-api service down", "creation_date": "2017-05-22 07:27:01.147332", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc019"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC020 config file.\n", "ci_loop": "daily", "tags": "vtc_instantiation_validation", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc020.html", "_id": "592292c6356833000a195195", "catalog_description": "vtc instantiation", "creation_date": "2017-05-22 07:27:02.271192", "domains": "vtc", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc020"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC021 config file.\n", "ci_loop": "daily", "tags": "vtc_instantiation_validation_noisy", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc021.html", "_id": "592292c7356833000a195196", "catalog_description": "vtc instantiation (Noisy neighbours)", "creation_date": "2017-05-22 07:27:03.420740", "domains": "vtc", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc021"}, {"project_name": "yardstick", "run": null, "description": "Sample test case for the HA of OpenStack Controll Node abnormally shutdown.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc025.html", "_id": "592292c8356833000a195197", "catalog_description": "HA: controller node shutdown", "creation_date": "2017-05-22 07:27:04.577080", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc025"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC027 config file; Measure IPV6 network latency using ping6.\n", "ci_loop": "daily", "tags": "ping6", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc027.html", "_id": "592292c9356833000a195198", "catalog_description": "network: IPv6 latency", "creation_date": "2017-05-22 07:27:05.736814", "domains": "ipv6", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc027"}, {"project_name": 
"yardstick", "run": null, "description": "Yardstick TC037 config file; Measure network throughput and packet loss using pktgen; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run 2 times each. First 2 times with the least amount of ports, then 2 times with the next amount of ports, and so on until all packet sizes have been run with; During the measurements system load and network latency are recorded/measured using ping and mpstat, respectively;\n", "ci_loop": "daily", "tags": "cpuload,ping,oktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc037.html", "_id": "592292ca356833000a195199", "catalog_description": "network: throughput and packet loss extended (cpu&ping load)", "creation_date": "2017-05-22 07:27:06.875236", "domains": "network,compute", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc037"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC038 config file; Measure network throughput and packet loss using pktgen; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run 10 times each. First 10 times with the least amount of ports, then 10 times with the next amount of ports, and so on until all packet sizes have been run with; During the measurements system load and network latency are recorded/measured using ping and mpstat, respectively;\n", "ci_loop": "daily", "tags": "cpuload,ping,pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc038.html", "_id": "592292cb356833000a19519a", "catalog_description": "network: throughput packet loss (cpu&ping load)", "creation_date": "2017-05-22 07:27:07.988262", "domains": "network,compute", "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc038"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC040 config file; Running Parser Yang-to-Tosca module as a tool, validating output against expected outcome.\n", "ci_loop": "daily", "tags": "parser", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc040.html", "_id": "592292cd356833000a19519b", "catalog_description": "Parser Yang Tosca", "creation_date": "2017-05-22 07:27:09.104621", "domains": null, "dependencies": null, "version": ">brahmaputra.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc040"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC042 config file; Measure network latency using testpmd and pktgen-dpdk.\n", "ci_loop": "daily", "tags": "pktgenDPDKLatency", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc042.html", "_id": "592292ce356833000a19519c", "catalog_description": "network: throughput latency (dpdk-pktgen)", "creation_date": "2017-05-22 07:27:10.207188", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc042"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC043 config file; Measure latency between NFVI nodes using ping.\n", "ci_loop": "daily", "tags": "ping", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc043.html", "_id": "592292cf356833000a19519d", "catalog_description": "network: node throughput latency(ping)", 
"creation_date": "2017-05-22 07:27:11.286317", "domains": "network", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc043"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC045 :Control node Openstack service down - neutron server.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc045.html", "_id": "592292d0356833000a19519e", "catalog_description": "HA: neutron-server service down", "creation_date": "2017-05-22 07:27:12.399327", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc045"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC046 :Control node Openstack service down - keystone.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc046.html", "_id": "592292d1356833000a19519f", "catalog_description": "HA: keystone service down", "creation_date": "2017-05-22 07:27:13.502066", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc046"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC047 :Control node Openstack service down - glance api.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc047.html", "_id": "592292d2356833000a1951a0", "catalog_description": "HA: glance-api service down", "creation_date": "2017-05-22 07:27:14.610834", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc047"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC048 :Control node Openstack service down - cinder api.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc048.html", "_id": "592292d3356833000a1951a1", "catalog_description": "HA: cinder-api service down", "creation_date": "2017-05-22 07:27:15.751416", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc048"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC049 :Control node Openstack service down - swift proxy.\n", "ci_loop": "daily", "tags": "serviceHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc049.html", "_id": "592292d4356833000a1951a2", "catalog_description": "HA: swift proxy service down", "creation_date": "2017-05-22 07:27:16.859383", "domains": "ha", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc049"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC050 :OpenStack Controller Node Network High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc050.html", "_id": "592292d6356833000a1951a3", "catalog_description": "HA: network interface down", "creation_date": "2017-05-22 07:27:18.014215", 
"domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc050"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC051 :OpenStack Controller Node CPU Overload High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc051.html", "_id": "592292d7356833000a1951a4", "catalog_description": "HA: CPU overload", "creation_date": "2017-05-22 07:27:19.124525", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc051"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC052 :OpenStack Controller Node Disk I/O Block High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc052.html", "_id": "592292d8356833000a1951a5", "catalog_description": "HA: disk I/O block", "creation_date": "2017-05-22 07:27:20.288709", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc052"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC053 :Openstack Controller Load Balance Service High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc053.html", "_id": "592292de356833000a1951a6", "catalog_description": "HA: load balancer service down", "creation_date": "2017-05-22 07:27:26.504630", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc053"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC054 :OpenStack VIP Master Node abnormally shutdown High Availability; This test case is written by new scenario-based HA testing framework.\n", "ci_loop": "daily", "tags": "GeneralHA", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc054.html", "_id": "592292df356833000a1951a7", "catalog_description": "HA: virtual IP node shutdown", "creation_date": "2017-05-22 07:27:27.763354", "domains": null, "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc054"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC055 config file; Collect hardware specification from /proc/cpuinfo /proc/meminfo; compute capacity and scale. 
the results have, number of CPUs, number of physical cores in a single CPU; number of logical cores, total memory size; cache size per CPU, total cache size; HT (Hyper-Thread) support status, 1 for open, 0 for close.\n", "ci_loop": "daily", "tags": "computeCapacity", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc055.html", "_id": "592292e1356833000a1951a8", "catalog_description": "compute: cpu & memory_specification", "creation_date": "2017-05-22 07:27:29.032130", "domains": "compute", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc055"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC063 config file; Measure disk size, block size and disk utilization using fdisk and iostat.\n", "ci_loop": "daily", "tags": "storageCapacity", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc063.html", "_id": "592292e2356833000a1951a9", "catalog_description": "storage: disk size&utilization", "creation_date": "2017-05-22 07:27:30.302236", "domains": "storage", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc063"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC069 config file; Measure memory read and write bandwidth using ramspeed.\n", "ci_loop": "daily", "tags": "ramspeed", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc069.html", "_id": "592292e3356833000a1951aa", "catalog_description": "compute: memory bandwith(ramspeed)", "creation_date": "2017-05-22 07:27:31.542189", "domains": "compute", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc069"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC070 config file; Measure network throughput and packet loss using pktgen; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run 2 times each. First 2 times with the least amount of ports, then 2 times with the next amount of ports, and so on until all packet sizes have been run with; During the measurements memory usage statistics and network latency are recorded/measured using free and ping, respectively;\n", "ci_loop": "daily", "tags": "memoryload,ping,pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc070.html", "_id": "592292e4356833000a1951ab", "catalog_description": "network: throughput packet loss (memory&ping load)", "creation_date": "2017-05-22 07:27:32.747354", "domains": "network,storage", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc070"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC071 config file; Measure cache hit/miss ratio and usage, network throughput and latency; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run 2 times each. 
First 2 times with the least amount of ports, then 2 times with the next amount of ports, and so on until all packet sizes have been run with; During the measurements cache hit/miss ration, cache usage statistics and network latency are recorded/measured using cachestat and ping, respectively;\n", "ci_loop": "daily", "tags": "cachestat,ping,pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc071.html", "_id": "592292e5356833000a1951ac", "catalog_description": "compute: cache hit&network throughput latency", "creation_date": "2017-05-22 07:27:33.957092", "domains": "network,storage", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc071"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC072 config file; Measure network throughput and packet loss using pktgen; Different amounts of flows are tested with, from 2 up to 1001000; All tests are run 2 times each. First 2 times with the least amount of ports, then 2 times with the next amount of ports, and so on until all packet sizes have been run with; During the measurements network usage statistics and network latency are recorded/measured using sar and ping, respectively;\n", "ci_loop": "daily", "tags": "netutilization,ping,pktgen", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc072.html", "_id": "592292e7356833000a1951ad", "catalog_description": "network: throughput(netutilization&pktgen load)", "creation_date": "2017-05-22 07:27:35.175762", "domains": "network,storage", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc072"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC073 config file; measure network latency and throughput using netperf; There are two sample scenarios: bulk test and request/response test; In bulk test, UDP_STREAM and TCP_STREAM can be used; send_msg_size and recv_msg_size are options of bulk test; In req/rsp test, TCP_RR TCP_CRR UDP_RR can be used; req_rsp_size is option of req/rsp test;\n", "ci_loop": "daily", "tags": "netperfnode", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc073.html", "_id": "592292e8356833000a1951ae", "catalog_description": "network: node throughput(netperf)", "creation_date": "2017-05-22 07:27:36.466462", "domains": "network", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc073"}, {"project_name": "yardstick", "run": null, "description": "Test case for TC074 StorPerf benchmark task config file; StorPerf is a tool to measure block and object storage performance in an NFVI.\n", "ci_loop": "daily", "tags": "storperf", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc074.html", "_id": "592292e9356833000a1951af", "catalog_description": "storage: storperf integration", "creation_date": "2017-05-22 07:27:37.665283", "domains": "storage", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc074"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC075 config file; Measure network capacity and scale. 
Measure number of connections, number of frames received;\n", "ci_loop": "daily", "tags": "networkCapacity", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc075.html", "_id": "592292ea356833000a1951b0", "catalog_description": "network: capacity connections number", "creation_date": "2017-05-22 07:27:38.857919", "domains": "network", "dependencies": null, "version": ">colorado.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc075"}, {"project_name": "yardstick", "run": null, "description": "Yardstick TC076 config file; Monitor network metrics provided by the kernel in a host and calculate IP datagram error rate, ICMP message error rate, TCP segment error rate and UDP datagram error rate.", "ci_loop": "daily", "tags": "ping,nstat", "url": "http://artifacts.opnfv.org/yardstick/docs/userguide/opnfv_yardstick_tc076.html", "_id": "592292ec356833000a1951b1", "catalog_description": "network: metrics (ping nstat)", "creation_date": "2017-05-22 07:27:40.163423", "domains": "network", "dependencies": null, "version": ">danube.1.0", "criteria": null, "tier": null, "trust": "silver", "blocking": null, "name": "opnfv_yardstick_tc076"}]}
+{"testcases": [{"project_name": "barometer", "run": "", "description": "barometercollectd testcase", "ci_loop": "", "tags": "", "url": "", "_id": "58a2e30e584d06000b0157d3", "catalog_description": null, "creation_date": "2017-02-14 10:59:26.396742", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "barometercollectd"}]}
+{"testcases": [{"project_name": "OpenRetriever", "run": null, "description": "The test cases about Container is integrated into NFV", "ci_loop": null, "tags": null, "url": "https://wiki.opnfv.org/display/OpenRetriever/Test+Strategy", "_id": "5923e51578a2ad000ae6a140", "catalog_description": null, "creation_date": "2017-05-23 07:30:29.840213", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "OpenRetriever Test Cases"}, {"project_name": "OpenRetriever", "run": "", "description": "", "ci_loop": "", "tags": "kubernetes,container", "url": "https://wiki.opnfv.org/display/OpenRetriever/Test+Strategy", "_id": "5923fb0b78a2ad000ae6a143", "catalog_description": "", "creation_date": "2017-05-23 09:04:11.498178", "domains": "", "dependencies": "", "version": ">danube", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "e2e"}]}
+{"testcases": [{"project_name": "ovn4nfv", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "592887af78a2ad000ae6a65f", "catalog_description": "", "creation_date": "2017-05-26 19:53:19.223720", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "connection_check"}, {"project_name": "ovn4nfv", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "592887ce78a2ad000ae6a660", "catalog_description": "", "creation_date": "2017-05-26 19:53:50.130441", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "api_check"}, {"project_name": "ovn4nfv", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "592887e578a2ad000ae6a661", "catalog_description": "", "creation_date": "2017-05-26 19:54:13.939149", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "vping_ssh"}, {"project_name": "ovn4nfv", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "592887ef78a2ad000ae6a662", "catalog_description": "", "creation_date": "2017-05-26 19:54:23.764427", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "vping_userdata"}]}
+{"testcases": [{"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928a9bb78a2ad000ae6a6c8", "catalog_description": "", "creation_date": "2017-05-26 22:18:35.709812", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "connection_check"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928aa4178a2ad000ae6a6ca", "catalog_description": "", "creation_date": "2017-05-26 22:20:49.183239", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "api_check"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928aa6578a2ad000ae6a6cb", "catalog_description": "", "creation_date": "2017-05-26 22:21:25.440080", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "snaps_health_check"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928aad478a2ad000ae6a6cc", "catalog_description": "", "creation_date": "2017-05-26 22:23:16.642815", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "vping_ssh"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928ab1878a2ad000ae6a6ce", "catalog_description": "", "creation_date": "2017-05-26 22:24:24.418194", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "vping_userdata"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928ab7a78a2ad000ae6a6d1", "catalog_description": "", "creation_date": "2017-05-26 22:26:02.619234", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "tempest_smoke_serial"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928abc878a2ad000ae6a6d2", "catalog_description": "", "creation_date": "2017-05-26 22:27:20.130387", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "rally_sanity"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928ac0878a2ad000ae6a6d3", "catalog_description": "", "creation_date": "2017-05-26 22:28:24.564141", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "refstack_defcore"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928ac2c78a2ad000ae6a6d4", "catalog_description": "", "creation_date": "2017-05-26 22:29:00.953556", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "onos"}, {"project_name": "onosfw", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "5928ac5d78a2ad000ae6a6d5", "catalog_description": "", "creation_date": "2017-05-26 22:29:49.322376", "domains": "", "dependencies": "", "version": "", "criteria": "", "tier": "", "trust": null, "blocking": "", "name": "snaps_smoke"}]}
+{"testcases": [{"project_name": "vina", "run": null, "description": "Verify that credentials to access OpenStack environment are valid", "ci_loop": "daily", "tags": "networking, vina, openstack, fuel", "url": null, "_id": "592be78d78a2ad000ae6aad8", "catalog_description": null, "creation_date": "2017-05-29 09:19:09.076114", "domains": "networking", "dependencies": {"installer": "fuel", "scenario": "vina-setup"}, "version": ">euphrates", "criteria": null, "tier": "smoke", "trust": null, "blocking": false, "name": "openstack_integrity"}]}
diff --git a/dovetail/utils/local_db/get_db_schema.py b/dovetail/utils/local_db/get_db_schema.py
new file mode 100644
index 00000000..1106355c
--- /dev/null
+++ b/dovetail/utils/local_db/get_db_schema.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import requests
+import json
+
+
+source_url = 'http://116.66.187.136:9999/api/v1'
+
+
+def get(url):
+    """Fetch a URL and return the decoded JSON body, or None on failure."""
+    try:
+        ret = requests.get(url)
+        return ret.json()
+    except (requests.RequestException, ValueError):
+        return None
+
+
+def pod():
+    source = '{}/pods'.format(source_url)
+    try:
+        pods = get(source)['pods']
+        with open("pods.json", "w") as f:
+            f.write(json.dumps(pods, indent=4))
+    except (TypeError, KeyError, IOError):
+        return
+
+
+def project():
+    source = '{}/projects'.format(source_url)
+
+    try:
+        projects = get(source)['projects']
+        with open("projects.json", "w") as f:
+            f.write(json.dumps(projects, indent=4))
+    except (TypeError, KeyError, IOError):
+        return
+
+    for p in projects:
+        source = '{}/projects/{}/cases'.format(source_url, p['name'])
+        print(p['name'])
+        print(source)
+        try:
+            cases = get(source)
+            with open("cases.json", "a+") as f:
+                f.write(json.dumps(cases))
+                f.write('\n')
+        except (TypeError, IOError):
+            print("useless data")
+
+
+if __name__ == '__main__':
+    pod()
+    project()
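As a usage note, get_db_schema.py dumps pods, projects, and per-project cases from the hard-coded source_url into pods.json, projects.json, and cases.json in the current directory. A hedged sketch of reusing it against a different testapi instance by overriding the module-level URL (the local address below is an assumption):

import get_db_schema

# Assumed local testapi endpoint; source_url is read at call time,
# so overriding the module attribute redirects all three dumps.
get_db_schema.source_url = 'http://127.0.0.1:8010/api/v1'
get_db_schema.pod()       # writes pods.json
get_db_schema.project()   # writes projects.json and appends to cases.json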
diff --git a/dovetail/utils/local_db/init_dovetail.py b/dovetail/utils/local_db/init_dovetail.py
new file mode 100644
index 00000000..6d066cbb
--- /dev/null
+++ b/dovetail/utils/local_db/init_dovetail.py
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+import requests
+import datetime
+import json
+import yaml
+import sys
+
+base_url = sys.argv[1]
+headers = {'Content-Type': 'application/json'}
+
+
+def create_project():
+
+    name = 'dovetail'
+    s = '2015-10-14 06:56:09'
+    time = datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
+
+    data = {
+        'name': name,
+        'creation_date': str(time),
+        'description': ''
+    }
+
+    url = '{}/projects'.format(base_url)
+    requests.post(url, data=json.dumps(data), headers=headers)
+
+
+def dovetail():
+    with open('../../compliance/proposed_tests.yml') as f:
+        data = yaml.safe_load(f)['proposed_tests']['testcases_list']
+
+    url = '{}/projects/dovetail/cases'.format(base_url)
+    for case in data:
+        c = {
+            'ci_loop': 'daily',
+            'description': 'dovetail',
+            'name': case,
+            'project_name': 'dovetail',
+            'trust': 'gold',
+            'url': '',
+            'version': 'master',
+            'domains': 'master',
+            'tags': 'dovetail'
+        }
+        requests.post(url, data=json.dumps(c), headers=headers)
+
+
+if __name__ == '__main__':
+    create_project()
+    dovetail()
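init_dovetail.py is invoked by launch_db.sh (below) as "python ./init_dovetail.py ${base_url}/api/v1"; it registers a dovetail project and one case per entry in compliance/proposed_tests.yml. A hedged sketch of checking the result, assuming the testapi exposes GET on the same project and case resources the script POSTs to (the base_url value is an assumption):

import requests

base_url = 'http://127.0.0.1:8000/api/v1'  # assumed testapi URL passed to init_dovetail.py

# Query the project record and its case list created by init_dovetail.py.
print(requests.get('{}/projects/dovetail'.format(base_url)).json())
print(requests.get('{}/projects/dovetail/cases'.format(base_url)).json())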
diff --git a/dovetail/utils/local_db/launch_db.sh b/dovetail/utils/local_db/launch_db.sh
index 77646713..956ccfe8 100755
--- a/dovetail/utils/local_db/launch_db.sh
+++ b/dovetail/utils/local_db/launch_db.sh
@@ -8,20 +8,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-if [ "$#" -ne 1 ]; then
+if [ "$#" -ne 2 ]; then
echo "Error: missing parameter! try again like this:"
echo ""
- echo "./launch_db.sh 192.168.115.2"
+ echo "./launch_db.sh 192.168.115.2 http://116.66.187.136:9999"
echo ""
echo "parameters:"
- echo " db_host_ip: your localhost ip address "
+ echo " host_ip: your localhost ip address "
+ echo " base_url: your public url for website"
echo ""
exit 1
fi
export mongodb_port=${mongodb_port:-"27017"}
-export testapi_port=${testapi_port:-"8000"}
+export testapi_port=${testapi_port:-"8010"}
export db_host_ip=${db_host_ip:-"$1"}
+export base_url=${base_url:-"$2"}
+
set -e
@@ -30,8 +33,8 @@ echo "Create the mongodb."
echo "==================="
set +e
-# pull image kkltcjk/mongodb:reporting
-mongodb_img="kkltcjk/mongodb:reporting"
+# pull image mongo:3.2.1
+mongodb_img="mongo:3.2.1"
echo "Step1: pull the image $mongodb_img."
sudo docker pull $mongodb_img
set -e
@@ -47,7 +50,7 @@ fi
# run mongodb container
echo "Step3: run ${container_name} container."
-cmd="sudo docker run -itd -p ${mongodb_port}:27017 --name ${container_name} ${mongodb_img}"
+cmd="sudo docker run -itd -p ${mongodb_port}:27017 -v /home/testapi/mongo:/data/db --name ${container_name} ${mongodb_img}"
echo $cmd
${cmd}
@@ -59,8 +62,7 @@ echo "Create the testapi service."
echo "=========================="
set +e
-# pull image kkltcjk/testapi:reporting
-testapi_img="kkltcjk/testapi:reporting"
+testapi_img="opnfv/testapi:cvp.0.5.0"
echo "Step1: pull the image $testapi_img."
sudo docker pull $testapi_img
set -e
@@ -76,20 +78,46 @@ fi
# run testapi container
echo "Step3: run ${container_name} container."
-cmd="sudo docker run -itd -p ${testapi_port}:8000 --name ${container_name} -e mongodb_url=mongodb://${db_host_ip}:${mongodb_port}/ ${testapi_img}"
+cmd="sudo docker run -itd -p 8010:8010 --name ${container_name} -v /home/testapi/logs:/home/testapi/logs -e mongodb_url=mongodb://${db_host_ip}:${mongodb_port}/ -e base_url=${base_url} ${testapi_img}"
echo $cmd
${cmd}
echo "Wait for testapi to work..."
sleep 10
+set +e
+nginx_img="opnfv/dovetail:nginx.cvp.0.5.0"
+echo "Step1: pull the image $nginx_img."
+sudo docker pull $nginx_img
+set -e
+
+container_name='nginx_cvp'
+
+echo "Step2: remove the exist container with the same name '$container_name' if exists."
+sudo docker ps -a -f "name=${container_name}"
+
+if [[ ! -z $(sudo docker ps -aq -f "name=${container_name}") ]]; then
+ sudo docker ps -aq -f "name=${container_name}" | xargs sudo docker rm -f
+fi
+
+# run nginx container
+echo "Step3: run ${container_name} container."
+cmd="sudo docker run -itd -p 8000:8000 -v /home/testapi/logs:/home/testapi/logs --name ${container_name} -e testapi_url=${db_host_ip}:8010 ${nginx_img}"
+echo $cmd
+${cmd}
+
echo "================================="
echo "Upload default project info to DB"
echo "================================="
echo "Init DB info..."
-cmd="python ./init_db.py ${db_host_ip} ${testapi_port}"
-echo ${cmd}
+cmd="python ./init_db.py ${db_host_ip} 8010"
+echo $cmd
+${cmd}
+
+echo "Init dovetail testcase"
+cmd="python ./init_dovetail.py ${base_url}/api/v1"
+echo $cmd
${cmd}
echo "Successfully load DB info."
diff --git a/dovetail/utils/local_db/pods.json b/dovetail/utils/local_db/pods.json
new file mode 100644
index 00000000..3cd1dadb
--- /dev/null
+++ b/dovetail/utils/local_db/pods.json
@@ -0,0 +1,382 @@
+[
+ {
+ "name": "lf-pod2",
+ "creation_date": "2015-01-01 08:00:00.476549",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "5617f98e514bc5355b51f6b5",
+ "details": ""
+ },
+ {
+ "name": "lf-pod1",
+ "creation_date": "2015-01-01 08:00:00.476549",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "5617fa5a514bc5355b51f6b6",
+ "details": ""
+ },
+ {
+ "name": "orange-pod2",
+ "creation_date": "2015-10-27 15:27:30.312012",
+ "role": "",
+ "mode": "metal",
+ "_id": "562f97e2514bc5174d053d38",
+ "details": "https://wiki.opnfv.org/opnfv-orange"
+ },
+ {
+ "name": "unknown-pod",
+ "creation_date": "2015-11-30 08:55:02.550465",
+ "role": "",
+ "mode": "undefined",
+ "_id": "565c0ee6514bc5087f2ddcf7",
+ "details": null
+ },
+ {
+ "name": "huawei-pod1",
+ "creation_date": "",
+ "role": "",
+ "mode": "metal",
+ "_id": "566fea58514bc5068a345d4b",
+ "details": ""
+ },
+ {
+ "name": "intel-pod5",
+ "creation_date": "2015-12-15 10:24:53.476549",
+ "role": "",
+ "mode": "metal",
+ "_id": "566fea75514bc5068a345d4c",
+ "details": null
+ },
+ {
+ "name": "intel-pod3",
+ "creation_date": "2015-12-21 17:38:31.435593",
+ "role": "",
+ "mode": "metal",
+ "_id": "56783917514bc5068a345d97",
+ "details": null
+ },
+ {
+ "name": "ericsson-pod1",
+ "creation_date": "2015-12-22 07:21:03.765581",
+ "role": "",
+ "mode": "metal",
+ "_id": "5678f9df514bc5068a345d98",
+ "details": null
+ },
+ {
+ "name": "ericsson-pod2",
+ "creation_date": "2015-12-22 07:21:18.173966",
+ "role": "",
+ "mode": "metal",
+ "_id": "5678f9ee514bc5068a345d99",
+ "details": null
+ },
+ {
+ "name": "dell-us-testing-bm-1",
+ "creation_date": "2016-01-08 12:41:54.097114",
+ "role": "",
+ "mode": "metal",
+ "_id": "568fae92514bc5068a60e7d2",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm-3",
+ "creation_date": "2016-01-08 14:13:16.740415",
+ "role": "",
+ "mode": null,
+ "_id": "568fc3fc514bc5068a60e7d4",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm-2",
+ "creation_date": "2016-01-08 14:15:54.037500",
+ "role": "",
+ "mode": null,
+ "_id": "568fc49a514bc5068a60e7d5",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm3",
+ "creation_date": "2016-01-15 12:14:20.956198",
+ "role": "",
+ "mode": "metal",
+ "_id": "5698e29c514bc56e65a47bc8",
+ "details": null
+ },
+ {
+ "name": "intel-pod6",
+ "creation_date": "2016-01-22 13:32:18.767326",
+ "role": "",
+ "mode": "metal",
+ "_id": "56a22f62514bc541f885b2c0",
+ "details": null
+ },
+ {
+ "name": "huawei-virtual2",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56a9d7ac851d7e6a0f74930d",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual1",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56a9f411851d7e6a0f749313",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual3",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67ba6851d7e4b188676bc",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual4",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67bb6851d7e4b188676bd",
+ "details": ""
+ },
+ {
+ "name": "intel-pod8",
+ "creation_date": "2016-03-14 08:52:47.576623",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67bdf851d7e4b188676be",
+ "details": null
+ },
+ {
+ "name": "intel-pod7",
+ "creation_date": "2016-03-14 08:53:00.757525",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67bec851d7e4b188676c0",
+ "details": null
+ },
+ {
+ "name": "huawei-pod2",
+ "creation_date": "",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67c35851d7e4b188676c1",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual1",
+ "creation_date": "2016-03-14 08:58:06.432105",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67d1e851d7e4b188676c2",
+ "details": null
+ },
+ {
+ "name": "arm-pod1",
+ "creation_date": "2016-05-05 09:18:54.879497",
+ "role": "",
+ "mode": "metal",
+ "_id": "572b0ffe9377c51472b7878f",
+ "details": null
+ },
+ {
+ "name": "zte-pod1",
+ "creation_date": "2016-05-12 03:36:56.091397",
+ "role": "",
+ "mode": "metal",
+ "_id": "5733fa589377c548e8df3834",
+ "details": null
+ },
+ {
+ "name": "intel-virtual1",
+ "creation_date": "2016-08-23 17:22:30.901081",
+ "role": null,
+ "mode": "virtual",
+ "_id": "57bc86561d2c6e000ab19d93",
+ "details": null
+ },
+ {
+ "name": "intel-virtual2",
+ "creation_date": "2016-08-23 17:24:23.143681",
+ "role": null,
+ "mode": "virtual",
+ "_id": "57bc86c71d2c6e000ab19d94",
+ "details": null
+ },
+ {
+ "name": "zte-pod2",
+ "creation_date": "2016-09-06 09:49:20.228736",
+ "role": "",
+ "mode": "metal",
+ "_id": "57ce91201d2c6e000ab1c261",
+ "details": ""
+ },
+ {
+ "name": "zte-pod3",
+ "creation_date": "2016-09-06 09:49:26.019816",
+ "role": "",
+ "mode": "metal",
+ "_id": "57ce91261d2c6e000ab1c263",
+ "details": ""
+ },
+ {
+ "name": "arm-pod3",
+ "creation_date": "2016-09-12 09:47:50.791351",
+ "role": "",
+ "mode": "metal",
+ "_id": "57d679c61d2c6e000ab1d6bd",
+ "details": "ARM POD3"
+ },
+ {
+ "name": "cisco-pod1",
+ "creation_date": "2016-09-13 13:01:21.906958",
+ "role": "Community lab",
+ "mode": "metal",
+ "_id": "57d7f8a11d2c6e000ab1db88",
+ "details": "not yet declared in CI but needed to validate vpp scenarios for Colorado"
+ },
+ {
+ "name": "ool-virtual1",
+ "creation_date": "2016-09-19 12:43:50.313032",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57dfdd861d2c6e000ab1f37b",
+ "details": "Okinawa lab"
+ },
+ {
+ "name": "ericsson-pod3",
+ "creation_date": "2016-09-26 09:45:40.565795",
+ "role": "",
+ "mode": "metal",
+ "_id": "57e8ee441d2c6e000ab20fa9",
+ "details": ""
+ },
+ {
+ "name": "ericsson-pod4",
+ "creation_date": "2016-09-26 09:45:48.980198",
+ "role": "",
+ "mode": "metal",
+ "_id": "57e8ee4c1d2c6e000ab20faa",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual2",
+ "creation_date": "2016-09-26 09:46:05.508776",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee5d1d2c6e000ab20fac",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual3",
+ "creation_date": "2016-09-26 09:46:10.244443",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee621d2c6e000ab20fad",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual4",
+ "creation_date": "2016-09-26 09:46:14.734383",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee661d2c6e000ab20fae",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual5",
+ "creation_date": "2016-09-26 09:46:19.477110",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee6b1d2c6e000ab20faf",
+ "details": ""
+ },
+ {
+ "name": "intel-pod9",
+ "creation_date": "2016-11-23 14:07:35.963037",
+ "role": "",
+ "mode": "metal",
+ "_id": "5835a2a71d2c6e000ab2bb4b",
+ "details": "https://wiki.opnfv.org/display/pharos/Intel+Pod9"
+ },
+ {
+ "name": "huawei-pod3",
+ "creation_date": "2017-01-17 13:36:03.908341",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dc38cf551000c780eda",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod4",
+ "creation_date": "2017-01-17 13:36:10.759860",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dca8cf551000c780edb",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod5",
+ "creation_date": "2017-01-17 13:36:15.447849",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dcf8cf551000c780edc",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod6",
+ "creation_date": "2017-01-18 10:53:10.586724",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587f49168cf551000c780f5e",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod7",
+ "creation_date": "2017-01-18 10:53:15.373953",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587f491b8cf551000c780f5f",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod12",
+ "creation_date": "2017-02-09 07:22:46.425836",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "589c18c68cf551000c7820e8",
+ "details": ""
+ },
+ {
+ "name": "intel-pod12",
+ "creation_date": "2017-05-17 14:11:18.852731",
+ "role": "production-ci",
+ "details": "performance",
+ "query": "<function query at 0x7f574c29c500>",
+ "mode": "metal",
+ "_id": "591c5a06ee2e3f000a50f0b4",
+ "miss_fields": [
+ "name"
+ ]
+ },
+ {
+ "name": "cisco-vina-pod10",
+ "creation_date": "2017-05-29 09:13:20.818497",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "592be63078a2ad000ae6aad7",
+ "details": ""
+ },
+ {
+ "name": "zte-virtual1",
+ "creation_date": "2017-05-30 14:11:04.264967",
+ "role": "",
+ "mode": "baremetal",
+ "_id": "592d7d7878a2ad000ae6ac49",
+ "details": ""
+ }
+] \ No newline at end of file
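The pods.json dump above seeds the local DB with the community pod records, presumably pushed through the testapi REST interface by init_db.py. Purely as an illustration of the record format, the same data could be posted by hand roughly as follows; the endpoint path and the use of jq are assumptions, not taken from this patch.

# Illustrative only: load pods.json into a running local testapi.
testapi_url="http://127.0.0.1:8010/api/v1"
jq -c '.[]' dovetail/utils/local_db/pods.json | while read -r pod; do
    curl -s -X POST -H "Content-Type: application/json" \
         -d "${pod}" "${testapi_url}/pods"
done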
diff --git a/dovetail/utils/local_db/projects.json b/dovetail/utils/local_db/projects.json
new file mode 100644
index 00000000..d034b808
--- /dev/null
+++ b/dovetail/utils/local_db/projects.json
@@ -0,0 +1,218 @@
+[
+ {
+ "_id": "561dfc89514bc5355b51f6c6",
+ "description": "Project functest aims to run suites of functional tests at the end of an OPNFV fresh install. These suites consist in functional tests on the main components of the solution (VIM, SDN controllers). It thus includes several upstream suites from upstream projects.",
+ "name": "functest",
+ "creation_date": "2015-10-14 06:56:09.317134"
+ },
+ {
+ "_id": "5641e12d514bc5174df3d77e",
+ "description": "OPNFV vsperf project",
+ "name": "vsperf",
+ "creation_date": "2015-11-10 12:21:01.464979"
+ },
+ {
+ "_id": "56582fae514bc5087f2ddce9",
+ "description": "Promise is resource reservation and management project to identify NFV related requirements and realize resource reservation for future usage by capacity management of resource pools regarding compute, network and storage. ",
+ "name": "promise",
+ "creation_date": "2015-11-27 10:25:50.539634"
+ },
+ {
+ "_id": "565c5939514bc5087f2ddcfa",
+ "description": "The project aims to integrate the OpenStack Neutron BGPVPN project and its supported backends into the OPNFV reference platform.",
+ "name": "sdnvpn",
+ "creation_date": "2015-11-30 14:12:09.101020"
+ },
+ {
+ "_id": "565c7181514bc5087f2ddcfb",
+ "description": "Service Function Chaining provides the ability to define an ordered list of a network services (e.g. firewalls, NAT, QoS).",
+ "name": "sfc",
+ "creation_date": "2015-11-30 15:55:45.932846"
+ },
+ {
+ "_id": "565d5fa2514bc5087f2ddd05",
+ "description": "High availability for OPNFV",
+ "name": "ha",
+ "creation_date": "2015-12-01 08:51:46.011622"
+ },
+ {
+ "_id": "565d606c514bc5087f2ddd06",
+ "description": "IPv6-enabled OPNFV",
+ "name": "ipvsix",
+ "creation_date": "2015-12-01 08:55:08.824385"
+ },
+ {
+ "_id": "565d60f7514bc5087f2ddd07",
+ "description": "OpenStack based VNF Forwarding Graph",
+ "name": "vnfgraph",
+ "creation_date": "2015-12-01 08:57:27.378992"
+ },
+ {
+ "_id": "565d6398514bc5087f2ddd08",
+ "description": "NFV hypervisors - kvm",
+ "name": "kvmfornfv",
+ "creation_date": "2015-12-01 09:08:40.006837"
+ },
+ {
+ "_id": "565d63f9514bc5087f2ddd09",
+ "description": "Open vSwitch for NFV",
+ "name": "ovsnfv",
+ "creation_date": "2015-12-01 09:10:17.341894"
+ },
+ {
+ "_id": "565d646f514bc5087f2ddd0a",
+ "description": "ARMband project",
+ "name": "armband",
+ "creation_date": "2015-12-01 09:12:15.681896"
+ },
+ {
+ "_id": "565d97be514bc5087f2ddd11",
+ "description": "Project Copper aims to help ensure that virtualized infrastructure deployments comply with goals of the VNF designer/user, e.g. re affinity and partitioning (e.g. per regulation, control/user plane separation, cost\u2026).",
+ "name": "copper",
+ "creation_date": "2015-12-01 12:51:10.776433"
+ },
+ {
+ "_id": "565d9f0f514bc5087f2ddd1d",
+ "description": "Doctor is fault management and maintenance project to develop and realize the consequent implementation for the OPNFV reference platform.",
+ "name": "doctor",
+ "creation_date": "2015-12-01 13:22:23.294997"
+ },
+ {
+ "_id": "565da033514bc5087f2ddd20",
+ "description": "OPNFV platform performance benchmark project",
+ "name": "qtip",
+ "creation_date": "2015-12-01 13:27:15.834860"
+ },
+ {
+ "_id": "565ec191514bc5087f3cfe26",
+ "description": "This project will enable OpenContrail to be selected as the virtual networking technology in OPNFV deployments. The following diagram illustrates how the components of an OPNFV stack that includes OpenContrail map onto the ETSI NFV architecture.",
+ "name": "ovno",
+ "creation_date": "2015-12-02 10:01:53.786607"
+ },
+ {
+ "_id": "566108c7514bc5087f3cfe41",
+ "description": "This project aims to find system bottlenecks by testing and verifying OPNFV infrastructure in a staging environment before committing it to a production environment. ",
+ "name": "bottlenecks",
+ "creation_date": "2015-12-04 03:30:15.934170"
+ },
+ {
+ "_id": "5669a8b4514bc5068a345d2f",
+ "description": "OPNFV policy test based on ODL (Groupe Based Policy)",
+ "name": "policy-test",
+ "creation_date": "2015-12-10 16:30:44.118352"
+ },
+ {
+ "_id": "57173ffe9377c5332042a5e1",
+ "description": "This project proposes a security management system called Moon. NFV uses cloud computing technologies to virtualize the resources and automate the control. The cloud infrastructure is able to provision a set of different cloud resources/services for VNFs (Virtualized Network Functions). Management of isolation and protection of, and interaction between, these VNFs become a big challenge. In order to avoid losing control over the VNFs in the cloud, Moon aims at designing and developing a security management system for OPNFV.",
+ "name": "moon",
+ "creation_date": "2016-04-20 08:38:22.335984"
+ },
+ {
+ "_id": "573395499377c56d4fc3f069",
+ "description": "OPNFV storage perfomance testing project",
+ "name": "storperf",
+ "creation_date": "2016-05-11 20:25:45.317073"
+ },
+ {
+ "_id": "575efa949377c5337aa248f1",
+ "description": "template distribution service",
+ "name": "domino",
+ "creation_date": "2016-06-13 18:25:24.157400"
+ },
+ {
+ "_id": "5762ad599377c5337a5ebc3b",
+ "description": "Project \u201cFastDataStacks\u201d creates and composes a set of scenarios which include the virtual forwarder supplied by the FD.IO project. The project also provides required enhancements to individual components such as SDN controllers or installers to allow for the scenario composition.",
+ "name": "fastdatastacks",
+ "creation_date": "2016-06-16 13:44:57.065160"
+ },
+ {
+ "_id": "577d46ef9377c54b278bbc91",
+ "description": "This project will address various goals for promoting availability and convergence of information and/or data models related to NFV service/VNF management, as being defined in standards (SDOs) and as developed in open source projects.",
+ "name": "models",
+ "creation_date": "2016-07-06 17:59:11.553045"
+ },
+ {
+ "_id": "57add1169377c54b278bd67b",
+ "description": " enhancement to OpenStack ( Nova / Cinder / Neutron / Glance / Ceilometer / KeyStone ), so that OpenStack as the VIM is able to support multisite NFV cloud.",
+ "name": "multisite",
+ "creation_date": "2016-08-12 13:37:26.911022"
+ },
+ {
+ "_id": "57b2cd019377c54b278bdb90",
+ "description": "OPNFV Parser project",
+ "name": "parser",
+ "creation_date": "2016-08-16 08:21:21.788381"
+ },
+ {
+ "_id": "582ef5ec1d2c6e000ab2b226",
+ "description": "The goal of the NetReady project is to investigate how the current OpenStack networking architecture needs to be evolved in order to ensure that NFV-related use cases can be flexibly and efficiently supported. ",
+ "name": "netready",
+ "creation_date": "2016-11-18 12:37:00.875824"
+ },
+ {
+ "_id": "582f29a61d2c6e000ab2b27d",
+ "description": "Security Scanning is a project to insure security compliance and vulnerability checks , as part of an automated CI-CD platform delivery process and as a standalone application. The project makes use of the existing SCAP format to perform deep scanning of NFVi nodes, to insure they are hardened and free of known CVE reported vulnerabilities. The SCAP content itself, is then consumed and run using an upstream opensource tool known as OpenSCAP",
+ "name": "securityscanning",
+ "creation_date": "2016-11-18 16:17:42.047102"
+ },
+ {
+ "_id": "583479f61d2c6e000ab2b9ee",
+ "description": "Opera seeks to develop requirements for OPEN-O MANO support in the OPNFV reference platform, with the plan to eventually integrate OPEN-O in OPNFV as a non-exclusive upstream MANO. ",
+ "name": "opera",
+ "creation_date": "2016-11-22 17:01:42.703558"
+ },
+ {
+ "_id": "58347a121d2c6e000ab2b9ef",
+ "description": "This project will enable the integration of the open source Open Baton platform, already available as ETSI NFV MANO reference implementation since October 2015, with existing OPNFV projects for specific scenarios and use cases.",
+ "name": "orchestra",
+ "creation_date": "2016-11-22 17:02:10.793237"
+ },
+ {
+ "_id": "587520262f248d000c44275b",
+ "description": "",
+ "name": "",
+ "creation_date": "2017-01-10 17:55:50.084797"
+ },
+ {
+ "_id": "587da16e8cf551000c780e93",
+ "description": "Model Oriented Virtualization Interface",
+ "name": "movie",
+ "creation_date": "2017-01-17 04:45:34.074478"
+ },
+ {
+ "_id": "58a155e3584d06000b01574e",
+ "description": "The goal of yardstick is to verify the infrastructure compliance when running VNF applications. NFV Use Cases described in ETSI GS NFV 001 show a large variety of applications, each defining specific requirements and complex configuration on the underlying infrastructure and test tools.The Yardstick concept decomposes typical VNF work-load performance metrics into a number of characteristics/performance vectors, which each of them can be represented by distinct test-cases.",
+ "name": "yardstick",
+ "creation_date": "2015-10-14 06:56:09"
+ },
+ {
+ "_id": "58a2e2c2584d06000b0157d2",
+ "description": "a few plugins that expand the capabilities of collectd which runs on compute nodes regarding collecting and reporting metrics",
+ "name": "barometer",
+ "creation_date": "2017-02-14 10:58:10.292536"
+ },
+ {
+ "_id": "5923e2ff78a2ad000ae6a13f",
+ "description": "Container Integrated For NFV",
+ "name": "OpenRetriever",
+ "creation_date": "2017-05-23 07:21:35.191176"
+ },
+ {
+ "_id": "592887a178a2ad000ae6a65e",
+ "description": "OVN for NFV",
+ "name": "ovn4nfv",
+ "creation_date": "2017-05-26 19:53:05.984276"
+ },
+ {
+ "_id": "5928a4af78a2ad000ae6a6be",
+ "description": "ONOS Framework",
+ "name": "onosfw",
+ "creation_date": "2017-05-26 21:57:03.600198"
+ },
+ {
+ "_id": "592be2ad78a2ad000ae6aad1",
+ "description": "VIM infrastructure networking assurance, visualizing and monitoring low level details of virtual networking, components and inter-connections: internal and across hosts",
+ "name": "vina",
+ "creation_date": "2017-05-29 08:58:21.248835"
+ }
+] \ No newline at end of file
diff --git a/dovetail/utils/local_db/restart_db.sh b/dovetail/utils/local_db/restart_db.sh
index 39b60e05..e44bc07c 100755
--- a/dovetail/utils/local_db/restart_db.sh
+++ b/dovetail/utils/local_db/restart_db.sh
@@ -26,4 +26,4 @@ export db_host_ip=${db_host_ip:-"$1"}
sudo docker rm -f testapi
sudo docker run -itd -p $testapi_port:8000 --name testapi \
- -e mongodb_url=mongodb://$db_host_ip:$mongodb_port/ kkltcjk/testapi:reporting
+ -e mongodb_url=mongodb://$db_host_ip:$mongodb_port/ opnfv/testapi:cvp.0.5.0
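restart_db.sh takes the DB host IP as its first argument (falling back to an exported db_host_ip, per the hunk context), so after this change a restart with the released image looks roughly like the sketch below; the IP is an example value.

# Illustrative invocation of the updated script.
cd dovetail/utils/local_db
./restart_db.sh 192.168.115.2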
diff --git a/dovetail/utils/offline/config.yaml b/dovetail/utils/offline/config.yaml
index 185686a5..9ff4a387 100644
--- a/dovetail/utils/offline/config.yaml
+++ b/dovetail/utils/offline/config.yaml
@@ -2,22 +2,26 @@
docker_images:
dovetail:
domain: opnfv
- tag: latest
+ tag: cvp.0.5.0
store_name: image_dovetail.docker
functest:
domain: opnfv
- tag: latest
+ tag: cvp.0.5.0
store_name: image_functest.docker
yardstick:
domain: opnfv
- tag: latest
+ tag: danube.3.2
store_name: image_yardstick.docker
+ bottlenecks:
+ domain: opnfv
+ tag: cvp.0.4.0
+ store_name: image_bottlenecks.docker
testapi:
domain: opnfv
- tag: latest
+ tag: cvp.0.5.0
store_name: image_testapi.docker
mongo:
- tag: 3.5
+ tag: 3.2.1
store_name: image_mongo.docker
docker_save_path: /home/opnfv/dovetail/results/
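config.yaml now pins every image to a released tag and names the archive each image is stored under for offline transfer. The offline tooling itself is not shown in this patch; the sketch below only illustrates how one entry translates into pull/save commands, with the remaining images handled the same way.

# Illustrative only: archive the pinned images for an offline installation.
save_path=/home/opnfv/dovetail/results

sudo docker pull opnfv/dovetail:cvp.0.5.0
sudo docker save -o ${save_path}/image_dovetail.docker opnfv/dovetail:cvp.0.5.0

sudo docker pull mongo:3.2.1
sudo docker save -o ${save_path}/image_mongo.docker mongo:3.2.1

# Repeat for functest, yardstick, bottlenecks and testapi, then on the
# offline host load each archive with: sudo docker load -i <store_name>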
diff --git a/setup.cfg b/setup.cfg
index 4908f194..003d2710 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = dovetail
-version = 0.1.0
+version = 0.5.0
home-page = https://wiki.opnfv.org/display/dovetail
[entry_points]