91 files changed, 4581 insertions, 2640 deletions
@@ -34,3 +34,4 @@ unittest_results.log
 docs_build/
 docs_output/
 results/
+pre_config/
@@ -7,14 +7,19 @@ Project Lead: Hongbo (hongbo.tianhongbo@huawei.com)
 Jira Project Name: OPNFV qualification testing
 Jira Project Prefix: dovetail
 Mailing list tag: [dovetail]
-IRC: Server:freenode.net Channel:#opnfv-testperf
+IRC: Server:freenode.net Channel:#opnfv-meeting
 Repository: dovetail
 
 Committers:
-christopher.price@ericsson.com
 wenjing.chu@huawei.com
 hongbo.tianhongbo@huawei.com
 dneary@redhat.com
+georg.kunz@ericsson.com
+trevor.cooper@intel.com
+zshi@redhat.com
+lylavoie@iol.unh.edu
+grakiss.wanglei@huawei.com
+fuqiao@chinamobile.com
 
 Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-09-01-13.59.html
 Link(s) to approval of additional committers:
diff --git a/dashboard/backend/dovetail/__init__.py b/dashboard/backend/dovetail/__init__.py
deleted file mode 100755
index 6dbd8d79..00000000
--- a/dashboard/backend/dovetail/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/dashboard/backend/dovetail/api/__init__.py b/dashboard/backend/dovetail/api/__init__.py
deleted file mode 100755
index f9c4e5a2..00000000
--- a/dashboard/backend/dovetail/api/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import datetime
-import logging
-
-from flask import Flask
-
-from dovetail.utils import util
-
-logging.info('flask app: begin to init')
-
-app = Flask(__name__)
-app.debug = True
-logging.info('flask app config:%s', app.config)
-
-app.config['REMEMBER_COOKIE_DURATION'] = (
-    datetime.timedelta(
-        seconds=util.parse_time_interval('2h')
-    )
-)
-
-logging.info('flask app: finish init')
diff --git a/dashboard/backend/dovetail/api/api.py b/dashboard/backend/dovetail/api/api.py
deleted file mode 100755
index 7839b893..00000000
--- a/dashboard/backend/dovetail/api/api.py
+++ /dev/null
@@ -1,183 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-
-from dovetail.api import utils
-from dovetail.api import exception_handler
-from dovetail.db import api as db_api
-
-from flask import Flask
-from flask import request
-
-import json
-
-app = Flask(__name__)
-
-
-@app.after_request
-def after_request(response):
-    response.headers.add('Access-Control-Allow-Origin', '*')
-    response.headers.add(
-        'Access-Control-Allow-Headers',
-        'Content-Type, Authorization')
-    response.headers.add('Aceess-Control-Allow-Methods', 'GET,PUT,DELETE,POST')
-    return response
-
-# test
-
-
-@app.route("/test", methods=['GET'])
-def test():
-    """backend api test"""
-    logging.info('test functest')
-    resp = utils.make_json_response(
-        200, {'test': 20}
-    )
-    return resp
-
-
-# settings
-@app.route("/clear", methods=['POST'])
-def clear_settings():
-    """ clear all settings data on backend server """
-    logging.info('clear all settings')
-
-    return utils.make_json_response(
-        200, {}
-    )
-
-
-@app.route("/settings", methods=['GET'])
-def list_settings():
-    """list settings"""
-    logging.info('list settings')
-    global settings
-    return utils.make_json_response(200, settings)
-
-
-@app.route("/settings", methods=['POST'])
-def add_settings():
-    pass
-
-
-@app.route("/settings", methods=['POST'])
-def remove_settings():
-    pass
-
-
-@app.route("/testcases", methods=['GET'])
-def get_testcases():
-    pass
-
-
-@app.route("/results/<test_id>", methods=['GET'])
-def show_result(test_id):
-    data = _get_request_args()
-    return utils.make_json_response(
-        200,
-        db_api.get_result(
-            test_id, **data
-        )
-    )
-
-
-@app.route("/results", methods=['GET'])
-def list_results():
-    data = _get_request_args()
-    return utils.make_json_response(
-        200,
-        db_api.list_results(
-            **data
-        )
-    )
-
-
-@app.route("/results", methods=['POST'])
-def add_result():
-    data = _get_request_data()
-    ret_code = 200
-    json_object = json.loads(data)
-    logging.debug('json_object:%s', (json_object))
-    if not db_api.store_result(**json_object):
-        ret_code = 500
-    resp = utils.make_json_response(
-        ret_code, data
-    )
-    return resp
-
-
-@app.route("/results/<test_id>", methods=['DELETE'])
-def remove_results(test_id):
-    data = _get_request_data()
-    logging.debug('data:%s', data)
-    response = db_api.del_result(
-        test_id, **data
-    )
-    return utils.make_json_response(
-        200, response
-    )
-
-
-def _get_request_data():
-    """Convert reqeust data from string to python dict.
-
-    If the request data is not json formatted, raises
-    exception_handler.BadRequest.
-    If the request data is not json formatted dict, raises
-    exception_handler.BadRequest
-    If the request data is empty, return default as empty dict.
-
-    Usage: It is used to add or update a single resource.
-    """
-    if request.data:
-        try:
-            data = json.loads(request.data)
-        except Exception:
-            raise exception_handler.BadRequest(
-                'request data is not json formatted: %s' % request.data
-            )
-        if not isinstance(data, dict):
-            raise exception_handler.BadRequest(
-                'request data is not json formatted dict: %s' % request.data
-            )
-
-        return request.data
-    else:
-        return {}
-
-
-def _get_request_args(**kwargs):
-    """Get request args as dict.
-
-    The value in the dict is converted to expected type.
-
-    Args:
-        kwargs: for each key, the value is the type converter.
-    """
-    args = dict(request.args)
-    for key, value in args.items():
-        if key in kwargs:
-            converter = kwargs[key]
-            if isinstance(value, list):
-                args[key] = [converter(item) for item in value]
-            else:
-                args[key] = converter(value)
-    return args
-
-
-'''
-@app.teardown_appcontext
-def shutdown_session(exception=None):
-    db_session.remove()
-'''
-# user login/logout
-
-if __name__ == '__main__':
-    app.run(host='127.0.0.1')
diff --git a/dashboard/backend/dovetail/api/exception_handler.py b/dashboard/backend/dovetail/api/exception_handler.py
deleted file mode 100755
index b7ce592a..00000000
--- a/dashboard/backend/dovetail/api/exception_handler.py
+++ /dev/null
@@ -1,93 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-"""Exceptions for RESTful API."""
-import traceback
-
-from dovetail.api import app
-from dovetail.api import utils
-
-
-class HTTPException(Exception):
-
-    def __init__(self, message, status_code):
-        super(HTTPException, self).__init__(message)
-        self.traceback = traceback.format_exc()
-        self.status_code = status_code
-
-    def to_dict(self):
-        return {'message': str(self)}
-
-
-class ItemNotFound(HTTPException):
-    """Define the exception for referring non-existing object."""
-
-    def __init__(self, message):
-        super(ItemNotFound, self).__init__(message, 410)
-
-
-class BadRequest(HTTPException):
-    """Define the exception for invalid/missing parameters.
-
-    User making a request in invalid state cannot be processed.
- """ - - def __init__(self, message): - super(BadRequest, self).__init__(message, 400) - - -class Unauthorized(HTTPException): - """Define the exception for invalid user login.""" - - def __init__(self, message): - super(Unauthorized, self).__init__(message, 401) - - -class UserDisabled(HTTPException): - """Define the exception for disabled users.""" - - def __init__(self, message): - super(UserDisabled, self).__init__(message, 403) - - -class Forbidden(HTTPException): - """Define the exception for invalid permissions.""" - - def __init__(self, message): - super(Forbidden, self).__init__(message, 403) - - -class BadMethod(HTTPException): - """Define the exception for invoking unsupported methods.""" - - def __init__(self, message): - super(BadMethod, self).__init__(message, 405) - - -class ConflictObject(HTTPException): - """Define the exception for creating an existing object.""" - - def __init__(self, message): - super(ConflictObject, self).__init__(message, 409) - - -@app.errorhandler(Exception) -def handle_exception(error): - if hasattr(error, 'to_dict'): - response = error.to_dict() - else: - response = {'message': str(error)} - if app.debug and hasattr(error, 'traceback'): - response['traceback'] = error.traceback - - status_code = 400 - if hasattr(error, 'status_code'): - status_code = error.status_code - - return utils.make_json_response(status_code, response) diff --git a/dashboard/backend/dovetail/api/utils.py b/dashboard/backend/dovetail/api/utils.py deleted file mode 100755 index dbe8d082..00000000 --- a/dashboard/backend/dovetail/api/utils.py +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -import json -from flask import make_response - - -def make_json_response(status_code, data): - """Wrap json format to the reponse object.""" - - result = json.dumps(data, indent=4, default=lambda x: None) + '\r\n' - resp = make_response(result, status_code) - resp.headers['Content-type'] = 'application/json' - return resp diff --git a/dashboard/backend/dovetail/db/__init__.py b/dashboard/backend/dovetail/db/__init__.py deleted file mode 100755 index 6dbd8d79..00000000 --- a/dashboard/backend/dovetail/db/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## diff --git a/dashboard/backend/dovetail/db/api.py b/dashboard/backend/dovetail/db/api.py deleted file mode 100755 index 631ed2a3..00000000 --- a/dashboard/backend/dovetail/db/api.py +++ /dev/null @@ -1,72 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -""" -Defines interface for DB access. -""" - -import logging - -from dovetail.db import database -from dovetail.db import utils -from dovetail.db import models - - -@database.run_in_session() -def store_result(exception_when_existing=True, - session=None, **kwargs): - """Storing results into database. - - :param data: Dict describes test results. - """ - logging.debug('store_result:%s', kwargs) - result = utils.add_db_object( - session, models.Result, exception_when_existing, - **kwargs) - - return result - - -@database.run_in_session() -@utils.wrap_to_dict() -def list_results(session=None, **filters): - """Get all results - """ - logging.debug('session:%s', session) - results = utils.list_db_objects( - session, models.Result, **filters - ) - return results - - -@database.run_in_session() -@utils.wrap_to_dict() -def get_result(test_id, exception_when_missing=True, - session=None, **kwargs): - """Get specific result with the test_id - - :param test_id: the unique serial number for the test - """ - return _get_result(test_id, session, - exception_when_missing=exception_when_missing, **kwargs) - - -def _get_result(test_id, session=None, **kwargs): - return utils.get_db_object( - session, models.Result, test_id=test_id, **kwargs) - - -@database.run_in_session() -def del_result(test_id, session=None, **kwargs): - """Delete a results from database - - :param test_id: the unique serial number for the test - """ - return utils.del_db_objects(session, models.Result, - test_id=test_id, **kwargs) diff --git a/dashboard/backend/dovetail/db/database.py b/dashboard/backend/dovetail/db/database.py deleted file mode 100755 index bc09d3bd..00000000 --- a/dashboard/backend/dovetail/db/database.py +++ /dev/null @@ -1,182 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -import logging -import functools - -from threading import local - -from sqlalchemy import create_engine -from sqlalchemy.exc import IntegrityError -from sqlalchemy.exc import OperationalError -from sqlalchemy.orm import scoped_session -from sqlalchemy.orm import sessionmaker -from sqlalchemy.pool import StaticPool - -from contextlib import contextmanager -from dovetail.db import exception -from dovetail.db import models - -ENGINE = None -SESSION = sessionmaker(autocommit=False, autoflush=False) -SCOPED_SESSION = None -SESSION_HOLDER = local() - -SQLALCHEMY_DATABASE_URI = "mysql://root:%s@localhost:3306/dovetail" % ('root') - - -def init(database_url=None): - """Initialize database. - - :param database_url: string, database url. 
- """ - global ENGINE - global SCOPED_SESSION - if not database_url: - database_url = SQLALCHEMY_DATABASE_URI - logging.info('init database %s', database_url) - print("database init %s" % database_url) - ENGINE = create_engine( - database_url, convert_unicode=True, - poolclass=StaticPool - ) - SESSION.configure(bind=ENGINE) - SCOPED_SESSION = scoped_session(SESSION) - models.BASE.query = SCOPED_SESSION.query_property() - - -def in_session(): - """check if in database session scope.""" - bool(hasattr(SESSION_HOLDER, 'session')) - - -@contextmanager -def session(exception_when_in_session=True): - """database session scope. - - To operate database, it should be called in database session. - If not exception_when_in_session, the with session statement support - nested session and only the out most session commit/rollback the - transaction. - """ - if not ENGINE: - init() - - nested_session = False - if hasattr(SESSION_HOLDER, 'session'): - if exception_when_in_session: - logging.error('we are already in session') - raise exception.DatabaseException('session already exist') - else: - new_session = SESSION_HOLDER.session - nested_session = True - logging.log( - logging.DEBUG, - 'reuse session %s', nested_session - ) - else: - new_session = SCOPED_SESSION() - setattr(SESSION_HOLDER, 'session', new_session) - logging.log( - logging.DEBUG, - 'enter session %s', new_session - ) - try: - yield new_session - if not nested_session: - new_session.commit() - except Exception as error: - if not nested_session: - new_session.rollback() - logging.error('failed to commit session') - logging.exception(error) - if isinstance(error, IntegrityError): - for item in error.statement.split(): - if item.islower(): - object = item - break - raise exception.DuplicatedRecord( - '%s in %s' % (error.orig, object) - ) - elif isinstance(error, OperationalError): - raise exception.DatabaseException( - 'operation error in database' - ) - elif isinstance(error, exception.DatabaseException): - raise error - else: - raise exception.DatabaseException(str(error)) - finally: - if not nested_session: - new_session.close() - SCOPED_SESSION.remove() - delattr(SESSION_HOLDER, 'session') - logging.log( - logging.DEBUG, - 'exit session %s', new_session - ) - - -def current_session(): - """Get the current session scope when it is called. - - :return: database session. - :raises: DatabaseException when it is not in session. - """ - try: - return SESSION_HOLDER.session - except Exception as error: - logging.error('It is not in the session scope') - logging.exception(error) - if isinstance(error, exception.DatabaseException): - raise error - else: - raise exception.DatabaseException(str(error)) - - -def run_in_session(exception_when_in_session=True): - """Decorator to make sure the decorated function run in session. - - When not exception_when_in_session, the run_in_session can be - decorated several times. 
- """ - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - my_session = kwargs.get('session') - if my_session is not None: - return func(*args, **kwargs) - else: - with session( - exception_when_in_session=exception_when_in_session - ) as my_session: - kwargs['session'] = my_session - return func(*args, **kwargs) - except Exception as error: - logging.error( - 'got exception with func %s args %s kwargs %s', - func, args, kwargs - ) - logging.exception(error) - raise error - return wrapper - return decorator - - -@run_in_session() -def create_db(session=None): - """Create database.""" - models.BASE.metadata.create_all(bind=ENGINE) - print('create_db') - - -def drop_db(): - """Drop database.""" - models.BASE.metadata.drop_all(bind=ENGINE) diff --git a/dashboard/backend/dovetail/db/exception.py b/dashboard/backend/dovetail/db/exception.py deleted file mode 100755 index 4acc5fbd..00000000 --- a/dashboard/backend/dovetail/db/exception.py +++ /dev/null @@ -1,121 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -"""Custom exception""" -import traceback - - -class DatabaseException(Exception): - """Base class for all database exceptions.""" - - def __init__(self, message): - super(DatabaseException, self).__init__(message) - self.traceback = traceback.format_exc() - self.status_code = 400 - - def to_dict(self): - return {'message': str(self)} - - -class RecordNotExists(DatabaseException): - """Define the exception for referring non-existing object in DB.""" - - def __init__(self, message): - super(RecordNotExists, self).__init__(message) - self.status_code = 404 - - -class DuplicatedRecord(DatabaseException): - """Define the exception for trying to insert an existing object in DB.""" - - def __init__(self, message): - super(DuplicatedRecord, self).__init__(message) - self.status_code = 409 - - -class Unauthorized(DatabaseException): - """Define the exception for invalid user login.""" - - def __init__(self, message): - super(Unauthorized, self).__init__(message) - self.status_code = 401 - - -class UserDisabled(DatabaseException): - """Define the exception that a disabled user tries to do some operations. - - """ - - def __init__(self, message): - super(UserDisabled, self).__init__(message) - self.status_code = 403 - - -class Forbidden(DatabaseException): - """Define the exception that a user is trying to make some action - - without the right permission. - - """ - - def __init__(self, message): - super(Forbidden, self).__init__(message) - self.status_code = 403 - - -class NotAcceptable(DatabaseException): - """The data is not acceptable.""" - - def __init__(self, message): - super(NotAcceptable, self).__init__(message) - self.status_code = 406 - - -class InvalidParameter(DatabaseException): - """Define the exception that the request has invalid or missing parameters. - - """ - - def __init__(self, message): - super(InvalidParameter, self).__init__(message) - self.status_code = 400 - - -class InvalidResponse(DatabaseException): - """Define the exception that the response is invalid. 
- - """ - - def __init__(self, message): - super(InvalidResponse, self).__init__(message) - self.status_code = 400 - - -class MultiDatabaseException(DatabaseException): - """Define the exception composites with multi exceptions.""" - - def __init__(self, exceptions): - super(MultiDatabaseException, self).__init__('multi exceptions') - self.exceptions = exceptions - self.status_code = 400 - - @property - def traceback(self): - tracebacks = [] - for exception in self.exceptions: - tracebacks.append(exception.trackback) - - def to_dict(self): - dict_info = super(MultiDatabaseException, self).to_dict() - dict_info.update({ - 'exceptions': [ - exception.to_dict() for exception in self.exceptions - ] - }) - return dict_info diff --git a/dashboard/backend/dovetail/db/models.py b/dashboard/backend/dovetail/db/models.py deleted file mode 100755 index e0f3ffa3..00000000 --- a/dashboard/backend/dovetail/db/models.py +++ /dev/null @@ -1,105 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -import datetime - -from sqlalchemy import Column, Integer, String, DateTime -from sqlalchemy.ext.declarative import declarative_base - -from dovetail.utils import util -from dovetail.db import exception - -BASE = declarative_base() - - -class MarkTimestamp(object): - created = Column(DateTime, default=lambda: datetime.datetime.now()) - updated = Column(DateTime, default=lambda: datetime.datetime.now(), - onupdate=lambda: datetime.datetime.now()) - - -class ModelHandler(object): - - def initialize(self): - self.update() - - def update(self): - pass - - @staticmethod - def type_check(value, column_type): - if value is None: - return True - if not hasattr(column_type, 'python_type'): - return True - column_python_type = column_type.python_type - if isinstance(value, column_python_type): - return True - if issubclass(column_python_type, basestring): - return isinstance(value, basestring) - if column_python_type in [int, long]: - return type(value) in [int, long] - if column_python_type in [float]: - return type(value) in [float] - if column_python_type in [bool]: - return type(value) in [bool] - return False - - def validate(self): - columns = self.__mapper__.columns - for key, column in columns.items(): - value = getattr(self, key) - if not self.type_check(value, column.type): - raise exception.InvalidParameter( - 'column %s value %r type is unexpected: %s' % ( - key, value, column.type - ) - ) - - def to_dict(self): - """General function to convert record to dict. 
-
-        Convert all columns not starting with '_' to
-        {<column_name>: <column_value>}
-        """
-        keys = self.__mapper__.columns.keys()
-        dict_info = {}
-        for key in keys:
-            if key.startswith('_'):
-                continue
-            value = getattr(self, key)
-            if value is not None:
-                if isinstance(value, datetime.datetime):
-                    value = util.format_datetime(value)
-                dict_info[key] = value
-        return dict_info
-
-
-class Result(BASE, MarkTimestamp, ModelHandler):
-    __tablename__ = 'result'
-    id = Column(Integer, primary_key=True)
-    test_id = Column(String(120), unique=True)
-    name = Column(String(120))
-    data = Column(String(64000))
-
-    def __init__(self, **kwargs):
-        super(Result, self).__init__(**kwargs)
-
-    def __repr__(self):
-        return '<Result %r>' % (self.name)
-
-    def __str__(self):
-        return 'Result[%s:%s]' % (self.name, self.test_id)
-
-    def to_dict(self):
-        dict_info = super(Result, self).to_dict()
-        dict_info['name'] = self.name
-        dict_info['test_id'] = self.test_id
-        dict_info['data'] = self.data
-        return dict_info
diff --git a/dashboard/backend/dovetail/db/utils.py b/dashboard/backend/dovetail/db/utils.py
deleted file mode 100755
index 4bb0026d..00000000
--- a/dashboard/backend/dovetail/db/utils.py
+++ /dev/null
@@ -1,478 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-"""Utilities for database."""
-
-
-import functools
-import inspect
-import logging
-
-from sqlalchemy import and_
-from sqlalchemy import or_
-
-from dovetail.db import exception
-from dovetail.db import models
-
-
-def add_db_object(session, table, exception_when_existing=True,
-                  *args, **kwargs):
-    """Create db object.
-
-    If not exception_when_existing and the db object exists,
-    Instead of raising exception, updating the existing db object.
- """ - if not session: - raise exception.DatabaseException('session param is None') - with session.begin(subtransactions=True): - logging.debug( - 'session %s add object %s atributes %s to table %s', - id(session), args, kwargs, table.__name__) - argspec = inspect.getargspec(table.__init__) - arg_names = argspec.args[1:] - arg_defaults = argspec.defaults - if not arg_defaults: - arg_defaults = [] - if not ( - len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names) - ): - raise exception.InvalidParameter( - 'arg names %s does not match arg values %s' % ( - arg_names, args) - ) - db_keys = dict(zip(arg_names, args)) - logging.debug('db_keys:%s', db_keys) - if db_keys: - db_object = session.query(table).filter_by(**db_keys).first() - else: - logging.debug('db object is None') - db_object = None - - new_object = False - if db_object: - logging.debug( - 'got db object %s: %s', db_keys, db_object - ) - if exception_when_existing: - raise exception.DuplicatedRecord( - '%s exists in table %s' % (db_keys, table.__name__) - ) - else: - db_object = table(**db_keys) - new_object = True - - for key, value in kwargs.items(): - setattr(db_object, key, value) - - logging.debug('db_object:%s', db_object) - if new_object: - session.add(db_object) - session.flush() - db_object.initialize() - db_object.validate() - logging.debug( - 'session %s db object %s added', id(session), db_object - ) - return db_object - - -def list_db_objects(session, table, order_by=[], **filters): - """List db objects. - - If order by given, the db objects should be sorted by the ordered keys. - """ - if not session: - raise exception.DatabaseException('session param is None') - with session.begin(subtransactions=True): - logging.debug( - 'session %s list db objects by filters %s in table %s', - id(session), filters, table.__name__ - ) - db_objects = model_order_by( - model_filter( - model_query(session, table), - table, - **filters - ), - table, - order_by - ).all() - logging.debug( - 'session %s got listed db objects: %s', - id(session), db_objects - ) - return db_objects - - -def get_db_object(session, table, exception_when_missing=True, **kwargs): - """Get db object. - - If not exception_when_missing and the db object can not be found, - return None instead of raising exception. 
- """ - if not session: - raise exception.DatabaseException('session param is None') - with session.begin(subtransactions=True): - logging.debug( - 'session %s get db object %s from table %s', - id(session), kwargs, table.__name__) - db_object = model_filter( - model_query(session, table), table, **kwargs - ).first() - logging.debug( - 'session %s got db object %s', id(session), db_object - ) - if db_object: - return db_object - - if not exception_when_missing: - return None - - raise exception.RecordNotExists( - 'Cannot find the record in table %s: %s' % ( - table.__name__, kwargs - ) - ) - - -def del_db_objects(session, table, **filters): - """delete db objects.""" - if not session: - raise exception.DatabaseException('session param is None') - with session.begin(subtransactions=True): - logging.debug( - 'session %s delete db objects by filters %s in table %s', - id(session), filters, table.__name__ - ) - query = model_filter( - model_query(session, table), table, **filters - ) - db_objects = query.all() - query.delete(synchronize_session=False) - logging.debug( - 'session %s db objects %s deleted', id(session), db_objects - ) - return db_objects - - -def model_order_by(query, model, order_by): - """append order by into sql query model.""" - if not order_by: - return query - order_by_cols = [] - for key in order_by: - if isinstance(key, tuple): - key, is_desc = key - else: - is_desc = False - if isinstance(key, basestring): - if hasattr(model, key): - col_attr = getattr(model, key) - else: - continue - else: - col_attr = key - if is_desc: - order_by_cols.append(col_attr.desc()) - else: - order_by_cols.append(col_attr) - return query.order_by(*order_by_cols) - - -def _model_condition(col_attr, value): - """Generate condition for one column. - - Example for col_attr is name: - value is 'a': name == 'a' - value is ['a']: name == 'a' - value is ['a', 'b']: name == 'a' or name == 'b' - value is {'eq': 'a'}: name == 'a' - value is {'lt': 'a'}: name < 'a' - value is {'le': 'a'}: name <= 'a' - value is {'gt': 'a'}: name > 'a' - value is {'ge': 'a'}: name >= 'a' - value is {'ne': 'a'}: name != 'a' - value is {'in': ['a', 'b']}: name in ['a', 'b'] - value is {'notin': ['a', 'b']}: name not in ['a', 'b'] - value is {'startswith': 'abc'}: name like 'abc%' - value is {'endswith': 'abc'}: name like '%abc' - value is {'like': 'abc'}: name like '%abc%' - value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c' - value is [{'lt': 'a'}]: name < 'a' - value is [{'lt': 'a'}, {'gt': c'}]: name < 'a' or name > 'c' - value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c' - - If value is a list, the condition is the or relationship among - conditions of each item. - If value is dict and there are multi keys in the dict, the relationship - is and conditions of each key. - Otherwise the condition is to compare the column with the value. 
- """ - if isinstance(value, list): - basetype_values = [] - composite_values = [] - for item in value: - if isinstance(item, (list, dict)): - composite_values.append(item) - else: - basetype_values.append(item) - conditions = [] - if basetype_values: - if len(basetype_values) == 1: - condition = (col_attr == basetype_values[0]) - else: - condition = col_attr.in_(basetype_values) - conditions.append(condition) - for composite_value in composite_values: - condition = _model_condition(col_attr, composite_value) - if condition is not None: - conditions.append(condition) - if not conditions: - return None - if len(conditions) == 1: - return conditions[0] - return or_(*conditions) - elif isinstance(value, dict): - conditions = [] - if 'eq' in value: - conditions.append(_model_condition_func( - col_attr, value['eq'], - lambda attr, data: attr == data, - lambda attr, data, item_condition_func: attr.in_(data) - )) - if 'lt' in value: - conditions.append(_model_condition_func( - col_attr, value['lt'], - lambda attr, data: attr < data, - _one_item_list_condition_func - )) - if 'gt' in value: - conditions.append(_model_condition_func( - col_attr, value['gt'], - lambda attr, data: attr > data, - _one_item_list_condition_func - )) - if 'le' in value: - conditions.append(_model_condition_func( - col_attr, value['le'], - lambda attr, data: attr <= data, - _one_item_list_condition_func - )) - if 'ge' in value: - conditions.append(_model_condition_func( - col_attr, value['ge'], - lambda attr, data: attr >= data, - _one_item_list_condition_func - )) - if 'ne' in value: - conditions.append(_model_condition_func( - col_attr, value['ne'], - lambda attr, data: attr != data, - lambda attr, data, item_condition_func: attr.notin_(data) - )) - if 'in' in value: - conditions.append(col_attr.in_(value['in'])) - if 'notin' in value: - conditions.append(col_attr.notin_(value['notin'])) - if 'startswith' in value: - conditions.append(_model_condition_func( - col_attr, value['startswith'], - lambda attr, data: attr.like('%s%%' % data) - )) - if 'endswith' in value: - conditions.append(_model_condition_func( - col_attr, value['endswith'], - lambda attr, data: attr.like('%%%s' % data) - )) - if 'like' in value: - conditions.append(_model_condition_func( - col_attr, value['like'], - lambda attr, data: attr.like('%%%s%%' % data) - )) - conditions = [ - condition - for condition in conditions - if condition is not None - ] - if not conditions: - return None - if len(conditions) == 1: - return conditions[0] - return and_(conditions) - else: - condition = (col_attr == value) - return condition - - -def _default_list_condition_func(col_attr, value, condition_func): - """The default condition func for a list of data. - - Given the condition func for single item of data, this function - wrap the condition_func and return another condition func using - or_ to merge the conditions of each single item to deal with a - list of data item. - - Args: - col_attr: the colomn name - value: the column value need to be compared. 
- condition_func: the sqlalchemy condition object like == - - Examples: - col_attr is name, value is ['a', 'b', 'c'] and - condition_func is ==, the returned condition is - name == 'a' or name == 'b' or name == 'c' - """ - conditions = [] - for sub_value in value: - condition = condition_func(col_attr, sub_value) - if condition is not None: - conditions.append(condition) - if conditions: - return or_(*conditions) - else: - return None - - -def _one_item_list_condition_func(col_attr, value, condition_func): - """The wrapper condition func to deal with one item data list. - - For simplification, it is used to reduce generating too complex - sql conditions. - """ - if value: - return condition_func(col_attr, value[0]) - else: - return None - - -def _model_condition_func( - col_attr, value, - item_condition_func, - list_condition_func=_default_list_condition_func -): - """Return sql condition based on value type.""" - if isinstance(value, list): - if not value: - return None - if len(value) == 1: - return item_condition_func(col_attr, value) - return list_condition_func( - col_attr, value, item_condition_func - ) - else: - return item_condition_func(col_attr, value) - - -def model_filter(query, model, **filters): - """Append conditons to query for each possible column.""" - for key, value in filters.items(): - if isinstance(key, basestring): - if hasattr(model, key): - col_attr = getattr(model, key) - else: - continue - else: - col_attr = key - - condition = _model_condition(col_attr, value) - if condition is not None: - query = query.filter(condition) - return query - - -def model_query(session, model): - """model query. - - Return sqlalchemy query object. - """ - if not issubclass(model, models.BASE): - raise exception.DatabaseException("model should be sublass of BASE!") - - return session.query(model) - - -def wrap_to_dict(support_keys=[], **filters): - """Decrator to convert returned object to dict. - - The details is decribed in _wrapper_dict. - """ - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - return _wrapper_dict( - func(*args, **kwargs), support_keys, **filters - ) - return wrapper - return decorator - - -def _wrapper_dict(data, support_keys, **filters): - """Helper for warpping db object into dictionary. - - If data is list, convert it to a list of dict - If data is Base model, convert it to dict - for the data as a dict, filter it with the supported keys. - For each filter_key, filter_value in filters, also filter - data[filter_key] by filter_value recursively if it exists. 
- - Example: - data is models.Switch, it will be converted to - { - 'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456, - 'credentials': {'version': 2, 'password': 'abc'} - } - Then if support_keys are ['id', 'ip', 'credentials'], - it will be filtered to { - 'id': 1, 'ip': '10.0.0.1', - 'credentials': {'version': 2, 'password': 'abc'} - } - Then if filters is {'credentials': ['version']}, - it will be filtered to { - 'id': 1, 'ip': '10.0.0.1', - 'credentials': {'version': 2} - } - """ - logging.debug( - 'wrap dict %s by support_keys=%s filters=%s', - data, support_keys, filters - ) - if isinstance(data, list): - return [ - _wrapper_dict(item, support_keys, **filters) - for item in data - ] - if isinstance(data, models.ModelHandler): - data = data.to_dict() - if not isinstance(data, dict): - raise exception.InvalidResponse( - 'response %s type is not dict' % data - ) - info = {} - try: - if len(support_keys) == 0: - support_keys = data.keys() - for key in support_keys: - if key in data and data[key] is not None: - if key in filters: - filter_keys = filters[key] - if isinstance(filter_keys, dict): - info[key] = _wrapper_dict( - data[key], filter_keys.keys(), - **filter_keys - ) - else: - info[key] = _wrapper_dict( - data[key], filter_keys - ) - else: - info[key] = data[key] - return info - except Exception as error: - logging.exception(error) - raise error diff --git a/dashboard/backend/dovetail/utils/__init__.py b/dashboard/backend/dovetail/utils/__init__.py deleted file mode 100755 index 6dbd8d79..00000000 --- a/dashboard/backend/dovetail/utils/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## diff --git a/dashboard/backend/dovetail/utils/flags.py b/dashboard/backend/dovetail/utils/flags.py deleted file mode 100755 index dd10670b..00000000 --- a/dashboard/backend/dovetail/utils/flags.py +++ /dev/null @@ -1,82 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -import sys - -from optparse import OptionParser - - -class Flags(object): - """Class to store flags.""" - - PARSER = OptionParser() - PARSED_OPTIONS = None - - @classmethod - def parse_args(cls): - """parse args.""" - (options, argv) = Flags.PARSER.parse_args() - sys.argv = [sys.argv[0]] + argv - Flags.PARSED_OPTIONS = options - - def __getattr__(self, name): - if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name): - return getattr(Flags.PARSED_OPTIONS, name) - - for option in Flags.PARSER.option_list: - if option.dest == name: - return option.default - - raise AttributeError('Option instance has no attribute %s' % name) - - def __setattr__(self, name, value): - if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name): - setattr(Flags.PARSED_OPTIONS, name, value) - return - - for option in Flags.PARSER.option_list: - if option.dest == name: - option.default = value - return - - object.__setattr__(self, name, value) - - -OPTIONS = Flags() - - -def init(): - """Init flag parsing.""" - OPTIONS.parse_args() - - -def add(flagname, **kwargs): - """Add a flag name and its setting. - - :param flagname: flag name declared in cmd as --<flagname>=... - :type flagname: str - """ - Flags.PARSER.add_option('--%s' % flagname, - dest=flagname, **kwargs) - - -def add_bool(flagname, default=True, **kwargs): - """Add a bool flag name and its setting. - - :param flagname: flag name declared in cmd as --[no]<flagname>. - :type flagname: str - :param default: default value - :type default: bool - """ - Flags.PARSER.add_option('--%s' % flagname, - dest=flagname, default=default, - action="store_true", **kwargs) - Flags.PARSER.add_option('--no%s' % flagname, - dest=flagname, - action="store_false", **kwargs) diff --git a/dashboard/backend/dovetail/utils/logsetting.py b/dashboard/backend/dovetail/utils/logsetting.py deleted file mode 100755 index 27255688..00000000 --- a/dashboard/backend/dovetail/utils/logsetting.py +++ /dev/null @@ -1,98 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -import logging -import logging.handlers -import os -import os.path -import sys - -from dovetail.utils import flags -from dovetail.utils import setting_wrapper as setting - - -flags.add('loglevel', - help='logging level', default=setting.DEFAULT_LOGLEVEL) -flags.add('logdir', - help='logging directory', default=setting.DEFAULT_LOGDIR) -flags.add('logfile', - help='logging filename', default=None) -flags.add('log_interval', type='int', - help='log interval', default=setting.DEFAULT_LOGINTERVAL) -flags.add('log_interval_unit', - help='log interval unit', default=setting.DEFAULT_LOGINTERVAL_UNIT) -flags.add('log_format', - help='log format', default=setting.DEFAULT_LOGFORMAT) -flags.add('log_backup_count', type='int', - help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT) - - -# mapping str setting in flag --loglevel to logging level. -LOGLEVEL_MAPPING = { - 'finest': logging.DEBUG - 2, # more detailed log. - 'fine': logging.DEBUG - 1, # detailed log. - 'debug': logging.DEBUG, - 'info': logging.INFO, - 'warning': logging.WARNING, - 'error': logging.ERROR, - 'critical': logging.CRITICAL, -} - - -logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine') -logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest') - - -# disable logging when logsetting.init not called -logging.getLogger().setLevel(logging.CRITICAL) - - -def getLevelByName(level_name): - """Get log level by level name.""" - return LOGLEVEL_MAPPING[level_name] - - -def init(): - """Init loggsetting. It should be called after flags.init.""" - loglevel = flags.OPTIONS.loglevel.lower() - logdir = flags.OPTIONS.logdir - logfile = flags.OPTIONS.logfile - logger = logging.getLogger() - if logger.handlers: - for handler in logger.handlers: - logger.removeHandler(handler) - - if logdir: - if not logfile: - logfile = './%s.log' % os.path.basename(sys.argv[0]) - - handler = logging.handlers.TimedRotatingFileHandler( - os.path.join(logdir, logfile), - when=flags.OPTIONS.log_interval_unit, - interval=flags.OPTIONS.log_interval, - backupCount=flags.OPTIONS.log_backup_count) - else: - if not logfile: - handler = logging.StreamHandler(sys.stderr) - else: - handler = logging.handlers.TimedRotatingFileHandler( - logfile, - when=flags.OPTIONS.log_interval_unit, - interval=flags.OPTIONS.log_interval, - backupCount=flags.OPTIONS.log_backup_count) - - if loglevel in LOGLEVEL_MAPPING: - logger.setLevel(LOGLEVEL_MAPPING[loglevel]) - handler.setLevel(LOGLEVEL_MAPPING[loglevel]) - - formatter = logging.Formatter( - flags.OPTIONS.log_format) - - handler.setFormatter(formatter) - logger.addHandler(handler) diff --git a/dashboard/backend/dovetail/utils/setting_wrapper.py b/dashboard/backend/dovetail/utils/setting_wrapper.py deleted file mode 100755 index bb390ada..00000000 --- a/dashboard/backend/dovetail/utils/setting_wrapper.py +++ /dev/null @@ -1,18 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - -DEFAULT_LOGLEVEL = 'debug' -DEFAULT_LOGDIR = '/var/log/dovetail/' -DEFAULT_LOGINTERVAL = 30 -DEFAULT_LOGINTERVAL_UNIT = 'M' -DEFAULT_LOGFORMAT = ( - '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s') -DEFAULT_LOGBACKUPCOUNT = 10 -WEB_LOGFILE = 'dovetail_web.log' diff --git a/dashboard/backend/dovetail/utils/util.py b/dashboard/backend/dovetail/utils/util.py deleted file mode 100755 index bfd257d7..00000000 --- a/dashboard/backend/dovetail/utils/util.py +++ /dev/null @@ -1,71 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - -import datetime -import re -import sys - - -def format_datetime(date_time): - """Generate string from datetime object.""" - return date_time.strftime("%Y-%m-%d %H:%M:%S") - - -def parse_time_interval(time_interval_str): - """parse string of time interval to time interval. - - supported time interval unit: ['d', 'w', 'h', 'm', 's'] - Examples: - time_interval_str: '3d 2h' time interval to 3 days and 2 hours. - """ - if not time_interval_str: - return 0 - - time_interval_tuple = [ - time_interval_element - for time_interval_element in time_interval_str.split(' ') - if time_interval_element - ] - time_interval_dict = {} - time_interval_unit_mapping = { - 'd': 'days', - 'w': 'weeks', - 'h': 'hours', - 'm': 'minutes', - 's': 'seconds' - } - for time_interval_element in time_interval_tuple: - mat = re.match(r'^([+-]?\d+)(w|d|h|m|s).*', time_interval_element) - if not mat: - continue - - time_interval_value = int(mat.group(1)) - time_interval_unit = time_interval_unit_mapping[mat.group(2)] - time_interval_dict[time_interval_unit] = ( - time_interval_dict.get(time_interval_unit, 0) + time_interval_value - ) - - time_interval = datetime.timedelta(**time_interval_dict) - if sys.version_info[0:2] > (2, 6): - return time_interval.total_seconds() - else: - return ( - time_interval.microseconds + ( - time_interval.seconds + time_interval.days * 24 * 3600 - ) * 1e6 - ) / 1e6 - - -def pretty_print(*contents): - """pretty print contents.""" - if len(contents) == 0: - print "" - else: - print "\n".join(content for content in contents) diff --git a/dashboard/backend/install_db.py b/dashboard/backend/install_db.py deleted file mode 100755 index d37a4099..00000000 --- a/dashboard/backend/install_db.py +++ /dev/null @@ -1,55 +0,0 @@ -############################################################################## -# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -# create db in new env -from dovetail.utils import flags -from dovetail.utils import logsetting -from dovetail.utils import setting_wrapper as setting - -from flask_script import Manager - -from dovetail.db import database -from dovetail.api.api import app - -import os - -app_manager = Manager(app, usage="Perform database operations") - -# flags.init() -curr_path = os.path.dirname(os.path.abspath(__file__)) -logdir = os.path.join(curr_path, 'log') -if not os.path.exists(logdir): - os.makedirs(logdir) - -flags.OPTIONS.logdir = logdir -flags.OPTIONS.logfile = setting.WEB_LOGFILE -logsetting.init() - - -@app_manager.command -def createdb(): - """Creates database from sqlalchemy models.""" - database.init() - try: - database.drop_db() - except Exception: - pass - - database.create_db() - - -@app_manager.command -def dropdb(): - """Drops database from sqlalchemy models.""" - database.init() - database.drop_db() - - -if __name__ == "__main__": - app_manager.run() diff --git a/dashboard/backend/wsgi.py b/dashboard/backend/wsgi.py deleted file mode 100755 index 088299d7..00000000 --- a/dashboard/backend/wsgi.py +++ /dev/null @@ -1,35 +0,0 @@ -##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from dovetail.utils import flags
-from dovetail.utils import logsetting
-from dovetail.utils import setting_wrapper as setting
-
-from dovetail.api.api import app
-
-import os
-import logging
-
-gunicorn_error_logger = logging.getLogger('gunicorn.error')
-app.logger.handlers.extend(gunicorn_error_logger.handlers)
-app.logger.setLevel(logging.DEBUG)
-
-# flags.init()
-# logdir = setting.DEFAULT_LOGDIR
-curr_path = os.path.dirname(os.path.abspath(__file__))
-logdir = os.path.join(curr_path, 'log')
-if not os.path.exists(logdir):
- os.makedirs(logdir)
-
-flags.OPTIONS.logdir = logdir
-flags.OPTIONS.logfile = setting.WEB_LOGFILE
-logsetting.init()
-
-
-if __name__ == "__main__":
- app.run()
diff --git a/docker/Dockerfile b/docker/Dockerfile index 8cc15e0b..eb85ea49 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,6 +2,8 @@ FROM ubuntu:14.04 MAINTAINER Leo Wang <grakiss.wanglei@huawei.com> LABEL version="0.1" description="OPNFV Dovetail Docker Container" +ARG BRANCH=master + RUN \ apt-get update \ && \ @@ -13,27 +15,35 @@ RUN \ python-mock \ python-pip \ apt-transport-https \ + wget \ --no-install-recommends \ && \ - apt-get update && apt-get -y install docker.io + apt-get update + +RUN wget -qO- https://get.docker.com/ \ +| \ + sed 's/-q docker-ce/-q docker-ce=17.03.0~ce-0~ubuntu-trusty/' \ +| \ + sed 's/edge/stable/' \ +| \ + sh ENV HOME /home/opnfv ENV REPOS_DIR ${HOME}/dovetail WORKDIR /home/opnfv RUN \ - git config --global http.sslVerify false \ -&& \ - git clone https://git.opnfv.org/dovetail ${REPOS_DIR} \ + mkdir -p ${REPOS_DIR} \ && \ - mkdir -p ${REPOS_DIR}/results \ -&& \ - pip install -U pip \ -&& \ - pip install -r ${REPOS_DIR}/requirements.txt \ + git config --global http.sslVerify false \ && \ - cd ${REPOS_DIR} \ + pip install git+https://git.opnfv.org/dovetail@$BRANCH#egg=dovetail \ && \ - pip install -e . + ln -s /usr/local/lib/python2.7/dist-packages/dovetail ${REPOS_DIR}/dovetail WORKDIR ${REPOS_DIR}/dovetail + +# get db schema from opnfv sites +RUN mkdir -p ${REPOS_DIR}/dovetail/utils/local_db +ADD get_db_schema.py ${REPOS_DIR}/dovetail/utils/local_db +RUN cd ${REPOS_DIR}/dovetail/utils/local_db && python get_db_schema.py diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch index b96b619f..50fdc75c 100644 --- a/docker/Dockerfile.aarch64.patch +++ b/docker/Dockerfile.aarch64.patch @@ -1,14 +1,14 @@ From: Alexandru Nemes <alexandru.nemes@enea.com> -Date: Mon, 24 Apr 2017 11:53:42 +0300 +Date: Mon, 8 May 2017 19:04:37 +0300 Subject: [PATCH] Add AArch64 support for Dovetail docker file Signed-off-by: Alexandru Nemes <alexandru.nemes@enea.com> --- - docker/Dockerfile | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) + docker/Dockerfile | 18 ++++++------------ + 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile -index 8cc15e0..bbab012 100644 +index 499624f..bbab012 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,6 @@ @@ -21,7 +21,18 @@ index 8cc15e0..bbab012 100644 RUN \ apt-get update \ -@@ -28,6 +28,8 @@ RUN \ +@@ -16,9 +16,7 @@ RUN \ + wget \ + --no-install-recommends \ + && \ +- apt-get update +- +-RUN wget -qO- https://get.docker.com/ | sh ++ apt-get update && apt-get -y install docker.io + + ENV HOME /home/opnfv + ENV REPOS_DIR ${HOME}/dovetail +@@ -31,6 +29,8 @@ RUN \ && \ mkdir -p ${REPOS_DIR}/results \ && \ @@ -29,4 +40,4 @@ index 8cc15e0..bbab012 100644 +&& \ pip install -U pip \ && \ - pip install -r ${REPOS_DIR}/requirements.txt \ + pip install -r ${REPOS_DIR}/requirements.txt \
\ No newline at end of file
diff --git a/docker/get_db_schema.py b/docker/get_db_schema.py
new file mode 100644
index 00000000..9a9d10d4
--- /dev/null
+++ b/docker/get_db_schema.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import requests
+import json
+
+
+source_url = 'http://testresults.opnfv.org/test/api/v1'
+
+
+def get(url):
+    try:
+        ret = requests.get(url)
+        return ret.json()
+    except:
+        return None
+
+
+def pod():
+    source = '{}/pods'.format(source_url)
+    try:
+        pods = get(source)['pods']
+        with open("pods.json", "w") as f:
+            f.write(json.dumps(pods, indent=4))
+    except:
+        return
+
+
+def project():
+    source = '{}/projects'.format(source_url)
+
+    try:
+        projects = get(source)['projects']
+        with open("projects.json", "w") as f:
+            f.write(json.dumps(projects, indent=4))
+    except:
+        return
+
+    for p in projects:
+        source = '{}/projects/{}/cases'.format(source_url, p['name'])
+        print(p['name'])
+        print(source)
+        try:
+            cases = get(source)
+            with open("cases.json", "a+") as f:
+                f.write(json.dumps(cases))
+                f.write('\n')
+                f.close()
+        except:
+            print("useless data")
+
+
+if __name__ == '__main__':
+    pod()
+    project()
diff --git a/docs/testing/developer/testcaserequirements/index.rst b/docs/testing/developer/testcaserequirements/index.rst
index 5dc87713..38eb93a1 100644
--- a/docs/testing/developer/testcaserequirements/index.rst
+++ b/docs/testing/developer/testcaserequirements/index.rst
@@ -9,17 +9,6 @@ Compliance and Verification program test case requirements
 .. toctree::
    :maxdepth: 2
 
-Version history
-===============
-
-+------------+----------+------------------+----------------------------------+
-| **Date**   | **Ver.** | **Author**       | **Comment**                      |
-|            |          |                  |                                  |
-+------------+----------+------------------+----------------------------------+
-| 2017-03-15 | 0.0.1    | Chris Price      | Draft version                    |
-|            |          |                  |                                  |
-+------------+----------+------------------+----------------------------------+
-
 CVP Test Suite Purpose and Goals
 ================================
@@ -69,12 +58,13 @@ The following requirements are mandatory for test to be submitted for considerat
   - Use case specification
   - Test preconditions
   - Basic test flow execution descriptor
-  - Post conditions and pass fail criteria
+  - Pass fail criteria
 
 - The following things may be documented for the test case:
 
   - Parameter border test cases descriptions
   - Fault/Error test case descriptions
+  - Post conditions where the system state may be left changed after completion
 
 New test case proposals should complete a CVP test case worksheet to ensure that
 all of these considerations are met before the test case is approved for
 inclusion in the
diff --git a/docs/testing/developer/testscope/index.rst b/docs/testing/developer/testscope/index.rst
new file mode 100644
index 00000000..ab1577da
--- /dev/null
+++ b/docs/testing/developer/testscope/index.rst
@@ -0,0 +1,552 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV
+
+=======================================================
+Compliance and Verification program accepted test cases
+=======================================================
+
+ .. toctree::
+    :maxdepth: 2
+
+
+Mandatory CVP Test Areas
+========================
+
+----------------------------------
+Test Area VIM Operations - Compute
+----------------------------------
+
+Image operations within the Compute API
+---------------------------------------
+
+| tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image
+| tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name
+
+
+Basic support Compute API for server actions such as reboot, rebuild, resize
+----------------------------------------------------------------------------
+
+| tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action
+| tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions
+
+
+Generate, import, and delete SSH keys within Compute services
+-------------------------------------------------------------
+
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair
+
+
+List supported versions of the Compute API
+------------------------------------------
+
+| tempest.api.compute.test_versions.TestVersions.test_list_api_versions
+
+
+Quotas management in Compute API
+--------------------------------
+
+| tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas
+| tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas
+
+
+Basic server operations in the Compute API
+------------------------------------------
+
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password
+| tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name
+| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
+| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
+| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status
+| tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing
+| tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server
+| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item
+| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata
+|
tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata +| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item +| tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank +| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server +| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server +| tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server +| tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address +| tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256 +| tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name +| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus +| tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details +| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus +| tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details + + +Retrieve volume information through the Compute API +--------------------------------------------------- + +| tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume +| tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments + + + +----------------------------------- +Test Area VIM Operations - Identity +----------------------------------- + +API discovery operations within the Identity v3 API +--------------------------------------------------- + +| tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types +| tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources +| tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses + + +Auth operations within the Identity API +--------------------------------------- + +| tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token + + +-------------------------------- +Test Area VIM Operations - Image +-------------------------------- + +Image deletion tests using the Glance v2 API +-------------------------------------------- + +| tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image +| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id +| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image +| 
tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag + + +Image get tests using the Glance v2 API +--------------------------------------- + +| tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema +| tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema +| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image +| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id +| tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image + + +CRUD image operations in Images API v2 +-------------------------------------- + +| tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params + + +Image list tests using the Glance v2 API +---------------------------------------- + +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status +| tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility + + +Image update tests using the Glance v2 API +------------------------------------------ + +| tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image +| tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image +| tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image + + +---------------------------------- +Test Area VIM Operations - Network +---------------------------------- + +Basic CRUD operations on L2 networks and L2 network ports +--------------------------------------------------------- + +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers +| tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway +| tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet +| tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet +| tempest.api.network.test_networks.NetworksTest.test_list_networks +| tempest.api.network.test_networks.NetworksTest.test_list_networks_fields +| tempest.api.network.test_networks.NetworksTest.test_list_subnets +| tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields +| tempest.api.network.test_networks.NetworksTest.test_show_network +| tempest.api.network.test_networks.NetworksTest.test_show_network_fields +| tempest.api.network.test_networks.NetworksTest.test_show_subnet +| tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields +| 
tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp +| tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port +| tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools +| tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port +| tempest.api.network.test_ports.PortsTestJSON.test_list_ports +| tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields +| tempest.api.network.test_ports.PortsTestJSON.test_show_port +| tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields +| tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes +| tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes + + +Basic CRUD operations on security groups +---------------------------------------- + +| tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group +| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args +| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code +| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value +| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id +| tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix +| tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule +| tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group +| tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule + + +--------------------------------- +Test Area VIM Operations - Volume +--------------------------------- + +Volume attach and detach operations with the Cinder v2 API +---------------------------------------------------------- + +| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance +| 
tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_get_volume_attachment +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_attach_volumes_with_nonexistent_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_detach_volumes_with_invalid_volume_id + + +Volume service availability zone operations with the Cinder v2 API +------------------------------------------------------------------ + +| tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list + + +Volume cloning operations with the Cinder v2 API +------------------------------------------------ + +| tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone + + +Image copy-to-volume operations with the Cinder v2 API +------------------------------------------------------ + +| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable +| tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image + + +Volume creation and deletion operations with the Cinder v2 API +-------------------------------------------------------------- + +| tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero + + +Volume service extension listing operations with the Cinder v2 API +------------------------------------------------------------------ + +| tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions + + +Volume GET operations with the Cinder v2 API +-------------------------------------------- + +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id + + +Volume listing operations with the Cinder v2 API +------------------------------------------------ + +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata +| 
tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone +| tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name +| tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination +| tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params +| tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination + + +Volume metadata operations with the Cinder v2 API +------------------------------------------------- + +| tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata +| tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item + + +Verification of read-only status on volumes with the Cinder v2 API +------------------------------------------------------------------ + +| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update + + +Volume reservation operations with the Cinder v2 API +---------------------------------------------------- + +| tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id + + +Volume snapshot creation/deletion operations with the Cinder v2 API +------------------------------------------------------------------- + +| tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata +| tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id +| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete +| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot +| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params +| tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params +| 
tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id +| tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id + + +Volume update operations with the Cinder v2 API +----------------------------------------------- + +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id +| tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id + + +--------------------------- +Test Area High Availability +--------------------------- + +Verify high availability of OpenStack controller services +---------------------------------------------------------- + +| opnfv.ha.tc001.nova-api_service_down +| opnfv.ha.tc003.neutron-server_service_down +| opnfv.ha.tc004.keystone_service_down +| opnfv.ha.tc005.glance-api_service_down +| opnfv.ha.tc006.cinder-api_service_down +| opnfv.ha.tc009.cpu_overload +| opnfv.ha.tc010.disk_I/O_block +| opnfv.ha.tc011.load_balance_service_down + + +Optional CVP Test Areas +======================== + + +----------------- +Test Area BGP VPN +----------------- + +Verify association and disassociation of node using route targets +------------------------------------------------------------------ + +| opnfv.sdnvpn.subnet_connectivity +| opnfv.sdnvpn.tenant_separation +| opnfv.sdnvpn.router_association +| opnfv.sdnvpn.router_association_floating_ip + +-------------------------------------------------- +IPv6 Compliance Testing Methodology and Test Cases +-------------------------------------------------- + +Test Case 1: Create and Delete an IPv6 Network, Port and Subnet +--------------------------------------------------------------- + +| tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network +| tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port +| tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet + +Test Case 2: Create, Update and Delete an IPv6 Network and Subnet +----------------------------------------------------------------- + +| tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet + +Test Case 3: Check External Network Visibility +---------------------------------------------- + +| tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility + +Test Case 4: List IPv6 Networks and Subnets of a Tenant +------------------------------------------------------- + +| tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks +| tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets + +Test Case 5: Show Information of an IPv6 Network and Subnet +----------------------------------------------------------- + +| tempest.api.network.test_networks.NetworksIpV6Test.test_show_network +| tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet + +Test Case 6: Create an IPv6 Port in Allowed Allocation Pools +------------------------------------------------------------ + +| tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools + +Test Case 7: Create an IPv6 Port without Security Groups +-------------------------------------------------------- + +|
tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups + +Test Case 8: Create, Update and Delete an IPv6 Port +--------------------------------------------------- + +| tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port + +Test Case 9: List IPv6 Ports of a Tenant +---------------------------------------- + +| tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports + +Test Case 10: Show Information of an IPv6 Port +---------------------------------------------- + +| tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port + +Test Case 11: Add Multiple Interfaces for an IPv6 Router +-------------------------------------------------------- + +| tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces + +Test Case 12: Add and Remove an IPv6 Router Interface with port_id +------------------------------------------------------------------ + +| tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id + +Test Case 13: Add and Remove an IPv6 Router Interface with subnet_id +-------------------------------------------------------------------- + +| tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id + +Test Case 14: Create, Update, Delete, List and Show an IPv6 Router +------------------------------------------------------------------ + +| tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router + +Test Case 15: Create, Update, Delete, List and Show an IPv6 Security Group +-------------------------------------------------------------------------- + +| tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group + +Test Case 16: Create, Delete and Show Security Group Rules +---------------------------------------------------------- + +| tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule + +Test Case 17: List All Security Groups +-------------------------------------- + +| tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups + +Test Case 18: IPv6 Address Assignment - Dual Stack, SLAAC, DHCPv6 Stateless +--------------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os + +Test Case 19: IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC, DHCPv6 Stateless +------------------------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os + +Test Case 20: IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless +---------------------------------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless + +Test Case 21: IPv6 Address Assignment - Dual Net, Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless +-------------------------------------------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless + +Test Case 22: IPv6 Address Assignment - Dual Stack, SLAAC +--------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os + +Test Case 23: IPv6 Address Assignment - Dual 
Net, Dual Stack, SLAAC +------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os + +Test Case 24: IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC +---------------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac + +Test Case 25: IPv6 Address Assignment - Dual Net, Dual Stack, Multiple Prefixes, SLAAC +-------------------------------------------------------------------------------------- + +| tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac + diff --git a/docs/testing/user/testspecification/highavailability/index.rst b/docs/testing/user/testspecification/highavailability/index.rst index e69de29b..715f84d0 100644 --- a/docs/testing/user/testspecification/highavailability/index.rst +++ b/docs/testing/user/testspecification/highavailability/index.rst @@ -0,0 +1,743 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, China Mobile and others. + +========================================== +OpenStack Services HA test specification +========================================== + +.. toctree:: + :maxdepth: 2 + +Scope +===== + +The HA test area evaluates the ability of the System Under Test to support service +continuity and recovery from component failures on part of OpenStack controller services ("nova-api", +"neutron-server", "keystone", "glance-api", "cinder-api") and on "load balancer" service. + +The tests in this test area will emulate component failures by killing the +processes of the above target services, stressing the CPU load or blocking +disk I/O on the selected controller node, and then check if the impacted +services are still available and the killed processes are recovered on the +selected controller node within a given time interval. + + +References +================ + +This test area references the following specifications: + +- ETSI GS NFV-REL 001 + + - http://www.etsi.org/deliver/etsi_gs/NFV-REL/001_099/001/01.01.01_60/gs_nfv-rel001v010101p.pdf + +- OpenStack High Availability Guide + + - https://docs.openstack.org/ha-guide/ + + +Definitions and abbreviations +============================= + +The following terms and abbreviations are used in conjunction with this test area: + +- SUT - system under test +- Monitor - tools used to measure the service outage time and the process + outage time +- Service outage time - the outage time (seconds) of the specific OpenStack + service +- Process outage time - the outage time (seconds) from the specific processes + being killed to recovered + + +System Under Test (SUT) +======================= + +The system under test is assumed to be the NFVI and VIM in operation on a +Pharos compliant infrastructure. + +The SUT is assumed to be in a high availability configuration, which typically means +more than one controller node is in the System Under Test. + +Test Area Structure +==================== + +The HA test area is structured with the following test cases in a sequential +manner. + +Each test case is able to run independently. A preceding test case's failure will +not affect the subsequent test cases. + +Preconditions of each test case will be described in the following test +descriptions.
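For illustration, the following minimal Python sketch (not part of the Dovetail code base) shows one way a "monitor", as defined above, could measure service outage time: it invokes an OpenStack CLI command in a loop and reports the interval between the first and the last failed response. The command string, polling interval and overall duration are assumptions chosen for the example.

```python
# Illustrative sketch only: a service outage monitor as defined above.
# The command, polling interval and duration are example assumptions.
import os
import subprocess
import time


def service_outage_time(cmd="openstack server list", interval=0.1, duration=60):
    """Return the time between the first and the last failed CLI response."""
    devnull = open(os.devnull, "w")
    first_failure = last_failure = None
    end = time.time() + duration
    while time.time() < end:
        rc = subprocess.call(cmd.split(), stdout=devnull, stderr=devnull)
        if rc != 0:                      # request was not answered correctly
            now = time.time()
            if first_failure is None:
                first_failure = now      # outage starts
            last_failure = now           # outage still observed
        time.sleep(interval)
    devnull.close()
    return 0.0 if first_failure is None else last_failure - first_failure
```

In the test cases below such a monitor would run as an independent process, alongside a second monitor that watches the target service's processes.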
+ + +Test Descriptions +================= + +--------------------------------------------------------------- +Test Case 1 - Controller node OpenStack service down - nova-api +--------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc001.nova-api_service_down + +Use case specification +---------------------- + +This test case verifies the service continuity capability in the face of a +software process failure. It kills the processes of OpenStack "nova-api" +service on the selected controller node, then checks whether the "nova-api" +service is still available during the failure, by creating a VM then deleting +the VM, and checks whether the killed processes are recovered within a given +time interval. + + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "nova-api" +service for API end-point. +Denoted a controller node as Node1 in the following configuration. + + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for verifying service continuity and recovery +''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The service continuity and process recovery capabilities of "nova-api" service +are evaluated by monitoring service outage time, process outage time, and results +of nova operations. + +Service outage time is measured by continuously executing "openstack server list" +command in loop and checking if the response of the command request is returned +with no failure. +When the response fails, the "nova-api" service is considered in outage. +The time between the first response failure and the last response failure is +considered as service outage time. + +Process outage time is measured by checking the status of "nova-api" processes on +the selected controller node. The time of "nova-api" processes being killed to +the time of the "nova-api" processes being recovered is the process outage time. +Process recovery is verified by checking the existence of "nova-api" processes. + +All nova operations are carried out correctly within a given time interval which +suggests that the "nova-api" service is continuously available. + +Test execution +'''''''''''''' +* Test action 1: Connect to Node1 through SSH, and check that "nova-api" + processes are running on Node1 +* Test action 2: Create an image with "openstack image create test-cirros + --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare" +* Test action 3: Execute "openstack flavor create m1.test --id auto --ram 512 + --disk 1 --vcpus 1" to create flavor "m1.test". +* Test action 4: Start two monitors: one for "nova-api" processes and the other + for "openstack server list" command.
+ Each monitor will run as an independent process +* Test action 5: Connect to Node1 through SSH, and then kill the "nova-api" + processes +* Test action 6: When "openstack server list" returns with no error, calculate + the service outage time, and execute command "openstack server create + --flavor m1.test --image test-cirros test-instance" +* Test action 7: Continuously Execute "openstack server show test-instance" + to check if the status of VM "test-instance" is "Active" +* Test action 8: If VM "test-instance" is "Active", execute "openstack server + delete test-instance", then execute "openstack server list" to check if the + VM is not in the list +* Test action 9: Continuously measure process outage time from the monitor until + the process outage time is more than 30s + +Pass / fail criteria +'''''''''''''''''''' + +The process outage time is less than 30s. + +The service outage time is less than 5s. + +The nova operations are carried out in above order and no errors occur. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +Restart the process of "nova-api" if they are not running. +Delete image with "openstack image delete test-cirros" +Delete flavor with "openstack flavor delete m1.test" + + +--------------------------------------------------------------------- +Test Case 2 - Controller node OpenStack service down - neutron-server +--------------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc002.neutron-server_service_down + +Use case specification +---------------------- + +This test verifies the high availability of the "neutron-server" service +provided by OpenStack controller nodes. It kills the processes of OpenStack +"neutron-server" service on the selected controller node, then checks whether +the "neutron-server" service is still available, by creating a network and +deleting the network, and checks whether the killed processes are recovered. + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "neutron-server" +service for API end-point. +Denoted a controller node as Node1 in the following configuration. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of "neutron-server" service is evaluated by monitoring +service outage time, process outage time, and results of neutron operations. + +Service outage time is tested by continuously executing "openstack router list" +command in loop and checking if the response of the command request is returned +with no failure. +When the response fails, the "neutron-server" service is considered in outage. +The time between the first response failure and the last response failure is +considered as service outage time. + +Process outage time is tested by checking the status of "neutron-server" +processes on the selected controller node. The time of "neutron-server" +processes being killed to the time of the "neutron-server" processes being +recovered is the process outage time. Process recovery is verified by checking +the existence of "neutron-server" processes. 
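For illustration, here is a minimal sketch (not Dovetail code) of the fault-injection step these test cases share: connecting to the selected controller node over SSH and killing the processes of the target service. The node address, SSH user and the use of pkill are assumptions made for the example.

```python
# Illustrative sketch only: kill the target service's processes on Node1 via SSH.
# Node address, SSH user and the use of pkill are example assumptions.
import subprocess


def kill_service_processes(node_ip, service="neutron-server", ssh_user="root"):
    """Kill every process on the node whose command line matches `service`."""
    # In practice the pattern may need narrowing so that it only matches the
    # intended service processes (e.g. API workers) and nothing else.
    remote_cmd = "pkill -f {}".format(service)
    return subprocess.call(["ssh", "{}@{}".format(ssh_user, node_ip), remote_cmd])
```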
+ +Test execution +'''''''''''''' + +* Test action 1: Connect to Node1 through SSH, and check that "neutron-server" + processes are running on Node1 +* Test action 2: Start two monitors: one for "neutron-server" process and the + other for "openstack router list" command. + Each monitor will run as an independent process. +* Test action 3: Connect to Node1 through SSH, and then kill the + "neutron-server" processes +* Test action 4: When "openstack router list" returns with no error, calculate + the service outage time, and execute "openstack network create test-network" +* Test action 5: Continuously execute "openstack network show test-network" to + check if the status of "test-network" is "Active" +* Test action 6: If "test-network" is "Active", execute "openstack network + delete test-network", then execute "openstack network list" to check if the + "test-network" is not in the list +* Test action 7: Continuously measure process outage time from the monitor until + the process outage time is more than 30s + +Pass / fail criteria +'''''''''''''''''''' + +The process outage time is less than 30s. + +The service outage time is less than 5s. + +The neutron operations are carried out in above order and no errors occur. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +Restart the processes of "neutron-server" if they are not running. + + +--------------------------------------------------------------- +Test Case 3 - Controller node OpenStack service down - keystone +--------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc003.keystone_service_down + +Use case specification +---------------------- + +This test verifies the high availability of the "keystone" service provided by +OpenStack controller nodes. It kills the processes of OpenStack "keystone" +service on the selected controller node, then checks whether the "keystone" +service is still available by executing command "openstack user list" and +whether the killed processes are recovered. + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "keystone" +service for API end-point. +Denoted a controller node as Node1 in the following configuration. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of "keystone" service is evaluated by monitoring service +outage time and process outage time. + +Service outage time is tested by continuously executing "openstack user list" +command in loop and checking if the response of the command request is returned +with no failure. +When the response fails, the "keystone" service is considered in outage. +The time between the first response failure and the last response failure is +considered as service outage time. + +Process outage time is tested by checking the status of "keystone" processes on +the selected controller node. The time of "keystone" processes being killed to +the time of the "keystone" processes being recovered is the process outage +time. Process recovery is verified by checking the existence of "keystone" +processes.
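For illustration, a minimal sketch (not Dovetail code) of how the process outage time defined above could be measured: after the kill step, the controller node is polled over SSH until the service processes exist again. The node address, SSH user, polling interval and use of pgrep are assumptions for the example.

```python
# Illustrative sketch only: measure process outage time (kill -> recovery).
# Node address, SSH user, polling interval and pgrep usage are example assumptions.
import subprocess
import time


def processes_exist(node_ip, service, ssh_user="root"):
    """Return True if at least one process of `service` runs on the node."""
    # "[k]eystone"-style pattern so pgrep does not match the checking command itself
    pattern = "[{}]{}".format(service[0], service[1:])
    rc = subprocess.call(["ssh", "{}@{}".format(ssh_user, node_ip),
                          "pgrep -f '{}'".format(pattern)])
    return rc == 0


def process_outage_time(node_ip, service="keystone", timeout=30, interval=0.5):
    """Call right after the kill step; returns seconds until recovery (or timeout)."""
    killed_at = time.time()
    while time.time() - killed_at < timeout:
        if processes_exist(node_ip, service):
            return time.time() - killed_at
        time.sleep(interval)
    return float(timeout)   # not recovered within the pass/fail window
```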
+ +Test execution +'''''''''''''' + +* Test action 1: Connect to Node1 through SSH, and check that "keystone" + processes are running on Node1 +* Test action 2: Start two monitors: one for "keystone" process and the other + for "openstack user list" command. + Each monitor will run as an independent process. +* Test action 3: Connect to Node1 through SSH, and then kill the "keystone" + processes +* Test action 4: Calculate the service outage time and process outage time +* Test action 5: The test passes if process outage time is less than 30s and + service outage time is less than 5s +* Test action 6: Continuously measure process outage time from the monitor until + the process outage time is more than 30s + +Pass / fail criteria +'''''''''''''''''''' + +The process outage time is less than 30s. + +The service outage time is less than 5s. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +Restart the processes of "keystone" if they are not running. + + +----------------------------------------------------------------- +Test Case 4 - Controller node OpenStack service down - glance-api +----------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc004.glance-api_service_down + +Use case specification +---------------------- + +This test verifies the high availability of the "glance-api" service provided +by OpenStack controller nodes. It kills the processes of OpenStack "glance-api" +service on the selected controller node, then checks whether the "glance-api" +service is still available, by creating an image and deleting the image, and checks +whether the killed processes are recovered. + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "glance-api" +service for API end-point. +Denoted a controller node as Node1 in the following configuration. + + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of "glance-api" service is evaluated by monitoring +service outage time, process outage time, and results of glance operations. + +Service outage time is tested by continuously executing "openstack image list" +command in loop and checking if the response of the command request is returned +with no failure. +When the response fails, the "glance-api" service is considered in outage. +The time between the first response failure and the last response failure is +considered as service outage time. + +Process outage time is tested by checking the status of "glance-api" processes +on the selected controller node. The time of "glance-api" processes being +killed to the time of the "glance-api" processes being recovered is the process +outage time. Process recovery is verified by checking the existence of +"glance-api" processes. + +Test execution +'''''''''''''' + +* Test action 1: Connect to Node1 through SSH, and check that "glance-api" + processes are running on Node1 +* Test action 2: Start two monitors: one for "glance-api" process and the other + for "openstack image list" command. + Each monitor will run as an independent process.
+* Test action 3: Connect to Node1 through SSH, and then kill the "glance-api" + processes +* Test action 4: When "openstack image list" returns with no error, calculate + the service outage time, and execute "openstack image create test-image + --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare" +* Test action 5: Continuously execute "openstack image show test-image", check + if status of "test-image" is "active" +* Test action 6: If "test-image" is "active", execute "openstack image delete + test-image". Then execute "openstack image list" to check if "test-image" is + not in the list +* Test action 7: Continuously measure process outage time from the monitor until + the process outage time is more than 30s + +Pass / fail criteria +'''''''''''''''''''' + +The process outage time is less than 30s. + +The service outage time is less than 5s. + +The glance operations are carried out in above order and no errors occur. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +Restart the processes of "glance-api" if they are not running. + +Delete image with "openstack image delete test-image". + + +----------------------------------------------------------------- +Test Case 5 - Controller node OpenStack service down - cinder-api +----------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc005.cinder-api_service_down + +Use case specification +---------------------- + +This test verifies the high availability of the "cinder-api" service provided +by OpenStack controller nodes. It kills the processes of OpenStack "cinder-api" +service on the selected controller node, then checks whether the "cinder-api" +service is still available by executing command "openstack volume list" and +whether the killed processes are recovered. + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "cinder-api" +service for API end-point. +Denoted a controller node as Node1 in the following configuration. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of "cinder-api" service is evaluated by monitoring +service outage time and process outage time + +Service outage time is tested by continuously executing "openstack volume list" +command in loop and checking if the response of the command request is returned +with no failure. +When the response fails, the "cinder-api" service is considered in outage. +The time between the first response failure and the last response failure is +considered as service outage time. + +Process outage time is tested by checking the status of "cinder-api" processes +on the selected controller node. The time of "cinder-api" processes being +killed to the time of the "cinder-api" processes being recovered is the process +outage time. Process recovery is verified by checking the existence of +"cinder-api" processes. + +Test execution +'''''''''''''' + +* Test action 1: Connect to Node1 through SSH, and check that "cinder-api" + processes are running on Node1 +* Test action 2: Start two monitors: one for "cinder-api" process and the other + for "openstack volume list" command. + Each monitor will run as an independent process. 
+* Test action 3: Connect to Node1 through SSH, and then kill the + "cinder-api" processes +* Test action 4: Continuously measure service outage time from the monitor until + the service outage time is more than 5s +* Test action 5: Continuously measure process outage time from the monitor until + the process outage time is more than 30s + +Pass / fail criteria +'''''''''''''''''''' + +The process outage time is less than 30s. + +The service outage time is less than 5s. + +The cinder operations are carried out in above order and no errors occur. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +Restart the processes of "cinder-api" if they are not running. + + +------------------------------------------------------------ +Test Case 6 - Controller Node CPU Overload High Availability +------------------------------------------------------------ + +Short name +---------- + +opnfv.ha.tc006.cpu_overload + +Use case specification +---------------------- + +This test verifies the availability of services when one of the controller nodes +suffers from heavy CPU overload. When the CPU usage of the specified controller +node is up to 100%, which breaks down the OpenStack services on this node, +the OpenStack services should continue to be available. This test case stresses +the CPU usage of a specific controller node to 100%, then checks whether all +services provided by the SUT are still available with the monitor tools. + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "cinder-api", +"neutron-server", "glance-api" and "keystone" services for API end-point. +Denoted a controller node as Node1 in the following configuration. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of the related OpenStack services is evaluated by monitoring service +outage time. + +Service outage time is tested by continuously executing "openstack router list", +"openstack stack list", "openstack volume list", "openstack image list" commands +in loop and checking if the response of the command request is returned with no +failure. +When the response fails, the related service is considered in outage. The time +between the first response failure and the last response failure is considered +as service outage time. + + +Methodology for stressing CPU usage +''''''''''''''''''''''''''''''''''' + +To evaluate the high availability of the target OpenStack service under heavy CPU +load, the test case will first get the number of logical CPU cores on the +target controller node by shell command, then use the number to execute the 'dd' +command to continuously copy from /dev/zero and output to /dev/null in loop. +The 'dd' operation only uses CPU, no I/O operation, which is ideal for +stressing the CPU usage. + +Since the 'dd' command is continuously executed and the CPU usage rate is +stressed to 100%, the scheduler will schedule each 'dd' command to be +processed on a different logical CPU core. Eventually, the usage rate of all logical +CPU cores reaches 100%. + +Test execution +'''''''''''''' + +* Test action 1: Start four monitors: one for "openstack image list" command, + one for "openstack router list" command, one for "openstack stack list" + command and the last one for "openstack volume list" command.
Each monitor + will run as an independent process. +* Test action 2: Connect to Node1 through SSH, and then stress the usage rate of + all logical CPU cores to 100% +* Test action 3: Continuously measure all the service outage times until they are + more than 5s +* Test action 4: Kill the process that stresses the CPU usage + +Pass / fail criteria +'''''''''''''''''''' + +All the service outage times are less than 5s. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +No impact on the SUT. + + +----------------------------------------------------------------- +Test Case 7 - Controller Node Disk I/O Overload High Availability +----------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc007.disk_I/O_overload + +Use case specification +---------------------- + +This test verifies the high availability of the controller node. When the disk I/O of +the specific disk is overloaded, which breaks down the OpenStack services on this +node, the read and write services should continue to be available. This test +case blocks the disk I/O of the specific controller node, then checks whether +the services that need to read or write the disk of the controller node are +available with some monitor tools. + +Test preconditions +------------------ + +There is more than one controller node. +Denoted a controller node as Node1 in the following configuration. +The controller node has at least 20GB free disk space. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of the nova service is evaluated by monitoring +service outage time. + +Service availability is tested by continuously executing +"openstack flavor list" command in loop and checking if the response of the +command request is returned with no failure. +When the response fails, the related service is considered in outage. + + +Methodology for stressing disk I/O +'''''''''''''''''''''''''''''''''' + +To evaluate the high availability of the target OpenStack service under heavy I/O +load, the test case will execute a shell command on the selected controller node +to continuously write 8KB blocks to /test.dbf. + +Test execution +'''''''''''''' + +* Test action 1: Connect to Node1 through SSH, and then stress disk I/O by + continuously writing 8KB blocks to /test.dbf +* Test action 2: Start a monitor for "openstack flavor list" command +* Test action 3: Create a flavor called "test-001" +* Test action 4: Check whether the flavor "test-001" is created +* Test action 5: Continuously measure service outage time from the monitor + until the service outage time is more than 5s +* Test action 6: Stop writing to /test.dbf and delete the file /test.dbf + +Pass / fail criteria +'''''''''''''''''''' + +The service outage time is less than 5s. + +The nova operations are carried out in above order and no errors occur. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- + +Delete flavor with "openstack flavor delete test-001".
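For illustration, a minimal sketch (not Dovetail code) of the load generators described in Test Cases 6 and 7 above: one pure-CPU 'dd' pipeline per logical core, and a synchronous writer of 8KB blocks to /test.dbf. Running the commands locally on the controller node, and the oflag=dsync flag used to force the writes to actually hit the disk, are assumptions made for the example.

```python
# Illustrative sketch only: CPU and disk I/O stressors from Test Cases 6 and 7.
# Assumes it runs on the controller node itself; dd flags are example choices.
import multiprocessing
import subprocess


def stress_cpu():
    """Start one 'dd if=/dev/zero of=/dev/null' per logical CPU core."""
    return [subprocess.Popen(["dd", "if=/dev/zero", "of=/dev/null"])
            for _ in range(multiprocessing.cpu_count())]


def stress_disk(block_count=1000000):
    """Continuously write 8KB blocks to /test.dbf (oflag=dsync forces real I/O)."""
    return subprocess.Popen(["dd", "if=/dev/zero", "of=/test.dbf",
                             "bs=8k", "count={}".format(block_count),
                             "oflag=dsync"])


def stop(procs):
    """Cleanup step: terminate the stress processes."""
    for p in procs:
        p.terminate()
```

The cleanup mirrors the post conditions above: the dd processes are terminated and /test.dbf is removed.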
+ +-------------------------------------------------------------------- +Test Case 8 - Controller Load Balance as a Service High Availability +-------------------------------------------------------------------- + +Short name +---------- + +opnfv.ha.tc008.load_balance_service_down + +Use case specification +---------------------- + +This test verifies the high availability of "load balancer" service. When +the "load balancer" service of a specified controller node is killed, whether +"load balancer" service on other controller nodes will work, and whether the +controller node will restart the "load balancer" service are checked. This +test case kills the processes of "load balancer" service on the selected +controller node, then checks whether the request of the related OpenStack +command is processed with no failure and whether the killed processes are +recovered. + +Test preconditions +------------------ + +There is more than one controller node, which is providing the "load balancer" +service for rest-api. Denoted as Node1 in the following configuration. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for monitoring high availability +'''''''''''''''''''''''''''''''''''''''''''' + +The high availability of "load balancer" service is evaluated by monitoring +service outage time and process outage time + +Service outage time is tested by continuously executing "openstack image list" +command in loop and checking if the response of the command request is returned +with no failure. +When the response fails, the "load balancer" service is considered in outage. +The time between the first response failure and the last response failure is +considered as service outage time. + +Process outage time is tested by checking the status of processes of "load +balancer" service on the selected controller node. The time of those processes +being killed to the time of those processes being recovered is the process +outage time. +Process recovery is verified by checking the existence of processes of "load +balancer" service. + +Test execution +'''''''''''''' + +* Test action 1: Connect to Node1 through SSH, and check that processes of + "load balancer" service are running on Node1 +* Test action 2: Start two monitors: one for processes of "load balancer" + service and the other for "openstack image list" command. Each monitor will + run as an independent process +* Test action 3: Connect to Node1 through SSH, and then kill the processes of + "load balancer" service +* Test action 4: Continuously measure service outage time from the monitor until + the service outage time is more than 5s +* Test action 5: Continuously measure process outage time from the monitor until + the process outage time is more than 30s + +Pass / fail criteria +'''''''''''''''''''' + +The process outage time is less than 30s. + +The service outage time is less than 5s. + +A negative result will be generated if the above is not met in completion. + +Post conditions +--------------- +Restart the processes of "load balancer" if they are not running. + + + diff --git a/docs/testing/user/testspecification/ipv6/index.rst b/docs/testing/user/testspecification/ipv6/index.rst new file mode 100644 index 00000000..c3dc844b --- /dev/null +++ b/docs/testing/user/testspecification/ipv6/index.rst @@ -0,0 +1,1787 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. 
(c) OPNFV + +======================== +IPv6 test specification +======================== + +.. toctree:: + :maxdepth: 2 + +Scope +===== + +The IPv6 test area will evaluate the ability for a SUT to support IPv6 +Tenant Network features and functionality. The tests in this test area will +evaluate, + +- network, subnet, port, router API CRUD operations +- interface add and remove operations +- security group and security group rule API CRUD operations +- IPv6 address assignment with dual stack, dual net, multiprefix in mode DHCPv6 stateless or SLAAC + +References +================ + +- upstream openstack API reference + + - http://developer.openstack.org/api-ref + +- upstream openstack IPv6 reference + + - https://docs.openstack.org/newton/networking-guide/config-ipv6.html + +Definitions and abbreviations +============================= + +The following terms and abbreviations are used in conjunction with this test area + +- API - Application Programming Interface +- CIDR - Classless Inter-Domain Routing +- CRUD - Create, Read, Update, and Delete +- DHCP - Dynamic Host Configuration Protocol +- DHCPv6 - Dynamic Host Configuration Protocol version 6 +- ICMP - Internet Control Message Protocol +- NFVI - Network Functions Virtualization Infrastructure +- NIC - Network Interface Controller +- RA - Router Advertisements +- radvd - The Router Advertisement Daemon +- SDN - Software Defined Network +- SLAAC - Stateless Address Auto Configuration +- TCP - Transmission Control Protocol +- UDP - User Datagram Protocol +- VM - Virtual Machine +- vNIC - virtual Network Interface Card + +System Under Test (SUT) +======================= + +The system under test is assumed to be the NFVI and VIM deployed with a Pharos compliant infrastructure. + +Test Area Structure +==================== + +The test area is structured based on network, port and subnet operations. Each test case +is able to run independently, i.e. irrelevant of the state created by a previous test. 
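+
+As an illustration of this structure, the sketch below shows the
+create / verify / clean up pattern that each test case in this area follows,
+here for a network with one IPv6 subnet. It uses the openstacksdk Python
+client; the cloud credentials are assumed to come from clouds.yaml or OS_*
+environment variables, and the resource names and the CIDR are examples only.
+
+.. code-block:: python
+
+    import openstack
+
+    conn = openstack.connect()
+
+    # Create a network and one IPv6 subnet, storing the returned "id" parameters.
+    net = conn.network.create_network(name="dovetail-ipv6-example")
+    subnet = conn.network.create_subnet(network_id=net.id,
+                                        name="dovetail-ipv6-example-subnet",
+                                        ip_version=6,
+                                        cidr="2001:db8:0:1::/64")
+
+    # Verify the stored ids are reported by the corresponding list commands.
+    assert net.id in [n.id for n in conn.network.networks()]
+    assert subnet.id in [s.id for s in conn.network.subnets(network_id=net.id)]
+
+    # Clean up everything that was created, so that the next test case starts
+    # from a clean state.
+    conn.network.delete_subnet(subnet.id)
+    conn.network.delete_network(net.id)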
+ +Test Descriptions +================= + +API Used and Reference +---------------------- + +Networks: https://developer.openstack.org/api-ref/networking/v2/index.html#networks + +- show network details +- update network +- delete network +- list networks +- create netowrk +- bulk create networks + +Subnets: https://developer.openstack.org/api-ref/networking/v2/index.html#subnets + +- list subnets +- create subnet +- bulk create subnet +- show subnet details +- update subnet +- delete subnet + +Routers and interface: https://developer.openstack.org/api-ref/networking/v2/index.html#routers-routers + +- list routers +- create router +- show router details +- update router +- delete router +- add interface to router +- remove interface from router + +Ports: https://developer.openstack.org/api-ref/networking/v2/index.html#ports + +- show port details +- update port +- delete port +- list port +- create port +- bulk create ports + +Security groups: https://developer.openstack.org/api-ref/networking/v2/index.html#security-groups-security-groups + +- list security groups +- create security groups +- show security group +- update security group +- delete security group + +Security groups rules: https://developer.openstack.org/api-ref/networking/v2/index.html#security-group-rules-security-group-rules + +- list security group rules +- create security group rule +- show security group rule +- delete security group rule + +Servers: https://developer.openstack.org/api-ref/compute/ + +- list servers +- create server +- create multiple servers +- list servers detailed +- show server details +- update server +- delete server + +------------------------------------------------------------------ +Test Case 1 - Create and Delete Bulk Network, IPv6 Subnet and Port +------------------------------------------------------------------ + +Short name +---------- + +opnfv.ipv6.bulk_network_subnet_port_create_delete + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating and deleting multiple networks, +IPv6 subnets, ports in one request, the reference is, + +tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network +tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet +tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create 2 networks using bulk create, storing the "id" parameters returned in the response +* Test action 2: List all networks, verifying the two network id's are found in the list +* **Test assertion 1:** The two "id" parameters are found in the network list +* Test action 3: Delete the 2 created networks using the stored network ids +* Test action 4: List all networks, verifying the network ids are no longer present +* **Test assertion 2:** The two "id" parameters are not present in the network list +* Test action 5: Create 2 networks using bulk create, storing the "id" parameters returned in the response +* Test action 6: Create an IPv6 subnets on each of the two networks using bulk create commands, + storing the associated "id" parameters +* Test action 7: List all subnets, verify the IPv6 subnets are found in the list +* **Test assertion 3:** The two IPv6 subnet "id" parameters are found in the network list +* Test 
action 8: Delete the 2 IPv6 subnets using the stored "id" parameters +* Test action 9: List all subnets, verify the IPv6 subnets are no longer present in the list +* **Test assertion 4:** The two IPv6 subnet "id" parameters, are not present in list +* Test action 10: Delete the 2 networks created in test action 5, using the stored network ids +* Test action 11: List all networks, verifying the network ids are no longer present +* **Test assertion 5:** The two "id" parameters are not present in the network list +* Test action 12: Create 2 networks using bulk create, storing the "id" parameters returned in the response +* Test action 13: Create a port on each of the two networks using bulk create commands, + storing the associated "port_id" parameters +* Test action 14: List all ports, verify the port_ids are found in the list +* **Test assertion 6:** The two "port_id" parameters are found in the ports list +* Test action 15: Delete the 2 ports using the stored "port_id" parameters +* Test action 16: List all ports, verify port_ids are no longer present in the list +* **Test assertion 7:** The two "port_id" parameters, are not present in list +* Test action 17: Delete the 2 networks created in test action 12, using the stored network ids +* Test action 18: List all networks, verifying the network ids are no longer present +* **Test assertion 8:** The two "id" parameters are not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use bulk create commands to create networks, IPv6 subnets and ports on +the SUT API. Specifically it verifies that: + +* Bulk network create commands return valid "id" parameters which are reported in the list commands +* Bulk IPv6 subnet commands return valid "id" parameters which are reported in the list commands +* Bulk port commands return valid "port_id" parameters which are reported in the list commands +* All items created using bulk create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +N/A + +------------------------------------------------------------------- +Test Case 2 - Create, Update and Delete an IPv6 Network and Subnet +------------------------------------------------------------------- + +Short name +----------- + +opnfv.ipv6.network_subnet_create_update_delete + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating, updating, deleting +network and IPv6 subnet with the network, the reference is + +tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" and "status" parameters returned + in the response +* Test action 2: Verify the value of the created network's "status" is ACTIVE +* **Test assertion 1:** The created network's "status" is ACTIVE +* Test action 3: Update this network with a new_name +* Test action 4: Verify the network's name equals the new_name +* **Test assertion 2:** The network's name equals to the new_name after name updating +* Test action 5: Create an IPv6 subnet within the network, storing the "id" parameters + returned in the response +* Test action 6: Update this IPv6 subnet with a new_name +* Test action 7: Verify the IPv6 subnet's name equals the new_name +* 
**Test assertion 3:** The IPv6 subnet's name equals to the new_name after name updating +* Test action 8: Delete the IPv6 subnet created in test action 5, using the stored subnet id +* Test action 9: List all subnets, verifying the subnet id is no longer present +* **Test assertion 4:** The IPv6 subnet "id" is not present in the subnet list +* Test action 10: Delete the network created in test action 1, using the stored network id +* Test action 11: List all networks, verifying the network id is no longer present +* **Test assertion 5:** The network "id" is not present in the network list + + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to create, update, delete network, IPv6 subnet on the +SUT API. Specifically it verifies that: + +* Create network commands return ACTIVE "status" parameters which are reported in the list commands +* Update network commands return updated "name" parameters which equals to the "name" used +* Update subnet commands return updated "name" parameters which equals to the "name" used +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------------------------- +Test Case 3 - Check External Network Visibility +------------------------------------------------- + +Short name +----------- + +opnfv.ipv6.external_network_visibility + +Use case specification +---------------------- + +This test case verifies user can see external networks but not subnets, the reference is, + +tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility + +Test preconditions +------------------ + +1. The SUT has at least one external network. +2. In the external network list, there is no network without external router, i.e., +all networks in this list are with external router. +3. There is one external network with configured public network id and there is +no subnet on this network + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: List all networks with external router, storing the "id"s parameters returned in the response +* Test action 2: Verify list in test action 1 is not empty +* **Test assertion 1:** The network with external router list is not empty +* Test action 3: List all netowrks without external router in test action 1 list +* Test action 4: Verify list in test action 3 is empty +* **Test assertion 2:** networks without external router in the external network + list is empty +* Test action 5: Verify the configured public network id is found in test action 1 stored "id"s +* **Test assertion 3:** the public network id is found in the external network "id"s +* Test action 6: List the subnets of the external network with the configured + public network id +* Test action 7: Verify list in test action 6 is empty +* **Test assertion 4:** There is no subnet of the external network with the configured + public network id + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use list commands to list external networks, pre-configured +public network. 
Specifically it verifies that: + +* Network list commands to find visible networks with external router +* Network list commands to find visible network with pre-configured public network id +* Subnet list commands to find no subnet on the pre-configured public network + +Post conditions +--------------- + +None + +--------------------------------------------- +Test Case 4 - List IPv6 Networks and Subnets +--------------------------------------------- + +Short name +----------- + +opnfv.ipv6.network_subnet_list + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of listing netowrks, +subnets after creating a network and an IPv6 subnet, the reference is + +tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks +tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: List all networks, verifying the network id is found in the list +* **Test assertion 1:** The "id" parameter is found in the network list +* Test action 3: Create an IPv6 subnet of the network created in test action 1. + storing the "id" parameter returned in the response +* Test action 4: List all subnets of this network, verifying the IPv6 subnet id + is found in the list +* **Test assertion 2:** The "id" parameter is found in the IPv6 subnet list +* Test action 5: Delete the IPv6 subnet using the stored "id" parameters +* Test action 6: List all subnets, verify subnet_id is no longer present in the list +* **Test assertion 3:** The IPv6 subnet "id" parameter is not present in list +* Test action 7: Delete the network created in test action 1, using the stored network ids +* Test action 8: List all networks, verifying the network id is no longer present +* **Test assertion 4:** The network "id" parameter is not present in the network list + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the ability to use create commands to create network, IPv6 subnet, list +commands to list the created networks, IPv6 subnet on the SUT API. 
Specifically it verifies that: + +* Create commands to create network, IPv6 subnet +* List commands to find that netowrk, IPv6 subnet in the all networks, subnets list after creating +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------------------------------------- +Test Case 5 - Show Details of an IPv6 Network and Subnet +------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.network_subnet_show + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of showing the network, subnet +details, the reference is, + +tempest.api.network.test_networks.NetworksIpV6Test.test_show_network +tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" and "name" parameter returned in the response +* Test action 2: Show the network id and name, verifying the network id and name equal to the + "id" and "name" stored in test action 1 +* **Test assertion 1:** The id and name equal to the "id" and "name" stored in test action 1 +* Test action 3: Create an IPv6 subnet of the network, storing the "id" and CIDR parameter + returned in the response +* Test action 4: Show the details of the created IPv6 subnet, verifying the + id and CIDR in the details are equal to the stored id and CIDR in test action 3. +* **Test assertion 2:** The "id" and CIDR in show details equal to "id" and CIDR stored in test action 3 +* Test action 5: Delete the IPv6 subnet using the stored "id" parameter +* Test action 6: List all subnets on the network, verify the IPv6 subnet id is no longer present in the list +* **Test assertion 3:** The IPv6 subnet "id" parameter is not present in list +* Test action 7: Delete the network created in test action 1, using the stored network id +* Test action 8: List all networks, verifying the network id is no longer present +* **Test assertion 4:** The "id" parameter is not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use create commands to create network, IPv6 subnet and show +commands to show network, IPv6 subnet details on the SUT API. 
Specifically it verifies that: + +* Network show commands return correct "id" and "name" parameter which equal to the returned response in the create commands +* IPv6 subnet show commands return correct "id" and CIDR parameter which equal to the returned response in the create commands +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------------------------------------- +Test Case 6 - Create an IPv6 Port in Allowed Allocation Pools +------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.port_create_in_allocation_pool + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating +an IPv6 subnet within allowed IPv6 address allocation pool and creating +a port whose address is in the range of the pool, the reference is, + +tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools + +Test preconditions +------------------ + +There should be an IPv6 CIDR configuration, which prefixlen is less than 126. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: Check the allocation pools configuration, verifying the prefixlen + of the IPv6 CIDR configuration is less than 126. +* **Test assertion 1:** The prefixlen of the IPv6 CIDR configuration is less than 126 +* Test action 3: Get the allocation pool by setting the start_ip and end_ip + based on the IPv6 CIDR configuration. +* Test action 4: Create an IPv6 subnet of the network within the allocation pools, + storing the "id" parameter returned in the response +* Test action 5: Create a port of the network, storing the "id" parameter returned in the response +* Test action 6: Verify the port's id is in the range of the allocation pools which is got is test action 3 +* **Test assertion 2:** the port's id is in the range of the allocation pools +* Test action 7: Delete the port using the stored "id" parameter +* Test action 8: List all ports, verify the port id is no longer present in the list +* **Test assertion 3:** The port "id" parameter is not present in list +* Test action 9: Delete the IPv6 subnet using the stored "id" parameter +* Test action 10: List all subnets on the network, verify the IPv6 subnet id is no longer present in the list +* **Test assertion 4:** The IPv6 subnet "id" parameter is not present in list +* Test action 11: Delete the network created in test action 1, using the stored network id +* Test action 12: List all networks, verifying the network id is no longer present +* **Test assertion 5:** The "id" parameter is not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use create commands to create an IPv6 subnet within allowed +IPv6 address allocation pool and create a port whose address is in the range of the pool. 
Specifically it verifies that: + +* IPv6 subnet create command to create an IPv6 subnet within allowed IPv6 address allocation pool +* Port create command to create a port whose id is in the range of the allocation pools +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------------------------------------- +Test Case 7 - Create an IPv6 Port with Empty Security Groups +------------------------------------------------------------- + +Short name +----------- + +opnfv.ipv6.port_create_empty_security_group + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating port with empty +security group, the reference is, + +tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: Create an IPv6 subnet of the network, storing the "id" parameter returned in the response +* Test action 3: Create a port of the network with an empty security group, storing the "id" parameter returned in the response +* Test action 4: Verify the security group of the port is not none but is empty +* **Test assertion 1:** the security group of the port is not none but is empty +* Test action 5: Delete the port using the stored "id" parameter +* Test action 6: List all ports, verify the port id is no longer present in the list +* **Test assertion 2:** The port "id" parameter is not present in list +* Test action 7: Delete the IPv6 subnet using the stored "id" parameter +* Test action 8: List all subnets on the network, verify the IPv6 subnet id is no longer present in the list +* **Test assertion 3:** The IPv6 subnet "id" parameter is not present in list +* Test action 9: Delete the network created in test action 1, using the stored network id +* Test action 10: List all networks, verifying the network id is no longer present +* **Test assertion 4:** The "id" parameter is not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use create commands to create port with +empty security group of the SUT API. 
Specifically it verifies that: + +* Port create commands to create a port with an empty security group +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +----------------------------------------------------- +Test Case 8 - Create, Update and Delete an IPv6 Port +----------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.port_create_update_delete + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating, updating, +deleting IPv6 port, the reference is, + +tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: Create a port of the network, storing the "id" and "admin_state_up" parameters + returned in the response +* Test action 3: Verify the value of port's 'admin_state_up' is True +* **Test assertion 1:** the value of port's 'admin_state_up' is True after creating +* Test action 4: Update the port's name with a new_name and set port's admin_state_up to False, + storing the name and admin_state_up parameters returned in the response +* Test action 5: Verify the stored port's name equals to new_name and the port's admin_state_up is False. +* **Test assertion 2:** the stored port's name equals to new_name and the port's admin_state_up is False +* Test action 6: Delete the port using the stored "id" parameter +* Test action 7: List all ports, verify the port is no longer present in the list +* **Test assertion 3:** The port "id" parameter is not present in list +* Test action 8: Delete the network created in test action 1, using the stored network id +* Test action 9: List all networks, verifying the network id is no longer present +* **Test assertion 4:** The "id" parameter is not present in the network list + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the ability to use create/update/delete commands to create/update/delete port +of the SUT API. 
Specifically it verifies that: + +* Port create commands return True of 'admin_state_up' in response +* Port update commands to update 'name' to new_name and 'admin_state_up' to false +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------ +Test Case 9 - List IPv6 Ports +------------------------------ + +Short name +---------- + +opnfv.ipv6.tc009.port_list + +Use case specification +---------------------- + +This test case evaluates the SUT ability of creating a port on a network and +finding the port in the all ports list, the reference is, + +tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: Create a port of the network, storing the "id" parameter returned in the response +* Test action 3: List all ports, verify the port id is found in the list +* **Test assertion 1:** The "id" parameter is found in the port list +* Test action 4: Delete the port using the stored "id" parameter +* Test action 5: List all ports, verify the port is no longer present in the list +* **Test assertion 2:** The port "id" parameter is not present in list +* Test action 6: Delete the network created in test action 1, using the stored network id +* Test action 7: List all networks, verifying the network id is no longer present +* **Test assertion 3:** The "id" parameter is not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use list commands to list the networks and ports on +the SUT API. Specifically it verifies that: + +* Port list command to list all ports, the created port is found in the list. 
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------
+Test Case 10 - Show Key/Value Details of an IPv6 Port
+-------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.tc010.port_show_details
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT ability of showing the port details;
+the values in the details should be equal to the values used to create the port.
+The reference is,
+
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create a network, storing the "id" parameter returned in the response
+* Test action 2: Create a port of the network, storing the "id" parameter returned in the response
+* Test action 3: Show the details of the port, verifying the port id stored
+  in test action 2 exists in the details
+* **Test assertion 1:** The "id" parameter is found in the port details shown
+* Test action 4: Verify the values in the details of the port are the same as the values
+  used to create the port
+* **Test assertion 2:** The values in the details of the port are the same as the values
+  used to create the port
+* Test action 5: Delete the port using the stored "id" parameter
+* Test action 6: List all ports, verify the port is no longer present in the list
+* **Test assertion 3:** The port "id" parameter is not present in the list
+* Test action 7: Delete the network created in test action 1, using the stored network id
+* Test action 8: List all networks, verifying the network id is no longer present
+* **Test assertion 4:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to use show commands to show port details on the SUT API.
+Specifically it verifies that:
+
+* Port show commands show the details of the port, including the port's id
+* Port show commands show the details of the port, whose values are the same as the values
+  used to create the port
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+----------------------------------------------------------
+Test Case 11 - Add Multiple Interfaces for an IPv6 Router
+----------------------------------------------------------
+
+Short name
+-----------
+
+opnfv.ipv6.router_add_multiple_interface
+
+Use case specification
+----------------------
+
+This test case evaluates the SUT ability of adding multiple interfaces
+to a router. The reference is,
+
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
+
+Test preconditions
+------------------
+
+None
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create 2 networks named network01 and network02 sequentially,
+  storing the "id" parameters returned in the response
+* Test action 2: Create an IPv6 subnet01 in network01, an IPv6 subnet02 in network02 sequentially,
+  storing the "id" parameters returned in the response
+* Test action 3: Create a router, storing the "id" parameter returned in the response
+* Test action 4: Create interface01 with subnet01 and the router
+* Test action 5: Verify the router_id stored in test action 3 equals to the interface01's 'device_id'
+  and subnet01_id stored in test action 2 equals to the interface01's 'subnet_id'
+* **Test assertion 1:** the router_id equals to the interface01's 'device_id'
+  and subnet01_id equals to the interface01's 'subnet_id'
+* Test action 6: Create interface02 with subnet02 and the router
+* Test action 7: Verify the router_id stored in test action 3 equals to the interface02's 'device_id'
+  and subnet02_id stored in test action 2 equals to the interface02's 'subnet_id'
+* **Test assertion 2:** the router_id equals to the interface02's 'device_id'
+  and subnet02_id equals to the interface02's 'subnet_id'
+* Test action 8: Delete the interfaces, router, IPv6 subnets and networks, then list
+  all interfaces, ports, IPv6 subnets and networks; the test passes if the deleted ones
+  are not found in the lists.
+* **Test assertion 3:** The ids of the interfaces, router, IPv6 subnets and networks are not
+  present in the lists after deletion
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to add multiple interfaces with IPv6 subnets to a router on
+the SUT API. Specifically it verifies that:
+
+* Interface create commands create an interface with an IPv6 subnet and a router; the interface's
+  'device_id' and 'subnet_id' equal to the router id and IPv6 subnet id, respectively.
+* Interface create commands create multiple interfaces with the same router and multiple IPv6 subnets.
+* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------------------------------------------- +Test Case 12 - Add and Remove an IPv6 Router Interface with port_id +------------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.router_interface_add_remove_with_port + +Use case specification +---------------------- + +This test case evaluates the SUT abiltiy of adding, removing router interface to +a port, the subnet_id and port_id of the interface will be checked, +the port's device_id will be checked if equals to the router_id or not. The +reference is, + +tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: Create an IPv6 subnet of the network, storing the "id" parameter returned in the response +* Test action 3: Create a router, storing the "id" parameter returned in the response +* Test action 4: Create a port of the network, storing the "id" parameter returned in the response +* Test action 5: Add router interface to the port created, storing the "id" parameter returned in the response +* Test action 6: Verify the interface's keys include 'subnet_id' and 'port_id' +* **Test assertion 1:** the interface's keys include 'subnet_id' and 'port_id' +* Test action 7: Show the port details, verify the 'device_id' in port details equals to the router id stored + in test action 3 +* **Test assertion 2:** 'device_id' in port details equals to the router id +* Test action 8: Delete the interface, port, router, subnet and network, then list + all interfaces, ports, routers, subnets and networks, the test passes if the deleted + ones are not found in the list. +* **Test assertion 3:** interfaces, ports, routers, subnets and networks are not found in the lists after deleting + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to use add/remove commands to add/remove router interface to the port, +show commands to show port details on the SUT API. 
Specifically it verifies that: + +* Router_interface add commands to add router interface to a port, the interface's keys should include 'subnet_id' and 'port_id' +* Port show commands to show 'device_id' in port details, which should be equal to the router id +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +--------------------------------------------------------------------- +Test Case 13 - Add and Remove an IPv6 Router Interface with subnet_id +--------------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.router_interface_add_remove + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of adding and removing a router interface with +the IPv6 subnet id, the reference is + +tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a network, storing the "id" parameter returned in the response +* Test action 2: Create an IPv6 subnet with the network created, storing the "id" parameter + returned in the response +* Test action 3: Create a router, storing the "id" parameter returned in the response +* Test action 4: Add a router interface with the stored ids of the router and IPv6 subnet +* **Test assertion 1:** Key 'subnet_id' is included in the added interface's keys +* **Test assertion 2:** Key 'port_id' is included in the added interface's keys +* Test action 5: Show the port info with the stored interface's port id +* **Test assertion 3:**: The stored router id is equal to the device id shown in the port info +* Test action 6: Delete the router interface created in test action 4, using the stored subnet id +* Test action 7: List all router interfaces, verifying the router interface is no longer present +* **Test assertion 4:** The router interface with the stored subnet id is not present + in the router interface list +* Test action 8: Delete the router created in test action 3, using the stored router id +* Test action 9: List all routers, verifying the router id is no longer present +* **Test assertion 5:** The router "id" parameter is not present in the router list +* Test action 10: Delete the subnet created in test action 2, using the stored subnet id +* Test action 11: List all subnets, verifying the subnet id is no longer present +* **Test assertion 6:** The subnet "id" parameter is not present in the subnet list +* Test action 12: Delete the network created in test action 1, using the stored network id +* Test action 13: List all networks, verifying the network id is no longer present +* **Test assertion 7:** The network "id" parameter is not present in the network list + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the ability to add and remove router interface with the subnet id on the +SUT API. 
Specifically it verifies that: + +* Router interface add command returns valid 'subnet_id' parameter which is reported + in the interface's keys +* Router interface add command returns valid 'port_id' parameter which is reported + in the interface's keys +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +------------------------------------------------------------------- +Test Case 14 - Create, Show, List, Update and Delete an IPv6 router +------------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.router_create_show_list_update_delete + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating, showing, listing, updating +and deleting routers, the reference is + +tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router + +Test preconditions +------------------ + +There should exist an OpenStack external network. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a router, set the admin_state_up to be False and external_network_id + to be public network id, storing the "id" parameter returned in the response +* **Test assertion 1:** The created router's admin_state_up is False +* **Test assertion 2:** The created router's external network id equals to the public network id +* Test action 2: Show details of the router created in test action 1, using the stored router id +* **Test assertion 3:** The router's name shown is the same as the router created +* **Test assertion 4:** The router's external network id shown is the same as the public network id +* Test action 3: List all routers and verify if created router is in response message +* **Test assertion 5:** The stored router id is in the router list +* Test action 4: Update the name of router and verify if it is updated +* **Test assertion 6:** The name of router equals to the name used to update in test action 4 +* Test action 5: Show the details of router, using the stored router id +* **Test assertion 7:** The router's name shown equals to the name used to update in test action 4 +* Test action 6: Delete the router created in test action 1, using the stored router id +* Test action 7: List all routers, verifying the router id is no longer present +* **Test assertion 8:** The "id" parameter is not present in the router list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to create, show, list, update and delete router on +the SUT API. 
Specifically it verifies that: + +* Router create command returns valid "admin_state_up" and "id" parameters which equal to the + "admin_state_up" and "id" returned in the response +* Router show command returns valid "name" parameter which equals to the "name" returned in the response +* Router show command returns valid "external network id" parameters which equals to the public network id +* Router list command returns valid "id" parameter which equals to the stored router "id" +* Router update command returns updated "name" parameters which equals to the "name" used to update +* Router created using create command is able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +--------------------------------------------------------------------------- +Test Case 15 - Create, List, Update, Show and Delete an IPv6 security group +--------------------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.security_group_create_list_update_show_delete + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating, listing, updating, showing +and deleting security groups, the reference is + +tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a security group, storing the "id" parameter returned in the response +* Test action 2: List all security groups and verify if created security group is there in response +* **Test assertion 1:** The created security group's "id" is found in the list +* Test action 3: Update the name and description of this security group, using the stored id +* Test action 4: Verify if the security group's name and description are updated +* **Test assertion 2:** The security group's name equals to the name used in test action 3 +* **Test assertion 3:** The security group's description equals to the description used in test action 3 +* Test action 5: Show details of the updated security group, using the stored id +* **Test assertion 4:** The security group's name shown equals to the name used in test action 3 +* **Test assertion 5:** The security group's description shown equals to the description used in test action 3 +* Test action 6: Delete the security group created in test action 1, using the stored id +* Test action 7: List all security groups, verifying the security group's id is no longer present +* **Test assertion 6:** The "id" parameter is not present in the security group list + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the ability to create list, update, show and delete security group on +the SUT API. 
Specifically it verifies that: + +* Security group create commands return valid "id" parameter which is reported in the list commands +* Security group update commands return valid "name" and "description" parameters which are + reported in the show commands +* Security group created using create command is able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +--------------------------------------------------------------- +Test Case 16 - Create, Show and Delete IPv6 security group rule +--------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.security_group_rule_create_show_delete + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of creating, showing, listing and deleting +security group rules, the reference is + +tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule + +Test preconditions +------------------ + +None + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create a security group, storing the "id" parameter returned in the response +* Test action 2: Create a rule of the security group with protocol tcp, udp and icmp, respectively, + using the stored security group's id, storing the "id" parameter returned in the response +* Test action 3: Show details of the created security group rule, using the stored id of the + security group rule +* **Test assertion 1:** All the created security group rule's values equal to the rule values + shown in test action 3 +* Test action 4: List all security group rules +* **Test assertion 2:** The stored security group rule's id is found in the list +* Test action 5: Delete the security group rule, using the stored security group rule's id +* Test action 6: List all security group rules, verifying the security group rule's id is no longer present +* **Test assertion 3:** The security group rule "id" parameter is not present in the list +* Test action 7: Delete the security group, using the stored security group's id +* Test action 8: List all security groups, verifying the security group's id is no longer present +* **Test assertion 4:** The security group "id" parameter is not present in the list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to create, show, list and delete security group rules on +the SUT API. Specifically it verifies that: + +* Security group rule create command returns valid values which are reported in the show command +* Security group rule created using create command is able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +---------------------------------------- +Test Case 17 - List IPv6 Security Groups +---------------------------------------- + +Short name +---------- + +opnfv.ipv6.security_group_list + +Use case specification +---------------------- + +This test case evaluates the SUT API ability of listing security groups, the reference is + +tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups + +Test preconditions +------------------ + +There should exist a default security group. 
+ +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: List all security groups +* Test action 2: Verify the default security group exists in the list, the test passes + if the default security group exists +* **Test assertion 1:** The default security group is in the list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to list security groups on the SUT API. +Specifically it verifies that: + +* Security group list command return valid security groups which include the default security group + +Post conditions +--------------- + +None + +---------------------------------------------------------------------------- +Test Case 18 - IPv6 Address Assignment - Dual Stack, SLAAC, DHCPv6 Stateless +---------------------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.dhcpv6_stateless + +Use case specification +---------------------- + +This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless' +and ipv6_address_mode 'dhcpv6_stateless'. +In this case, guest instance obtains IPv6 address from OpenStack managed radvd +using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then +verifies the ping6 available VM can ping the other VM's v4 and v6 addresses +as well as the v6 subnet's gateway ip in the same network, the reference is + +tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os + +Test preconditions +------------------ + +There should exist a public router or a public network. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create one network, storing the "id" parameter returned in the response +* Test action 2: Create one IPv4 subnet of the created network, storing the "id" + parameter returned in the response +* Test action 3: If there exists a public router, use it as the router. 
Otherwise, + use the public network to create a router +* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id +* Test action 5: Create one IPv6 subnet of the network created in test action 1 in + ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless', + storing the "id" parameter returned in the response +* Test action 6: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id +* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response +* **Test assertion 1:** The vNIC of each VM gets one v4 address and one v6 address actually assigned +* **Test assertion 2:** Each VM can ping the other's v4 private address +* **Test assertion 3:** The ping6 available VM can ping the other's v6 address + as well as the v6 subnet's gateway ip +* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids +* Test action 9: List all VMs, verifying the ids are no longer present +* **Test assertion 4:** The two "id" parameters are not present in the VM list +* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id +* Test action 11: Delete the IPv6 subnet created in test action 5, using the stored id +* Test action 12: List all subnets, verifying the ids are no longer present +* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list +* Test action 13: Delete the network created in test action 1, using the stored id +* Test action 14: List all networks, verifying the id is no longer present +* **Test assertion 6:** The "id" parameter is not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode +'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless', +and verify the ping6 available VM can ping the other VM's v4 and v6 addresses as well as +the v6 subnet's gateway ip in the same network. Specifically it verifies that: + +* The IPv6 addresses in mode 'dhcpv6_stateless' assigned successfully +* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnet's gateway ip +* All items created using create commands are able to be removed using the returned identifiers + +Post conditions +--------------- + +None + +-------------------------------------------------------------------------------------- +Test Case 19 - IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC, DHCPv6 Stateless +-------------------------------------------------------------------------------------- + +Short name +---------- + +opnfv.ipv6.dualnet_dhcpv6_stateless + +Use case specification +---------------------- + +This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless' +and ipv6_address_mode 'dhcpv6_stateless'. +In this case, guest instance obtains IPv6 address from OpenStack managed radvd +using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then +verifies the ping6 available VM can ping the other VM's v4 address in one network +and v6 address in another network as well as the v6 subnet's gateway ip, the reference is + +tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os + +Test preconditions +------------------ + +There should exists a public router or a public network. 
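+
+The address assignment mode exercised by test cases 18 to 21 corresponds to
+creating the IPv6 subnet with both ipv6_ra_mode and ipv6_address_mode set to
+DHCPv6 stateless. A minimal sketch of such a subnet creation with the
+openstacksdk Python client is shown below; note that the Neutron API spells
+these modes with a hyphen ('dhcpv6-stateless'), and the CIDR and names used
+here are examples only.
+
+.. code-block:: python
+
+    import openstack
+
+    conn = openstack.connect()
+
+    net = conn.network.create_network(name="ipv6-stateless-example")
+    subnet = conn.network.create_subnet(
+        network_id=net.id,
+        ip_version=6,
+        cidr="2001:db8:0:2::/64",             # example prefix only
+        ipv6_ra_mode="dhcpv6-stateless",      # the API uses hyphens here
+        ipv6_address_mode="dhcpv6-stateless",
+    )
+    print(subnet.ipv6_ra_mode, subnet.ipv6_address_mode)
+
+    # Remove the example resources again.
+    conn.network.delete_subnet(subnet.id)
+    conn.network.delete_network(net.id)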
+ +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create one network, storing the "id" parameter returned in the response +* Test action 2: Create one IPv4 subnet of the created network, storing the "id" + parameter returned in the response +* Test action 3: If there exists a public router, use it as the router. Otherwise, + use the public network to create a router +* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id +* Test action 5: Create another network, storing the "id" parameter returned in the response +* Test action 6: Create one IPv6 subnet of network created in test action 5 in + ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless', + storing the "id" parameter returned in the response +* Test action 7: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id +* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response +* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5 +* **Test assertion 1:** The 1st vNIC of each VM gets one v4 address assigned and + the 2nd vNIC of each VM gets one v6 address actually assigned +* **Test assertion 2:** Each VM can ping the other's v4 private address +* **Test assertion 3:** The ping6 available VM can ping the other's v6 address + as well as the v6 subnet's gateway ip +* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids +* Test action 11: List all VMs, verifying the ids are no longer present +* **Test assertion 4:** The two "id" parameters are not present in the VM list +* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id +* Test action 13: Delete the IPv6 subnet created in test action 6, using the stored id +* Test action 14: List all subnets, verifying the ids are no longer present +* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list +* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids +* Test action 16: List all networks, verifying the ids are no longer present +* **Test assertion 6:** The two "id" parameters are not present in the network list + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'dhcpv6_stateless' +and ipv6_address_mode 'dhcpv6_stateless', and verify the ping6 available VM can ping +the other VM's v4 address in one network and v6 address in another network as well as +the v6 subnet's gateway ip. 
Specifically it verifies that:
+
+* The IPv6 addresses in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 address in one network and IPv6 address in another
+ network as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------------------------------------------
+Test Case 20 - IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless
+-------------------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.multiple_prefixes_dhcpv6_stateless
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC and optional info from dnsmasq using DHCPv6 stateless. This test case then
+verifies the ping6 available VM can ping the other VM's one v4 address and two v6
+addresses with different prefixes as well as the v6 subnets' gateway ips in the
+same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router.
Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create two IPv6 subnets of the network created in test action 1 in
+ ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless',
+ storing the "id" parameters returned in the response
+* Test action 6: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses with
+ different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete two IPv6 subnets created in test action 5, using the stored ids
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless',
+and verify the ping6 available VM can ping the other VM's v4 address and two
+v6 addresses with different prefixes as well as the v6 subnets' gateway ips in the same network.
+Specifically it verifies that:
+
+* The IPv6 addresses with different prefixes in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-----------------------------------------------------------------------------------------------------------
+Test Case 21 - IPv6 Address Assignment - Dual Net, Multiple Prefixes, Dual Stack, SLAAC, DHCPv6 Stateless
+-----------------------------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_multiple_prefixes_dhcpv6_stateless
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'dhcpv6_stateless'
+and ipv6_address_mode 'dhcpv6_stateless'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC and optional info from dnsmasq using DHCPv6 stateless.
This test case then +verifies the ping6 available VM can ping the other VM's v4 address in one network +and two v6 addresses with different prefixes in another network as well as the +v6 subnets' gateway ips, the reference is + +tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless + +Test preconditions +------------------ + +There should exist a public router or a public network. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Test execution +''''''''''''''' + +* Test action 1: Create one network, storing the "id" parameter returned in the response +* Test action 2: Create one IPv4 subnet of the created network, storing the "id" + parameter returned in the response +* Test action 3: If there exists a public router, use it as the router. Otherwise, + use the public network to create a router +* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id +* Test action 5: Create another network, storing the "id" parameter returned in the response +* Test action 6: Create two IPv6 subnets of network created in test action 5 in + ipv6_ra_mode 'dhcpv6_stateless' and ipv6_address_mode 'dhcpv6_stateless', + storing the "id" parameters returned in the response +* Test action 7: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids +* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response +* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5 +* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses + with different prefixes actually assigned +* **Test assertion 2:** Each VM can ping the other's v4 private address +* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses + as well as the v6 subnets' gateway ips +* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids +* Test action 11: List all VMs, verifying the ids are no longer present +* **Test assertion 4:** The two "id" parameters are not present in the VM list +* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id +* Test action 13: Delete two IPv6 subnets created in test action 6, using the stored ids +* Test action 14: List all subnets, verifying the ids are no longer present +* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list +* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids +* Test action 16: List all networks, verifying the ids are no longer present +* **Test assertion 6:** The two "id" parameters are not present in the network list + +Pass / fail criteria +''''''''''''''''''''' + +This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'dhcpv6_stateless' +and ipv6_address_mode 'dhcpv6_stateless', +and verify the ping6 available VM can ping the other VM's v4 address in one network and two +v6 addresses with different prefixes in another network as well as the v6 subnets' +gateway ips. 
Specifically it verifies that:
+
+* The IPv6 addresses in mode 'dhcpv6_stateless' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+------------------------------------------------------------
+Test Case 22 - IPv6 Address Assignment - Dual Stack, SLAAC
+------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 address from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's v4 and v6 addresses as well as the v6 subnet's gateway ip in the
+same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create one IPv6 subnet of the network created in test action 1 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameter returned in the response
+* Test action 6: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and one v6 address actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 address
+ as well as the v6 subnet's gateway ip
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete the IPv6 subnet created in test action 5, using the stored id
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 and v6 addresses as well as
+the v6 subnet's gateway ip in the same
network. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+----------------------------------------------------------------------
+Test Case 23 - IPv6 Address Assignment - Dual Net, Dual Stack, SLAAC
+----------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 address from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's v4 address in one network and v6 address in another network as well as the
+v6 subnet's gateway ip, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create another network, storing the "id" parameter returned in the response
+* Test action 6: Create one IPv6 subnet of network created in test action 5 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameter returned in the response
+* Test action 7: Connect the IPv6 subnet to the router, using the stored IPv6 subnet id
+* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response
+* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5
+* **Test assertion 1:** The 1st vNIC of each VM gets one v4 address assigned and
+ the 2nd vNIC of each VM gets one v6 address actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 address
+ as well as the v6 subnet's gateway ip
+* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids
+* Test action 11: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 13: Delete the IPv6 subnet created in test action 6, using the stored id
+* Test action 14: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids
+* Test action 16: List all networks, verifying the ids are no longer present
+* **Test assertion 6:** The two "id" parameters are not present in the
network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 address in one network and
+v6 address in another network as well as the v6 subnet's gateway ip. Specifically it verifies that:
+
+* The IPv6 addresses in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 address in one network and IPv6 address
+ in another network as well as the v6 subnet's gateway ip
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-------------------------------------------------------------------------------
+Test Case 24 - IPv6 Address Assignment - Multiple Prefixes, Dual Stack, SLAAC
+-------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.multiple_prefixes_slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's one v4 address and two v6 addresses with different prefixes as well as the v6
+subnets' gateway ips in the same network, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router.
Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create two IPv6 subnets of the network created in test action 1 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameters returned in the response
+* Test action 6: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 7: Boot two VMs on this network, storing the "id" parameters returned in the response
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses with
+ different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 8: Delete the 2 VMs created in test action 7, using the stored ids
+* Test action 9: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 10: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 11: Delete two IPv6 subnets created in test action 5, using the stored ids
+* Test action 12: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 13: Delete the network created in test action 1, using the stored id
+* Test action 14: List all networks, verifying the id is no longer present
+* **Test assertion 6:** The "id" parameter is not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 address and two
+v6 addresses with different prefixes as well as the v6 subnets' gateway ips in the same network.
+Specifically it verifies that:
+
+* The IPv6 addresses with different prefixes in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+-----------------------------------------------------------------------------------------
+Test Case 25 - IPv6 Address Assignment - Dual Net, Dual Stack, Multiple Prefixes, SLAAC
+-----------------------------------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.ipv6.dualnet_multiple_prefixes_slaac
+
+Use case specification
+----------------------
+
+This test case evaluates IPv6 address assignment in ipv6_ra_mode 'slaac' and
+ipv6_address_mode 'slaac'.
+In this case, guest instance obtains IPv6 addresses from OpenStack managed radvd
+using SLAAC. This test case then verifies the ping6 available VM can ping the other
+VM's v4 address in one network and two v6 addresses with different prefixes in another
+network as well as the v6 subnets' gateway ips, the reference is
+
+tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac
+
+Test preconditions
+------------------
+
+There should exist a public router or a public network.
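+
+If no public router is available, one can typically be created from the public
+network as sketched below (assuming the standard python-openstackclient CLI;
+``public`` and ``router1`` are placeholder names used only for illustration):
+
+.. code-block:: bash
+
+   # Create a router and attach it to the external (public) network
+   openstack router create router1
+   openstack router set --external-gateway public router1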
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Test execution
+'''''''''''''''
+
+* Test action 1: Create one network, storing the "id" parameter returned in the response
+* Test action 2: Create one IPv4 subnet of the created network, storing the "id"
+ parameter returned in the response
+* Test action 3: If there exists a public router, use it as the router. Otherwise,
+ use the public network to create a router
+* Test action 4: Connect the IPv4 subnet to the router, using the stored IPv4 subnet id
+* Test action 5: Create another network, storing the "id" parameter returned in the response
+* Test action 6: Create two IPv6 subnets of network created in test action 5 in
+ ipv6_ra_mode 'slaac' and ipv6_address_mode 'slaac', storing the "id" parameters returned in the response
+* Test action 7: Connect the two IPv6 subnets to the router, using the stored IPv6 subnet ids
+* Test action 8: Boot two VMs on these two networks, storing the "id" parameters returned in the response
+* Test action 9: Turn on 2nd NIC of each VM for the network created in test action 5
+* **Test assertion 1:** The vNIC of each VM gets one v4 address and two v6 addresses
+ with different prefixes actually assigned
+* **Test assertion 2:** Each VM can ping the other's v4 private address
+* **Test assertion 3:** The ping6 available VM can ping the other's v6 addresses
+ as well as the v6 subnets' gateway ips
+* Test action 10: Delete the 2 VMs created in test action 8, using the stored ids
+* Test action 11: List all VMs, verifying the ids are no longer present
+* **Test assertion 4:** The two "id" parameters are not present in the VM list
+* Test action 12: Delete the IPv4 subnet created in test action 2, using the stored id
+* Test action 13: Delete two IPv6 subnets created in test action 6, using the stored ids
+* Test action 14: List all subnets, verifying the ids are no longer present
+* **Test assertion 5:** The "id" parameters of IPv4 and IPv6 are not present in the list
+* Test action 15: Delete the 2 networks created in test action 1 and 5, using the stored ids
+* Test action 16: List all networks, verifying the ids are no longer present
+* **Test assertion 6:** The two "id" parameters are not present in the network list
+
+Pass / fail criteria
+'''''''''''''''''''''
+
+This test evaluates the ability to assign IPv6 addresses in ipv6_ra_mode 'slaac'
+and ipv6_address_mode 'slaac',
+and verify the ping6 available VM can ping the other VM's v4 address in one network and two
+v6 addresses with different prefixes in another network as well as the v6 subnets' gateway ips.
+Specifically it verifies that:
+
+* The IPv6 addresses in mode 'slaac' are assigned successfully
+* The VM can ping the other VM's IPv4 and IPv6 private addresses as well as the v6 subnets' gateway ips
+* All items created using create commands are able to be removed using the returned identifiers
+
+Post conditions
+---------------
+
+None
+
+
+
diff --git a/docs/testing/user/testspecification/old_files/ipv6/designspecification.rst b/docs/testing/user/testspecification/old_files/ipv6/designspecification.rst
deleted file mode 100644
index 9e403472..00000000
--- a/docs/testing/user/testspecification/old_files/ipv6/designspecification.rst
+++ /dev/null
@@ -1,133 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-..
(c) Christopher Price (Ericsson AB) and others - -============================== -IPv6 test design specification -============================== - -This document outlines the approach and method for testing IPv6 in the OPNFV compliance test -suite. Providing a brief outline of the features to be tested, the methodology for testing, -schema's and criteria. - -Features to be tested -===================== - -The IPv6 compliance test plan outlines the method for testing IPv6 compliance to the OPNFV -platform behaviours and features of IPv6 enabled VNFi platforms. The specific features to -be tested by the IPv6 compliance test suite is outlined in the following table. - -.. table:: - :class: longtable - -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Features / Requirements |Tests available | Test Cases | -+===========================================================+===================+====================================================================+ -|All topologies work in a multi-tenant environment |No | | -| | | | -| | | | -| | | | -| | | | -| | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|IPv6 VM to VM only |No | | -| | | | -| | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|IPv6 external L2 VLAN directly attached to a VM |No | | -| | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|IPv6 subnet routed via L3 agent to an external IPv6 network|No | | -| | | | -|1. Both VLAN and overlay (e.g. GRE, VXLAN) subnet attached | | | -| to VMs; | | | -|2. Must be able to support multiple L3 agents for a given | | | -| external network to support scaling (neutron scheduler | | | -| to assign vRouters to the L3 agents) | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Ability for a NIC to support both IPv4 and IPv6 (dual |No | | -|stack) address. | | | -| | | | -|1. VM with a single interface associated with a network, | | | -| which is then associated with two subnets. | | | -|2. VM with two different interfaces associated with two | | | -| different networks and two different subnets. | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Support IPv6 Address assignment modes. |No | | -| | | | -|1. SLAAC | | | -|2. DHCPv6 Stateless | | | -|3. DHCPv6 Stateful | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Ability to create a port on an IPv6 DHCPv6 Stateful subnet |No | | -|and assign a specific IPv6 address to the port and have it | | | -|taken out of the DHCP address pool. | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Full support for IPv6 matching (i.e., IPv6, ICMPv6, TCP, |No | | -|UDP) in security groups. 
Ability to control and manage all | | | -|IPv6 security group capabilities via Neutron/Nova API (REST| | | -|and CLI) as well as via Horizon. | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|During network/subnet/router create, there should be an |No | | -|option to allow user to specify the type of address | | | -|management they would like. This includes all options | | | -|including those low priority if implemented (e.g., toggle | | | -|on/off router and address prefix advertisements); It must | | | -|be supported via Neutron API (REST and CLI) as well as via | | | -|Horizon | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Security groups anti-spoofing: Prevent VM from using a |No | | -|source IPv6/MAC address which is not assigned to the VM | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Protect tenant and provider network from rogue RAs |No | | -| | | | -| | | | -| | | | -| | | | -| | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Support the ability to assign multiple IPv6 addresses to |No | | -|an interface; both for Neutron router interfaces and VM | | | -|interfaces. | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Ability for a VM to support a mix of multiple IPv4 and IPv6|No | | -|networks, including multiples of the same type. | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|Support for IPv6 Prefix Delegation. |No | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|IPv6 First-Hop Security, IPv6 ND spoofing |No | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ -|IPv6 support in Neutron Layer3 High Availability |No | | -|(keepalived+VRRP). | | | -+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+ - - -Test approach for IPv6 -====================== - -The most common approach for testing IPv6 capabilities in the test suite is through interaction with the SUT control plane. -In this instance the test framework will exercise the NBI provided by the VIM to configure and leverage IPv6 related features -in the platform, instantiate workloads, and invoke behaviours in the platform. The suite may also interact directly with the -data plane to exercise platform capabilities and further invoke helper functions on the platform for the same purpose. - -Test result analysis --------------------- - -All functional tests in the IPv6 test suite will provide a pass/fail result on completion of the test. In addition test logs -and relevant additional information will be provided as part of the test log, available on test suite completion. - -Some tests in the compliance suite measure such metrics as latency and performance. 
At this time these tests are intended to -provide a feature based pass/fail metric not related to system performance. -These tests may however provide detailed results of performance and latency in the 'test report'_ document. - -Test identification -=================== - -TBD: WE need to identify the test naming scheme we will use in DoveTail in order that we can cross reference to the test -projects and maintain our suite effectively. This naming scheme needs to be externally relevant to non-OPNFV consumers and as -such some consideration is required on the selection. - -Pass Fail Criteria -================== - -This section requires some further work with the test teams to identify how and where we generate, store and provide results. diff --git a/docs/testing/user/testspecification/old_files/ipv6/index.rst b/docs/testing/user/testspecification/old_files/ipv6/index.rst deleted file mode 100644 index a806d644..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV - -******************************* -OPNFV IPv6 Compliance Test Plan -******************************* - -.. toctree:: - :maxdepth: 2 - - ./testplan.rst - ./testprocedure.rst - ./testspecification.rst - ./designspecification.rst - ./ipv6.tc001.specification.rst - ./ipv6.tc026.specification.rst - ./ipv6_all_testcases.rst - diff --git a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst b/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst deleted file mode 100644 index 5afb2095..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc001.specification.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. 
(c) OPNFV - -================================================================================================== -Dovetail IPv6 tc001 specification - Bulk Creation and Deletion of IPv6 Networks, Ports and Subnets -================================================================================================== - - -+-----------------------+----------------------------------------------------------------------------------------------------+ -|test case name |Bulk creation and deletion of IPv6 networks, ports and subnets | -| | | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|id |dovetail.ipv6.tc001 | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|objective |To verify that platform is able to create/delete networks, ports and subnets in bulk operation | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|test items |tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network | -| |{idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')} | -| |tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port | -| |{idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')} | -| |tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet | -| |{idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')} | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|environmental | | -|requirements & | environment can be deployed on bare metal of virtualized infrastructure | -|preconditions | deployment can be HA or non-HA | -| | | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|scenario dependencies | NA | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|procedural |Step 1: create/delete network: | -|requirements | create 2 networks in one request | -| | asserting that the networks are found in the list after creation | -| | | -| |Step 2: create/delete subnet: | -| | create 2 subnets in one request | -| | asserting that the subnets are found in the list after creation | -| | | -| |Step 3: create/delete port: | -| | create 2 ports in one request | -| | asserting that the ports are found in the list after creation | -| | | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|input specifications |The parameters needed to execute Neutron network APIs. | -| |Refer to Neutron Networking API v2.0 `[1]`_ `[2]`_ | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|output specifications |The responses after executing Network network APIs. | -| |Refer to Neutron Networking API v2.0 `[1]`_ `[2]`_ | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|pass/fail criteria |If normal response code 200 is returned, the test passes. | -| |Otherwise, the test fails with various error codes. 
| -| |Refer to Neutron Networking API v2.0 `[1]`_ `[2]`_ | -+-----------------------+----------------------------------------------------------------------------------------------------+ -|test report |TBD | -+-----------------------+----------------------------------------------------------------------------------------------------+ - -.. _`[1]`: http://developer.openstack.org/api-ref/networking/v2/ -.. _`[2]`: http://wiki.openstack.org/wiki/Neutron/APIv2-specification diff --git a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst b/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst deleted file mode 100644 index e7fd82e7..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/ipv6.tc026.specification.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV - -============================================================== -Dovetail IPv6 tc026 specification - Service VM as IPv6 vRouter -============================================================== - - -+-----------------------+--------------------------------------------------------------------------+ -|test case name |Service VM as IPv6 vRouter | -| | | -+-----------------------+--------------------------------------------------------------------------+ -|id |dovetail.ipv6.tc026 | -+-----------------------+--------------------------------------------------------------------------+ -|objective |IPv6 connnectivity, service VM as IPv6 vRouter | -+-----------------------+--------------------------------------------------------------------------+ -|modules under test |neutron, nova, etc | -+-----------------------+--------------------------------------------------------------------------+ -|dependent test project |yardstick | -+-----------------------+--------------------------------------------------------------------------+ -|test items |yardstick_tc027 | -+-----------------------+--------------------------------------------------------------------------+ -|environmental | OpenStack-only environment | -|requirements & | environment can be deplyed on bare metal of virtualized infrastructure | -|preconditions | deployment can be HA or non-HA | -| | test case image needs to be installed into Glance with ping6 included | -+-----------------------+--------------------------------------------------------------------------+ -|scenario dependencies | nosdn | -+-----------------------+--------------------------------------------------------------------------+ -|procedural |step 1: to setup IPv6 testing environment | -|requirements | 1.1 disable security group | -| | 1.2 create (ipv6, ipv4) router, network and subnet | -| | 1.3 create vRouter, VM1, VM2 | -| |step 2: to run ping6 to verify IPv6 connectivity | -| | 2.1 ssh to VM1 | -| | 2.2 ping6 to ipv6 router from VM1 | -| | 2.3 get the result and store the logs | -| |step 3: to teardown IPv6 testing environment | -| | 3.1 delete vRouter, VM1, VM2 | -| | 3.2 delete (ipv6, ipv4) router, network and subnet | -| | 3.3 enable security group | -+-----------------------+--------------------------------------------------------------------------+ -|input specifications |packetsize: 56 | -| |ping_count: 5 | -| | | -+-----------------------+--------------------------------------------------------------------------+ -|output specifications |output includes max_rtt, min_rtt, average_rtt | 
-+-----------------------+--------------------------------------------------------------------------+ -|pass/fail criteria |ping6 connectivity success, no SLA | -+-----------------------+--------------------------------------------------------------------------+ -|test report | dovetail dashboard DB here | -+-----------------------+--------------------------------------------------------------------------+ - diff --git a/docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst b/docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst deleted file mode 100644 index 462219a8..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/ipv6_all_testcases.rst +++ /dev/null @@ -1,249 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV - -================================================== -IPv6 Compliance Testing Methodology and Test Cases -================================================== - -IPv6 Compliance Testing focuses on overlay IPv6 capabilities, i.e. to validate that -IPv6 capability is supported in tenant networks, subnets and routers. Both Tempest API -testing and Tempest Scenario testing are reused as much as we can in IPv6 Compliance -Testing. In addition, Yardstick Test Case 027 is also used to validate a specific use case -of using a Service VM as an IPv6 vRouter. - -IPv6 Compliance Testing test cases are described as follows: - ---------------------------------------------------------------- -Test Case 1: Create and Delete an IPv6 Network, Port and Subnet ---------------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet - ------------------------------------------------------------------ -Test Case 2: Create, Update and Delete an IPv6 Network and Subnet ------------------------------------------------------------------ - -.. code-block:: bash - - tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet - ----------------------------------------------- -Test Case 3: Check External Network Visibility ----------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility - -------------------------------------------------------- -Test Case 4: List IPv6 Networks and Subnets of a Tenant -------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks - tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets - ------------------------------------------------------------ -Test Case 5: Show Information of an IPv6 Network and Subnet ------------------------------------------------------------ - -.. 
code-block:: bash - - tempest.api.network.test_networks.NetworksIpV6Test.test_show_network - tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet - ------------------------------------------------------------- -Test Case 6: Create an IPv6 Port in Allowed Allocation Pools ------------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools - --------------------------------------------------------- -Test Case 7: Create an IPv6 Port without Security Groups --------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups - ---------------------------------------------------- -Test Case 8: Create, Update and Delete an IPv6 Port ---------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port - ----------------------------------------- -Test Case 9: List IPv6 Ports of a Tenant ----------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports - ----------------------------------------------- -Test Case 10: Show Information of an IPv6 Port ----------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port - --------------------------------------------------------- -Test Case 11: Add Multiple Interfaces for an IPv6 Router --------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces - ------------------------------------------------------------------- -Test Case 12: Add and Remove an IPv6 Router Interface with port_id ------------------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id - --------------------------------------------------------------------- -Test Case 13: Add and Remove an IPv6 Router Interface with subnet_id --------------------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id - ------------------------------------------------------------------- -Test Case 14: Create, Update, Delete, List and Show an IPv6 Router ------------------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router - --------------------------------------------------------------------------- -Test Case 15: Create, Update, Delete, List and Show an IPv6 Security Group --------------------------------------------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group - ----------------------------------------------------------- -Test Case 16: Create, Delete and Show Security Group Rules ----------------------------------------------------------- - -.. 
code-block:: bash - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule - --------------------------------------- -Test Case 17: List All Security Groups --------------------------------------- - -.. code-block:: bash - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups - --------------------------------------------------------- -Test Case 18: IPv6 Address Assignment - DHCPv6 Stateless --------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os - --------------------------------------------------------------------- -Test Case 19: IPv6 Address Assignment - Dual Stack, DHCPv6 Stateless --------------------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os - ---------------------------------------------------------------------------- -Test Case 20: IPv6 Address Assignment - Multiple Prefixes, DHCPv6 Stateless ---------------------------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless - ---------------------------------------------------------------------------------------- -Test Case 21: IPv6 Address Assignment - Dual Stack, Multiple Prefixes, DHCPv6 Stateless ---------------------------------------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless - ---------------------------------------------- -Test Case 22: IPv6 Address Assignment - SLAAC ---------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os - ---------------------------------------------------------- -Test Case 23: IPv6 Address Assignment - Dual Stack, SLAAC ---------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os - ----------------------------------------------------------------- -Test Case 24: IPv6 Address Assignment - Multiple Prefixes, SLAAC ----------------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac - ----------------------------------------------------------------------------- -Test Case 25: IPv6 Address Assignment - Dual Stack, Multiple Prefixes, SLAAC ----------------------------------------------------------------------------- - -.. code-block:: bash - - tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac - -------------------------------------------- -Test Case 26: Service VM as an IPv6 vRouter -------------------------------------------- - -.. 
code-block:: bash - - # Refer to Yardstick Test Case 027 - # Instruction: http://artifacts.opnfv.org/ipv6/docs/configurationguide/index.html - # Step 1: Set up Service VM as an IPv6 vRouter - # 1.1: Install OPNFV and Preparation - # 1.2: Disable Security Groups in OpenStack ML2 Setup - # 1.3: Create IPv4 and IPv6 Neutron routers, networks and subnets - # 1.4: Boot vRouter VM, and Guest VM1 and Guest VM2 - # Step 2: Verify IPv6 Connectivity - # 2.1: ssh to Guest VM1 - # 2.2: Ping6 from Guest VM1 to Guest VM2 - # 2.3: Ping6 from Guest VM1 to vRouter VM - # 2.4: Ping6 from Guest VM1 to Neutron IPv6 Router Namespace - # Step 3: Tear down Setup - # 3.1: Delete Guest VM1, Guest VM2 and vRouter VM - # 3.2: Delete IPv4 and IPv6 Neutron routers, networks and subnets - # 3.3: Enable Security Groups - diff --git a/docs/testing/user/testspecification/old_files/ipv6/testplan.rst b/docs/testing/user/testspecification/old_files/ipv6/testplan.rst deleted file mode 100644 index 3470e7a6..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/testplan.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV - -=============================== -OPNFV IPv6 Compliance Test Plan -=============================== - -Introduction -============ - -The IPv6 compliance test plan outlines the method for testing IPv6 Tenant Network feature -compliance with the OPNFV platform. - -Scope ------ - -This test, and other tests in the test suite, are designed to verify an entire SUT, -and not any individual component of the system. - -Test suite scope and procedures -=============================== - -The IPv6 compliance test suite will evaluate the ability for a SUT to support IPv6 -Tenant Network features and functionality provided by OPNFV platform. - -Please refer to the complete list of the test cases for details. - -Test suite execution -==================== - -Please refer to each test case for specific setup and execution procedure. - -.._[1]: http://www.opnfv.org diff --git a/docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst b/docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst deleted file mode 100644 index 2119ed61..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/testprocedure.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) Christopher Price (Ericsson AB) and others - -=================== -IPv6 test procedure -=================== - -Draft to be patched this week, someone feel free to work on this in parallel. diff --git a/docs/testing/user/testspecification/old_files/ipv6/testspecification.rst b/docs/testing/user/testspecification/old_files/ipv6/testspecification.rst deleted file mode 100644 index 6f7caba8..00000000 --- a/docs/testing/user/testspecification/old_files/ipv6/testspecification.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. 
(c) Christopher Price (Ericsson AB) and others - -=============================================== -Test specification - Service VM as IPv6 vRouter -=============================================== - -Draft to be worked on, this represents the YardStick test but I would suggest we need to break -this into a set of tests which provide more details per action with boundary validation. - -Test Item -========= - -TBD -> IPv6 Ping... - -Identify the items or features to be tested by this test case. The item description and -definition can be referenced from any one of several sources, depending on the level of the -test case specification. It may be a good idea to reference the source documents as well. - -Environmental requirements -========================== - -TBD - -Preconditions and procedural requirements -========================================= - -TBD - -.. <Start> -.. this section may be iterated over for a set of simillar test cases that would be run as one. - -Input Specifications -==================== - -TBD - -Output Specifications -===================== - -TBD - -.. <End> - -Test Reporting -============== - -The test report for this test case will be generated with links to relevant data sources. -This section can be updated once we have a template for the report in place. - -http://testresults.opnfv.org/grafana/dashboard/db/yardstick-tc027 - - diff --git a/docs/testing/user/testspecification/vimoperationscompute/index.rst b/docs/testing/user/testspecification/vimoperationscompute/index.rst index f8dc5870..4ed37809 100644 --- a/docs/testing/user/testspecification/vimoperationscompute/index.rst +++ b/docs/testing/user/testspecification/vimoperationscompute/index.rst @@ -9,11 +9,31 @@ VIM compute operations test specification .. toctree:: :maxdepth: 2 -Each test case requires documentation according to: -* Use case specification -* Test preconditions -* Basic test flow execution descriptor -* Post conditions and pass fail criteria +Scope +===== + +References +================ + + +Definitions and abbreviations +============================= + + +Use case description +==================== + + +System Under Test (SUT) +======================= + + +Test Suite Structure +==================== + + +Test Descriptions +================= tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers diff --git a/docs/testing/user/testspecification/vpn/index.rst b/docs/testing/user/testspecification/vpn/index.rst new file mode 100644 index 00000000..0a8a8d17 --- /dev/null +++ b/docs/testing/user/testspecification/vpn/index.rst @@ -0,0 +1,532 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) Ericsson AB + +====================== +VPN test specification +====================== + +.. toctree:: + :maxdepth: 2 + +Scope +===== + +The VPN test area evaluates the ability of the system under test to support VPN +networking for virtual workloads. The tests in this test area will evaluate +establishing VPN networks, publishing and communication between endpoints using +BGP and tear down of the networks. + +References +========== + +This test area evaluates the ability of the system to perform selected actions +defined in the following specifications. Details of specific features evaluated +are described in the test descriptions. 
+
+- RFC 4364 - BGP/MPLS IP Virtual Private Networks
+
+  - https://tools.ietf.org/html/rfc4364
+
+- RFC 4659 - BGP-MPLS IP Virtual Private Network
+
+  - https://tools.ietf.org/html/rfc4659
+
+- RFC 2547 - BGP/MPLS VPNs
+
+  - https://tools.ietf.org/html/rfc2547
+
+
+Definitions and abbreviations
+=============================
+
+The following terms and abbreviations are used in conjunction with this test
+area
+
+- BGP - Border gateway protocol
+- eRT - Export route target
+- IETF - Internet Engineering Task Force
+- iRT - Import route target
+- NFVi - Network functions virtualization infrastructure
+- Tenant - An isolated set of virtualized infrastructures
+- VM - Virtual machine
+- VPN - Virtual private network
+- VLAN - Virtual local area network
+
+
+System Under Test (SUT)
+=======================
+
+The system under test is assumed to be the NFVi and VIM in operation on a
+Pharos compliant infrastructure.
+
+
+Test Area Structure
+===================
+
+The test area is structured in four separate tests which are executed
+sequentially. The order of the tests is arbitrary as there are no dependencies
+across the tests. Specifically, every test performs clean-up operations which
+return the system to the same state as before the test.
+
+The test area evaluates the ability of the SUT to establish connectivity
+between Virtual Machines using an appropriate route target configuration,
+reconfigure the route targets to remove connectivity between the VMs, then
+reestablish connectivity by re-association.
+
+
+Test Descriptions
+=================
+
+----------------------------------------------------------------
+Test Case 1 - VPN provides connectivity between Neutron subnets
+----------------------------------------------------------------
+
+Short name
+----------
+
+opnfv.sdnvpn.subnet_connectivity
+
+
+Use case specification
+----------------------
+
+This test evaluates the use case where an NFVi tenant uses a BGPVPN to provide
+connectivity between VMs on different Neutron networks and subnets that reside
+on different hosts.
+
+
+Test preconditions
+------------------
+
+2 compute nodes are available, denoted Node1 and Node2 in the following.
+
+
+Basic test flow execution description and pass/fail criteria
+------------------------------------------------------------
+
+Methodology for verifying connectivity
+''''''''''''''''''''''''''''''''''''''
+
+Connectivity between VMs is tested by sending ICMP ping packets between
+selected VMs. The target IPs are passed to the VMs sending pings by means of a
+custom user data script. Whether or not a ping was successful is determined by
+checking the console output of the source VMs.
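The ping-and-console-log methodology above can be pictured with a short sketch. The helper names below are hypothetical and are not part of the SDNVPN test code; the sketch only assumes a CirrOS-like guest in which ``/bin/sh`` and ``ping`` are available.

.. code-block:: python

    import re

    def ping_user_data(target_ips, count=5):
        """Build a user data script that pings each target IP and tags the
        outcome in the console log for later evaluation."""
        lines = ['#!/bin/sh']
        for ip in target_ips:
            lines.append(
                "ping -c {0} {1} && echo 'ping {1} OK' || echo 'ping {1} FAILED'"
                .format(count, ip))
        return '\n'.join(lines) + '\n'

    def ping_succeeded(console_log, target_ip):
        """Return True if the console log of the source VM reports a
        successful ping towards target_ip."""
        pattern = r'ping {0} OK'.format(re.escape(target_ip))
        return re.search(pattern, console_log) is not None

An assertion such as "ping from VM1 to VM2 succeeds" then reduces to evaluating ``ping_succeeded`` on VM1's console output with VM2's IP as the target.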
+ + +Test execution +'''''''''''''' + +* Create Neutron network N1 and subnet SN1 with IP range 10.10.10.0/24 +* Create Neutron network N2 and subnet SN2 with IP range 10.10.11.0/24 + +* Create VM1 on Node1 with a port in network N1 +* Create VM2 on Node1 with a port in network N1 +* Create VM3 on Node2 with a port in network N1 +* Create VM4 on Node1 with a port in network N2 +* Create VM5 on Node2 with a port in network N2 + +* Create VPN1 with eRT<>iRT +* Create network association between network N1 and VPN1 + +* VM1 sends ICMP packets to VM2 using ``ping`` + +* **Test assertion 1:** Ping from VM1 to VM2 succeeds: ``ping`` exits with return code 0 + +* VM1 sends ICMP packets to VM3 using ``ping`` + +* **Test assertion 2:** Ping from VM1 to VM3 succeeds: ``ping`` exits with return code 0 + +* VM1 sends ICMP packets to VM4 using ``ping`` + +* **Test assertion 3:** Ping from VM1 to VM4 fails: ``ping`` exits with a non-zero return code + +* Create network association between network N2 and VPN1 + +* VM4 sends ICMP packets to VM5 using ``ping`` + +* **Test assertion 4:** Ping from VM4 to VM5 succeeds: ``ping`` exits with return code 0 + +* Configure iRT=eRT in VPN1 + +* VM1 sends ICMP packets to VM4 using ``ping`` + +* **Test assertion 5:** Ping from VM1 to VM4 succeeds: ``ping`` exits with return code 0 + +* VM1 sends ICMP packets to VM5 using ``ping`` + +* **Test assertion 6:** Ping from VM1 to VM5 succeeds: ``ping`` exits with return code 0 + +* Delete all instances: VM1, VM2, VM3, VM4 and VM5 + +* Delete all networks and subnets: networks N1 and N2 including subnets SN1 and SN2 + +* Delete all network associations and VPN1 + + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the capability of the NFVi and VIM to provide routed IP +connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test +verifies that: + +* VMs in the same Neutron subnet have IP connectivity regardless of BGP/MPLS + VPNs (test assertion 1, 2, 4) + +* VMs in different Neutron subnets do not have IP connectivity by default - in + this case without associating VPNs with the same import and export route + targets to the Neutron networks (test assertion 3) + +* VMs in different Neutron subnets have routed IP connectivity after + associating both networks with BGP/MPLS VPNs which have been configured with + the same import and export route targets (test assertion 5, 6). Hence, + adjusting the ingress and egress route targets enables as well as prohibits + routing. + +In order to pass this test, all test assertions listed in the test execution +above need to pass. + + +Post conditions +--------------- + +N/A + +------------------------------------------------------------ +Test Case 2 - VPNs ensure traffic separation between tenants +------------------------------------------------------------ + +Short Name +---------- + +opnfv.sdnvpn.tenant_separation + + +Use case specification +---------------------- + +This test evaluates if VPNs provide separation of traffic such that overlapping +IP ranges can be used. + + +Test preconditions +------------------ + +2 compute nodes are available, denoted Node1 and Node2 in the following. + + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for verifying connectivity +'''''''''''''''''''''''''''''''''''''' + +Connectivity between VMs is tested by establishing an SSH connection. 
Moreover,
+the command "hostname" is executed at the remote VM in order to retrieve the
+hostname of the remote VM. The retrieved hostname is furthermore compared
+against an expected value. This is used to verify tenant traffic separation,
+i.e., despite overlapping IPs, a connection is made to the correct VM as
+determined by means of the hostname of the target VM.
+
+
+
+Test execution
+''''''''''''''
+
+* Create Neutron network N1
+* Create subnet SN1a of network N1 with IP range 10.10.10.0/24
+* Create subnet SN1b of network N1 with IP range 10.10.11.0/24
+
+* Create Neutron network N2
+* Create subnet SN2a of network N2 with IP range 10.10.10.0/24
+* Create subnet SN2b of network N2 with IP range 10.10.11.0/24
+
+* Create VM1 on Node1 with a port in network N1 and IP 10.10.10.11.
+* Create VM2 on Node1 with a port in network N1 and IP 10.10.10.12.
+* Create VM3 on Node2 with a port in network N1 and IP 10.10.11.13.
+* Create VM4 on Node1 with a port in network N2 and IP 10.10.10.12.
+* Create VM5 on Node2 with a port in network N2 and IP 10.10.11.13.
+
+* Create VPN1 with iRT=eRT=RT1
+* Create network association between network N1 and VPN1
+
+* VM1 attempts to execute the command ``hostname`` on the VM with IP 10.10.10.12 via SSH.
+
+* **Test assertion 1:** VM1 can successfully connect to the VM with IP
+  10.10.10.12 via SSH and execute the remote command ``hostname``. The
+  retrieved hostname equals the hostname of VM2.
+
+* VM1 attempts to execute the command ``hostname`` on the VM with IP 10.10.11.13 via SSH.
+
+* **Test assertion 2:** VM1 can successfully connect to the VM with IP
+  10.10.11.13 via SSH and execute the remote command ``hostname``. The
+  retrieved hostname equals the hostname of VM3.
+
+* Create VPN2 with iRT=eRT=RT2
+* Create network association between network N2 and VPN2
+
+* VM4 attempts to execute the command ``hostname`` on the VM with IP 10.10.11.13 via SSH.
+
+* **Test assertion 3:** VM4 can successfully connect to the VM with IP
+  10.10.11.13 via SSH and execute the remote command ``hostname``. The
+  retrieved hostname equals the hostname of VM5.
+
+* VM4 attempts to execute the command ``hostname`` on the VM with IP 10.10.11.11 via SSH.
+
+* **Test assertion 4:** VM4 cannot connect to the VM with IP 10.10.11.11 via SSH.
+
+* Delete all instances: VM1, VM2, VM3, VM4 and VM5
+
+* Delete all networks and subnets: networks N1 and N2 including subnets SN1a, SN1b, SN2a and SN2b
+
+* Delete all network associations, VPN1 and VPN2
+
+
+Pass / fail criteria
+''''''''''''''''''''
+
+This test evaluates the capability of the NFVi and VIM to provide routed IP
+connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test
+verifies that:
+
+* VMs in the same Neutron subnet (still) have IP connectivity between each
+  other when a BGP/MPLS VPN is associated with the network (test assertion 1).
+
+* VMs in different Neutron subnets have routed IP connectivity between each
+  other when BGP/MPLS VPNs with the same import and export route targets are
+  associated with both networks (test assertion 2).
+
+* VMs in different Neutron networks and BGP/MPLS VPNs with different import and
+  export route targets can have overlapping IP ranges. The BGP/MPLS VPNs
+  provide traffic separation (test assertions 3 and 4).
+
+In order to pass this test, all test assertions listed in the test execution
+above need to pass.
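The hostname check behind the assertions of this test case can be sketched as follows. This is not the test implementation; it assumes the ``paramiko`` SSH library and password-based guest login, and all names are illustrative only.

.. code-block:: python

    import socket

    import paramiko

    def remote_hostname(ip, username, password, timeout=10):
        """Run 'hostname' on the VM reachable at ip via SSH and return its
        output, or None if the connection fails."""
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            client.connect(ip, username=username, password=password,
                           timeout=timeout)
            _, stdout, _ = client.exec_command('hostname')
            return stdout.read().strip()
        except (paramiko.SSHException, socket.error):
            return None
        finally:
            client.close()

Test assertion 3, for instance, corresponds to ``remote_hostname('10.10.11.13', ...)`` returning the hostname of VM5 rather than VM3, while test assertion 4 corresponds to it returning ``None``.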
+ + +Post conditions +--------------- + +N/A + +-------------------------------------------------------------------------------- +Test Case 3 - VPN provides connectivity between subnets using router association +-------------------------------------------------------------------------------- + +Short Name +---------- + +opnfv.sdnvpn.router_association + + +Use case specification +---------------------- + +This test evaluates if a VPN provides connectivity between two subnets by +utilizing two different VPN association mechanisms: a router association and a +network association. + +Specifically, the test network topology comprises two networks N1 and N2 with +corresponding subnets. Additionally, network N1 is connected to a router R1. +This test verifies that a VPN V1 provides connectivity between both networks +when applying a router association to router R1 and a network association to +network N2. + + +Test preconditions +------------------ + +2 compute nodes are available, denoted Node1 and Node2 in the following. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for verifying connectivity +'''''''''''''''''''''''''''''''''''''' + +Connectivity between VMs is tested by sending ICMP ping packets between +selected VMs. The target IPs are passed to the VMs sending pings by means of a +custom user data script. Whether or not a ping was successful is determined by +checking the console output of the source VMs. + + +Test execution +'''''''''''''' + +* Create a network N1, a subnet SN1 with IP range 10.10.10.0/24 and a connected router R1 +* Create a network N2, a subnet SN2 with IP range 10.10.11.0/24 + +* Create VM1 on Node1 with a port in network N1 +* Create VM2 on Node1 with a port in network N1 +* Create VM3 on Node2 with a port in network N1 +* Create VM4 on Node1 with a port in network N2 +* Create VM5 on Node2 with a port in network N2 + +* Create VPN1 with eRT<>iRT so that connected subnets should not reach each other + +* Create route association between router R1 and VPN1 + +* VM1 sends ICMP packets to VM2 using ``ping`` + +* **Test assertion 1:** Ping from VM1 to VM2 succeeds: ``ping`` exits with return code 0 + +* VM1 sends ICMP packets to VM3 using ``ping`` + +* **Test assertion 2:** Ping from VM1 to VM3 succeeds: ``ping`` exits with return code 0 + +* VM1 sends ICMP packets to VM4 using ``ping`` + +* **Test assertion 3:** Ping from VM1 to VM4 fails: ``ping`` exits with a non-zero return code + +* Create network association between network N2 and VPN1 + +* VM4 sends ICMP packets to VM5 using ``ping`` + +* **Test assertion 4:** Ping from VM4 to VM5 succeeds: ``ping`` exits with return code 0 + +* Change VPN1 so that iRT=eRT + +* VM1 sends ICMP packets to VM4 using ``ping`` + +* **Test assertion 5:** Ping from VM1 to VM4 succeeds: ``ping`` exits with return code 0 + +* VM1 sends ICMP packets to VM5 using ``ping`` + +* **Test assertion 6:** Ping from VM1 to VM5 succeeds: ``ping`` exits with return code 0 + +* Delete all instances: VM1, VM2, VM3, VM4 and VM5 + +* Delete all networks, subnets and routers: networks N1 and N2 including subnets SN1 and SN2, router R1 + +* Delete all network and router associations and VPN1 + + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the capability of the NFVi and VIM to provide routed IP +connectivity between VMs by means of BGP/MPLS VPNs. 
Specifically, the test +verifies that: + +* VMs in the same Neutron subnet have IP connectivity regardless of the import + and export route target configuration of BGP/MPLS VPNs (test assertion 1, 2, 4) + +* VMs in different Neutron subnets do not have IP connectivity by default - in + this case without associating VPNs with the same import and export route + targets to the Neutron networks or connected Neutron routers (test assertion 3). + +* VMs in two different Neutron subnets have routed IP connectivity after + associating the first network and a router connected to the second network + with BGP/MPLS VPNs which have been configured with the same import and export + route targets (test assertion 5, 6). Hence, adjusting the ingress and egress + route targets enables as well as prohibits routing. + +* Network and router associations are equivalent methods for binding Neutron networks + to VPN. + +In order to pass this test, all test assertions listed in the test execution +above need to pass. + + +Post conditions +--------------- + +N/A + +--------------------------------------------------------------------------------------------------- +Test Case 4 - Verify interworking of router and network associations with floating IP functionality +--------------------------------------------------------------------------------------------------- + +Short Name +---------- + +opnfv.sdnvpn.router_association_floating_ip + + +Use case specification +---------------------- + +This test evaluates if both the router association and network association +mechanisms interwork with floating IP functionality. + +Specifically, the test network topology comprises two networks N1 and N2 with +corresponding subnets. Additionally, network N1 is connected to a router R1. +This test verifies that i) a VPN V1 provides connectivity between both networks +when applying a router association to router R1 and a network association to +network N2 and ii) a VM in network N1 is reachable externally by means of a +floating IP. + + +Test preconditions +------------------ + +At least one compute node is available. + +Basic test flow execution description and pass/fail criteria +------------------------------------------------------------ + +Methodology for verifying connectivity +'''''''''''''''''''''''''''''''''''''' + +Connectivity between VMs is tested by sending ICMP ping packets between +selected VMs. The target IPs are passed to the VMs sending pings by means of a +custom user data script. Whether or not a ping was successful is determined by +checking the console output of the source VMs. 
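In addition to the VM-to-VM pings described above, the execution steps below also check that a floating IP assigned to VM1 is reachable from the host running the test framework. A minimal, standard-library-only sketch of such a check (the helper name is made up and is not part of the test code) could look like this:

.. code-block:: python

    import os
    import subprocess

    def host_can_ping(ip, count=5, deadline=60):
        """Send `count` ICMP echo requests to ip from the test host and
        return True if ping exits successfully within `deadline` seconds
        (Linux ping options -c and -w)."""
        cmd = ['ping', '-c', str(count), '-w', str(deadline), ip]
        with open(os.devnull, 'w') as devnull:
            return subprocess.call(cmd, stdout=devnull, stderr=devnull) == 0

Test assertion 2 of this test case then amounts to ``host_can_ping(floating_ip_of_vm1)`` evaluating to True.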
+ + +Test execution +'''''''''''''' + +* Create a network N1, a subnet SN1 with IP range 10.10.10.0/24 and a connected router R1 +* Create a network N2 with IP range 10.10.20.0/24 + +* Create VM1 with a port in network N1 +* Create VM2 with a port in network N2 + +* Create VPN1 +* Create a router association between router R1 and VPN1 +* Create a network association between network N2 and VPN1 + + +* VM1 sends ICMP packets to VM2 using ``ping`` + +* **Test assertion 1:** Ping from VM1 to VM2 succeeds: ``ping`` exits with return code 0 + +* Assign a floating IP to VM1 + +* The host running the test framework sends ICMP packets to VM1 using ``ping`` + +* **Test assertion 2:** Ping from the host running the test framework to the + floating IP of VM1 succeeds: ``ping`` exits with return code 0 + +* Delete floating IP assigned to VM1 + +* Delete all instances: VM1, VM2 + +* Delete all networks, subnets and routers: networks N1 and N2 including subnets SN1 and SN2, router R1 + +* Delete all network and router associations as well as VPN1 + + +Pass / fail criteria +'''''''''''''''''''' + +This test evaluates the capability of the NFVi and VIM to provide routed IP +connectivity between VMs by means of BGP/MPLS VPNs. Specifically, the test +verifies that: + +* VMs in the same Neutron subnet have IP connectivity regardless of the import + and export route target configuration of BGP/MPLS VPNs (test assertion 1) + +* VMs connected to a network which has been associated with a BGP/MPLS VPN are + reachable through floating IPs. + +In order to pass this test, all test assertions listed in the test execution +above need to pass. + + +Post conditions +--------------- + +N/A diff --git a/dovetail/compliance/proposed_tests.yml b/dovetail/compliance/proposed_tests.yml index fa9c5b8a..9d63cb2a 100644 --- a/dovetail/compliance/proposed_tests.yml +++ b/dovetail/compliance/proposed_tests.yml @@ -7,10 +7,10 @@ proposed_tests: # - dovetail.defcore.tc002 # ipv6 - dovetail.ipv6.tc001 - # - dovetail.ipv6.tc002 - # - dovetail.ipv6.tc003 - # - dovetail.ipv6.tc004 - # - dovetail.ipv6.tc005 + - dovetail.ipv6.tc002 + - dovetail.ipv6.tc003 + - dovetail.ipv6.tc004 + - dovetail.ipv6.tc005 - dovetail.ipv6.tc006 - dovetail.ipv6.tc007 - dovetail.ipv6.tc008 @@ -31,17 +31,17 @@ proposed_tests: - dovetail.ipv6.tc023 - dovetail.ipv6.tc024 - dovetail.ipv6.tc025 - # nfvi, vping_ssh, vping_userdata - - dovetail.nfvi.tc001 - - dovetail.nfvi.tc002 - # HA, ha.tc002, ha.tc012, will kill the host and can't restart, not ready yet, skip. + # HA - dovetail.ha.tc001 + - dovetail.ha.tc002 - dovetail.ha.tc003 - # - dovetail.ha.tc004 + - dovetail.ha.tc004 - dovetail.ha.tc005 - dovetail.ha.tc006 - # - dovetail.ha.tc007 - # - dovetail.ha.tc008 - - dovetail.ha.tc009 - # - dovetail.ha.tc010 - # - dovetail.ha.tc011 + - dovetail.ha.tc007 + - dovetail.ha.tc008 + # sdnvpn + - dovetail.sdnvpn.tc001 + - dovetail.sdnvpn.tc002 + - dovetail.sdnvpn.tc004 + - dovetail.sdnvpn.tc008 diff --git a/dovetail/conf/cmd_config.yml b/dovetail/conf/cmd_config.yml index da8c4732..a5b262d1 100644 --- a/dovetail/conf/cmd_config.yml +++ b/dovetail/conf/cmd_config.yml @@ -29,13 +29,6 @@ cli: path: - 'functest/docker_tag' help: 'Overwrite tag for functest docker container (e.g. stable or latest)' - openrc: - flags: - - '--openrc' - - '-o' - path: - - 'openrc' - help: 'Openstack Credential file location' control: testsuite: flags: @@ -57,7 +50,7 @@ cli: flags: - '--report' - '-r' - help: 'push results to DB (e.g. 
--report http://192.168.135.2:8000/api/v1)' + help: 'push results to DB (e.g. --report http://192.168.135.2:8000/api/v1/results)' offline: flags: - '--offline' diff --git a/dovetail/conf/dovetail_config.yml b/dovetail/conf/dovetail_config.yml index 332628a2..793c7051 100644 --- a/dovetail/conf/dovetail_config.yml +++ b/dovetail/conf/dovetail_config.yml @@ -1,10 +1,21 @@ --- -report_file: 'dovetail_report.txt' +# report_file: 'dovetail_report.txt' cli_file_name: 'cmd_config.yml' report_dest: 'file' +result_file: 'results.json' # OPENSTACK Credential file -openrc: '/home/opnfv/dovetail/openrc.sh' +env_file: 'env_config.sh' + +# POD info file +pod_file: 'pod.yaml' + +# JUMPSERVER private key used in pod_file to login hosts +# If use password to login hosts, there's no need to provide the private key +pri_key: 'id_rsa' + +# SDNVPN offline image +sdnvpn_image: 'ubuntu-16.04-server-cloudimg-amd64-disk1.img' COMPLIANCE_PATH: compliance/ TESTCASE_PATH: testcase/ @@ -20,6 +31,7 @@ testarea_supported: - ha - ipv6 - nfvi + - sdnvpn - vimops functest_testsuite: @@ -31,6 +43,7 @@ functest_testsuite: - promise functest_testcase: + - bgpvpn - connection_check - api_check - snaps_health_check @@ -66,3 +79,5 @@ validate_input: - 'danube.1.0' - 'danube.2.0' - 'danube.3.0' + - 'cvp.0.1.0' + - 'cvp.0.2.0' diff --git a/dovetail/conf/functest_config.yml b/dovetail/conf/functest_config.yml index 460506a6..c33a0a91 100644 --- a/dovetail/conf/functest_config.yml +++ b/dovetail/conf/functest_config.yml @@ -1,7 +1,7 @@ --- functest: image_name: opnfv/functest - docker_tag: latest + docker_tag: cvp.0.2.0 opts: '-id --privileged=true' config: dir: '/home/opnfv/userconfig' diff --git a/dovetail/conf/yardstick_config.yml b/dovetail/conf/yardstick_config.yml index ae59a9ec..56ad75a5 100644 --- a/dovetail/conf/yardstick_config.yml +++ b/dovetail/conf/yardstick_config.yml @@ -1,7 +1,7 @@ --- yardstick: image_name: opnfv/yardstick - docker_tag: latest + docker_tag: danube.3.0 opts: '-id --privileged=true' config: dir: '/home/opnfv/userconfig' @@ -15,8 +15,8 @@ yardstick: - 'mkdir -p /home/opnfv/yardstick/results/' - "cd /home/opnfv/repos/yardstick && source tests/ci/prepare_env.sh && yardstick -d task start tests/opnfv/test_cases/{{validate_testcase}}.yaml - --output-file /home/opnfv/yardstick/results/{{validate_testcase}}.out - --task-args '{'file': '/home/opnfv/userconfig/pod.yaml'}'" + --output-file /home/opnfv/yardstick/results/{{testcase}}.out + --task-args '{'file': '/home/opnfv/userconfig/pre_config/pod.yaml'}'" post_condition: - '' result: @@ -25,3 +25,6 @@ yardstick: file_path: 'yardstick.log' key_path: '/root/.ssh/id_rsa' openrc: '/etc/yardstick/openstack.creds' + yard_conf: + src_file: '/home/opnfv/repos/yardstick/etc/yardstick/yardstick.conf.sample' + dest_file: '/etc/yardstick/yardstick.conf' diff --git a/dovetail/container.py b/dovetail/container.py index 5c128c0b..1a5867a7 100644 --- a/dovetail/container.py +++ b/dovetail/container.py @@ -8,6 +8,7 @@ # import os +import yaml import utils.dovetail_logger as dt_logger import utils.dovetail_utils as dt_utils @@ -41,32 +42,41 @@ class Container(object): return '%s:%s' % (dt_cfg.dovetail_config[type]['image_name'], dt_cfg.dovetail_config[type]['docker_tag']) except KeyError as e: - cls.logger.error('There is no %s in %s config file.', e, type) + cls.logger.exception( + 'There is no key {} in {} config file.'.format(e, type)) return None # get the openrc_volume for creating the container @classmethod def openrc_volume(cls, type): dovetail_config = 
dt_cfg.dovetail_config - dovetail_config['openrc'] = os.path.abspath(dovetail_config['openrc']) + dovetail_config['openrc'] = os.path.join(dovetail_config['config_dir'], + dovetail_config['env_file']) if os.path.isfile(dovetail_config['openrc']): openrc = ' -v %s:%s ' % (dovetail_config['openrc'], dovetail_config[type]['openrc']) return openrc else: - cls.logger.error("File %s is not exist", dovetail_config['openrc']) + cls.logger.error( + "File {} doesn't exist.".format(dovetail_config['openrc'])) return None # set functest envs and TEST_DB_URL for creating functest container @staticmethod - def set_functest_config(): + def set_functest_config(testcase_name): # These are all just used by Functest's function push_results_to_db. # And has nothing to do with DoveTail running test cases. - ins_type = " -e INSTALLER_TYPE=unknown" - scenario = " -e DEPLOY_SCENARIO=unknown" - node = " -e NODE_NAME=master" - envs = "%s %s %s" % (ins_type, scenario, node) + ins_type = os.getenv('INSTALLER_TYPE', "unknown") + scenario = os.getenv('DEPLOY_SCENARIO', "unknown") + ins_type = ''.join([" -e INSTALLER_TYPE=", ins_type]) + scenario = ''.join([" -e DEPLOY_SCENARIO=", scenario]) + # vpn testcase only runs when scenario name includes bgpvpn + # functest requirements + if 'sdnvpn' in testcase_name: + ins_type = "-e INSTALLER_TYPE=netvirt" + scenario = " -e DEPLOY_SCENARIO=bgpvpn" + envs = "%s %s" % (ins_type, scenario) dovetail_config = dt_cfg.dovetail_config if dovetail_config['report_dest'].startswith("http"): @@ -92,15 +102,24 @@ class Container(object): cls.logger.error("Can't find any external network.") return None - if dovetail_config['report_dest'].startswith("http"): - cls.logger.info("Yardstick can't push results to DB.") - cls.logger.info("Results will be stored with files.") - log_vol = '-v %s:%s ' % (dovetail_config['result_dir'], dovetail_config["yardstick"]['result']['log']) - key_path = os.path.join(dovetail_config['userconfig_dir'], 'id_rsa') - key_con_path = dovetail_config["yardstick"]['result']['key_path'] - key_vol = '-v %s:%s ' % (key_path, key_con_path) + + # for yardstick, support pod.yaml configuration + pod_file = os.path.join(dovetail_config['config_dir'], + dovetail_config['pod_file']) + if not os.path.isfile(pod_file): + cls.logger.error("File {} doesn't exist.".format(pod_file)) + return None + key_file = os.path.join(dovetail_config['config_dir'], + dovetail_config['pri_key']) + key_container_path = dovetail_config["yardstick"]['result']['key_path'] + if not os.path.isfile(key_file): + cls.logger.debug("Key file {} is not found, must use password in " + "{} to do HA test.".format(key_file, pod_file)) + key_vol = '' + else: + key_vol = '-v %s:%s ' % (key_file, key_container_path) return "%s %s %s" % (envs, log_vol, key_vol) @classmethod @@ -116,42 +135,81 @@ class Container(object): # CI_DEBUG is used for showing the debug logs of the upstream projects # BUILD_TAG is the unique id for this test - envs = ' -e CI_DEBUG=true' + envs = ' -e CI_DEBUG=true -e NODE_NAME=master' envs = envs + ' -e BUILD_TAG=%s-%s' % (dovetail_config['build_tag'], testcase_name) config = "" if type.lower() == "functest": - config = cls.set_functest_config() + config = cls.set_functest_config(testcase_name) if type.lower() == "yardstick": config = cls.set_yardstick_config() if not config: return None # for refstack, support user self_defined configuration - # for yardstick, support pod.yaml configuration - pod_file = os.path.join(dovetail_config['userconfig_dir'], 'pod.yaml') - if type.lower() == 
"yardstick" and not os.path.exists(pod_file): - cls.logger.error("File %s doesn't exist.", pod_file) - return None - key_file = os.path.join(dovetail_config['userconfig_dir'], 'id_rsa') - if type.lower() == "yardstick" and not os.path.exists(key_file): - cls.logger.debug("File %s doesn't exist.", key_file) - cls.logger.debug("Can just use password in %s.", pod_file) config_volume = \ - ' -v %s:%s ' % (dovetail_config['userconfig_dir'], - dovetail_config["functest"]['config']['dir']) + ' -v %s:%s ' % (os.getenv("DOVETAIL_HOME"), + dovetail_config[type]['config']['dir']) + + hosts_config = "" + hosts_config_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), 'userconfig')) + try: + with open(os.path.join(hosts_config_path, 'hosts.yaml')) as f: + hosts_info = yaml.safe_load(f) + if hosts_info['hosts_info']: + for host in hosts_info['hosts_info']: + hosts_config += " --add-host " + hosts_config += str(host) + cls.logger.info('Get hosts info {}.'.format(hosts_config)) + except Exception: + cls.logger.warn('Failed to get hosts info in {}/hosts.yaml, ' + 'maybe some issues with domain name resolution.' + .format(hosts_config_path)) + + cacert_volume = "" + https_enabled = dt_utils.check_https_enabled(cls.logger) + cacert = os.getenv('OS_CACERT',) + if https_enabled: + cls.logger.info("https enabled...") + if cacert is not None: + if not os.path.isfile(cacert): + cls.logger.error("Env variable 'OS_CACERT' is set to {}" + "but the file does not exist." + .format(cacert)) + return None + elif not dovetail_config['config_dir'] in cacert: + cls.logger.error("Credential file has to be put in {}," + "which can be mount into container." + .format(dovetail_config['config_dir'])) + return None + cacert_volume = ' -v %s:%s ' % (cacert, cacert) + else: + cls.logger.warn("https enabled, OS_CACERT not set, insecure" + "connection used or OS_CACERT missed") result_volume = ' -v %s:%s ' % (dovetail_config['result_dir'], dovetail_config[type]['result']['dir']) - cmd = 'sudo docker run %s %s %s %s %s %s %s /bin/bash' % \ - (opts, envs, config, openrc, config_volume, - result_volume, docker_image) + cmd = 'sudo docker run %s %s %s %s %s %s %s %s %s /bin/bash' % \ + (opts, envs, config, hosts_config, openrc, cacert_volume, + config_volume, result_volume, docker_image) dt_utils.exec_cmd(cmd, cls.logger) ret, container_id = \ dt_utils.exec_cmd("sudo docker ps | grep " + docker_image + " | awk '{print $1}' | head -1", cls.logger) cls.container_list[type] = container_id + + if 'sdnvpn' in str(testcase_name): + prefix_path = dt_cfg.dovetail_config[type]['config']['dir'] + file_name = dt_cfg.dovetail_config['sdnvpn_image'] + src_path = os.path.join(prefix_path, 'pre_config', file_name) + dest_path = '/home/opnfv/functest/images' + Container.pre_copy(container_id, src_path, dest_path) + + if type.lower() == 'yardstick': + cls.set_yardstick_conf_file(container_id) + return container_id @classmethod @@ -170,15 +228,15 @@ class Container(object): cmd = "sudo docker ps -aq -f 'ancestor=%s'" % (image_id) ret, msg = dt_utils.exec_cmd(cmd, cls.logger) if msg and ret == 0: - cls.logger.debug('image %s has containers, skip.', image_id) + cls.logger.debug('Image {} has containers, skip.'.format(image_id)) return True cmd = 'sudo docker rmi %s' % (image_id) - cls.logger.debug('remove image %s', image_id) + cls.logger.debug('Remove image {}.'.format(image_id)) ret, msg = dt_utils.exec_cmd(cmd, cls.logger) if ret == 0: - cls.logger.debug('remove image %s successfully', image_id) + cls.logger.debug('Remove image {} 
successfully.'.format(image_id)) return True - cls.logger.error('fail to remove image %s.', image_id) + cls.logger.error('Failed to remove image {}.'.format(image_id)) return False @classmethod @@ -186,9 +244,10 @@ class Container(object): cmd = 'sudo docker pull %s' % (image_name) ret, msg = dt_utils.exec_cmd(cmd, cls.logger) if ret != 0: - cls.logger.error('fail to pull docker image %s!', image_name) + cls.logger.error( + 'Failed to pull docker image {}!'.format(image_name)) return False - cls.logger.debug('success to pull docker image %s!', image_name) + cls.logger.debug('Success to pull docker image {}!'.format(image_name)) return True @classmethod @@ -197,7 +256,8 @@ class Container(object): if not docker_image: return None if cls.has_pull_latest_image[validate_type] is True: - cls.logger.debug('%s is already the newest version.', docker_image) + cls.logger.debug( + '{} is already the newest version.'.format(docker_image)) return docker_image old_image_id = cls.get_image_id(docker_image) if not cls.pull_image_only(docker_image): @@ -205,13 +265,14 @@ class Container(object): cls.has_pull_latest_image[validate_type] = True new_image_id = cls.get_image_id(docker_image) if not new_image_id: - cls.logger.error("fail to get the new image's id %s", docker_image) + cls.logger.error( + "Failed to get the id of image {}.".format(docker_image)) return None if not old_image_id: return docker_image if new_image_id == old_image_id: - cls.logger.debug('image %s has no changes, no need to remove.', - docker_image) + cls.logger.debug('Image {} has no changes, no need to remove.' + .format(docker_image)) else: cls.remove_image(old_image_id) return docker_image @@ -243,3 +304,18 @@ class Container(object): return (1, 'src_path or dest_path is empty') cmd = 'cp %s %s' % (src_path, dest_path) return cls.exec_cmd(container_id, cmd, exit_on_error) + + @classmethod + def set_yardstick_conf_file(cls, container_id): + valid_type = 'yardstick' + src = dt_cfg.dovetail_config[valid_type]['yard_conf']['src_file'] + dest = dt_cfg.dovetail_config[valid_type]['yard_conf']['dest_file'] + cls.pre_copy(container_id, src, dest) + url = dt_cfg.dovetail_config['report_dest'] + if url.startswith("http"): + cmd = ("sed -i '16s#http://127.0.0.1:8000/results#{}#g' {}" + .format(url, dest)) + cls.exec_cmd(container_id, cmd) + if url.lower() == 'file': + cmd = ("sed -i '12s/http/file/g' {}".format(dest)) + cls.exec_cmd(container_id, cmd) diff --git a/dovetail/parser.py b/dovetail/parser.py index fdde4f9e..1b539c85 100644 --- a/dovetail/parser.py +++ b/dovetail/parser.py @@ -32,15 +32,16 @@ class Parser(object): kwargs = {} for arg in dt_cfg.dovetail_config['parameters']: path = eval(arg['path']) - cls.logger.debug('name: %s, eval path: %s ', - arg['name'], path) + cls.logger.debug( + 'name: {}, eval path: {}'.format(arg['name'], path)) kwargs[arg['name']] = \ dt_utils.get_obj_by_path(testcase.testcase, path) - cls.logger.debug('kwargs: %s', kwargs) + cls.logger.debug('kwargs: {}'.format(kwargs)) cmd_lines = template.render(**kwargs) except Exception as e: - cls.logger.error('failed to parse cmd %s, exception:%s', cmd, e) + cls.logger.exception( + 'Failed to parse cmd {}, exception: {}'.format(cmd, e)) return None return cmd_lines diff --git a/dovetail/report.py b/dovetail/report.py index 2c6200d3..08780b88 100644 --- a/dovetail/report.py +++ b/dovetail/report.py @@ -112,8 +112,8 @@ class Report(object): '|'.join(dt_cfg.dovetail_config['testarea_supported'])) area = pattern.findall(testcase['name']) if not area: - 
cls.logger.error("testcase %s not in supported testarea", - testcase['name']) + cls.logger.error("Test case {} not in supported testarea." + .format(testcase['name'])) return None area = area[0] testarea_scope.append(area) @@ -170,9 +170,9 @@ class Report(object): with open(os.path.join(dt_cfg.dovetail_config['result_dir'], report_file_name), 'w') as report_file: report_file.write(report) - cls.logger.info('save report to %s', report_file_name) + cls.logger.info('Save report to {}'.format(report_file_name)) except Exception: - cls.logger.error('Failed to save: %s', report_file_name) + cls.logger.exception('Failed to save: {}'.format(report_file_name)) @classmethod def get_result(cls, testcase): @@ -180,7 +180,7 @@ class Report(object): type = testcase.validate_type() crawler = CrawlerFactory.create(type) if crawler is None: - cls.logger.error('crawler is None:%s', testcase.name()) + cls.logger.error('Crawler is None: {}'.format(testcase.name())) return None # if validate_testcase in cls.results[type]: @@ -191,12 +191,12 @@ class Report(object): if result is not None: cls.results[type][validate_testcase] = result # testcase.script_result_acquired(True) - cls.logger.debug('testcase: %s -> result acquired', - validate_testcase) + cls.logger.debug( + 'Test case: {} -> result acquired'.format(validate_testcase)) else: retry = testcase.increase_retry() - cls.logger.debug('testcase: %s -> result acquired retry:%d', - validate_testcase, retry) + cls.logger.debug('Test case: {} -> result acquired retry: {}' + .format(validate_testcase, retry)) return result @@ -206,7 +206,7 @@ class FunctestCrawler(object): def __init__(self): self.type = 'functest' - self.logger.debug('create crawler:%s', self.type) + self.logger.debug('Create crawler: {}'.format(self.type)) @classmethod def create_log(cls): @@ -234,14 +234,15 @@ class FunctestCrawler(object): os.path.join(dovetail_config['result_dir'], dovetail_config[self.type]['result']['file_path']) if not os.path.exists(file_path): - self.logger.info('result file not found: %s', file_path) + self.logger.error('Result file not found: {}'.format(file_path)) return None if testcase_name in dt_cfg.dovetail_config['functest_testcase']: complex_testcase = False elif testcase_name in dt_cfg.dovetail_config['functest_testsuite']: complex_testcase = True else: - self.logger.error("Wrong Functest test case %s.", testcase_name) + self.logger.error( + "Wrong Functest test case {}.".format(testcase_name)) return None with open(file_path, 'r') as f: for jsonfile in f: @@ -264,7 +265,8 @@ class FunctestCrawler(object): "errors": error_case, "skipped": skipped_case} except KeyError as e: - self.logger.error("Key error, exception: %s", e) + self.logger.exception( + "Result data don't have key {}.".format(e)) return None except ValueError: continue @@ -273,20 +275,20 @@ class FunctestCrawler(object): 'timestop': timestop, 'duration': duration, 'details': details} - self.logger.debug('Results: %s', str(json_results)) + self.logger.debug('Results: {}'.format(str(json_results))) return json_results def crawl_from_url(self, testcase=None): url = "%s?case=%s&last=1" % \ (dt_cfg.dovetail_config['report_dest'], testcase.validate_testcase()) - self.logger.debug("Query to rest api: %s", url) + self.logger.debug("Query to rest api: {}".format(url)) try: data = json.load(urllib2.urlopen(url)) return data['results'][0] except Exception as e: - self.logger.error("Cannot read content from the url: %s, " - "exception: %s", url, e) + self.logger.exception("Cannot read content from the url: 
{}, " + "exception: {}".format(url, e)) return None @@ -296,7 +298,7 @@ class YardstickCrawler(object): def __init__(self): self.type = 'yardstick' - self.logger.debug('create crawler:%s', self.type) + self.logger.debug('Create crawler: {}'.format(self.type)) @classmethod def create_log(cls): @@ -313,18 +315,24 @@ class YardstickCrawler(object): def crawl_from_file(self, testcase=None): file_path = os.path.join(dt_cfg.dovetail_config['result_dir'], - testcase.validate_testcase() + '.out') + testcase.name() + '.out') if not os.path.exists(file_path): - self.logger.info('result file not found: %s', file_path) + self.logger.error('Result file not found: {}'.format(file_path)) return None criteria = 'FAIL' with open(file_path, 'r') as f: for jsonfile in f: data = json.loads(jsonfile) if 1 == data['status']: - criteria = 'PASS' + try: + v = data['result'][1]['benchmark']['data']['sla_pass'] + if 1 == v: + criteria = 'PASS' + except KeyError as e: + self.logger.exception( + 'Pass flag not found {}'.format(e)) json_results = {'criteria': criteria} - self.logger.debug('Results: %s', str(json_results)) + self.logger.debug('Results: {}'.format(str(json_results))) return json_results def crawl_from_url(self, testcase=None): @@ -415,7 +423,7 @@ class FunctestChecker(object): testcase_passed = 'SKIP' for sub_testcase in sub_testcase_list: - self.logger.debug('check sub_testcase:%s', sub_testcase) + self.logger.debug('Check sub_testcase: {}'.format(sub_testcase)) try: if self.get_sub_testcase(sub_testcase, db_result['details']['errors']): diff --git a/dovetail/run.py b/dovetail/run.py index 607e1b15..5c0b2ddb 100755 --- a/dovetail/run.py +++ b/dovetail/run.py @@ -46,11 +46,11 @@ def run_test(testsuite, testarea, logger): duration = 0 start_time = time.time() for testcase_name in testarea_list: - logger.info('>>[testcase]: %s', testcase_name) + logger.info('>>[testcase]: {}'.format(testcase_name)) testcase = Testcase.get(testcase_name) if testcase is None: - logger.error('test case %s is not defined in testcase folder, \ - skipping', testcase_name) + logger.error('Test case {} is not defined in testcase folder, ' + 'skipping.'.format(testcase_name)) continue run_testcase = True @@ -74,27 +74,31 @@ def check_tc_result(testcase, logger): result_dir = dt_cfg.dovetail_config['result_dir'] validate_type = testcase.validate_type() functest_result = dt_cfg.dovetail_config['functest']['result']['file_path'] + dovetail_result = os.path.join(result_dir, + dt_cfg.dovetail_config['result_file']) if dt_cfg.dovetail_config['report_dest'].startswith("http"): - if validate_type.lower() == 'yardstick': - logger.info("Results have been stored with file %s.", - os.path.join(result_dir, - testcase.validate_testcase() + '.out')) + if dt_utils.store_db_results(dt_cfg.dovetail_config['report_dest'], + dt_cfg.dovetail_config['build_tag'], + testcase.name(), dovetail_result, + logger): + logger.info("Results have been pushed to database and stored " + "with local file {}.".format(dovetail_result)) else: - if dt_utils.check_db_results(dt_cfg.dovetail_config['report_dest'], - dt_cfg.dovetail_config['build_tag'], - testcase.name(), - logger): - logger.info("Results have been pushed to database.") - else: - logger.error("Fail to push results to database.") + logger.error("Failed to push results to database.") if dt_cfg.dovetail_config['report_dest'] == "file": if validate_type.lower() == 'yardstick': - logger.info("Results have been stored with file %s.", - os.path.join(result_dir, - testcase.validate_testcase() + '.out')) - if 
validate_type.lower() == 'functest': - logger.info("Results have been stored with file %s.", - os.path.join(result_dir, functest_result)) + result_file = os.path.join(result_dir, testcase.name() + '.out') + elif validate_type.lower() == 'functest': + result_file = os.path.join(result_dir, functest_result) + else: + logger.error("Don't support {} now.".format(validate_type)) + return + if os.path.isfile(result_file): + logger.info( + "Results have been stored with file {}.".format(result_file)) + else: + logger.error( + "Failed to store results with file {}.".format(result_file)) result = Report.get_result(testcase) Report.check_result(testcase, result) @@ -105,18 +109,20 @@ def validate_input(input_dict, check_dict, logger): yard_tag = input_dict['yard_tag'] valid_tag = check_dict['valid_docker_tag'] if func_tag is not None and func_tag not in valid_tag: - logger.error("func_tag can't be %s, valid in %s", func_tag, valid_tag) + logger.error("The input option 'func_tag' can't be {}, " + "valid values are {}.".format(func_tag, valid_tag)) raise SystemExit(1) if yard_tag is not None and yard_tag not in valid_tag: - logger.error("yard_tag can't be %s, valid in %s", yard_tag, valid_tag) + logger.error("The input option 'yard_tag' can't be {}, " + "valid values are {}.".format(yard_tag, valid_tag)) raise SystemExit(1) # for 'report' option report = input_dict['report'] if report: if not (report.startswith("http") or report == "file"): - logger.error("report can't be %s", input_dict['report']) - logger.info("valid report types are 'file' and 'http'") + logger.error("Report type can't be {}, valid types are 'file' " + "and 'http'.".format(input_dict['report'])) raise SystemExit(1) @@ -145,7 +151,7 @@ def filter_config(input_dict, logger): configs[key.upper()] = value_dict break except KeyError as e: - logger.exception('%s lacks subsection %s', config_key, e) + logger.exception('KeyError {}.'.format(e)) raise SystemExit(1) if not configs: return None @@ -171,39 +177,65 @@ def clean_results_dir(): if os.path.exists(result_path): if os.path.isdir(result_path): cmd = 'sudo rm -rf %s/*' % (result_path) - dt_utils.exec_cmd(cmd, exit_on_error=False) + dt_utils.exec_cmd(cmd, exit_on_error=False, exec_msg_on=False) else: print "result_dir in dovetail_config.yml is not a directory." 
raise SystemExit(1) def get_result_path(): - dovetail_home = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + try: + dovetail_home = os.environ["DOVETAIL_HOME"] + except Exception: + print("ERROR: mandatory env variable 'DOVETAIL_HOME' is not found, " + "please set in env_config.sh and source this file before " + "running.") + return None result_path = os.path.join(dovetail_home, 'results') dt_cfg.dovetail_config['result_dir'] = result_path + pre_config_path = os.path.join(dovetail_home, 'pre_config') + dt_cfg.dovetail_config['config_dir'] = pre_config_path + return dovetail_home -def get_userconfig_path(): - dovetail_home = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +def copy_userconfig_files(logger): + dovetail_home = os.path.dirname(os.path.abspath(__file__)) userconfig_path = os.path.join(dovetail_home, 'userconfig') - dt_cfg.dovetail_config['userconfig_dir'] = userconfig_path + pre_config_path = dt_cfg.dovetail_config['config_dir'] + if not os.path.isdir(pre_config_path): + os.makedirs(pre_config_path) + cmd = 'sudo cp -r %s/* %s' % (userconfig_path, pre_config_path) + dt_utils.exec_cmd(cmd, logger, exit_on_error=False) + + +# env_init can source some env variable used in dovetail, such as +# when https+credential used, OS_CACERT +def env_init(logger): + openrc = os.path.join(dt_cfg.dovetail_config['config_dir'], + dt_cfg.dovetail_config['env_file']) + if not os.path.isfile(openrc): + logger.error("File {} does not exist.".format(openrc)) + dt_utils.source_env(openrc) def main(*args, **kwargs): """Dovetail compliance test entry!""" build_tag = "daily-master-%s" % str(uuid.uuid4()) dt_cfg.dovetail_config['build_tag'] = build_tag - get_result_path() - get_userconfig_path() + if not get_result_path(): + return clean_results_dir() if kwargs['debug']: os.environ['DEBUG'] = 'true' create_logs() logger = dt_logger.Logger('run').getLogger() logger.info('================================================') - logger.info('Dovetail compliance: %s!', (kwargs['testsuite'])) + logger.info('Dovetail compliance: {}!'.format(kwargs['testsuite'])) logger.info('================================================') - logger.info('Build tag: %s', dt_cfg.dovetail_config['build_tag']) + logger.info('Build tag: {}'.format(dt_cfg.dovetail_config['build_tag'])) + env_init(logger) + copy_userconfig_files(logger) + dt_utils.check_docker_version(logger) validate_input(kwargs, dt_cfg.dovetail_config['validate_input'], logger) configs = filter_config(kwargs, logger) @@ -235,8 +267,8 @@ def main(*args, **kwargs): if dt_cfg.dovetail_config['report_dest'] == "file": Report.generate(testsuite_yaml, testarea, duration) else: - logger.error('invalid input commands, testsuite %s testarea %s', - kwargs['testsuite'], testarea) + logger.error('Invalid input commands, testsuite {} testarea {}' + .format(kwargs['testsuite'], testarea)) dt_cfg.load_config_files() diff --git a/dovetail/test_runner.py b/dovetail/test_runner.py index cfc49018..b3fd7a3f 100644 --- a/dovetail/test_runner.py +++ b/dovetail/test_runner.py @@ -22,37 +22,66 @@ class DockerRunner(object): def __init__(self, testcase): self.testcase = testcase - self.logger.debug('create runner: %s', self.type) + self.logger.debug('Create runner: {}'.format(self.type)) @classmethod def create_log(cls): cls.logger = dt_logger.Logger(__name__ + '.DockerRunner').getLogger() + def pre_copy(self, container_id=None, dest_path=None, + src_file=None, exist_file=None): + if not dest_path: + self.logger.error("There has no dest_path in {} config file." 
+ .format(self.testcase.name())) + return None + if src_file: + self.testcase.mk_src_file() + file_path = dt_cfg.dovetail_config[self.type]['result']['dir'] + src_path = os.path.join(file_path, src_file) + if exist_file: + file_path = dt_cfg.dovetail_config[self.type]['config']['dir'] + src_path = os.path.join(file_path, 'pre_config', exist_file) + + Container.pre_copy(container_id, src_path, dest_path) + return dest_path + def run(self): if dt_cfg.dovetail_config['offline']: exist = Container.check_image_exist(self.testcase.validate_type()) if not exist: - self.logger.error('%s image not exist offline running', - self.testcase.validate_type()) + self.logger.error("{} image doesn't exist, can't run offline." + .format(self.testcase.validate_type())) return else: if not Container.pull_image(self.testcase.validate_type()): self.logger.error("Failed to pull the image.") return + # for sdnvpn, there is a need to download needed images to config_dir + # in dovetail_config.yml first. + if 'sdnvpn' in str(self.testcase.name()): + img_name = dt_cfg.dovetail_config['sdnvpn_image'] + img_file = os.path.join(dt_cfg.dovetail_config['config_dir'], + img_name) + if not os.path.isfile(img_file): + self.logger.error('Image {} not found.'.format(img_name)) + return container_id = Container.create(self.testcase.validate_type(), self.testcase.name()) if not container_id: - self.logger.error('failed to create container') + self.logger.error('Failed to create container.') return - self.logger.debug('container id:%s', container_id) + self.logger.debug('container id: {}'.format(container_id)) + + dest_path = self.testcase.pre_copy_path("dest_path") + src_file_name = self.testcase.pre_copy_path("src_file") + exist_file_name = self.testcase.pre_copy_path("exist_src_file") + + if src_file_name or exist_file_name: + if not self.pre_copy(container_id, dest_path, src_file_name, + exist_file_name): + return - dest_path = self.testcase.pre_copy_dest_path() - if dest_path: - self.testcase.mk_src_file() - src_path = self.testcase.pre_copy_src_path(self.type) - ret, msg = Container.pre_copy(container_id, src_path, - dest_path) if not self.testcase.prepared(): prepare_failed = False cmds = self.testcase.pre_condition() @@ -66,14 +95,14 @@ class DockerRunner(object): self.testcase.prepared(True) if not self.testcase.prepare_cmd(self.type): - self.logger.error('failed to prepare testcase:%s', - self.testcase.name()) + self.logger.error( + 'Failed to prepare test case: {}'.format(self.testcase.name())) else: for cmd in self.testcase.cmds: ret, msg = Container.exec_cmd(container_id, cmd) if ret != 0: - self.logger.error('Failed to exec %s, ret:%d, msg:%s', - cmd, ret, msg) + self.logger.error('Failed to exec {}, ret: {}, msg: {}' + .format(cmd, ret, msg)) break cmds = self.testcase.post_condition() @@ -111,7 +140,7 @@ class ShellRunner(object): super(ShellRunner, self).__init__() self.testcase = testcase self.type = 'shell' - self.logger.debug('create runner:%s', self.type) + self.logger.debug('Create runner: {}'.format(self.type)) def run(self): testcase_passed = 'PASS' @@ -129,8 +158,8 @@ class ShellRunner(object): self.testcase.prepared(True) if not self.testcase.prepare_cmd(self.type): - self.logger.error('failed to prepare cmd:%s', - self.testcase.name()) + self.logger.error( + 'Failed to prepare cmd: {}'.format(self.testcase.name())) else: for cmd in self.testcase.cmds: ret, msg = dt_utils.exec_cmd(cmd, self.logger) @@ -147,13 +176,13 @@ class ShellRunner(object): result_filename = 
os.path.join(dt_cfg.dovetail_config['result_dir'], self.testcase.name()) + '.out' - self.logger.debug('save result:%s', result_filename) + self.logger.debug('Save result: {}'.format(result_filename)) try: with open(result_filename, 'w') as f: f.write(json.dumps(result)) except Exception as e: - self.logger.exception('Failed to write result into file:%s, \ - except:%s', result_filename, e) + self.logger.exception('Failed to write result into file: {}, ' + 'exception: {}'.format(result_filename, e)) class TestRunnerFactory(object): diff --git a/dovetail/testcase.py b/dovetail/testcase.py index 408599fc..7b012c88 100644 --- a/dovetail/testcase.py +++ b/dovetail/testcase.py @@ -39,7 +39,7 @@ class Testcase(object): return False # self.logger.debug('cmd_lines:%s', cmd_lines) self.cmds.append(cmd_lines) - self.logger.debug('cmds:%s', self.cmds) + self.logger.debug('cmds: {}'.format(self.cmds)) return True def prepare_cmd(self, test_type): @@ -55,7 +55,7 @@ class Testcase(object): return self.parse_cmd(testcase_cmds) if config_cmds: return self.parse_cmd(config_cmds) - self.logger.error('testcase %s has no cmds', self.name()) + self.logger.error('Test case {} has no cmds.'.format(self.name())) return False def __str__(self): @@ -75,7 +75,8 @@ class Testcase(object): def sub_testcase_passed(self, name, passed=None): if passed is not None: - self.logger.debug('sub_testcase_passed:%s %s', name, passed) + self.logger.debug( + 'sub_testcase_passed: {} {}'.format(name, passed)) self.sub_testcase_status[name] = passed return self.sub_testcase_status[name] @@ -111,28 +112,16 @@ class Testcase(object): return pre_condition pre_condition = self.pre_condition_cls(self.validate_type()) if not pre_condition: - self.logger.debug('testcase:%s pre_condition is empty', - self.name()) + self.logger.debug( + 'Test case: {} pre_condition is empty.'.format(self.name())) return pre_condition - def pre_copy_src_path(self, test_type): + def pre_copy_path(self, key_name): try: - pre_copy_src_file = \ - self.testcase['validate']['pre_copy']['src_file'] - result_dir = dt_cfg.dovetail_config[test_type]['result']['dir'] - except KeyError as e: - self.logger.error('src file Key error %s', e) - return None - src_path = os.path.join(result_dir, pre_copy_src_file) - return src_path - - def pre_copy_dest_path(self): - try: - pre_copy_dest_path = \ - self.testcase['validate']['pre_copy']['dest_path'] + path = self.testcase['validate']['pre_copy'][key_name] except KeyError: - pre_copy_dest_path = '' - return pre_copy_dest_path + return None + return path def post_condition(self): try: @@ -143,34 +132,34 @@ class Testcase(object): return post_condition post_condition = self.post_condition_cls(self.validate_type()) if not post_condition: - self.logger.debug('testcae:%s post_condition is empty', - self.name()) + self.logger.debug( + 'Test case: {} post_condition is empty.'.format(self.name())) return post_condition def mk_src_file(self): - testcase_src_file = self.testcase['validate']['pre_copy']['src_file'] + testcase_src_file = self.pre_copy_path('src_file') try: file_path = os.path.join(dt_cfg.dovetail_config['result_dir'], testcase_src_file) with open(file_path, 'w+') as src_file: if self.sub_testcase() is not None: for sub_test in self.sub_testcase(): - self.logger.debug('save testcases %s', sub_test) + self.logger.debug( + 'Save test cases {}'.format(sub_test)) src_file.write(sub_test + '\n') - self.logger.debug('save testcases to %s', file_path) + self.logger.debug('Save test cases to {}'.format(file_path)) + return file_path 
except Exception: - self.logger.error('Failed to save: %s', file_path) - - src_file_path = os.path.join(dt_cfg.dovetail_config['result_dir'], - testcase_src_file) - return src_file_path + self.logger.exception('Failed to save: {}'.format(file_path)) + return None def run(self): runner = TestRunnerFactory.create(self) try: runner.run() except AttributeError as e: - self.logger.exception('testcase:%s except:%s', self.name, e) + self.logger.exception( + 'Test case: {} Exception: {}'.format(self.name, e)) # testcase in upstream testing project # validate_testcase_list = {'functest': {}, 'yardstick': {}, 'shell': {}} @@ -244,8 +233,8 @@ class Testcase(object): cls.testcase_list[next(testcase_yaml.iterkeys())] = \ testcase else: - cls.logger.error('failed to create testcase: %s', - testcase_file) + cls.logger.error('Failed to create test case: {}' + .format(testcase_file)) @classmethod def get(cls, testcase_name): diff --git a/dovetail/testcase/ha.tc002.yml b/dovetail/testcase/ha.tc002.yml index 393212f6..4ca78b45 100644 --- a/dovetail/testcase/ha.tc002.yml +++ b/dovetail/testcase/ha.tc002.yml @@ -1,10 +1,10 @@ --- dovetail.ha.tc002: name: dovetail.ha.tc002 - objective: > # This test case will verify the high availability of controller node. - # When one of the controller node abnormally shutdown, the service provided by it should be OK + objective: > # This test case will verify the high availability of the + # network service provided by OpenStack (neutro-server) on control node. validate: type: yardstick - testcase: opnfv_yardstick_tc025 + testcase: opnfv_yardstick_tc045 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc003.yml b/dovetail/testcase/ha.tc003.yml index 2c012b9b..b3a0bf7b 100644 --- a/dovetail/testcase/ha.tc003.yml +++ b/dovetail/testcase/ha.tc003.yml @@ -2,9 +2,9 @@ dovetail.ha.tc003: name: dovetail.ha.tc003 objective: > # This test case will verify the high availability of the - # network service provided by OpenStack (neutro-server) on control node. + # user service provided by OpenStack (keystone) on control node. validate: type: yardstick - testcase: opnfv_yardstick_tc045 + testcase: opnfv_yardstick_tc046 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc004.yml b/dovetail/testcase/ha.tc004.yml index e743415e..b25af983 100644 --- a/dovetail/testcase/ha.tc004.yml +++ b/dovetail/testcase/ha.tc004.yml @@ -2,9 +2,9 @@ dovetail.ha.tc004: name: dovetail.ha.tc004 objective: > # This test case will verify the high availability of the - # user service provided by OpenStack (keystone) on control node. + # image service provided by OpenStack (glance-api) on control node. validate: type: yardstick - testcase: opnfv_yardstick_tc046 + testcase: opnfv_yardstick_tc047 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc005.yml b/dovetail/testcase/ha.tc005.yml index bd412e98..fd6e14d5 100644 --- a/dovetail/testcase/ha.tc005.yml +++ b/dovetail/testcase/ha.tc005.yml @@ -2,9 +2,9 @@ dovetail.ha.tc005: name: dovetail.ha.tc005 objective: > # This test case will verify the high availability of the - # image service provided by OpenStack (glance-api) on control node. + # volume service provided by OpenStack (cinder-api) on control node. 
validate: type: yardstick - testcase: opnfv_yardstick_tc047 + testcase: opnfv_yardstick_tc048 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc006.yml b/dovetail/testcase/ha.tc006.yml index 52809bb9..aecbe8b2 100644 --- a/dovetail/testcase/ha.tc006.yml +++ b/dovetail/testcase/ha.tc006.yml @@ -1,10 +1,13 @@ --- dovetail.ha.tc006: name: dovetail.ha.tc006 - objective: > # This test case will verify the high availability of the - # volume service provided by OpenStack (cinder-api) on control node. + objective: > # This test case will verify the high availability of control node. + # When the CPU usage of a specified controller node is stressed to 100%, + # which breaks down the Openstack services on this node. These Openstack service + # should able to be accessed by other controller nodes, and the services on + # failed controller node should be isolated. validate: type: yardstick - testcase: opnfv_yardstick_tc048 + testcase: opnfv_yardstick_tc051 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc007.yml b/dovetail/testcase/ha.tc007.yml index 8a8aff71..5d985534 100644 --- a/dovetail/testcase/ha.tc007.yml +++ b/dovetail/testcase/ha.tc007.yml @@ -1,10 +1,12 @@ --- dovetail.ha.tc007: name: dovetail.ha.tc007 - objective: > # This test case will verify the high availability of the - # storage service provided by OpenStack (swift-proxy) on control node. + objective: > # This test case will verify the high availability of control node. + # When the disk I/O of a specified disk is blocked, which breaks down the Openstack + # services on this node. Read and write services should still be accessed by other + # controller nodes, and the services on failed controller node should be isolated. validate: type: yardstick - testcase: opnfv_yardstick_tc049 + testcase: opnfv_yardstick_tc052 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc008.yml b/dovetail/testcase/ha.tc008.yml index 3ee323f4..2287b8d1 100644 --- a/dovetail/testcase/ha.tc008.yml +++ b/dovetail/testcase/ha.tc008.yml @@ -1,13 +1,13 @@ --- dovetail.ha.tc008: name: dovetail.ha.tc008 - objective: > # This test case will verify the high availability of control node. - # When one of the controller failed to connect the network, which breaks down - # the Openstack services on this node. These Openstack service should able to - # be accessed by other controller nodes, and the services on failed controller - # node should be isolated + objective: > # This test case will verify the high availability of the load balance + # service(current is HAProxy) that supports OpenStack on controller node. When the + # load balance service of a specified controller node is killed, whether other + # load balancers on other controller nodes will work, and whether the controller node + # will restart the load balancer are checked. validate: type: yardstick - testcase: opnfv_yardstick_tc050 + testcase: opnfv_yardstick_tc053 report: sub_testcase_list: diff --git a/dovetail/testcase/ha.tc009.yml b/dovetail/testcase/ha.tc009.yml deleted file mode 100644 index 2625de59..00000000 --- a/dovetail/testcase/ha.tc009.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -dovetail.ha.tc009: - name: dovetail.ha.tc009 - objective: > # This test case will verify the high availability of control node. - # When the CPU usage of a specified controller node is stressed to 100%, - # which breaks down the Openstack services on this node. 
-    # should able to be accessed by other controller nodes, and the services on
-    # failed controller node should be isolated.
-  validate:
-    type: yardstick
-    testcase: opnfv_yardstick_tc051
-  report:
-    sub_testcase_list:
diff --git a/dovetail/testcase/ha.tc010.yml b/dovetail/testcase/ha.tc010.yml
deleted file mode 100644
index 254fa6fd..00000000
--- a/dovetail/testcase/ha.tc010.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-dovetail.ha.tc010:
-  name: dovetail.ha.tc010
-  objective: > # This test case will verify the high availability of control node.
-    # When the disk I/O of a specified disk is blocked, which breaks down the Openstack
-    # services on this node. Read and write services should still be accessed by other
-    # controller nodes, and the services on failed controller node should be isolated.
-  validate:
-    type: yardstick
-    testcase: opnfv_yardstick_tc052
-  report:
-    sub_testcase_list:
diff --git a/dovetail/testcase/ha.tc011.yml b/dovetail/testcase/ha.tc011.yml
deleted file mode 100644
index a3a91c9b..00000000
--- a/dovetail/testcase/ha.tc011.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-dovetail.ha.tc011:
-  name: dovetail.ha.tc011
-  objective: > # This test case will verify the high availability of the load balance
-    # service(current is HAProxy) that supports OpenStack on controller node. When the
-    # load balance service of a specified controller node is killed, whether other
-    # load balancers on other controller nodes will work, and whether the controller node
-    # will restart the load balancer are checked.
-  validate:
-    type: yardstick
-    testcase: opnfv_yardstick_tc053
-  report:
-    sub_testcase_list:
diff --git a/dovetail/testcase/ha.tc012.yml b/dovetail/testcase/ha.tc012.yml
deleted file mode 100644
index 127bf2d0..00000000
--- a/dovetail/testcase/ha.tc012.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-dovetail.ha.tc012:
-  name: dovetail.ha.tc012
-  objective: > # This test case will verify the high availability for virtual ip in the environment.
-    # When master node of virtual ip is abnormally shutdown, connection to virtual ip and the
-    # services binded to the virtual IP it should be OK.
- validate: - type: yardstick - testcase: opnfv_yardstick_tc054 - report: - sub_testcase_list: diff --git a/dovetail/testcase/ipv6.tc002.yml b/dovetail/testcase/ipv6.tc002.yml index 48aeafa3..c8254bb8 100644 --- a/dovetail/testcase/ipv6.tc002.yml +++ b/dovetail/testcase/ipv6.tc002.yml @@ -11,5 +11,3 @@ dovetail.ipv6.tc002: report: sub_testcase_list: - tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke] - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet - diff --git a/dovetail/testcase/ipv6.tc003.yml b/dovetail/testcase/ipv6.tc003.yml index d9c93799..339d405b 100644 --- a/dovetail/testcase/ipv6.tc003.yml +++ b/dovetail/testcase/ipv6.tc003.yml @@ -11,4 +11,3 @@ dovetail.ipv6.tc003: report: sub_testcase_list: - tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke] - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility diff --git a/dovetail/testcase/ipv6.tc004.yml b/dovetail/testcase/ipv6.tc004.yml index a69b9fcd..514a846e 100644 --- a/dovetail/testcase/ipv6.tc004.yml +++ b/dovetail/testcase/ipv6.tc004.yml @@ -12,5 +12,3 @@ dovetail.ipv6.tc004: sub_testcase_list: - tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke] - tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke] - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets diff --git a/dovetail/testcase/ipv6.tc005.yml b/dovetail/testcase/ipv6.tc005.yml index 52cae5f5..3dcca9b2 100644 --- a/dovetail/testcase/ipv6.tc005.yml +++ b/dovetail/testcase/ipv6.tc005.yml @@ -12,5 +12,3 @@ dovetail.ipv6.tc005: sub_testcase_list: - tempest.api.network.test_networks.NetworksIpV6Test.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke] - tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke] - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet diff --git a/dovetail/testcase/ipv6.tc018.yml b/dovetail/testcase/ipv6.tc018.yml index 93ae1235..406e02af 100644 --- a/dovetail/testcase/ipv6.tc018.yml +++ b/dovetail/testcase/ipv6.tc018.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc018: name: dovetail.ipv6.tc018 - objective: VIM ipv6 operations, to show information of an IPv6 port + objective: VIM ipv6 operations, to show information of an IPv6 port, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git a/dovetail/testcase/ipv6.tc019.yml b/dovetail/testcase/ipv6.tc019.yml index fab0acdb..1e3c171b 100644 --- a/dovetail/testcase/ipv6.tc019.yml +++ b/dovetail/testcase/ipv6.tc019.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc019: name: dovetail.ipv6.tc019 - objective: VIM ipv6 operations, to do IPv6 address assignment - dual stack, DHCPv6 stateless + objective: VIM ipv6 operations, to do IPv6 address assignment - dual stack, DHCPv6 stateless, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git a/dovetail/testcase/ipv6.tc020.yml b/dovetail/testcase/ipv6.tc020.yml index b31cd727..f7ab8b47 100644 --- a/dovetail/testcase/ipv6.tc020.yml +++ 
b/dovetail/testcase/ipv6.tc020.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc020: name: dovetail.ipv6.tc020 - objective: VIM ipv6 operations, to do IPv6 Address Assignment - Multiple Prefixes, DHCPv6 Stateless + objective: VIM ipv6 operations, to do IPv6 Address Assignment - Multiple Prefixes, DHCPv6 Stateless, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git a/dovetail/testcase/ipv6.tc021.yml b/dovetail/testcase/ipv6.tc021.yml index f8820f1d..466b75ca 100644 --- a/dovetail/testcase/ipv6.tc021.yml +++ b/dovetail/testcase/ipv6.tc021.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc021: name: dovetail.ipv6.tc021 - objective: VIM ipv6 operations, to do IPv6 Address Assignment - Dual Stack, Multiple Prefixes, DHCPv6 Stateless + objective: VIM ipv6 operations, to do IPv6 Address Assignment - Dual Stack, Multiple Prefixes, DHCPv6 Stateless, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git a/dovetail/testcase/ipv6.tc022.yml b/dovetail/testcase/ipv6.tc022.yml index 6c61702e..c1d371d6 100644 --- a/dovetail/testcase/ipv6.tc022.yml +++ b/dovetail/testcase/ipv6.tc022.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc022: name: dovetail.ipv6.tc022 - objective: VIM ipv6 operations, to do IPv6 Address Assignment - SLAAC + objective: VIM ipv6 operations, to do IPv6 Address Assignment - SLAAC, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git a/dovetail/testcase/ipv6.tc023.yml b/dovetail/testcase/ipv6.tc023.yml index 25193708..8b816252 100644 --- a/dovetail/testcase/ipv6.tc023.yml +++ b/dovetail/testcase/ipv6.tc023.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc023: name: dovetail.ipv6.tc023 - objective: VIM ipv6 operations, to do IPv6 Address Assignment - Dual Stack, SLAAC + objective: VIM ipv6 operations, to do IPv6 Address Assignment - Dual Stack, SLAAC, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom @@ -10,4 +10,4 @@ dovetail.ipv6.tc023: dest_path: /home/opnfv/repos/functest/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt report: sub_testcase_list: - - tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os[compute,id-76f26acd-9688-42b4-bc3e-cd134c4cb09e,network,slow] + - tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os[compute,id-b6399d76-4438-4658-bcf5-0d6c8584fde2,network,slow] diff --git a/dovetail/testcase/ipv6.tc024.yml b/dovetail/testcase/ipv6.tc024.yml index 9a5f331e..8d248901 100644 --- a/dovetail/testcase/ipv6.tc024.yml +++ b/dovetail/testcase/ipv6.tc024.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc024: name: dovetail.ipv6.tc024 - objective: VIM ipv6 operations, to do IPv6 address assignment - multiple prefixes, SLAAC + objective: VIM ipv6 operations, to do IPv6 address assignment - multiple prefixes, SLAAC, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git a/dovetail/testcase/ipv6.tc025.yml b/dovetail/testcase/ipv6.tc025.yml index 4cf3a005..35ef78c7 100644 --- a/dovetail/testcase/ipv6.tc025.yml +++ b/dovetail/testcase/ipv6.tc025.yml @@ -1,7 +1,7 @@ --- dovetail.ipv6.tc025: name: dovetail.ipv6.tc025 - objective: VIM ipv6 operations, to do IPv6 address assignment - dual stack, multiple prefixes, SLAAC + objective: VIM ipv6 operations, to do IPv6 address assignment - dual stack, multiple prefixes, SLAAC, scenario os-nosdn-nofeature dependent validate: type: functest testcase: tempest_custom diff --git 
a/dovetail/testcase/sdnvpn.tc001.yml b/dovetail/testcase/sdnvpn.tc001.yml new file mode 100644 index 00000000..9ab3d445 --- /dev/null +++ b/dovetail/testcase/sdnvpn.tc001.yml @@ -0,0 +1,12 @@ +--- +dovetail.sdnvpn.tc001: + name: dovetail.sdnvpn.tc001 + objective: Connectivity between Neutron subnets through association of Neutron Networks to VPNs + validate: + type: functest + testcase: bgpvpn + pre_copy: + exist_src_file: sdnvpn_config_testcase1.yaml + dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml + report: + sub_testcase_list: diff --git a/dovetail/testcase/sdnvpn.tc002.yml b/dovetail/testcase/sdnvpn.tc002.yml new file mode 100644 index 00000000..a5c70ba9 --- /dev/null +++ b/dovetail/testcase/sdnvpn.tc002.yml @@ -0,0 +1,12 @@ +--- +dovetail.sdnvpn.tc002: + name: dovetail.sdnvpn.tc002 + objective: Separation of tenant networks through association to different VPNs + validate: + type: functest + testcase: bgpvpn + pre_copy: + exist_src_file: sdnvpn_config_testcase2.yaml + dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml + report: + sub_testcase_list: diff --git a/dovetail/testcase/sdnvpn.tc003.yml b/dovetail/testcase/sdnvpn.tc003.yml new file mode 100644 index 00000000..c8c8b2be --- /dev/null +++ b/dovetail/testcase/sdnvpn.tc003.yml @@ -0,0 +1,12 @@ +--- +dovetail.sdnvpn.tc003: + name: dovetail.sdnvpn.tc003 + objective: Data center gateway integration through BGP peering + validate: + type: functest + testcase: bgpvpn + pre_copy: + exist_src_file: sdnvpn_config_testcase3.yaml + dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml + report: + sub_testcase_list: diff --git a/dovetail/testcase/sdnvpn.tc004.yml b/dovetail/testcase/sdnvpn.tc004.yml new file mode 100644 index 00000000..f6a4a6ff --- /dev/null +++ b/dovetail/testcase/sdnvpn.tc004.yml @@ -0,0 +1,12 @@ +--- +dovetail.sdnvpn.tc004: + name: dovetail.sdnvpn.tc004 + objective: VPN provides connectivity between subnets using association of Neutron Router to VPNs + validate: + type: functest + testcase: bgpvpn + pre_copy: + exist_src_file: sdnvpn_config_testcase4.yaml + dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml + report: + sub_testcase_list: diff --git a/dovetail/testcase/sdnvpn.tc008.yml b/dovetail/testcase/sdnvpn.tc008.yml new file mode 100644 index 00000000..5713e102 --- /dev/null +++ b/dovetail/testcase/sdnvpn.tc008.yml @@ -0,0 +1,12 @@ +--- +dovetail.sdnvpn.tc008: + name: dovetail.sdnvpn.tc008 + objective: associate Neutron Router with an attached subnet to a VPN and verify reachability of the Floating IP + validate: + type: functest + testcase: bgpvpn + pre_copy: + exist_src_file: sdnvpn_config_testcase8.yaml + dest_path: /usr/local/lib/python2.7/dist-packages/sdnvpn/test/functest/config.yaml + report: + sub_testcase_list: diff --git a/dovetail/userconfig/hosts.yaml b/dovetail/userconfig/hosts.yaml new file mode 100644 index 00000000..e4687dfb --- /dev/null +++ b/dovetail/userconfig/hosts.yaml @@ -0,0 +1,2 @@ +--- +hosts_info: diff --git a/userconfig/sample_pod.yaml b/dovetail/userconfig/pod.yaml.sample index 26636a6b..26636a6b 100644 --- a/userconfig/sample_pod.yaml +++ b/dovetail/userconfig/pod.yaml.sample diff --git a/dovetail/userconfig/sdnvpn_config_testcase1.yaml b/dovetail/userconfig/sdnvpn_config_testcase1.yaml new file mode 100644 index 00000000..d9e4a1b4 --- /dev/null +++ b/dovetail/userconfig/sdnvpn_config_testcase1.yaml @@ -0,0 +1,33 @@ +defaults: + flavor: m1.tiny # adapt to your 
environment + +testcases: + tempest: + enabled: true + description: Neutron BGPVPN tests in tempest + testname_db: functest_tempest # declared name in the test api + # http://testresults.opnfv.org/test/api/v1/projects/sdnvpn/cases + + testcase_1: + enabled: true + description: VPN provides connectivity between subnets + testname_db: functest_testcase_1 + instance_1_name: sdnvpn-1-1 + instance_2_name: sdnvpn-1-2 + instance_3_name: sdnvpn-1-3 + instance_4_name: sdnvpn-1-4 + instance_5_name: sdnvpn-1-5 + image_name: sdnvpn-image + net_1_name: sdnvpn-1-1-net + subnet_1_name: sdnvpn-1-1-subnet + subnet_1_cidr: 10.10.10.0/24 + router_1_name: sdnvpn-1-1-router + net_2_name: sdnvpn-1-2-net + subnet_2_name: sdnvpn-1-2-subnet + subnet_2_cidr: 10.10.11.0/24 + router_2_name: sdnvpn-1-2-router + secgroup_name: sdnvpn-sg + secgroup_descr: Security group for SDNVPN test cases + targets1: '88:88' + targets2: '55:55' + route_distinguishers: '11:11' diff --git a/dovetail/userconfig/sdnvpn_config_testcase2.yaml b/dovetail/userconfig/sdnvpn_config_testcase2.yaml new file mode 100644 index 00000000..07b0adfa --- /dev/null +++ b/dovetail/userconfig/sdnvpn_config_testcase2.yaml @@ -0,0 +1,43 @@ +defaults: + flavor: m1.tiny # adapt to your environment + +testcases: + tempest: + enabled: true + description: Neutron BGPVPN tests in tempest + testname_db: functest_tempest # declared name in the test api + # http://testresults.opnfv.org/test/api/v1/projects/sdnvpn/cases + + testcase_2: + enabled: true + description: Tenant separation + testname_db: functest_testcase_2 + instance_1_name: sdnvpn-2-1 + instance_2_name: sdnvpn-2-2 + instance_3_name: sdnvpn-2-3 + instance_4_name: sdnvpn-2-4 + instance_5_name: sdnvpn-2-5 + instance_1_ip: 10.10.10.11 + instance_2_ip: 10.10.10.12 + instance_3_ip: 10.10.11.13 + instance_4_ip: 10.10.10.12 + instance_5_ip: 10.10.11.13 + image_name: sdnvpn-image + net_1_name: sdnvpn-2-1-net + subnet_1a_name: sdnvpn-2-1a-subnet + subnet_1a_cidr: 10.10.10.0/24 + subnet_1b_name: sdnvpn-2-1b-subnet + subnet_1b_cidr: 10.10.11.0/24 + router_1_name: sdnvpn-2-1-router + net_2_name: sdnvpn-2-2-net + subnet_2a_name: sdnvpn-2-2a-subnet + subnet_2a_cidr: 10.10.11.0/24 + subnet_2b_name: sdnvpn-2-2b-subnet + subnet_2b_cidr: 10.10.10.0/24 + router_2_name: sdnvpn-2-2-router + secgroup_name: sdnvpn-sg + secgroup_descr: Security group for SDNVPN test cases + targets1: '88:88' + targets2: '55:55' + route_distinguishers1: '111:111' + route_distinguishers2: '222:222' diff --git a/dovetail/userconfig/sdnvpn_config_testcase3.yaml b/dovetail/userconfig/sdnvpn_config_testcase3.yaml new file mode 100644 index 00000000..60592cbb --- /dev/null +++ b/dovetail/userconfig/sdnvpn_config_testcase3.yaml @@ -0,0 +1,32 @@ +defaults: + flavor: m1.tiny # adapt to your environment + +testcases: + tempest: + enabled: true + description: Neutron BGPVPN tests in tempest + testname_db: functest_tempest # declared name in the test api + # http://testresults.opnfv.org/test/api/v1/projects/sdnvpn/cases + + testcase_3: + enabled: true + description: Data center gateway integration + testname_db: functest_testcase_3 + secgroup_name: sdnvpn-sg + secgroup_descr: Security group for SDNVPN test cases + image_name: sdnvpn-image + ubuntu_image_name: sdnvpn-ubuntu-image + net_1_name: sdnvpn-3-1-net + subnet_1_name: sdnvpn-3-1-subnet + subnet_1_cidr: 10.10.10.0/24 + router_1_name: sdnvpn-3-1-router + quagga_net_name: sdnvpn-3-2-quagga-net + quagga_subnet_name: sdnvpn-3-2-quagga-subnet + quagga_subnet_cidr: 10.10.11.0/24 + quagga_router_name: 
sdnvpn-3-2-quagga-router + quagga_instance_name: sdnvpn-3-2-quagga + quagga_instance_ip: 10.10.11.5 + instance_1_name: sdnvpn-3-1 + instance_1_ip: 10.10.10.5 + import_targets: '31:31' + export_targets: '32:32' diff --git a/dovetail/userconfig/sdnvpn_config_testcase4.yaml b/dovetail/userconfig/sdnvpn_config_testcase4.yaml new file mode 100644 index 00000000..1e221354 --- /dev/null +++ b/dovetail/userconfig/sdnvpn_config_testcase4.yaml @@ -0,0 +1,33 @@ +defaults: + flavor: m1.tiny # adapt to your environment + +testcases: + tempest: + enabled: true + description: Neutron BGPVPN tests in tempest + testname_db: functest_tempest # declared name in the test api + # http://testresults.opnfv.org/test/api/v1/projects/sdnvpn/cases + + testcase_4: + enabled: true + description: VPN provides connectivity between subnets using router association + testname_db: functest_testcase_4 + instance_1_name: sdnvpn-4-1 + instance_2_name: sdnvpn-4-2 + instance_3_name: sdnvpn-4-3 + instance_4_name: sdnvpn-4-4 + instance_5_name: sdnvpn-4-5 + image_name: sdnvpn-image + net_1_name: sdnvpn-4-1-net + subnet_1_name: sdnvpn-4-1-subnet + subnet_1_cidr: 10.10.10.0/24 + router_1_name: sdnvpn-4-1-router + net_2_name: sdnvpn-4-2-net + subnet_2_name: sdnvpn-4-2-subnet + subnet_2_cidr: 10.10.11.0/24 + router_2_name: sdnvpn-4-2-router + secgroup_name: sdnvpn-sg + secgroup_descr: Security group for SDNVPN test cases + targets1: '88:88' + targets2: '55:55' + route_distinguishers: '12:12' diff --git a/dovetail/userconfig/sdnvpn_config_testcase8.yaml b/dovetail/userconfig/sdnvpn_config_testcase8.yaml new file mode 100644 index 00000000..c825997b --- /dev/null +++ b/dovetail/userconfig/sdnvpn_config_testcase8.yaml @@ -0,0 +1,29 @@ +defaults: + flavor: m1.tiny # adapt to your environment + +testcases: + tempest: + enabled: true + description: Neutron BGPVPN tests in tempest + testname_db: functest_tempest # declared name in the test api + # http://testresults.opnfv.org/test/api/v1/projects/sdnvpn/cases + + testcase_8: + enabled: true + description: Test floating IP and router assoc coexistence + testname_db: functest_testcase_8 + image_name: sdnvpn-image + instance_1_name: sdnvpn-8-1 + instance_2_name: sdnvpn-8-2 + net_1_name: sdnvpn-8-1 + subnet_1_name: sdnvpn-8-1-subnet + subnet_1_cidr: 10.10.10.0/24 + router_1_name: sdnvpn-8-1-router + net_2_name: sdnvpn-8-2 + subnet_2_name: sdnvpn-8-2-subnet + subnet_2_cidr: 10.10.20.0/24 + router_2_name: sdnvpn-8-2-router + secgroup_name: sdnvpn-sg + secgroup_descr: Security group for SDNVPN test cases + targets: '88:88' + route_distinguishers: '18:18' diff --git a/dovetail/utils/dovetail_utils.py b/dovetail/utils/dovetail_utils.py index 020617b8..f74da3a2 100644 --- a/dovetail/utils/dovetail_utils.py +++ b/dovetail/utils/dovetail_utils.py @@ -17,6 +17,7 @@ from collections import Mapping, Set, Sequence import json import urllib2 from datetime import datetime +from distutils.version import LooseVersion def exec_log(verbose, logger, msg, level, flush=False): @@ -110,38 +111,62 @@ def source_env(env_file): with open(env_file, 'r') as f: lines = f.readlines() for line in lines: - for match in re.findall(r"export (.*)=(.*)", line): - match = (match[0].strip('\"'), match[1].strip('\"')) - match = (match[0].strip('\''), match[1].strip('\'')) - os.environ.update({match[0]: match[1]}) + if line.lstrip().startswith('export'): + for match in re.findall(r"export (.*)=(.*)", line): + match = (match[0].strip('\"'), match[1].strip('\"')) + match = (match[0].strip('\''), match[1].strip('\'')) + 
os.environ.update({match[0]: match[1]}) + + +def check_https_enabled(logger=None): + logger.debug("Checking if https enabled or not...") + cmd = ("openstack catalog show identity |awk '/public/ {print $4}'") + ret, msg = exec_cmd(cmd, logger) + if ret == 0 and "https://" in msg: + return True + return False def get_ext_net_name(env_file, logger=None): - source_env(env_file) - cmd_check = "openstack network list" + https_enabled = check_https_enabled(logger) + insecure_option = '' + insecure = os.getenv('OS_INSECURE',) + if https_enabled: + logger.info("https enabled...") + if insecure.lower() == "true": + insecure_option = ' --insecure ' + else: + logger.warn("Env variable OS_INSECURE is {}, if https + no " + "credential used, should be set as True." + .format(insecure)) + + cmd_check = "openstack %s network list" % insecure_option ret, msg = exec_cmd(cmd_check, logger) if ret: - logger.error("The credentials info in %s is invalid." % env_file) + logger.error("The credentials info in {} is invalid.".format(env_file)) return None - cmd = "openstack network list --long | grep 'External' | head -1 | \ - awk '{print $4}'" + cmd = "openstack %s network list --long | grep 'External' | head -1 | \ + awk '{print $4}'" % insecure_option ret, msg = exec_cmd(cmd, logger) if not ret: return msg return None -def check_db_results(db_url, build_tag, testcase, logger): +def store_db_results(db_url, build_tag, testcase, dest_file, logger): url = "%s?build_tag=%s-%s" % (db_url, build_tag, testcase) - logger.debug("Query to rest api: %s", url) + logger.debug("Query to rest api: {}".format(url)) try: data = json.load(urllib2.urlopen(url)) if data['results']: + with open(dest_file, 'a') as f: + f.write(json.dumps(data['results'][0]) + '\n') return True else: return False except Exception as e: - logger.error("Cannot read content from %s, exception: %s", url, e) + logger.exception( + "Cannot read content from {}, exception: {}".format(url, e)) return False @@ -154,7 +179,7 @@ def get_duration(start_date, stop_date, logger): res = "%sm%ss" % (delta / 60, delta % 60) return res except ValueError as e: - logger.error("ValueError: %s", e) + logger.exception("ValueError: {}".format(e)) return None @@ -165,3 +190,20 @@ def show_progress_bar(length): sys.stdout.flush() sys.stdout.write('Running ' + '.' * length + '\r') sys.stdout.flush() + + +def check_docker_version(logger=None): + server_ret, server_ver = \ + exec_cmd("sudo docker version -f'{{.Server.Version}}'", logger=logger) + client_ret, client_ver = \ + exec_cmd("sudo docker version -f'{{.Client.Version}}'", logger=logger) + if server_ret == 0: + logger.debug("docker server version: {}".format(server_ver)) + if server_ret != 0 or (LooseVersion(server_ver) < LooseVersion('1.12.3')): + logger.error("Don't support this Docker server version. " + "Docker server should be updated to at least 1.12.3.") + if client_ret == 0: + logger.debug("docker client version: {}".format(client_ver)) + if client_ret != 0 or (LooseVersion(client_ver) < LooseVersion('1.12.3')): + logger.error("Don't support this Docker client version. 
" + "Docker client should be updated to at least 1.12.3.") diff --git a/utils/init_db.py b/dovetail/utils/local_db/init_db.py index 129c61f8..2aac6fc5 100644 --- a/utils/init_db.py +++ b/dovetail/utils/local_db/init_db.py @@ -14,7 +14,6 @@ import sys db_host_ip = sys.argv[1] testapi_port = sys.argv[2] -source_url = 'http://testresults.opnfv.org/test/api/v1' target_url = 'http://{}:{}/api/v1'.format(db_host_ip, testapi_port) print(target_url) @@ -30,10 +29,10 @@ def post(url, data): def pod(): - source = '{}/pods'.format(source_url) target = '{}/pods'.format(target_url) - pods = get(source)['pods'] + with open('pods.json', 'r') as f: + pods = json.load(f) for p in pods: post(target, p) @@ -42,24 +41,26 @@ def pod(): def project(): - source = '{}/projects'.format(source_url) target = '{}/projects'.format(target_url) - - projects = get(source)['projects'] + with open('projects.json', 'r') as f: + projects = json.load(f) for p in projects: post(target, p) def cases(): - project_list = ['yardstick', 'functest', 'dovetail'] - - for p in project_list: - source = '{}/projects/{}/cases'.format(source_url, p) - target = '{}/projects/{}/cases'.format(target_url, p) - - cases = get(source)['testcases'] - for c in cases: - post(target, c) + with open('cases.json', 'r') as f: + for line in f: + try: + cases = json.loads(line) + for c in cases["testcases"]: + target = '{}/projects/{}/cases'.format(target_url, + c['project_name']) + print(target) + post(target, c) + except: + print("useless data") + add_case("functest", "tempest_custom") def add_pod(name, mode): @@ -74,6 +75,15 @@ def add_pod(name, mode): post(pod_url, data) +def add_case(project, case): + data = { + "project_name": project, + "name": case, + } + case_url = '{}/projects/{}/cases'.format(target_url, project) + post(case_url, data) + + if __name__ == '__main__': pod() project() diff --git a/utils/launch_db.sh b/dovetail/utils/local_db/launch_db.sh index f3681665..e31f47ce 100755 --- a/utils/launch_db.sh +++ b/dovetail/utils/local_db/launch_db.sh @@ -29,10 +29,12 @@ echo "===================" echo "Create the mongodb." echo "===================" -# pull image kkltcjk/mongodb:reporting -mongodb_img="kkltcjk/mongodb:reporting" +set +e +# pull image mongo:3.2.1 +mongodb_img="mongo:3.2.1" echo "Step1: pull the image $mongodb_img." -sudo docker pull $mongodb_img > /dev/null +sudo docker pull $mongodb_img +set -e container_name='mongodb' @@ -56,10 +58,12 @@ echo "==========================" echo "Create the testapi service." echo "==========================" -# pull image kkltcjk/testapi:reporting -testapi_img="kkltcjk/testapi:reporting" +set +e +# pull image opnfv/testapi:cvp.0.2.0 +testapi_img="opnfv/testapi:cvp.0.2.0" echo "Step1: pull the image $testapi_img." -sudo docker pull $testapi_img > /dev/null +sudo docker pull $testapi_img +set -e container_name='testapi' @@ -76,32 +80,17 @@ cmd="sudo docker run -itd -p ${testapi_port}:8000 --name ${container_name} -e mo echo $cmd ${cmd} -echo "Successfully create the testapi service." +echo "Wait for testapi to work..." 
+sleep 10 echo "=================================" echo "Upload default project info to DB" echo "=================================" -# For Ubuntu, there is file /etc/lsb-release -# For Centos and redhat, there is file /etc/redhat-release -if [ -f /etc/lsb-release ]; then - sudo apt-get update > /dev/null - sudo apt-get install -y python-pip > /dev/null -elif [ -f /etc/redhat-release ]; then - sudo yum -y update > /dev/null - sudo yum -y install epel-release > /dev/null - sudo yum -y install python-pip > /dev/null -else - echo "This operating system is not currently supported." - exit 1 -fi - -pip install requests > /dev/null - echo "Init DB info..." cmd="python ./init_db.py ${db_host_ip} ${testapi_port}" echo ${cmd} -${cmd} > /dev/null +${cmd} echo "Successfully load DB info." diff --git a/utils/restart_db.sh b/dovetail/utils/local_db/restart_db.sh index 39b60e05..5d6b9b68 100755 --- a/utils/restart_db.sh +++ b/dovetail/utils/local_db/restart_db.sh @@ -26,4 +26,4 @@ export db_host_ip=${db_host_ip:-"$1"} sudo docker rm -f testapi sudo docker run -itd -p $testapi_port:8000 --name testapi \ - -e mongodb_url=mongodb://$db_host_ip:$mongodb_port/ kkltcjk/testapi:reporting + -e mongodb_url=mongodb://$db_host_ip:$mongodb_port/ opnfv/testapi:cvp.0.2.0 diff --git a/dovetail/utils/offline/config.yaml b/dovetail/utils/offline/config.yaml new file mode 100644 index 00000000..edb8a5b8 --- /dev/null +++ b/dovetail/utils/offline/config.yaml @@ -0,0 +1,28 @@ +--- +docker_images: + dovetail: + domain: opnfv + tag: cvp.0.2.0 + store_name: image_dovetail.docker + functest: + domain: opnfv + tag: cvp.0.2.0 + store_name: image_functest.docker + yardstick: + domain: opnfv + tag: danube.3.0 + store_name: image_yardstick.docker + testapi: + domain: opnfv + tag: cvp.0.2.0 + store_name: image_testapi.docker + mongo: + tag: 3.2.1 + store_name: image_mongo.docker +docker_save_path: /home/opnfv/dovetail/results/ + +wgets: + sdnvpn: + source_url: http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img + save_path: /home/opnfv/dovetail/results/ + file_name: ubuntu-16.04-server-cloudimg-amd64-disk1.img diff --git a/dovetail/utils/offline/download.py b/dovetail/utils/offline/download.py new file mode 100755 index 00000000..3fb0cde2 --- /dev/null +++ b/dovetail/utils/offline/download.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import os +import yaml + +import dovetail.utils.dovetail_utils as dt_utils + + +class download(object): + + def __init__(self): + self.curr_path = os.path.dirname(os.path.abspath(__file__)) + with open(os.path.join(self.curr_path, 'config.yaml')) as f: + self.config = yaml.safe_load(f) + + def main(self): + keys = self.config.keys() + if 'docker_save_path' in keys: + save_path = self.config['docker_save_path'] + else: + save_path = self.curr_path + print "save files to path %s" % save_path + if 'docker_images' in keys: + for key, value in self.config['docker_images'].items(): + if value is not None: + tag = str(self.config['docker_images'][key]['tag']) + if 'domain' in self.config['docker_images'][key]: + domain = self.config['docker_images'][key]['domain'] + image_name = ''.join([domain, '/', key, ':', tag]) + else: + image_name = ''.join([key, ':', tag]) + cmd = 'sudo docker pull %s' % image_name + dt_utils.exec_cmd(cmd) + if not os.path.exists(save_path): + os.makedirs(save_path) + StoreName = self.config['docker_images'][key]['store_name'] + image_save_path = ''.join([save_path, StoreName]) + cmd = 'sudo docker save -o %s %s' % \ + (image_save_path, image_name) + 
dt_utils.exec_cmd(cmd) + cmd = 'sudo chmod og+rw %s' % image_save_path + dt_utils.exec_cmd(cmd) + + if 'wgets' in keys: + for key, value in self.config['wgets'].items(): + if value is not None: + wget_url = self.config['wgets'][key]['source_url'] + wget_path = self.config['wgets'][key]['save_path'] + cmd = 'sudo wget -nc %s -P %s' % (wget_url, wget_path) + dt_utils.exec_cmd(cmd) + + +if __name__ == '__main__': + download = download() + download.main() diff --git a/dovetail/utils/offline/load.py b/dovetail/utils/offline/load.py new file mode 100755 index 00000000..c56868a5 --- /dev/null +++ b/dovetail/utils/offline/load.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import os +import sys +import yaml + +import dovetail.utils.dovetail_utils as dt_utils + + +class load(object): + def __init__(self): + self.curr_path = os.path.dirname(os.path.abspath(__file__)) + with open(os.path.join(self.curr_path, 'config.yaml')) as f: + self.config = yaml.safe_load(f) + + def main(self): + keys = self.config.keys() + if 'docker_save_path' in keys: + save_path = self.config['docker_save_path'] + else: + save_path = self.curr_path + if 'docker_images' in keys: + for key, value in self.config['docker_images'].items(): + if value is not None: + name = self.config['docker_images'][key]['store_name'] + image_save_path = os.path.join(save_path, name) + if os.path.isfile(image_save_path): + cmd = 'sudo docker load -i %s' % (image_save_path) + dt_utils.exec_cmd(cmd) + else: + print "file %s not exists" % image_save_path + if 'wgets' in keys: + for key, value in self.config['wgets'].items(): + if value is not None: + try: + dovetail_home = os.environ["DOVETAIL_HOME"] + except KeyError: + print "env variable DOVETAIL_HOME not found" + sys.exit(1) + name = self.config['wgets'][key]['file_name'] + save_path = self.config['wgets'][key]['save_path'] + file_path = os.path.join(save_path, name) + dest_path = os.path.join(dovetail_home, 'pre_config') + if not os.path.isdir(dest_path): + os.mkdir(dest_path) + if os.path.isfile(file_path): + cmd = 'sudo cp %s %s' % (file_path, dest_path) + dt_utils.exec_cmd(cmd) + else: + print "file %s not exists" % file_path + + +if __name__ == '__main__': + load = load() + load.main() @@ -1,6 +1,6 @@ [metadata] name = dovetail -version = 0.1.0 +version = 0.2.0 home-page = https://wiki.opnfv.org/display/dovetail [entry_points] |
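
Note on the new check_docker_version() helper added to dovetail/utils/dovetail_utils.py above: it relies on distutils' LooseVersion to compare dotted version strings numerically rather than lexically, so that for example '1.12.10' ranks above the required minimum '1.12.3'. The following is a minimal standalone sketch of that comparison, not part of the patch itself; it assumes only the Python standard library, and the minimum version string is the '1.12.3' hard-coded in the patch.

    # Sketch of the version comparison used by check_docker_version() above.
    # LooseVersion splits '1.12.10' into components [1, 12, 10] and compares them
    # element-wise, avoiding the string-comparison trap where '1.12.10' < '1.12.3'.
    from distutils.version import LooseVersion

    MIN_DOCKER_VERSION = '1.12.3'

    def docker_version_ok(version, minimum=MIN_DOCKER_VERSION):
        """Return True if `version` is at least `minimum`."""
        return LooseVersion(version) >= LooseVersion(minimum)

    if __name__ == '__main__':
        for v in ('1.12.3', '1.12.10', '1.11.2'):
            print('%s ok: %s' % (v, docker_version_ok(v)))

On Python 2.7, which this code base targets (see the urllib2 imports and print statements elsewhere in the tree), distutils is always available; on Python 3.12 and later the module has been removed and packaging.version would be the usual replacement.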