-rwxr-xr-xapi/api-prepare.sh8
-rw-r--r--api/database/__init__.py23
-rw-r--r--api/database/handler.py30
-rw-r--r--api/database/handlers.py31
-rw-r--r--api/database/models.py36
-rw-r--r--api/resources/asynctask.py35
-rw-r--r--api/resources/env_action.py129
-rw-r--r--api/resources/release_action.py10
-rw-r--r--api/resources/results.py32
-rw-r--r--api/resources/samples_action.py10
-rw-r--r--api/server.py26
-rw-r--r--api/urls.py1
-rw-r--r--api/utils/common.py4
-rw-r--r--api/utils/daemonthread.py22
-rw-r--r--api/utils/influx.py42
-rw-r--r--api/views.py5
-rw-r--r--api/yardstick.ini4
-rwxr-xr-xdocs/userguide/03-architecture.rst2
-rw-r--r--docs/userguide/opnfv_yardstick_tc001.rst108
-rw-r--r--docs/userguide/opnfv_yardstick_tc002.rst97
-rw-r--r--docs/userguide/opnfv_yardstick_tc004.rst81
-rw-r--r--docs/userguide/opnfv_yardstick_tc005.rst102
-rw-r--r--docs/userguide/opnfv_yardstick_tc010.rst116
-rw-r--r--docs/userguide/opnfv_yardstick_tc011.rst81
-rw-r--r--docs/userguide/opnfv_yardstick_tc012.rst101
-rw-r--r--docs/userguide/opnfv_yardstick_tc014.rst92
-rw-r--r--docs/userguide/opnfv_yardstick_tc037.rst153
-rw-r--r--docs/userguide/opnfv_yardstick_tc043.rst74
-rw-r--r--etc/yardstick/yardstick.conf.sample25
-rwxr-xr-xfuel-plugin/deployment_scripts/install.sh5
-rw-r--r--fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp2
-rw-r--r--plugin/storperf.yaml1
-rw-r--r--requirements.txt1
-rw-r--r--samples/ping_load.yaml65
-rw-r--r--samples/tosca.yaml260
-rwxr-xr-xsetup.py3
-rwxr-xr-xtests/ci/clean_images.sh7
-rwxr-xr-xtests/ci/yardstick-verify2
-rw-r--r--tests/unit/benchmark/contexts/test_heat.py16
-rw-r--r--tests/unit/benchmark/core/__init__.py (renamed from api/yardstick.sock)0
-rw-r--r--tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml)0
-rw-r--r--tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml)0
-rw-r--r--tests/unit/benchmark/core/test_plugin.py (renamed from tests/unit/cmd/commands/test_plugin.py)32
-rw-r--r--tests/unit/benchmark/core/test_task.py (renamed from tests/unit/cmd/commands/test_task.py)88
-rw-r--r--tests/unit/benchmark/core/test_testcase.py (renamed from tests/unit/cmd/commands/test_testcase.py)29
-rw-r--r--tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml)0
-rw-r--r--tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml)0
-rw-r--r--tests/unit/cmd/commands/test_env.py45
-rw-r--r--tests/unit/common/test_httpClient.py6
-rw-r--r--tests/unit/test_ssh.py36
-rw-r--r--yardstick/benchmark/contexts/heat.py15
-rw-r--r--yardstick/benchmark/core/__init__.py38
-rw-r--r--yardstick/benchmark/core/plugin.py212
-rw-r--r--yardstick/benchmark/core/runner.py36
-rw-r--r--yardstick/benchmark/core/scenario.py36
-rw-r--r--yardstick/benchmark/core/task.py511
-rw-r--r--yardstick/benchmark/core/testcase.py112
-rw-r--r--yardstick/benchmark/scenarios/parser/parser.py6
-rw-r--r--yardstick/cmd/commands/__init__.py9
-rw-r--r--yardstick/cmd/commands/env.py72
-rw-r--r--yardstick/cmd/commands/plugin.py167
-rw-r--r--yardstick/cmd/commands/runner.py18
-rw-r--r--yardstick/cmd/commands/scenario.py18
-rw-r--r--yardstick/cmd/commands/task.py468
-rw-r--r--yardstick/cmd/commands/testcase.py95
-rw-r--r--yardstick/common/constants.py24
-rw-r--r--yardstick/common/httpClient.py4
-rw-r--r--yardstick/common/openstack_utils.py1
-rw-r--r--yardstick/orchestrator/heat.py11
-rw-r--r--yardstick/ssh.py16
70 files changed, 2580 insertions, 1367 deletions
diff --git a/api/api-prepare.sh b/api/api-prepare.sh
index fade8ccc6..5cc65c959 100755
--- a/api/api-prepare.sh
+++ b/api/api-prepare.sh
@@ -20,7 +20,7 @@ server {
index index.htm index.html;
location / {
include uwsgi_params;
- uwsgi_pass unix:///home/opnfv/repos/yardstick/api/yardstick.sock;
+ uwsgi_pass unix:///var/run/yardstick.sock;
}
}
EOF
@@ -47,3 +47,9 @@ command = uwsgi -i yardstick.ini
autorestart = true
EOF
fi
+
+# create api log directory
+mkdir -p /var/log/yardstick
+
+# create yardstick.sock for nginx/uwsgi communication
+touch /var/run/yardstick.sock
diff --git a/api/database/__init__.py b/api/database/__init__.py
new file mode 100644
index 000000000..5b0bb05a2
--- /dev/null
+++ b/api/database/__init__.py
@@ -0,0 +1,23 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import scoped_session, sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+engine = create_engine('sqlite:////tmp/yardstick.db', convert_unicode=True)
+db_session = scoped_session(sessionmaker(autocommit=False,
+ autoflush=False,
+ bind=engine))
+Base = declarative_base()
+Base.query = db_session.query_property()
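
The module wires up a single SQLite-backed engine and a scoped session at import time. A minimal standalone sketch of how these objects combine (the Example model is hypothetical; the real models live in api/database/models.py):

    from sqlalchemy import Column, Integer, String

    from api.database import Base, db_session, engine

    class Example(Base):                          # hypothetical model
        __tablename__ = 'example'
        id = Column(Integer, primary_key=True)
        name = Column(String(30))

    # create_all() only creates tables for models that have been imported,
    # which is why api/server.py imports api.database.models before calling it.
    Base.metadata.create_all(bind=engine)

    db_session.add(Example(name='demo'))          # scoped_session -> thread-local session
    db_session.commit()

    # Base.query = db_session.query_property() is what enables Model.query:
    print(Example.query.filter_by(name='demo').first())
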
diff --git a/api/database/handler.py b/api/database/handler.py
new file mode 100644
index 000000000..f6a22578f
--- /dev/null
+++ b/api/database/handler.py
@@ -0,0 +1,30 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+from api.database import db_session
+from api.database.models import AsyncTasks
+
+
+class AsyncTaskHandler(object):
+ def insert(self, kwargs):
+ task = AsyncTasks(**kwargs)
+ db_session.add(task)
+ db_session.commit()
+ return task
+
+ def update_status(self, task, status):
+ task.status = status
+ db_session.commit()
+
+ def update_error(self, task, error):
+ task.error = error
+ db_session.commit()
+
+ def get_task_by_taskid(self, task_id):
+ task = AsyncTasks.query.filter_by(task_id=task_id).first()
+ return task
diff --git a/api/database/handlers.py b/api/database/handlers.py
new file mode 100644
index 000000000..42979b529
--- /dev/null
+++ b/api/database/handlers.py
@@ -0,0 +1,31 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from api.database import db_session
+from api.database.models import Tasks
+
+
+class TasksHandler(object):
+
+ def insert(self, kwargs):
+ task = Tasks(**kwargs)
+ db_session.add(task)
+ db_session.commit()
+ return task
+
+ def update_status(self, task, status):
+ task.status = status
+ db_session.commit()
+
+ def update_error(self, task, error):
+ task.error = error
+ db_session.commit()
+
+ def get_task_by_taskid(self, task_id):
+ task = Tasks.query.filter_by(task_id=task_id).first()
+ return task
diff --git a/api/database/models.py b/api/database/models.py
new file mode 100644
index 000000000..2fc141c1f
--- /dev/null
+++ b/api/database/models.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import String
+
+from api.database import Base
+
+
+class Tasks(Base):
+ __tablename__ = 'tasks'
+ id = Column(Integer, primary_key=True)
+ task_id = Column(String(30))
+ status = Column(Integer)
+ error = Column(String(120))
+ details = Column(String(120))
+
+ def __repr__(self):
+ return '<Task %r>' % self.task_id
+
+
+class AsyncTasks(Base):
+ __tablename__ = 'asynctasks'
+ id = Column(Integer, primary_key=True)
+ task_id = Column(String(30))
+ status = Column(Integer)
+ error = Column(String(120))
+
+ def __repr__(self):
+ return '<Task %r>' % self.task_id
diff --git a/api/resources/asynctask.py b/api/resources/asynctask.py
new file mode 100644
index 000000000..dd2a71003
--- /dev/null
+++ b/api/resources/asynctask.py
@@ -0,0 +1,35 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+import uuid
+
+from api.utils import common as common_utils
+from api.database.models import AsyncTasks
+
+
+def default(args):
+ return _get_status(args)
+
+
+def _get_status(args):
+ try:
+ task_id = args['task_id']
+ uuid.UUID(task_id)
+ except (KeyError, ValueError):
+ message = 'task_id must be provided and be a valid UUID'
+ return common_utils.error_handler(message)
+
+ asynctask = AsyncTasks.query.filter_by(task_id=task_id).first()
+
+ try:
+ status = asynctask.status
+ error = asynctask.error if asynctask.error else []
+
+ return common_utils.result_handler(status, error)
+ except AttributeError:
+ return common_utils.error_handler('no such task')
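
The stored status integer is returned as-is: 0 while the background thread is still running, 1 on success, 2 on failure together with the recorded error text. A client-side polling sketch; the base URL/port and the exact JSON envelope produced by result_handler are assumptions:

    import time

    import requests                        # assumed client-side dependency

    def wait_for_task(task_id, base_url='http://localhost:8888'):
        # Poll /yardstick/asynctask until the task leaves status 0.
        while True:
            reply = requests.get(base_url + '/yardstick/asynctask',
                                 params={'task_id': task_id}).json()
            if reply.get('status') != 0:   # 1 = finished, 2 = error
                return reply
            time.sleep(2)
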
diff --git a/api/resources/env_action.py b/api/resources/env_action.py
index fa0f95d90..7e2487158 100644
--- a/api/resources/env_action.py
+++ b/api/resources/env_action.py
@@ -10,9 +10,11 @@ import logging
import threading
import subprocess
import time
+import uuid
import json
import os
import errno
+import ConfigParser
from docker import Client
@@ -22,17 +24,24 @@ from yardstick.common.httpClient import HttpClient
from api import conf as api_conf
from api.utils import influx
from api.utils.common import result_handler
+from api.database.handler import AsyncTaskHandler
logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
def createGrafanaContainer(args):
- thread = threading.Thread(target=_create_grafana)
+ task_id = str(uuid.uuid4())
+
+ thread = threading.Thread(target=_create_grafana, args=(task_id,))
thread.start()
- return result_handler('success', [])
+
+ return result_handler('success', {'task_id': task_id})
-def _create_grafana():
+def _create_grafana(task_id):
+ _create_task(task_id)
+
client = Client(base_url=config.DOCKER_URL)
try:
@@ -47,7 +56,10 @@ def _create_grafana():
_create_data_source()
_create_dashboard()
+
+ _update_task_status(task_id)
except Exception as e:
+ _update_task_error(task_id, str(e))
logger.debug('Error: %s', e)
@@ -95,16 +107,21 @@ def _check_image_exist(client, t):
def createInfluxDBContainer(args):
- thread = threading.Thread(target=_create_influxdb)
+ task_id = str(uuid.uuid4())
+
+ thread = threading.Thread(target=_create_influxdb, args=(task_id,))
thread.start()
- return result_handler('success', [])
+
+ return result_handler('success', {'task_id': task_id})
-def _create_influxdb():
+def _create_influxdb(task_id):
+ _create_task(task_id)
+
client = Client(base_url=config.DOCKER_URL)
try:
- _config_output_file()
+ _change_output_to_influxdb()
if not _check_image_exist(client, '%s:%s' % (config.INFLUXDB_IMAGE,
config.INFLUXDB_TAG)):
@@ -115,7 +132,10 @@ def _create_influxdb():
time.sleep(5)
_config_influxdb()
+
+ _update_task_status(task_id)
except Exception as e:
+ _update_task_error(task_id, str(e))
logger.debug('Error: %s', e)
@@ -144,58 +164,59 @@ def _config_influxdb():
logger.debug('Failed to config influxDB: %s', e)
-def _config_output_file():
+def _change_output_to_influxdb():
yardstick_utils.makedirs(config.YARDSTICK_CONFIG_DIR)
- with open(config.YARDSTICK_CONFIG_FILE, 'w') as f:
- f.write("""\
-[DEFAULT]
-debug = False
-dispatcher = influxdb
-[dispatcher_file]
-file_path = /tmp/yardstick.out
+ parser = ConfigParser.ConfigParser()
+ parser.read(config.YARDSTICK_CONFIG_SAMPLE_FILE)
-[dispatcher_http]
-timeout = 5
-# target = http://127.0.0.1:8000/results
+ parser.set('DEFAULT', 'dispatcher', 'influxdb')
+ parser.set('dispatcher_influxdb', 'target',
+ 'http://%s:8086' % api_conf.GATEWAY_IP)
-[dispatcher_influxdb]
-timeout = 5
-target = http://%s:8086
-db_name = yardstick
-username = root
-password = root
-"""
- % api_conf.GATEWAY_IP)
+ with open(config.YARDSTICK_CONFIG_FILE, 'w') as f:
+ parser.write(f)
def prepareYardstickEnv(args):
- thread = threading.Thread(target=_prepare_env_daemon)
+ task_id = str(uuid.uuid4())
+
+ thread = threading.Thread(target=_prepare_env_daemon, args=(task_id,))
thread.start()
- return result_handler('success', [])
+
+ return result_handler('success', {'task_id': task_id})
-def _prepare_env_daemon():
+def _prepare_env_daemon(task_id):
+ _create_task(task_id)
installer_ip = os.environ.get('INSTALLER_IP', 'undefined')
installer_type = os.environ.get('INSTALLER_TYPE', 'undefined')
- _check_variables(installer_ip, installer_type)
+ try:
+ _check_variables(installer_ip, installer_type)
+
+ _create_directories()
- _create_directories()
+ rc_file = config.OPENSTACK_RC_FILE
- rc_file = config.OPENSTACK_RC_FILE
+ _get_remote_rc_file(rc_file, installer_ip, installer_type)
- _get_remote_rc_file(rc_file, installer_ip, installer_type)
+ _source_file(rc_file)
- _source_file(rc_file)
+ _append_external_network(rc_file)
- _append_external_network(rc_file)
+ # update the external_network
+ _source_file(rc_file)
- # update the external_network
- _source_file(rc_file)
+ _clean_images()
- _load_images()
+ _load_images()
+
+ _update_task_status(task_id)
+ except Exception as e:
+ _update_task_error(task_id, str(e))
+ logger.debug('Error: %s', e)
def _check_variables(installer_ip, installer_type):
@@ -251,9 +272,41 @@ def _append_external_network(rc_file):
raise
+def _clean_images():
+ cmd = [config.CLEAN_IMAGES_SCRIPT]
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cwd=config.YARDSTICK_REPOS_DIR)
+ output = p.communicate()[0]
+ logger.debug('The result is: %s', output)
+
+
def _load_images():
cmd = [config.LOAD_IMAGES_SCRIPT]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
cwd=config.YARDSTICK_REPOS_DIR)
output = p.communicate()[0]
logger.debug('The result is: %s', output)
+
+
+def _create_task(task_id):
+ async_handler = AsyncTaskHandler()
+ task_dict = {
+ 'task_id': task_id,
+ 'status': 0
+ }
+ async_handler.insert(task_dict)
+
+
+def _update_task_status(task_id):
+ async_handler = AsyncTaskHandler()
+
+ task = async_handler.get_task_by_taskid(task_id)
+ async_handler.update_status(task, 1)
+
+
+def _update_task_error(task_id, error):
+ async_handler = AsyncTaskHandler()
+
+ task = async_handler.get_task_by_taskid(task_id)
+ async_handler.update_status(task, 2)
+ async_handler.update_error(task, error)
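
All three actions in this file follow one pattern: insert a task row with status 0, run the real work in a daemon thread, then move the row to 1 on success or to 2 plus an error string on failure. Condensed into a sketch (the constant names are invented; the module itself uses the bare integers):

    TASK_NOT_DONE = 0    # just created / still running
    TASK_DONE = 1        # finished successfully
    TASK_FAILED = 2      # failed; the error column holds str(exception)

    def _run_in_background(work, task_id):
        _create_task(task_id)                     # row starts at status 0
        try:
            work()
            _update_task_status(task_id)          # -> 1
        except Exception as e:
            _update_task_error(task_id, str(e))   # -> 2, plus the error text
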
diff --git a/api/resources/release_action.py b/api/resources/release_action.py
index fda0ffd32..d4dc246ef 100644
--- a/api/resources/release_action.py
+++ b/api/resources/release_action.py
@@ -23,8 +23,8 @@ def runTestCase(args):
except KeyError:
return common_utils.error_handler('Lack of testcase argument')
- testcase = os.path.join(conf.TEST_CASE_PATH,
- conf.TEST_CASE_PRE + testcase + '.yaml')
+ testcase_name = conf.TEST_CASE_PRE + testcase
+ testcase = os.path.join(conf.TEST_CASE_PATH, testcase_name + '.yaml')
task_id = str(uuid.uuid4())
@@ -33,6 +33,10 @@ def runTestCase(args):
logger.debug('The command_list is: %s', command_list)
logger.debug('Start to execute command list')
- common_utils.exec_command_task(command_list, task_id)
+ task_dict = {
+ 'task_id': task_id,
+ 'details': testcase_name
+ }
+ common_utils.exec_command_task(command_list, task_dict)
return common_utils.result_handler('success', task_id)
diff --git a/api/resources/results.py b/api/resources/results.py
index 3de09fdc9..fd518958c 100644
--- a/api/resources/results.py
+++ b/api/resources/results.py
@@ -8,11 +8,10 @@
##############################################################################
import logging
import uuid
-import re
from api.utils import influx as influx_utils
from api.utils import common as common_utils
-from api import conf
+from api.database.handlers import TasksHandler
logger = logging.getLogger(__name__)
@@ -23,39 +22,36 @@ def default(args):
def getResult(args):
try:
- measurement = args['measurement']
task_id = args['task_id']
- if re.search("[^a-zA-Z0-9_-]", measurement):
- raise ValueError('invalid measurement parameter')
-
uuid.UUID(task_id)
except KeyError:
- message = 'measurement and task_id must be provided'
+ message = 'task_id must be provided'
return common_utils.error_handler(message)
- query_template = "select * from %s where task_id='%s'"
- query_sql = query_template % ('tasklist', task_id)
- data = common_utils.translate_to_str(influx_utils.query(query_sql))
+ task = TasksHandler().get_task_by_taskid(task_id)
def _unfinished():
return common_utils.result_handler(0, [])
def _finished():
- query_sql = query_template % (conf.TEST_CASE_PRE + measurement,
- task_id)
- data = common_utils.translate_to_str(influx_utils.query(query_sql))
- if not data:
- query_sql = query_template % (measurement, task_id)
+ testcases = task.details.split(',')
+
+ def get_data(testcase):
+ query_template = "select * from %s where task_id='%s'"
+ query_sql = query_template % (testcase, task_id)
data = common_utils.translate_to_str(influx_utils.query(query_sql))
+ return data
+
+ result = {k: get_data(k) for k in testcases}
- return common_utils.result_handler(1, data)
+ return common_utils.result_handler(1, result)
def _error():
- return common_utils.result_handler(2, data[0]['error'])
+ return common_utils.result_handler(2, task.error)
try:
- status = data[0]['status']
+ status = task.status
switcher = {
0: _unfinished,
diff --git a/api/resources/samples_action.py b/api/resources/samples_action.py
index 545447aec..df6db17ee 100644
--- a/api/resources/samples_action.py
+++ b/api/resources/samples_action.py
@@ -19,11 +19,11 @@ logger = logging.getLogger(__name__)
def runTestCase(args):
try:
opts = args.get('opts', {})
- testcase = args['testcase']
+ testcase_name = args['testcase']
except KeyError:
return common_utils.error_handler('Lack of testcase argument')
- testcase = os.path.join(conf.SAMPLE_PATH, testcase + '.yaml')
+ testcase = os.path.join(conf.SAMPLE_PATH, testcase_name + '.yaml')
task_id = str(uuid.uuid4())
@@ -32,6 +32,10 @@ def runTestCase(args):
logger.debug('The command_list is: %s', command_list)
logger.debug('Start to execute command list')
- common_utils.exec_command_task(command_list, task_id)
+ task_dict = {
+ 'task_id': task_id,
+ 'details': testcase_name
+ }
+ common_utils.exec_command_task(command_list, task_dict)
return common_utils.result_handler('success', task_id)
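
A hypothetical client call for this endpoint; the URL comes from api/urls.py, while the 'action'/'args' envelope and the port are assumptions inferred from how runTestCase(args) reads its input:

    import requests

    reply = requests.post(
        'http://localhost:8888/yardstick/testcases/samples/action',
        json={'action': 'runTestCase',
              'args': {'testcase': 'ping', 'opts': {}}}).json()

    # runTestCase() answers with result_handler('success', task_id); the
    # returned task_id can then be used to poll for status and results.
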
diff --git a/api/server.py b/api/server.py
index 64a2b4f96..8cce4de87 100644
--- a/api/server.py
+++ b/api/server.py
@@ -7,11 +7,17 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
+from itertools import ifilter
+import inspect
from flask import Flask
from flask_restful import Api
from flasgger import Swagger
+from api.database import Base
+from api.database import engine
+from api.database import db_session
+from api.database import models
from api.urls import urlpatterns
from yardstick import _init_logging
@@ -24,6 +30,26 @@ Swagger(app)
api = Api(app)
+@app.teardown_request
+def shutdown_session(exception=None):
+ db_session.remove()
+
+
+def init_db():
+ def func(a):
+ try:
+ if issubclass(a[1], Base):
+ return True
+ except TypeError:
+ pass
+ return False
+
+ subclses = ifilter(func, inspect.getmembers(models, inspect.isclass))
+ logger.debug('Import models: %s', [a[1] for a in subclses])
+ Base.metadata.create_all(bind=engine)
+
+
+init_db()
reduce(lambda a, b: a.add_resource(b.resource, b.url,
endpoint=b.endpoint) or a, urlpatterns, api)
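
The reduce() call is a compact way of registering every entry of api.urls.urlpatterns on the flask_restful Api object. Unrolled, it is equivalent to (listing only the routes visible in this change):

    api.add_resource(views.Asynctask, '/yardstick/asynctask',
                     endpoint='asynctask')
    api.add_resource(views.ReleaseAction, '/yardstick/testcases/release/action',
                     endpoint='release')
    api.add_resource(views.SamplesAction, '/yardstick/testcases/samples/action',
                     endpoint='samples')
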
diff --git a/api/urls.py b/api/urls.py
index 7f9f0bb07..58df29142 100644
--- a/api/urls.py
+++ b/api/urls.py
@@ -11,6 +11,7 @@ from api.utils.common import Url
urlpatterns = [
+ Url('/yardstick/asynctask', views.Asynctask, 'asynctask'),
Url('/yardstick/testcases/release/action', views.ReleaseAction, 'release'),
Url('/yardstick/testcases/samples/action', views.SamplesAction, 'samples'),
Url('/yardstick/testsuites/action', views.TestsuitesAction, 'testsuites'),
diff --git a/api/utils/common.py b/api/utils/common.py
index e3e64a72b..6971c6dfe 100644
--- a/api/utils/common.py
+++ b/api/utils/common.py
@@ -40,8 +40,8 @@ def get_command_list(command_list, opts, args):
return command_list
-def exec_command_task(command_list, task_id): # pragma: no cover
- daemonthread = DaemonThread(YardstickCLI().api, (command_list, task_id))
+def exec_command_task(command_list, task_dict): # pragma: no cover
+ daemonthread = DaemonThread(YardstickCLI().api, (command_list, task_dict))
daemonthread.start()
diff --git a/api/utils/daemonthread.py b/api/utils/daemonthread.py
index 47c0b9108..19182c429 100644
--- a/api/utils/daemonthread.py
+++ b/api/utils/daemonthread.py
@@ -8,11 +8,10 @@
##############################################################################
import threading
import os
-import datetime
import errno
from api import conf
-from api.utils.influx import write_data_tasklist
+from api.database.handlers import TasksHandler
class DaemonThread(threading.Thread):
@@ -21,19 +20,24 @@ class DaemonThread(threading.Thread):
super(DaemonThread, self).__init__(target=method, args=args)
self.method = method
self.command_list = args[0]
- self.task_id = args[1]
+ self.task_dict = args[1]
def run(self):
- timestamp = datetime.datetime.now()
+ self.task_dict['status'] = 0
+ task_id = self.task_dict['task_id']
try:
- write_data_tasklist(self.task_id, timestamp, 0)
- self.method(self.command_list, self.task_id)
- write_data_tasklist(self.task_id, timestamp, 1)
+ task_handler = TasksHandler()
+ task = task_handler.insert(self.task_dict)
+
+ self.method(self.command_list, task_id)
+
+ task_handler.update_status(task, 1)
except Exception as e:
- write_data_tasklist(self.task_id, timestamp, 2, error=str(e))
+ task_handler.update_status(task, 2)
+ task_handler.update_error(task, str(e))
finally:
- _handle_testsuite_file(self.task_id)
+ _handle_testsuite_file(task_id)
def _handle_testsuite_file(task_id):
diff --git a/api/utils/influx.py b/api/utils/influx.py
index 9366ed3e9..d4b070fb4 100644
--- a/api/utils/influx.py
+++ b/api/utils/influx.py
@@ -7,10 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
+import ConfigParser
from urlparse import urlsplit
from influxdb import InfluxDBClient
-import ConfigParser
from api import conf
@@ -21,46 +21,26 @@ def get_data_db_client():
parser = ConfigParser.ConfigParser()
try:
parser.read(conf.OUTPUT_CONFIG_FILE_PATH)
- dispatcher = parser.get('DEFAULT', 'dispatcher')
- if 'influxdb' != dispatcher:
+ if 'influxdb' != parser.get('DEFAULT', 'dispatcher'):
raise RuntimeError
- ip = _get_ip(parser.get('dispatcher_influxdb', 'target'))
- username = parser.get('dispatcher_influxdb', 'username')
- password = parser.get('dispatcher_influxdb', 'password')
- db_name = parser.get('dispatcher_influxdb', 'db_name')
- return InfluxDBClient(ip, conf.PORT, username, password, db_name)
+ return _get_client(parser)
except ConfigParser.NoOptionError:
logger.error('can not find the key')
raise
-def _get_ip(url):
- return urlsplit(url).hostname
+def _get_client(parser):
+ ip = _get_ip(parser.get('dispatcher_influxdb', 'target'))
+ username = parser.get('dispatcher_influxdb', 'username')
+ password = parser.get('dispatcher_influxdb', 'password')
+ db_name = parser.get('dispatcher_influxdb', 'db_name')
+ return InfluxDBClient(ip, conf.PORT, username, password, db_name)
-def _write_data(measurement, field, timestamp, tags):
- point = {
- 'measurement': measurement,
- 'fields': field,
- 'time': timestamp,
- 'tags': tags
- }
-
- try:
- client = get_data_db_client()
-
- logger.debug('Start to write data: %s', point)
- client.write_points([point])
- except RuntimeError:
- logger.debug('dispatcher is not influxdb')
-
-
-def write_data_tasklist(task_id, timestamp, status, error=''):
- field = {'status': status, 'error': error}
- tags = {'task_id': task_id}
- _write_data('tasklist', field, timestamp, tags)
+def _get_ip(url):
+ return urlsplit(url).hostname
def query(query_sql):
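
For context, query() runs raw InfluxQL through the same client that get_data_db_client() builds. A standalone sketch with the connection defaults from the removed sample config (root/root, database 'yardstick') hard-coded:

    import uuid

    from influxdb import InfluxDBClient

    task_id = str(uuid.uuid4())        # normally the id returned when a task started
    client = InfluxDBClient('127.0.0.1', 8086, 'root', 'root', 'yardstick')
    rs = client.query("select * from opnfv_yardstick_tc002 "
                      "where task_id='%s'" % task_id)
    points = list(rs.get_points())     # one dict per stored result row
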
diff --git a/api/views.py b/api/views.py
index bdff7c0d9..eb81145fc 100644
--- a/api/views.py
+++ b/api/views.py
@@ -24,6 +24,11 @@ TestCaseActionArgsOptsModel = models.TestCaseActionArgsOptsModel
TestCaseActionArgsOptsTaskArgModel = models.TestCaseActionArgsOptsTaskArgModel
+class Asynctask(ApiResource):
+ def get(self):
+ return self._dispatch_get()
+
+
class ReleaseAction(ApiResource):
@swag_from(os.getcwd() + '/swagger/docs/release_action.yaml')
def post(self):
diff --git a/api/yardstick.ini b/api/yardstick.ini
index 01025c2ef..2ba881fc1 100644
--- a/api/yardstick.ini
+++ b/api/yardstick.ini
@@ -12,5 +12,5 @@ chmod-socket = 666
callable = app
enable-threads = true
close-on-exec = 1
-daemonize=/home/opnfv/repos/yardstick/api/uwsgi.log
-socket = /home/opnfv/repos/yardstick/api/yardstick.sock
+daemonize = /var/log/yardstick/uwsgi.log
+socket = /var/run/yardstick.sock
diff --git a/docs/userguide/03-architecture.rst b/docs/userguide/03-architecture.rst
index ace3117c2..03bf00f58 100755
--- a/docs/userguide/03-architecture.rst
+++ b/docs/userguide/03-architecture.rst
@@ -175,7 +175,7 @@ LmBench, ...)
TaskCommands is the "yardstick task" subcommand's main entry. It takes yaml
file (e.g. test.yaml) as input, and uses HeatContext to convert the yaml
-file's context section to HOT. After Openstacik heat stack is deployed by
+file's context section to HOT. After Openstack heat stack is deployed by
HeatContext with the converted HOT, TaskCommands use Runner to run specified
TestScenario. During first runner initialization, it will create output
process. The output process use Dispatcher to push test results. The Runner
diff --git a/docs/userguide/opnfv_yardstick_tc001.rst b/docs/userguide/opnfv_yardstick_tc001.rst
index fac375d50..b53c508a6 100644
--- a/docs/userguide/opnfv_yardstick_tc001.rst
+++ b/docs/userguide/opnfv_yardstick_tc001.rst
@@ -1,4 +1,4 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. This work is licensed under a Creative Commons Attribution 4.0 International
.. License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Ericsson AB and others.
@@ -13,38 +13,40 @@ Yardstick Test Case Description TC001
|Network Performance |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC001_NW PERF |
+|test case id | OPNFV_YARDSTICK_TC001_NETWORK PERFORMANCE |
| | |
+--------------+--------------------------------------------------------------+
|metric | Number of flows and throughput |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS network performance with regards to |
-| | flows and throughput, such as if and how different amounts |
-| | of flows matter for the throughput between hosts on |
-| | different compute blades. Typically e.g. the performance of |
-| | a vSwitch depends on the number of flows running through it. |
-| | Also performance of other equipment or entities can depend |
-| | on the number of flows or the packet sizes used. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs ans similar shall be stored for comparison reasons |
-| | and product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
-| | |
-+--------------+--------------------------------------------------------------+
-|configuration | file: opnfv_yardstick_tc001.yaml |
-| | |
-| | Packet size: 60 bytes |
-| | Number of ports: 10, 50, 100, 500 and 1000, where each |
-| | runs for 20 seconds. The whole sequence is run |
-| | twice. The client and server are distributed on different |
-| | HW. |
-| | For SLA max_ppm is set to 1000. The amount of configured |
-| | ports map to between 110 up to 1001000 flows, respectively. |
+|test purpose | The purpose of TC001 is to evaluate the IaaS network |
+| | performance with regards to flows and throughput, such as if |
+| | and how different amounts of flows matter for the throughput |
+| | between hosts on different compute blades. Typically e.g. |
+| | the performance of a vSwitch depends on the number of flows |
+| | running through it. Also performance of other equipment or |
+| | entities can depend on the number of flows or the packet |
+| | sizes used. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | pktgen |
| | |
+| | Linux packet generator is a tool to generate packets at very |
+| | high speed in the kernel. pktgen is mainly used to test |
+| | drivers and LAN equipment. It supports multi-threading and |
+| | can generate UDP packets with random MAC addresses, IP |
+| | addresses and port numbers, using multiple CPU processors |
+| | on different PCI buses (PCI, PCIe) with Gigabit Ethernet |
+| | NICs (pktgen performance depends on hardware parameters |
+| | such as CPU processing speed, memory delay and PCI bus |
+| | speed). The transmit data rate can exceed 10 Gbit/s, which |
+| | satisfies most NIC test requirements. |
+| | |
| | (Pktgen is not always part of a Linux distribution, hence it |
| | needs to be installed. It is part of the Yardstick Docker |
| | image. |
@@ -52,18 +54,47 @@ Yardstick Test Case Description TC001
| | to generate a Linux image with pktgen included.) |
| | |
+--------------+--------------------------------------------------------------+
-|references | pktgen_ |
+|test | This test case uses Pktgen to generate packet flow between |
+|description | two hosts for simulating network workloads on the SUT. |
| | |
-| | ETSI-NFV-TST001 |
++--------------+--------------------------------------------------------------+
+|traffic | An IP table is set up on the server to monitor received |
+|profile | packets. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc001.yaml |
+| | |
+| | Packet size is set to 60 bytes. |
+| | Number of ports: 10, 50, 100, 500 and 1000, where each |
+| | runs for 20 seconds. The whole sequence is run twice. |
+| | The client and server are distributed on different hardware. |
+| | |
+| | For SLA max_ppm is set to 1000. The amount of configured |
+| | ports map to between 110 up to 1001000 flows, respectively. |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different packet sizes, amount |
-| | of flows and test duration. Default values exist. |
+|applicability | Test can be configured with different: |
+| | |
+| | * packet sizes; |
+| | * amount of flows; |
+| | * test duration. |
+| | |
+| | Default values exist. |
| | |
| | SLA (optional): max_ppm: The number of packets per million |
| | packets sent that are acceptable to lose, not received. |
| | |
+--------------+--------------------------------------------------------------+
+|usability | This test case is used for generating high network |
+| | throughput to simulate certain workloads on the SUT. Hence |
+| | it should work with other test cases. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with pktgen included in it. |
| | |
@@ -73,12 +104,29 @@ Yardstick Test Case Description TC001
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. pktgen is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs are booted, as server and client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the server VM by using ssh. |
+| | 'pktgen_benchmark' bash script is copied from Jump Host to |
+| | the server VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | An IP table is set up on the server to monitor received |
+| | packets. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | pktgen is invoked to generate packet flow between the server |
+| | and client VMs to simulate network workloads on the SUT. |
+| | Results are processed and checked against the SLA. Logs are |
+| | produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 5 | Two host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
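
As background to the tool description above: pktgen is driven entirely through the /proc/net/pktgen interface. A rough sketch of the kind of setup the pktgen_benchmark script performs (device name, addresses and counts are made up; requires root and the pktgen kernel module):

    def pgset(path, cmd):
        # Write a single pktgen control command.
        with open(path, 'w') as f:
            f.write(cmd + '\n')

    pgset('/proc/net/pktgen/kpktgend_0', 'rem_device_all')
    pgset('/proc/net/pktgen/kpktgend_0', 'add_device eth0')
    pgset('/proc/net/pktgen/eth0', 'pkt_size 60')       # 60-byte packets, as in the TC
    pgset('/proc/net/pktgen/eth0', 'count 100000')
    pgset('/proc/net/pktgen/eth0', 'dst 10.0.0.2')      # hypothetical server VM address
    pgset('/proc/net/pktgen/eth0', 'udp_dst_min 1000')  # the port range controls the
    pgset('/proc/net/pktgen/eth0', 'udp_dst_max 1010')  # number of distinct flows
    pgset('/proc/net/pktgen/pgctrl', 'start')           # blocks until count is sent
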
diff --git a/docs/userguide/opnfv_yardstick_tc002.rst b/docs/userguide/opnfv_yardstick_tc002.rst
index 193fc531f..c98780fd5 100644
--- a/docs/userguide/opnfv_yardstick_tc002.rst
+++ b/docs/userguide/opnfv_yardstick_tc002.rst
@@ -8,34 +8,37 @@ Yardstick Test Case Description TC002
*************************************
.. _cirros-image: https://download.cirros-cloud.net
+.. _Ping: https://linux.die.net/man/8/ping
+-----------------------------------------------------------------------------+
|Network Latency |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC002_NW LATENCY |
+|test case id | OPNFV_YARDSTICK_TC002_NETWORK LATENCY |
| | |
+--------------+--------------------------------------------------------------+
-|metric | RTT, Round Trip Time |
+|metric | RTT (Round Trip Time) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To do a basic verification that network latency is within |
-| | acceptable boundaries when packets travel between hosts |
-| | located on same or different compute blades. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs and similar shall be stored for comparison reasons and|
-| | product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC002 is to do a basic verification that |
+| | network latency is within acceptable boundaries when packets |
+| | travel between hosts located on same or different compute |
+| | blades. |
| | |
-+--------------+--------------------------------------------------------------+
-|configuration | file: opnfv_yardstick_tc002.yaml |
-| | |
-| | Packet size 100 bytes. Total test duration 600 seconds. |
-| | One ping each 10 seconds. SLA RTT is set to maximum 10 ms. |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | ping |
| | |
+| | Ping is a computer network administration software utility |
+| | used to test the reachability of a host on an Internet |
+| | Protocol (IP) network. It measures the round-trip time for |
+| | packets sent from the originating host to a destination |
+| | computer that are echoed back to the source. |
+| | |
| | Ping is normally part of any Linux distribution, hence it |
| | doesn't need to be installed. It is also part of the |
| | Yardstick Docker image. |
@@ -43,27 +46,55 @@ Yardstick Test Case Description TC002
| | cirros-image_, it includes ping) |
| | |
+--------------+--------------------------------------------------------------+
-|references | Ping man page |
+|test topology | Ping packets (ICMP protocol's mandatory ECHO_REQUEST |
+| | datagram) are sent from host VM to target VM(s) to elicit |
+| | ICMP ECHO_RESPONSE. |
| | |
-| | ETSI-NFV-TST001 |
+| | For one host VM there can be multiple target VMs. |
+| | Host VM and target VM(s) can be on same or different compute |
+| | blades. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc002.yaml |
+| | |
+| | Packet size 100 bytes. Test duration 60 seconds. |
+| | One ping each 10 seconds. Test is iterated two times. |
+| | SLA RTT is set to maximum 10 ms. |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test case can be configured with different packet sizes, |
-| | burst sizes, ping intervals and test duration. |
+|applicability | This test case can be configured with different: |
+| | |
+| | * packet sizes; |
+| | * burst sizes; |
+| | * ping intervals; |
+| | * test durations; |
+| | * test iterations. |
+| | |
+| | Default values exist. |
+| | |
| | SLA is optional. The SLA in this test case serves as an |
-| | example. Considerably lower RTT is expected, and |
-| | also normal to achieve in balanced L2 environments. However, |
-| | to cover most configurations, both bare metal and fully |
-| | virtualized ones, this value should be possible to achieve |
-| | and acceptable for black box testing. Many real time |
+| | example. Considerably lower RTT is expected, and also normal |
+| | to achieve in balanced L2 environments. However, to cover |
+| | most configurations, both bare metal and fully virtualized |
+| | ones, this value should be possible to achieve and |
+| | acceptable for black box testing. Many real time |
| | applications start to suffer badly if the RTT time is higher |
| | than this. Some may suffer bad also close to this RTT, while |
| | others may not suffer at all. It is a compromise that may |
| | have to be tuned for different configuration purposes. |
| | |
+--------------+--------------------------------------------------------------+
-|pre-test | The test case image needs to be installed into Glance |
-|conditions | with ping included in it. |
+|usability | This test case is one of Yardstick's generic tests. Thus it |
+| | is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Ping_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image (cirros-image) needs to be installed |
+|conditions | into Glance with ping included in it. |
| | |
| | No POD specific requirements have been identified. |
| | |
@@ -71,12 +102,24 @@ Yardstick Test Case Description TC002
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. Ping is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs are booted, as server and client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the server VM by using ssh. |
+| | 'ping_benchmark' bash script is copied from Jump Host to |
+| | the server VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Ping is invoked. Ping packets are sent from server VM to |
+| | client VM. RTT results are calculated and checked against |
+| | the SLA. Logs are produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | Two host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test should not PASS if any RTT is above the optional SLA |
| | value, or if there is a test case execution problem. |
| | |
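
A compact illustration of what the scenario measures: send ICMP echo requests of the configured size and compare each RTT against the 10 ms SLA (the target address is made up; the real scenario drives ping through the copied benchmark script):

    import re
    import subprocess

    def ping_rtt_ms(target, count=6, packet_size=100):
        # Run the system ping and parse the per-packet RTTs in milliseconds.
        out = subprocess.check_output(
            ['ping', '-c', str(count), '-s', str(packet_size), target])
        return [float(m) for m in re.findall(r'time=([\d.]+) ms', out.decode())]

    rtts = ping_rtt_ms('10.0.0.2')             # hypothetical target VM
    assert max(rtts) <= 10.0, 'SLA violated: RTT above 10 ms'
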
diff --git a/docs/userguide/opnfv_yardstick_tc004.rst b/docs/userguide/opnfv_yardstick_tc004.rst
index 301286126..3554b3826 100644
--- a/docs/userguide/opnfv_yardstick_tc004.rst
+++ b/docs/userguide/opnfv_yardstick_tc004.rst
@@ -13,39 +13,52 @@ Yardstick Test Case Description TC004
|Cache Utilization |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC004_Cache Utilization |
+|test case id | OPNFV_YARDSTICK_TC004_CACHE UTILIZATION |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Cache Utilization |
+|metric | cache hit, cache miss, hit/miss ratio, buffer size and page |
+| | cache size |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS compute capability with regards to |
-| | cache utilization.This test case should be run in parallel |
-| | to other Yardstick test cases and not run as a stand-alone |
-| | test case. |
-| | Measure the cache usage statistics including cache hit, |
-| | cache miss, hit ratio, page cache size and page cache size. |
-| | Both average and maximun values are obtained. |
-| | The purpose is also to be able to spot trends. |
+|test purpose | The purpose of TC004 is to evaluate the IaaS compute |
+| | capability with regards to cache utilization. This test case |
+| | should be run in parallel with other Yardstick test cases |
+| | and not run as a stand-alone test case. |
+| | |
+| | This test case measures cache usage statistics, including |
+| | cache hit, cache miss, hit ratio, buffer cache size and page |
+| | cache size, while some workloads run on the infrastructure. |
+| | Both average and maximum values are collected. |
+| | |
+| | The purpose is also to be able to spot the trends. |
| | Test results, graphs and similar shall be stored for |
| | comparison reasons and product evolution understanding |
| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
-|configuration | File: cachestat.yaml (in the 'samples' directory) |
+|test tool | cachestat |
| | |
-| | * interval: 1 - repeat, pausing every 1 seconds in-between. |
+| | cachestat is a tool using Linux ftrace capabilities for |
+| | showing Linux page cache hit/miss statistics. |
| | |
-+--------------+--------------------------------------------------------------+
-|test tool | cachestat |
+| | (cachestat is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with cachestat included.) |
| | |
-| | cachestat is not always part of a Linux distribution, hence |
-| | it needs to be installed. |
++--------------+--------------------------------------------------------------+
+|test | cachestat test is invoked in a host VM on a compute blade; |
+|description | it requires some other test cases running in the host VM to |
+| | generate the workload to be measured. |
| | |
+--------------+--------------------------------------------------------------+
-|references | cachestat_ |
+|configuration | File: cachestat.yaml (in the 'samples' directory) |
| | |
-| | ETSI-NFV-TST001 |
+| | Interval is set to 1. The test repeats, pausing 1 second |
+| | in-between. |
+| | Test duration is set to 60 seconds. |
+| | |
+| | SLA is not available in this test case. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -53,8 +66,16 @@ Yardstick Test Case Description TC004
| | * interval; |
| | * runner Duration. |
| | |
-| | There are default values for each above-mentioned option. |
-| | Run in background with other test cases. |
+| | Default values exist. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic tests. Thus it |
+| | is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | cachestat_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
@@ -66,12 +87,24 @@ Yardstick Test Case Description TC004
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed as client. The related TC, or TCs, is |
-| | invoked and cachestat logs are produced and stored. |
+|step 1 | A host VM with cachestat installed is booted. |
| | |
-| | Result: logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | 'cache_stat' bash script is copied from Jump Host to |
+| | the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'cache_stat' script is invoked. Raw cache usage statistics |
+| | are collected and filtered. Average and maximum values are |
+| | calculated and recorded. Logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
| | |
+--------------+--------------------------------------------------------------+
-|test verdict | None. Cache utilization results are fetched and stored. |
+|test verdict | None. Cache utilization results are collected and stored. |
| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/userguide/opnfv_yardstick_tc005.rst b/docs/userguide/opnfv_yardstick_tc005.rst
index a181aa9f7..1c2d71d81 100644
--- a/docs/userguide/opnfv_yardstick_tc005.rst
+++ b/docs/userguide/opnfv_yardstick_tc005.rst
@@ -1,4 +1,4 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. This work is licensed under a Creative Commons Attribution 4.0 International
.. License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
@@ -7,53 +7,88 @@
Yardstick Test Case Description TC005
*************************************
-.. _fio: http://www.bluestop.org/fio/HOWTO.txt
+.. _fio: http://bluestop.org/files/fio/HOWTO.txt
+-----------------------------------------------------------------------------+
|Storage Performance |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC005_Storage Performance |
+|test case id | OPNFV_YARDSTICK_TC005_STORAGE PERFORMANCE |
| | |
+--------------+--------------------------------------------------------------+
-|metric | IOPS, throughput and latency |
+|metric | IOPS (Average IOs performed per second), |
+| | Throughput (Average disk read/write bandwidth rate), |
+| | Latency (Average disk read/write latency) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS storage performance with regards to |
-| | IOPS, throughput and latency. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs and similar shall be stored for comparison reasons |
-| | and product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC005 is to evaluate the IaaS storage |
+| | performance with regards to IOPS, throughput and latency. |
| | |
-+--------------+--------------------------------------------------------------+
-|configuration | file: opnfv_yardstick_tc005.yaml |
-| | |
-| | IO types: read, write, randwrite, randread, rw |
-| | IO block size: 4KB, 64KB, 1024KB, where each |
-| | runs for 30 seconds(10 for ramp time, 20 for runtime). |
-| | |
-| | For SLA minimum read/write iops is set to 100, minimum |
-| | read/write throughput is set to 400 KB/s, and maximum |
-| | read/write latency is set to 20000 usec. |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | fio |
| | |
+| | fio is an I/O tool meant to be used both for benchmark and |
+| | stress/hardware verification. It has support for 19 |
+| | different types of I/O engines (sync, mmap, libaio, |
+| | posixaio, SG v3, splice, null, network, syslet, guasi, |
+| | solarisaio, and more), I/O priorities (for newer Linux |
+| | kernels), rate I/O, forked or threaded jobs, and much more. |
+| | |
| | (fio is not always part of a Linux distribution, hence it |
| | needs to be installed. As an example see the |
| | /yardstick/tools/ directory for how to generate a Linux |
| | image with fio included.) |
| | |
+--------------+--------------------------------------------------------------+
-|references | fio_ |
+|test | fio test is invoked in a host VM on a compute blade, a job |
+|description | file as well as parameters are passed to fio and fio will |
+| | start doing what the job file tells it to do. |
| | |
-| | ETSI-NFV-TST001 |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc005.yaml |
+| | |
+| | IO types are set to read, write, randwrite, randread, rw. |
+| | IO block size is set to 4KB, 64KB, 1024KB. |
+| | fio is run for each IO type and IO block size scheme, |
+| | each iteration runs for 30 seconds (10 for ramp time, 20 for |
+| | runtime). |
+| | |
+| | For SLA, minimum read/write iops is set to 100, |
+| | minimum read/write throughput is set to 400 KB/s, |
+| | and maximum read/write latency is set to 20000 usec. |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different read/write types, IO |
-| | block size, IO depth, ramp time (runtime required for stable |
-| | results) and test duration. Default values exist. |
+|applicability | This test case can be configured with different: |
+| | |
+| | * IO types; |
+| | * IO block size; |
+| | * IO depth; |
+| | * ramp time; |
+| | * test duration. |
+| | |
+| | Default values exist. |
+| | |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably higher throughput and lower latency |
+| | are expected. However, to cover most configurations, both |
+| | baremetal and fully virtualized ones, this value should be |
+| | possible to achieve and acceptable for black box testing. |
+| | Many heavy IO applications start to suffer badly if the |
+| | read/write bandwidths are lower than this. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic tests. Thus it |
+| | is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | fio_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
@@ -65,12 +100,25 @@ Yardstick Test Case Description TC005
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed and fio is invoked and logs are |
-| | produced and stored. |
+|step 1 | A host VM with fio installed is booted. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | 'fio_benchmark' bash script is copied from Jump Host to |
+| | the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'fio_benchmark' script is invoked. Simulated IO operations |
+| | are started. IOPS, disk read/write bandwidth and latency are |
+| | recorded and checked against the SLA. Logs are produced and |
+| | stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
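
One iteration of the scheme described above, reduced to a direct fio invocation (file name and size are made up; the scenario itself goes through the fio_benchmark script and parses the JSON output for IOPS, bandwidth and latency):

    import subprocess

    cmd = ['fio', '--name=yardstick', '--filename=/tmp/fio.test', '--size=64m',
           '--rw=read', '--bs=4k',               # one of the IO type/block schemes
           '--ramp_time=10', '--runtime=20',     # 10 s ramp + 20 s measured, as in the TC
           '--time_based', '--output-format=json']
    print(subprocess.check_output(cmd).decode())
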
diff --git a/docs/userguide/opnfv_yardstick_tc010.rst b/docs/userguide/opnfv_yardstick_tc010.rst
index ab793de76..202307de6 100644
--- a/docs/userguide/opnfv_yardstick_tc010.rst
+++ b/docs/userguide/opnfv_yardstick_tc010.rst
@@ -7,21 +7,71 @@
Yardstick Test Case Description TC010
*************************************
-.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
+.. _lat_mem_rd: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
+-----------------------------------------------------------------------------+
|Memory Latency |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC010_Memory Latency |
+|test case id | OPNFV_YARDSTICK_TC010_MEMORY LATENCY |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Latency in nanoseconds |
+|metric | Memory read latency (nanoseconds) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Measure the memory read latency for varying memory sizes and |
-| | strides. Whole memory hierarchy is measured including all |
-| | levels of cache. |
+|test purpose | The purpose of TC010 is to evaluate the IaaS compute |
+| | performance with regards to memory read latency. |
+| | It measures the memory read latency for varying memory sizes |
+| | and strides. Whole memory hierarchy is measured. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Lmbench |
+| | |
+| | Lmbench is a suite of operating system microbenchmarks. This |
+| | test uses the lat_mem_rd tool from that suite. The whole |
+| | suite includes benchmarks for: |
+| | * Context switching |
+| | * Networking: connection establishment, pipe, TCP, UDP, and |
+| | RPC hot potato |
+| | * File system creates and deletes |
+| | * Process creation |
+| | * Signal handling |
+| | * System call overhead |
+| | * Memory read latency |
+| | |
+| | (LMbench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with LMbench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | LMbench lat_mem_rd benchmark measures memory read latency |
+|description | for varying memory sizes and strides. |
+| | |
+| | The benchmark runs as two nested loops. The outer loop is |
+| | the stride size. The inner loop is the array size. For each |
+| | array size, the benchmark creates a ring of pointers that |
+|              | point backward one stride. Traversing the array is done by:  |
+| | |
+| | p = (char **)*p; |
+| | |
+|              | in a for loop (the overhead of the for loop is not           |
+| | significant; the loop is an unrolled loop 100 loads long). |
+| | The size of the array varies from 512 bytes to (typically) |
+| | eight megabytes. For the small sizes, the cache will have an |
+| | effect, and the loads will be much faster. This becomes much |
+| | more apparent when the data is plotted. |
+| | |
+| | Only data accesses are measured; the instruction cache is |
+| | not measured. |
+| | |
+| | The results are reported in nanoseconds per load and have |
+| | been verified accurate to within a few nanoseconds on an SGI |
+| | Indy. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc010.yaml |
@@ -33,20 +83,13 @@ Yardstick Test Case Description TC010
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
| | |
-+--------------+--------------------------------------------------------------+
-|test tool | Lmbench |
-| | |
-| | Lmbench is a suite of operating system microbenchmarks. This |
-| | test uses lat_mem_rd tool from that suite. |
-| | Lmbench is not always part of a Linux distribution, hence it |
-| | needs to be installed in the test image |
-| | |
-+--------------+--------------------------------------------------------------+
-|references | man-pages_ |
-| | |
-| | McVoy, Larry W.,and Carl Staelin. "lmbench: Portable Tools |
-| | for Performance Analysis." USENIX annual technical |
-| | conference 1996. |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably lower read latency is expected. |
+| | However, to cover most configurations, both baremetal and |
+| | fully virtualized ones, this value should be possible to |
+| | achieve and acceptable for black box testing. |
+| | Many heavy IO applications start to suffer badly if the |
+| | read latency is higher than this. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -55,12 +98,21 @@ Yardstick Test Case Description TC010
| | * stop_size; |
| | * iterations and intervals. |
| | |
-| | There are default values for each above-mentioned option. |
+| | Default values exist. |
| | |
| | SLA (optional) : max_latency: The maximum memory latency |
| | that is accepted. |
| | |
+--------------+--------------------------------------------------------------+
+|usability     | This test case is one of Yardstick's generic tests. Thus it  |
+| | is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | LMbench lat_mem_rd_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with Lmbench included in the image. |
| | |
@@ -70,12 +122,32 @@ Yardstick Test Case Description TC010
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed as client. Lmbench's lat_mem_rd tool |
-|              | is invoked and logs are produced and stored.                 |
-|              |                                                              |
-|              | Result: logs are stored.                                     |
-|              |                                                              |
+--------------+--------------------------------------------------------------+
+|step 1 | A host VM with LMbench installed is booted. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+|              | 'lmbench_latency_benchmark' bash script is copied from Jump  |
+| | Host to the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'lmbench_latency_benchmark' script is invoked. LMbench's |
+|              | lat_mem_rd benchmark starts to measure memory read latency   |
+|              | for varying memory sizes and strides. Memory read latencies  |
+|              | are recorded and checked against the SLA. Logs are produced  |
+| | and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory latency is above the SLA |
| | value or if there is a test case execution problem. |
| | |
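
For reference, lat_mem_rd takes the maximum array size (in MB) and a
stride size as arguments; a sketch of the kind of run the
'lmbench_latency_benchmark' script may wrap, with illustrative
values only::

    # scan array sizes up to 8 MB with a 128-byte stride; results
    # are printed as "array_size_MB latency_ns" pairs
    lat_mem_rd 8 128
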
diff --git a/docs/userguide/opnfv_yardstick_tc011.rst b/docs/userguide/opnfv_yardstick_tc011.rst
index cf2fd5055..48bdef497 100644
--- a/docs/userguide/opnfv_yardstick_tc011.rst
+++ b/docs/userguide/opnfv_yardstick_tc011.rst
@@ -13,28 +13,22 @@ Yardstick Test Case Description TC011
|Packet delay variation between VMs |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC011_Packet delay variation between VMs |
+|test case id | OPNFV_YARDSTICK_TC011_PACKET DELAY VARIATION BETWEEN VMs |
| | |
+--------------+--------------------------------------------------------------+
|metric | jitter: packet delay variation (ms) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Measure the packet delay variation sending the packets from |
-| | one VM to the other. |
+|test purpose | The purpose of TC011 is to evaluate the IaaS network |
+| | performance with regards to network jitter (packet delay |
+| | variation). |
+| | It measures the packet delay variation sending the packets |
+| | from one VM to the other. |
| | |
-+--------------+--------------------------------------------------------------+
-|configuration | File: opnfv_yardstick_tc011.yaml |
-| | |
-| | * options: |
-| | protocol: udp # The protocol used by iperf3 tools |
-| | bandwidth: 20m # It will send the given number of packets |
-| | without pausing |
-| | * runner: |
-| | duration: 30 # Total test duration 30 seconds. |
-| | |
-| | * SLA (optional): |
-| | jitter: 10 (ms) # The maximum amount of jitter that is |
-| | accepted. |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | iperf3 |
@@ -46,14 +40,34 @@ Yardstick Test Case Description TC011
| | |
| | (iperf3 is not always part of a Linux distribution, hence it |
| | needs to be installed. It is part of the Yardstick Docker |
-| | image. |
-| | As an example see the /yardstick/tools/ directory for how |
-| | to generate a Linux image with pktgen included.) |
+| | image. As an example see the /yardstick/tools/ directory for |
+| | how to generate a Linux image with pktgen included.) |
| | |
+--------------+--------------------------------------------------------------+
-|references | iperf3_ |
+|test | iperf3 test is invoked between a host VM and a target VM. |
+|description | |
+| | Jitter calculations are continuously computed by the server, |
+| | as specified by RTP in RFC 1889. The client records a 64 bit |
+| | second/microsecond timestamp in the packet. The server |
+| | computes the relative transit time as (server's receive time |
+| | - client's send time). The client's and server's clocks do |
+| | not need to be synchronized; any difference is subtracted |
+|              | out in the jitter calculation. Jitter is the smoothed mean   |
+|              | of differences between consecutive transit times.            |
| | |
-| | ETSI-NFV-TST001 |
++--------------+--------------------------------------------------------------+
+|configuration | File: opnfv_yardstick_tc011.yaml |
+| | |
+| | * options: |
+| | protocol: udp # The protocol used by iperf3 tools |
+| | bandwidth: 20m # It will send the given number of packets |
+| | without pausing |
+| | * runner: |
+| | duration: 30 # Total test duration 30 seconds. |
+| | |
+| | * SLA (optional): |
+| | jitter: 10 (ms) # The maximum amount of jitter that is |
+| | accepted. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -67,6 +81,15 @@ Yardstick Test Case Description TC011
| | serves as an example. |
| | |
+--------------+--------------------------------------------------------------+
+|usability     | This test case is one of Yardstick's generic tests. Thus it  |
+| | is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | iperf3_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with iperf3 included in the image. |
| | |
@@ -76,12 +99,24 @@ Yardstick Test Case Description TC011
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. iperf3 is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs with iperf3 installed are booted, as server and |
+| | client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2        | Yardstick is connected with the server VM by using ssh.      |
+|              | An iperf3 server is started on the server VM via the ssh     |
+| | tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3        | iperf3 benchmark is invoked. Jitter is calculated and        |
+|              | checked against the SLA. Logs are produced and stored.       |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | The host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test should not PASS if any jitter is above the optional SLA |
| | value, or if there is a test case execution problem. |
| | |
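
For reference, such a jitter measurement maps onto a plain iperf3
UDP run; the commands below are a sketch only, with the bandwidth
and duration taken from the configuration above and an illustrative
server address::

    iperf3 -s                                 # on the server VM
    iperf3 -c <server-ip> -u -b 20M -t 30     # on the client VM
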
diff --git a/docs/userguide/opnfv_yardstick_tc012.rst b/docs/userguide/opnfv_yardstick_tc012.rst
index ffce06eb9..b56e829f5 100644
--- a/docs/userguide/opnfv_yardstick_tc012.rst
+++ b/docs/userguide/opnfv_yardstick_tc012.rst
@@ -7,29 +7,60 @@
Yardstick Test Case Description TC012
*************************************
-.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html
+.. _bw_mem: http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html
+-----------------------------------------------------------------------------+
|Memory Bandwidth |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC012_Memory Bandwidth |
+|test case id | OPNFV_YARDSTICK_TC012_MEMORY BANDWIDTH |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Megabyte per second (MBps) |
+|metric | Memory read/write bandwidth (MBps) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Measure the rate at which data can be read from and written |
-| | to the memory (this includes all levels of memory). |
+|test purpose | The purpose of TC012 is to evaluate the IaaS compute |
+| | performance with regards to memory throughput. |
+| | It measures the rate at which data can be read from and |
+| | written to the memory (this includes all levels of memory). |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | LMbench |
+| | |
+| | LMbench is a suite of operating system microbenchmarks. |
+|              | This test uses the bw_mem tool from that suite, which        |
+|              | includes:                                                    |
+| | * Cached file read |
+| | * Memory copy (bcopy) |
+| | * Memory read |
+| | * Memory write |
+| | * Pipe |
+| | * TCP |
+| | |
+| | (LMbench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with LMbench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | LMbench bw_mem benchmark allocates twice the specified |
+|description | amount of memory, zeros it, and then times the copying of |
+| | the first half to the second half. The benchmark is invoked |
+| | in a host VM on a compute blade. Results are reported in |
+| | megabytes moved per second. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc012.yaml |
| | |
| | * SLA (optional): 15000 (MBps) min_bw: The minimum amount of |
| | memory bandwidth that is accepted. |
-| | * Size: 10 240 kB - test allocates twice that size (20 480kB)|
-| | zeros it and then measures the time it takes to copy from |
-| | one side to another. |
+| | * Size: 10 240 kB - test allocates twice that size |
+|              |   (20 480 kB), zeros it and then measures the time it takes  |
+|              |   to copy from one side to another.                          |
| | * Benchmark: rdwr - measures the time to read data into |
| | memory and then write data to the same location. |
| | * Warmup: 0 - the number of iterations to perform before |
@@ -38,20 +69,13 @@ Yardstick Test Case Description TC012
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
| | |
-+--------------+--------------------------------------------------------------+
-|test tool | Lmbench |
-| | |
-| | Lmbench is a suite of operating system microbenchmarks. This |
-| | test uses bw_mem tool from that suite. |
-| | Lmbench is not always part of a Linux distribution, hence it |
-| | needs to be installed in the test image. |
-| | |
-+--------------+--------------------------------------------------------------+
-|references | man-pages_ |
-| | |
-| | McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
-| | for Performance Analysis." USENIX annual technical |
-| | conference. 1996. |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably higher bandwidth is expected. |
+| | However, to cover most configurations, both baremetal and |
+| | fully virtualized ones, this value should be possible to |
+| | achieve and acceptable for black box testing. |
+| | Many heavy IO applications start to suffer badly if the |
+| | read/write bandwidths are lower than this. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -62,7 +86,19 @@ Yardstick Test Case Description TC012
| | * number of warmup iterations; |
| | * iterations and intervals. |
| | |
-| | There are default values for each above-mentioned option. |
+| | Default values exist. |
+| | |
+|              | SLA (optional) : min_bandwidth: The minimum memory bandwidth |
+| | that is accepted. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability     | This test case is one of Yardstick's generic tests. Thus it  |
+| | is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | LMbench bw_mem_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
@@ -74,10 +110,23 @@ Yardstick Test Case Description TC012
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed as client. Lmbench's bw_mem tool is |
-| | invoked and logs are produced and stored. |
+|step 1 | A host VM with LMbench installed is booted. |
| | |
-| | Result: logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | "lmbench_bandwidth_benchmark" bash script is copied from |
+| | Jump Host to the host VM via ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'lmbench_bandwidth_benchmark' script is invoked. LMbench's |
+|              | bw_mem benchmark starts to measure memory read/write         |
+| | bandwidth. Memory read/write bandwidth results are recorded |
+| | and checked against the SLA. Logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory bandwidth is below the SLA |
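
For reference, a sketch of the bw_mem invocation the
'lmbench_bandwidth_benchmark' script may wrap, matching the Size and
Benchmark options above (values illustrative only)::

    # rdwr: read data into memory, then write to the same location;
    # bw_mem allocates twice the given size and reports MB/s moved
    bw_mem 10m rdwr
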
diff --git a/docs/userguide/opnfv_yardstick_tc014.rst b/docs/userguide/opnfv_yardstick_tc014.rst
index 27d390ac6..1b0d7831a 100644
--- a/docs/userguide/opnfv_yardstick_tc014.rst
+++ b/docs/userguide/opnfv_yardstick_tc014.rst
@@ -13,18 +13,51 @@ Yardstick Test Case Description TC014
|Processing speed |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC014_Processing speed |
+|test case id | OPNFV_YARDSTICK_TC014_PROCESSING SPEED |
| | |
+--------------+--------------------------------------------------------------+
-|metric | score of single cpu running, score of parallel running |
+|metric | score of single cpu running, |
+| | score of parallel running |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS processing speed with regards to score |
-| | of single cpu running and parallel running |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs and similar shall be stored for comparison reasons |
-| | and product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC014 is to evaluate the IaaS compute |
+| | performance with regards to CPU processing speed. |
+| | It measures score of single cpu running and parallel |
+| | running. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | UnixBench |
+| | |
+|              | UnixBench is a widely used CPU benchmarking software tool. It|
+|              | can measure the performance of bash scripts, CPUs in         |
+|              | multithreading and single threading. It can also measure the |
+|              | performance of parallel tasks. In addition, specific disk IO |
+|              | tests for small and large files are performed. It can be used|
+|              | to measure both Linux dedicated servers and Linux VPS        |
+|              | servers, running CentOS, Debian, Ubuntu, Fedora and other    |
+|              | distros.                                                     |
+| | |
+| | (UnixBench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with UnixBench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test          | UnixBench runs system benchmarks in a host VM on a           |
+|description | compute blade, getting information on the CPUs in the |
+| | system. If the system has more than one CPU, the tests will |
+| | be run twice -- once with a single copy of each test running |
+| | at once, and once with N copies, where N is the number of |
+| | CPUs. |
+| | |
+|              | UnixBench will process a set of results from a single test   |
+|              | by averaging the individual pass results into a single final |
+| | value. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc014.yaml |
@@ -33,15 +66,23 @@ Yardstick Test Case Description TC014
| | test_type: dhry2reg, whetstone and so on |
| | |
| | For SLA with single_score and parallel_score, both can be |
-| | set by user, default is NA |
+| | set by user, default is NA. |
| | |
+--------------+--------------------------------------------------------------+
-|test tool | unixbench |
+|applicability | Test can be configured with different: |
| | |
-| | (unixbench is not always part of a Linux distribution, hence |
-| | it needs to be installed. As an example see the |
-| | /yardstick/tools/ directory for how to generate a Linux |
-| | image with unixbench included.) |
+| | * test types; |
+| | * dhry2reg; |
+| | * whetstone. |
+| | |
+| | Default values exist. |
+| | |
+|              | SLA (optional) : min_score: The minimum UnixBench score that |
+| | is accepted. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability     | This test case is one of Yardstick's generic tests. Thus it  |
+| | is runnable on most of the scenarios. |
| | |
+--------------+--------------------------------------------------------------+
|references | unixbench_ |
@@ -49,10 +90,6 @@ Yardstick Test Case Description TC014
| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different test types, dhry2reg, |
-| | whetstone and so on. |
-| | |
-+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with unixbench included in it. |
| | |
@@ -62,12 +99,27 @@ Yardstick Test Case Description TC014
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as a client. unixbench is |
-| | invoked and logs are produced and stored. |
+|step 1 | A host VM with UnixBench installed is booted. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | "unixbench_benchmark" bash script is copied from Jump Host |
+| | to the host VM via ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | UnixBench is invoked. All the tests are executed using the |
+|              | "Run" script in the top level of the UnixBench directory.    |
+|              | The "Run" script will run a standard "index" test, and save  |
+|              | the report in the "results" directory. Then the report is    |
+|              | processed by "unixbench_benchmark" and checked against the   |
+| | SLA. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
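
For reference, the standard index run described in step 3 reduces to
invoking the Run script from the UnixBench top-level directory::

    cd UnixBench
    ./Run      # runs the standard "index" test, report in ./results
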
diff --git a/docs/userguide/opnfv_yardstick_tc037.rst b/docs/userguide/opnfv_yardstick_tc037.rst
index 3ed1fa529..5a6e1eaae 100644
--- a/docs/userguide/opnfv_yardstick_tc037.rst
+++ b/docs/userguide/opnfv_yardstick_tc037.rst
@@ -7,84 +7,128 @@
Yardstick Test Case Description TC037
*************************************
-.. _cirros: https://download.cirros-cloud.net
+.. _cirros-image: https://download.cirros-cloud.net
+.. _Ping: https://linux.die.net/man/8/ping
.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+.. _mpstat: http://www.linuxcommand.org/man_pages/mpstat1.html
+-----------------------------------------------------------------------------+
|Latency, CPU Load, Throughput, Packet Loss |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC037_Latency,CPU Load,Throughput,Packet Loss|
+|test case id | OPNFV_YARDSTICK_TC037_LATENCY,CPU LOAD,THROUGHPUT, |
+| | PACKET LOSS |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Number of flows, latency, throughput, CPU load, packet loss |
+|metric        | Number of flows, latency, throughput, packet loss,           |
+|              | CPU utilization percentage, CPU interrupts per second        |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS network performance with regards to |
-| | flows and throughput, such as if and how different amounts |
-| | of flows matter for the throughput between hosts on different|
-| | compute blades. Typically e.g. the performance of a vSwitch |
-| | depends on the number of flows running through it. Also |
-| | performance of other equipment or entities can depend |
-| | on the number of flows or the packet sizes used. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs ans similar shall be stored for comparison reasons and|
-| | product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC037 is to evaluate the IaaS compute |
+| | capacity and network performance with regards to CPU |
+| | utilization, packet flows and network throughput, such as if |
+| | and how different amounts of flows matter for the throughput |
+| | between hosts on different compute blades, and the CPU load |
+| | variation. |
+| | |
+| | Typically e.g. the performance of a vSwitch depends on the |
+| | number of flows running through it. Also performance of |
+| | other equipment or entities can depend on the number of |
+|              | flows or the packet sizes used.                              |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Ping, Pktgen, mpstat |
+| | |
+| | Ping is a computer network administration software utility |
+| | used to test the reachability of a host on an Internet |
+| | Protocol (IP) network. It measures the round-trip time for |
+|              | packets sent from the originating host to a destination      |
+| | computer that are echoed back to the source. |
+| | |
+|              | Linux packet generator (pktgen) is a tool to generate        |
+|              | packets at very high speed in the kernel. pktgen is mainly   |
+|              | used to drive and test network and LAN equipment. pktgen     |
+|              | supports multi-threading and can generate UDP packets with   |
+|              | random MAC addresses, IP addresses and port numbers, using   |
+|              | multiple CPU processors and NICs on different PCI buses      |
+|              | (PCI, PCIe). The transmit data rate depends on hardware      |
+|              | parameters such as CPU processing speed, memory delay and    |
+|              | PCI bus speed, and can exceed 10 Gbit/s, which satisfies     |
+|              | most NIC test requirements.                                  |
+| | |
+| | The mpstat command writes to standard output activities for |
+| | each available processor, processor 0 being the first one. |
+| | Global average activities among all processors are also |
+| | reported. The mpstat command can be used both on SMP and UP |
+| | machines, but in the latter, only global average activities |
+| | will be printed. |
+| | |
+| | (Ping is normally part of any Linux distribution, hence it |
+| | doesn't need to be installed. It is also part of the |
+| | Yardstick Docker image. |
+| | For example also a Cirros image can be downloaded from |
+|              | cirros-image_; it includes ping.                             |
+| | |
+| | Pktgen and mpstat are not always part of a Linux |
+|              | distribution, hence they need to be installed. They are part |
+|              | of the Yardstick Docker image.                               |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen and mpstat included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | This test case uses Pktgen to generate packet flow between |
+|description | two hosts for simulating network workloads on the SUT. |
+| | Ping packets (ICMP protocol's mandatory ECHO_REQUEST |
+| | datagram) are sent from a host VM to the target VM(s) to |
+| | elicit ICMP ECHO_RESPONSE, meanwhile CPU activities are |
+| | monitored by mpstat. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc037.yaml |
| | |
-| | Packet size: 64 bytes |
+| | Packet size is set to 64 bytes. |
| | Number of ports: 1, 10, 50, 100, 300, 500, 750 and 1000. |
|              | The configured amounts of ports map to between 2 and 1001000 |
|              | flows, respectively. Each port amount is run twice, for 20   |
| | seconds each. Then the next port_amount is run, and so on. |
| | During the test CPU load on both client and server, and the |
| | network latency between the client and server are measured. |
-| | The client and server are distributed on different HW. |
+| | The client and server are distributed on different hardware. |
+| | mpstat monitoring interval is set to 1 second. |
+| | ping packet size is set to 100 bytes. |
| | For SLA max_ppm is set to 1000. |
| | |
+--------------+--------------------------------------------------------------+
-|test tool | pktgen |
+|applicability | Test can be configured with different: |
| | |
-| | (Pktgen is not always part of a Linux distribution, hence it |
-| | needs to be installed. It is part of the Yardstick Glance |
-| | image. |
-| | As an example see the /yardstick/tools/ directory for how |
-| | to generate a Linux image with pktgen included.) |
-| | |
-| | ping |
-| | |
-| | Ping is normally part of any Linux distribution, hence it |
-| | doesn't need to be installed. It is also part of the |
-| | Yardstick Glance image. |
-| | (For example also a cirros_ image can be downloaded, it |
-| | includes ping) |
+| | * pktgen packet sizes; |
+| | * amount of flows; |
+| | * test duration; |
+| | * ping packet size; |
+| | * mpstat monitor interval. |
| | |
-| | mpstat |
+| | Default values exist. |
| | |
-| | (Mpstat is not always part of a Linux distribution, hence it |
-| | needs to be installed. It is part of the Yardstick Glance |
-| | image. |
+| | SLA (optional): max_ppm: The number of packets per million |
+|              | packets sent that are acceptable to lose, not received.      |
| | |
+--------------+--------------------------------------------------------------+
-|references | Ping and Mpstat man pages |
+|references | Ping_ |
+| | |
+| | mpstat_ |
| | |
| | pktgen_ |
| | |
| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different packet sizes, amount |
-| | of flows and test duration. Default values exist. |
-| | |
-| | SLA (optional): max_ppm: The number of packets per million |
-| | packets sent that are acceptable to loose, not received. |
-| | |
-+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
-|conditions | with pktgen included in it. |
+|conditions    | with pktgen and mpstat included in it.                       |
| | |
| | No POD specific requirements have been identified. |
| | |
@@ -92,12 +136,31 @@ Yardstick Test Case Description TC037
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. pktgen is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs are booted, as server and client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the server VM by using ssh. |
+|              | 'pktgen_benchmark' and 'ping_benchmark' bash scripts are     |
+|              | copied from Jump Host to the server VM via the ssh tunnel.   |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3        | An IP table is set up on the server to monitor for received  |
+| | packets. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4        | pktgen is invoked to generate packet flow between the server |
+|              | and client VMs for simulating network workloads on the SUT.  |
+|              | Ping is invoked. Ping packets are sent from server VM to     |
+|              | client VM. mpstat is invoked, recording activities for each  |
+| | available processor. Results are processed and checked |
+| | against the SLA. Logs are produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 5        | The two host VMs are deleted.                                |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
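
For reference, pktgen is driven through its /proc interface; a
minimal sketch of the kind of commands the 'pktgen_benchmark' script
may issue, with an illustrative device and destination address::

    modprobe pktgen
    echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
    echo "pkt_size 64"   > /proc/net/pktgen/eth0
    echo "count 1000000" > /proc/net/pktgen/eth0
    echo "dst 10.0.1.5"  > /proc/net/pktgen/eth0
    mpstat -P ALL 1 &               # per-CPU activity, 1 s interval
    echo "start" > /proc/net/pktgen/pgctrl
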
diff --git a/docs/userguide/opnfv_yardstick_tc043.rst b/docs/userguide/opnfv_yardstick_tc043.rst
index 59d7c6993..a873696dc 100644
--- a/docs/userguide/opnfv_yardstick_tc043.rst
+++ b/docs/userguide/opnfv_yardstick_tc043.rst
@@ -8,21 +8,40 @@ Yardstick Test Case Description TC043
*************************************
.. _cirros-image: https://download.cirros-cloud.net
+.. _Ping: https://linux.die.net/man/8/ping
+-----------------------------------------------------------------------------+
|Network Latency Between NFVI Nodes |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC043_Latency_between_NFVI_nodes |
-| | measurements |
+|test case id | OPNFV_YARDSTICK_TC043_LATENCY_BETWEEN_NFVI_NODES |
| | |
+--------------+--------------------------------------------------------------+
-|metric | RTT, Round Trip Time |
+|metric | RTT (Round Trip Time) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To do a basic verification that network latency is within |
-| | acceptable boundaries when packets travel between different |
-| | nodes. |
+|test purpose | The purpose of TC043 is to do a basic verification that |
+| | network latency is within acceptable boundaries when packets |
+| | travel between different NFVI nodes. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | ping |
+| | |
+| | Ping is a computer network administration software utility |
+| | used to test the reachability of a host on an Internet |
+| | Protocol (IP) network. It measures the round-trip time for |
+|              | packets sent from the originating host to a destination      |
+| | computer that are echoed back to the source. |
+| | |
++--------------+--------------------------------------------------------------+
+|test topology | Ping packets (ICMP protocol's mandatory ECHO_REQUEST |
+| | datagram) are sent from host node to target node to elicit |
+| | ICMP ECHO_RESPONSE. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc043.yaml |
@@ -31,32 +50,33 @@ Yardstick Test Case Description TC043
| | One ping each 10 seconds. SLA RTT is set to maximum 10 ms. |
| | |
+--------------+--------------------------------------------------------------+
-|test tool | ping |
-| | |
-| | Ping is normally part of any Linux distribution, hence it |
-| | doesn't need to be installed. It is also part of the |
-| | Yardstick Docker image. |
+|applicability | This test case can be configured with different: |
| | |
-+--------------+--------------------------------------------------------------+
-|references | Ping man page |
+| | * packet sizes; |
+| | * burst sizes; |
+| | * ping intervals; |
+| | * test durations; |
+| | * test iterations. |
| | |
-| | ETSI-NFV-TST001 |
+| | Default values exist. |
| | |
-+--------------+--------------------------------------------------------------+
-|applicability | Test case can be configured with different packet sizes, |
-| | burst sizes, ping intervals and test duration. |
| | SLA is optional. The SLA in this test case serves as an |
-| | example. Considerably lower RTT is expected, and |
-| | also normal to achieve in balanced L2 environments. However, |
-| | to cover most configurations, both bare metal and fully |
-| | virtualized ones, this value should be possible to achieve |
-| | and acceptable for black box testing. Many real time |
+| | example. Considerably lower RTT is expected, and also normal |
+| | to achieve in balanced L2 environments. However, to cover |
+| | most configurations, both bare metal and fully virtualized |
+| | ones, this value should be possible to achieve and |
+| | acceptable for black box testing. Many real time |
| | applications start to suffer badly if the RTT time is higher |
| | than this. Some may suffer bad also close to this RTT, while |
| | others may not suffer at all. It is a compromise that may |
| | have to be tuned for different configuration purposes. |
| | |
+--------------+--------------------------------------------------------------+
+|references | Ping_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre_test | Each pod node must have ping included in it. |
|conditions | |
| | |
@@ -64,8 +84,14 @@ Yardstick Test Case Description TC043
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The pod is available. Two nodes as server and client. |
-| | Ping is invoked and logs are produced and stored. |
+|step 1 | Yardstick is connected with the NFVI node by using ssh. |
+|              | 'ping_benchmark' bash script is copied from Jump Host to     |
+| | the NFVI node via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Ping is invoked. Ping packets are sent from server node to |
+| | client node. RTT results are calculated and checked against |
+| | the SLA. Logs are produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
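
For reference, the configuration above (100-byte packets, one ping
every 10 seconds) corresponds roughly to the following invocation,
with an illustrative target address::

    ping -s 100 -i 10 <target-node-ip>
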
diff --git a/etc/yardstick/yardstick.conf.sample b/etc/yardstick/yardstick.conf.sample
index f4eff05d3..70cf71ade 100644
--- a/etc/yardstick/yardstick.conf.sample
+++ b/etc/yardstick/yardstick.conf.sample
@@ -8,22 +8,21 @@
##############################################################################
[DEFAULT]
-# verbose = True
-# debug = True
-# dispatcher = http
+debug = False
+dispatcher = http
[dispatcher_http]
-# timeout = 5
-# target = http://127.0.0.1:8000/results
+timeout = 5
+target = http://127.0.0.1:8000/results
[dispatcher_file]
-# file_path = /tmp/yardstick.out
-# max_bytes = 0
-# backup_count = 0
+file_path = /tmp/yardstick.out
+max_bytes = 0
+backup_count = 0
[dispatcher_influxdb]
-# timeout = 5
-# target = http://127.0.0.1:8086
-# db_name = yardstick
-# username = root
-# password = root
+timeout = 5
+target = http://127.0.0.1:8086
+db_name = yardstick
+username = root
+password = root
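
A sketch of putting the sample into effect, assuming Yardstick reads
its configuration from /etc/yardstick/yardstick.conf::

    cp etc/yardstick/yardstick.conf.sample /etc/yardstick/yardstick.conf
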
diff --git a/fuel-plugin/deployment_scripts/install.sh b/fuel-plugin/deployment_scripts/install.sh
index 6882f0be2..18f4fc2c2 100755
--- a/fuel-plugin/deployment_scripts/install.sh
+++ b/fuel-plugin/deployment_scripts/install.sh
@@ -27,4 +27,7 @@ cd $BIN_HOME
curl http://$HOST:8080/plugins/fuel-plugin-yardstick-1.0/repositories/ubuntu/yardstick.tar.gz | tar xzvf -
-python setup.py develop
+# install dependency
+pip install -r requirements.txt
+
+python setup.py install
diff --git a/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp b/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
index 82dfff387..e69371141 100644
--- a/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
+++ b/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
@@ -7,7 +7,7 @@ $admin_user = $access_hash['user']
$admin_password = $access_hash['password']
$region = hiera('region', 'RegionOne')
-$auth_api_version = 'v2.0'
+$auth_api_version = ''
$service_endpoint = hiera('service_endpoint', $management_vip)
$ssl_hash = hiera_hash('use_ssl', {})
$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http')
diff --git a/plugin/storperf.yaml b/plugin/storperf.yaml
index d08e26eb6..074a82067 100644
--- a/plugin/storperf.yaml
+++ b/plugin/storperf.yaml
@@ -10,4 +10,5 @@ plugins:
deployment:
ip: 192.168.23.2
user: root
+ # Remove 'password' if log into deployment location using key file
password: root
diff --git a/requirements.txt b/requirements.txt
index 6b4edf3f0..9c037ed79 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -84,3 +84,4 @@ pyroute2==0.4.10
docker-py==1.10.6
flasgger==0.5.13
flask-restful-swagger==0.19
+SQLAlchemy==1.1.4
diff --git a/samples/ping_load.yaml b/samples/ping_load.yaml
new file mode 100644
index 000000000..370916822
--- /dev/null
+++ b/samples/ping_load.yaml
@@ -0,0 +1,65 @@
+---
+# Sample benchmark task config file
+# Three scenarios run in parallel, pinging one target VM.
+# Multiple contexts are used to specify the host and target VMs.
+
+schema: "yardstick:task:0.1"
+run_in_parallel: true
+
+scenarios:
+{% for host in ['athena.demo1', 'apollo.demo1', 'kratos.demo1'] %}
+-
+ type: Ping
+ options:
+ packetsize: 100
+ host: {{host}}
+ target: hades.demo2
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+ sla:
+ max_rtt: 10
+ action: assert
+{% endfor %}
+
+contexts:
+-
+ name: demo1
+ image: cirros-0.3.3
+ flavor: yardstick-flavor
+ user: cirros
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ apollo:
+ floating_ip: true
+ placement: "pgrp1"
+ kratos:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+-
+ name: demo2
+ image: cirros-0.3.3
+ flavor: yardstick-flavor
+ user: cirros
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ hades:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
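
A sample like this one is launched with the yardstick CLI; a minimal
sketch, assuming Yardstick is installed and the OpenStack credentials
are sourced::

    yardstick task start samples/ping_load.yaml
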
diff --git a/samples/tosca.yaml b/samples/tosca.yaml
index 4472f7ef8..21c789133 100644
--- a/samples/tosca.yaml
+++ b/samples/tosca.yaml
@@ -5,145 +5,147 @@ import:
metadata:
- ID:clearwater
- Vendor:HP
+ ID: clearwater
+ Vendor: HP
dsl_definitions:
- compute_props_host_ellis:&compute_props_host_ellis
- num_cpu:4
- mem_size:4096
- compute_props_host_bono:&compute_props_host_bono
- num_cpu:3
- mem_size:2048
+ compute_props_host_ellis: &compute_props_host_ellis
+ num_cpu: 4
+ mem_size: 4096
+ compute_props_host_bono: &compute_props_host_bono
+ num_cpu: 3
+ mem_size: 2048
node_types:
- tosca.nodes.compute.ellis:
- derived_from:tosca.nodes.compute
+ tosca.nodes.compute.ellis:
+ derived_from: tosca.nodes.compute
- tosca.nodes.compute.bono:
- derived_from:tosca.nodes.compute
+ tosca.nodes.compute.bono:
+ derived_from: tosca.nodes.compute
topology_template:
- # a description of the topology template
- description:>
- Vdus used in a vnfd
- inputs:
- storage_size:
- type:scalar-unit.size
- default:2048
- description:The required storage resource
- storage_location:
- type:string
- description:>
- Block storage mount point (filesystem path).
- node_templates:
+ # A description of the topology template
+ description: >
+ Vdus used in a vnfd
+ inputs:
+ storage_size:
+ type: scalar-unit.size
+ default: 2048
+ description: The required storage resource
+ storage_location:
+ type: string
+ description: >
+ Block storage mount point (filesystem path).
+ node_templates:
ellis:
- type:tosca.nodes.Compute
- capabilities:
- os:
- properties:
- architecture:
- type:
- distribution:
- version:
- host:
- properties:*compute_props_host_ellis
- scalable:
- properties:
- min_instances:1
- default_instances:1
- requirements:
- - local_storage:
- node:ellis_BlockStorage
- relationship:
- type:AttachesTo
- properties:
- location:{ get_input:storage_location }
- interfaces:
- Standard:
- start:
- implementation:start.sh
- delete:
- implementaion:stop.sh
- stop:
- implementaion:shutdown.sh
+ type: tosca.nodes.Compute
+ capabilities:
+ os:
+ properties:
+ architecture:
+ type:
+ distribution:
+ version:
+ host:
+ properties: *compute_props_host_ellis
+ scalable:
+ properties:
+ min_instances: 1
+ default_instances: 1
+ requirements:
+ - local_storage:
+ node: ellis_BlockStorage
+ relationship:
+ type: AttachesTo
+ properties:
+                location: { get_input: storage_location }
+ interfaces:
+ Standard:
+ start:
+ implementation: start.sh
+ delete:
+            implementation: stop.sh
+ stop:
+            implementation: shutdown.sh
ellis_BlockStorage:
- type:tosca.nodes.BlockStorage
- properties:
- size:{ get_input:storage_size }
+ type: tosca.nodes.BlockStorage
+ properties:
+        size: { get_input: storage_size }
bono:
- type:tosca.nodes.Compute
- capabilities:
- os:
- properties:
- architecture:
- type:
- distribution:
- version:
- host:
- properties:*compute_props_host_bono
- scalable:
- properties:
- min_instances:3
- default_instances:3
- requirements:
- - local_storage:
- node:bono_BlockStorage
- relationship:
- type:AttachesTo
- properties:
- location:{ get_input:storage_location }
- interfaces:
- Standard:
- start:
- implementation:start.sh
- delete:
- implementaion:stop.sh
- stop:
- implementaion:shutdown.sh
+ type: tosca.nodes.Compute
+ capabilities:
+ os:
+ properties:
+ architecture:
+ type:
+ distribution:
+ version:
+ host:
+ properties: *compute_props_host_bono
+ scalable:
+ properties:
+ min_instances: 3
+ default_instances: 3
+ requirements:
+ - local_storage:
+ node: bono_BlockStorage
+ relationship:
+ type: AttachesTo
+ properties:
+                location: { get_input: storage_location }
+ interfaces:
+ Standard:
+ start:
+ implementation: start.sh
+ delete:
+            implementation: stop.sh
+ stop:
+            implementation: shutdown.sh
bono_BlockStorage:
- type:tosca.nodes.BlockStorage
- properties:
- size:{ get_input:storage_size }
+ type: tosca.nodes.BlockStorage
+ properties:
+        size: { get_input: storage_size }
clearwater_network1:
- type:tosca.nodes.network.Network
- properties:
- ip_version:4
- ellis_port1:
- type:tosca.nodes.network.Port
- requirements:
- - binding:
- node:ellis
- - link:
- node:clearwater_network1
+      type: tosca.nodes.network.Network
+      properties:
+        ip_version: 4
+      ellis_port1:
+        type: tosca.nodes.network.Port
+        requirements:
+          - binding:
+              node: ellis
+          - link:
+              node: clearwater_network1
clearwater_network2:
- type:tosca.nodes.network.Network
- properties:
- ip_version:4
- ellis_port2:
- type:tosca.nodes.network.Port
- requirements:
- - binding:
- node:ellis
- - link:
- node:clearwater_network2
+      type: tosca.nodes.network.Network
+      properties:
+        ip_version: 4
+      ellis_port2:
+        type: tosca.nodes.network.Port
+        requirements:
+          - binding:
+              node: ellis
+          - link:
+              node: clearwater_network2
clearwater_network1:
- type:tosca.nodes.network.Network
- properties:
- ip_version:4
- bono_port1:
- type:tosca.nodes.network.Port
- requirements:
- - binding:
- node:bono
- - link:
- node:clearwater_network1
+      type: tosca.nodes.network.Network
+      properties:
+        ip_version: 4
+      bono_port1:
+        type: tosca.nodes.network.Port
+        requirements:
+          - binding:
+              node: bono
+          - link:
+              node: clearwater_network1
clearwater_network2:
- type:tosca.nodes.network.Network
- properties:
- ip_version:4
- bono_port2:
- type:tosca.nodes.network.Port
- requirements:
- - binding:
- node:bono
- - link:
- node:clearwater_network2 \ No newline at end of file
+      type: tosca.nodes.network.Network
+      properties:
+        ip_version: 4
+      bono_port2:
+        type: tosca.nodes.network.Port
+        requirements:
+          - binding:
+              node: bono
+          - link:
+              node: clearwater_network2
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 54595b648..0100b4635 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,8 @@ setup(
'resources/scripts/remove/*.bash'
],
'etc': [
- 'yardstick/nodes/*/*.yaml'
+ 'yardstick/nodes/*/*.yaml',
+ 'yardstick/*.sample'
],
'tests': [
'opnfv/*/*.yaml',
diff --git a/tests/ci/clean_images.sh b/tests/ci/clean_images.sh
index 5d661283d..fa4a54df6 100755
--- a/tests/ci/clean_images.sh
+++ b/tests/ci/clean_images.sh
@@ -27,3 +27,10 @@ cleanup()
openstack flavor delete yardstick-flavor &> /dev/null || true
}
+
+main()
+{
+ cleanup
+}
+
+main
diff --git a/tests/ci/yardstick-verify b/tests/ci/yardstick-verify
index 46b32cc2c..f9d98a4da 100755
--- a/tests/ci/yardstick-verify
+++ b/tests/ci/yardstick-verify
@@ -320,8 +320,6 @@ main()
source $YARDSTICK_REPO_DIR/tests/ci/clean_images.sh
- cleanup
-
trap "error_exit" EXIT SIGTERM
source $YARDSTICK_REPO_DIR/tests/ci/load_images.sh
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/tests/unit/benchmark/contexts/test_heat.py
index f891b0a5f..dd830a485 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/tests/unit/benchmark/contexts/test_heat.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.contexts.heat
+import os
import mock
import unittest
@@ -39,6 +40,8 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(self.test_context._user)
self.assertIsNone(self.test_context.template_file)
self.assertIsNone(self.test_context.heat_parameters)
+ self.assertIsNotNone(self.test_context.key_uuid)
+ self.assertIsNotNone(self.test_context.key_filename)
@mock.patch('yardstick.benchmark.contexts.heat.PlacementGroup')
@mock.patch('yardstick.benchmark.contexts.heat.Network')
@@ -55,6 +58,7 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.init(attrs)
+ self.assertEqual(self.test_context.name, "foo")
self.assertEqual(self.test_context.keypair_name, "foo-key")
self.assertEqual(self.test_context.secgroup_name, "foo-secgroup")
@@ -69,14 +73,23 @@ class HeatContextTestCase(unittest.TestCase):
mock_server.assert_called_with('baz', self.test_context, servers['baz'])
self.assertTrue(len(self.test_context.servers) == 1)
+ if os.path.exists(self.test_context.key_filename):
+ try:
+ os.remove(self.test_context.key_filename)
+ os.remove(self.test_context.key_filename + ".pub")
+            except OSError:
+                LOG.exception("key_filename: %s",
+                              self.test_context.key_filename)
+
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test__add_resources_to_template_no_servers(self, mock_template):
self.test_context.keypair_name = "foo-key"
self.test_context.secgroup_name = "foo-secgroup"
+ self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
self.test_context._add_resources_to_template(mock_template)
- mock_template.add_keypair.assert_called_with("foo-key")
+ mock_template.add_keypair.assert_called_with("foo-key",
+ "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b")
mock_template.add_security_group.assert_called_with("foo-secgroup")
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
@@ -100,6 +113,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertTrue(mock_template.delete.called)
+
def test__get_server(self):
self.mock_context.name = 'bar'
diff --git a/api/yardstick.sock b/tests/unit/benchmark/core/__init__.py
index e69de29bb..e69de29bb 100644
--- a/api/yardstick.sock
+++ b/tests/unit/benchmark/core/__init__.py
diff --git a/tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml b/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
index 4933b93ae..4933b93ae 100644
--- a/tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml b/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
index f39df7346..f39df7346 100644
--- a/tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/test_plugin.py b/tests/unit/benchmark/core/test_plugin.py
index 2e823fdae..441116a25 100644
--- a/tests/unit/cmd/commands/test_plugin.py
+++ b/tests/unit/benchmark/core/test_plugin.py
@@ -9,12 +9,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.plugin
+# Unittest for yardstick.benchmark.core.plugin
import mock
import unittest
-from yardstick.cmd.commands import plugin
+from yardstick.benchmark.core import plugin
class Arg(object):
@@ -22,30 +22,30 @@ class Arg(object):
self.input_file = ('plugin/sample_config.yaml',)
-@mock.patch('yardstick.cmd.commands.plugin.ssh')
-class pluginCommandsTestCase(unittest.TestCase):
+@mock.patch('yardstick.benchmark.core.plugin.ssh')
+class pluginTestCase(unittest.TestCase):
def setUp(self):
self.result = {}
- def test_do_install(self, mock_ssh):
- p = plugin.PluginCommands()
+ def test_install(self, mock_ssh):
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
input_file = Arg()
- p.do_install(input_file)
+ p.install(input_file)
expected_result = {}
self.assertEqual(self.result, expected_result)
- def test_do_remove(self, mock_ssh):
- p = plugin.PluginCommands()
+ def test_remove(self, mock_ssh):
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
input_file = Arg()
- p.do_remove(input_file)
+ p.remove(input_file)
expected_result = {}
self.assertEqual(self.result, expected_result)
def test_install_setup_run(self, mock_ssh):
- p = plugin.PluginCommands()
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
plugins = {
"name": "sample"
@@ -64,7 +64,7 @@ class pluginCommandsTestCase(unittest.TestCase):
self.assertEqual(self.result, expected_result)
def test_remove_setup_run(self, mock_ssh):
- p = plugin.PluginCommands()
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
plugins = {
"name": "sample"
@@ -81,3 +81,11 @@ class pluginCommandsTestCase(unittest.TestCase):
p._run(plugin_name)
expected_result = {}
self.assertEqual(self.result, expected_result)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/cmd/commands/test_task.py b/tests/unit/benchmark/core/test_task.py
index 0177fd08a..463c43e1f 100644
--- a/tests/unit/cmd/commands/test_task.py
+++ b/tests/unit/benchmark/core/test_task.py
@@ -9,18 +9,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.task
+# Unittest for yardstick.benchmark.core.task
import os
import mock
import unittest
-from yardstick.cmd.commands import task
+from yardstick.benchmark.core import task
-class TaskCommandsTestCase(unittest.TestCase):
+class TaskTestCase(unittest.TestCase):
- @mock.patch('yardstick.cmd.commands.task.Context')
+ @mock.patch('yardstick.benchmark.core.task.Context')
def test_parse_nodes_host_target_same_context(self, mock_context):
nodes = {
"host": "node1.LF",
@@ -38,42 +38,45 @@ class TaskCommandsTestCase(unittest.TestCase):
self.assertEqual(context_cfg["host"], server_info)
self.assertEqual(context_cfg["target"], server_info)
- @mock.patch('yardstick.cmd.commands.task.Context')
- @mock.patch('yardstick.cmd.commands.task.base_runner')
+ @mock.patch('yardstick.benchmark.core.task.Context')
+ @mock.patch('yardstick.benchmark.core.task.base_runner')
def test_run(self, mock_base_runner, mock_ctx):
- scenario = \
- {'host': 'athena.demo',
- 'target': 'ares.demo',
- 'runner':
- {'duration': 60,
- 'interval': 1,
- 'type': 'Duration'
- },
- 'type': 'Ping'}
-
- t = task.TaskCommands()
+ scenario = {
+ 'host': 'athena.demo',
+ 'target': 'ares.demo',
+ 'runner': {
+ 'duration': 60,
+ 'interval': 1,
+ 'type': 'Duration'
+ },
+ 'type': 'Ping'
+ }
+
+ t = task.Task()
runner = mock.Mock()
runner.join.return_value = 0
mock_base_runner.Runner.get.return_value = runner
t._run([scenario], False, "yardstick.out")
self.assertTrue(runner.run.called)
- @mock.patch('yardstick.cmd.commands.task.os')
+ @mock.patch('yardstick.benchmark.core.task.os')
def test_check_precondition(self, mock_os):
- cfg = \
- {'precondition':
- {'installer_type': 'compass',
- 'deploy_scenarios': 'os-nosdn',
- 'pod_name': 'huawei-pod1'
- }
+ cfg = {
+ 'precondition': {
+ 'installer_type': 'compass',
+ 'deploy_scenarios': 'os-nosdn',
+ 'pod_name': 'huawei-pod1'
}
+ }
t = task.TaskParser('/opt')
- mock_os.environ.get.side_effect = ['compass', 'os-nosdn', 'huawei-pod1']
+ mock_os.environ.get.side_effect = ['compass',
+ 'os-nosdn',
+ 'huawei-pod1']
result = t._check_precondition(cfg)
self.assertTrue(result)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_no_constraint_no_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "no_constraint_no_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -82,15 +85,15 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1], None)
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_no_constraint_with_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "no_constraint_with_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -99,16 +102,16 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1],
- '{"host": "node1.LF","target": "node2.LF"}')
+ '{"host": "node1.LF","target": "node2.LF"}')
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_with_constraint_no_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "with_constraint_no_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -117,15 +120,15 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1], None)
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_with_constraint_with_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "with_constraint_with_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -134,12 +137,12 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1],
- '{"host": "node1.LF","target": "node2.LF"}')
+ '{"host": "node1.LF","target": "node2.LF"}')
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
@@ -148,3 +151,10 @@ class TaskCommandsTestCase(unittest.TestCase):
file_path = os.path.join(curr_path, filename)
return file_path
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/cmd/commands/test_testcase.py b/tests/unit/benchmark/core/test_testcase.py
index c55c367d0..6e0473cc1 100644
--- a/tests/unit/cmd/commands/test_testcase.py
+++ b/tests/unit/benchmark/core/test_testcase.py
@@ -11,26 +11,33 @@
# Unittest for yardstick.cmd.commands.testcase
-import mock
import unittest
-from yardstick.cmd.commands import testcase
-from yardstick.cmd.commands.testcase import TestcaseCommands
+from yardstick.benchmark.core import testcase
+
class Arg(object):
def __init__(self):
- self.casename=('opnfv_yardstick_tc001',)
+ self.casename = ('opnfv_yardstick_tc001',)
+
-class TestcaseCommandsUT(unittest.TestCase):
+class TestcaseUT(unittest.TestCase):
- def test_do_list(self):
- t = testcase.TestcaseCommands()
- result = t.do_list("")
+ def test_list_all(self):
+ t = testcase.Testcase()
+ result = t.list_all("")
self.assertEqual(result, True)
- def test_do_show(self):
- t = testcase.TestcaseCommands()
+ def test_show(self):
+ t = testcase.Testcase()
casename = Arg()
- result = t.do_show(casename)
+ result = t.show(casename)
self.assertEqual(result, True)
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml b/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
index 8194a2361..8194a2361 100644
--- a/tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml b/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
index 86c9b2800..86c9b2800 100644
--- a/tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/test_env.py b/tests/unit/cmd/commands/test_env.py
index af1ab8030..e85c36755 100644
--- a/tests/unit/cmd/commands/test_env.py
+++ b/tests/unit/cmd/commands/test_env.py
@@ -8,17 +8,56 @@
##############################################################################
import unittest
import mock
+import uuid
from yardstick.cmd.commands.env import EnvCommand
class EnvCommandTestCase(unittest.TestCase):
- @mock.patch('yardstick.cmd.commands.env.HttpClient')
- def test_do_influxdb(self, mock_http_client):
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._start_async_task')
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._check_status')
+ def test_do_influxdb(self, check_status_mock, start_async_task_mock):
env = EnvCommand()
env.do_influxdb({})
- self.assertTrue(mock_http_client().post.called)
+ self.assertTrue(start_async_task_mock.called)
+ self.assertTrue(check_status_mock.called)
+
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._start_async_task')
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._check_status')
+ def test_do_grafana(self, check_status_mock, start_async_task_mock):
+ env = EnvCommand()
+ env.do_grafana({})
+ self.assertTrue(start_async_task_mock.called)
+ self.assertTrue(check_status_mock.called)
+
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._start_async_task')
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._check_status')
+ def test_do_prepare(self, check_status_mock, start_async_task_mock):
+ env = EnvCommand()
+ env.do_prepare({})
+ self.assertTrue(start_async_task_mock.called)
+ self.assertTrue(check_status_mock.called)
+
+ @mock.patch('yardstick.cmd.commands.env.HttpClient.post')
+ def test_start_async_task(self, post_mock):
+ data = {'action': 'createGrafanaContainer'}
+ EnvCommand()._start_async_task(data)
+ self.assertTrue(post_mock.called)
+
+ @mock.patch('yardstick.cmd.commands.env.HttpClient.get')
+ @mock.patch('yardstick.cmd.commands.env.EnvCommand._print_status')
+ def test_check_status(self, print_mock, get_mock):
+ task_id = str(uuid.uuid4())
+ get_mock.return_value = {'status': 2, 'result': 'error'}
+ status = EnvCommand()._check_status(task_id, 'hello world')
+ self.assertEqual(status, 2)
+
+ def test_print_status(self):
+ try:
+ EnvCommand()._print_status('hello', 'word')
+ except Exception as e:
+ self.assertIsInstance(e, IndexError)
def main():
diff --git a/tests/unit/common/test_httpClient.py b/tests/unit/common/test_httpClient.py
index b39dc2332..94ac1c891 100644
--- a/tests/unit/common/test_httpClient.py
+++ b/tests/unit/common/test_httpClient.py
@@ -24,6 +24,12 @@ class HttpClientTestCase(unittest.TestCase):
mock_requests.post.assert_called_with(url, data=json.dumps(data),
headers=headers)
+ @mock.patch('yardstick.common.httpClient.requests')
+ def test_get(self, mock_requests):
+ url = 'http://localhost:5000/hello'
+ httpClient.HttpClient().get(url)
+ mock_requests.get.assert_called_with(url)
+
def main():
unittest.main()
diff --git a/tests/unit/test_ssh.py b/tests/unit/test_ssh.py
index 8b828ed7c..045ac0f1b 100644
--- a/tests/unit/test_ssh.py
+++ b/tests/unit/test_ssh.py
@@ -310,12 +310,38 @@ class SSHRunTestCase(unittest.TestCase):
@mock.patch("yardstick.ssh.open", create=True)
def test__put_file_shell(self, mock_open):
- self.test_client.run = mock.Mock()
- self.test_client._put_file_shell("localfile", "remotefile", 0o42)
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile", "remotefile", 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > "remotefile"&& chmod -- 042 "remotefile"',
+ stdin=mock_open.return_value.__enter__.return_value)
- self.test_client.run.assert_called_once_with(
- 'cat > remotefile && chmod -- 042 remotefile',
- stdin=mock_open.return_value.__enter__.return_value)
+ @mock.patch("yardstick.ssh.open", create=True)
+ def test__put_file_shell_space(self, mock_open):
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile",
+ "filename with space", 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > "filename with space"&& chmod -- 042 "filename with '
+ 'space"',
+ stdin=mock_open.return_value.__enter__.return_value)
+
+ @mock.patch("yardstick.ssh.open", create=True)
+ def test__put_file_shell_tilde(self, mock_open):
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile", "~/remotefile", 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > ~/"remotefile"&& chmod -- 042 ~/"remotefile"',
+ stdin=mock_open.return_value.__enter__.return_value)
+
+ @mock.patch("yardstick.ssh.open", create=True)
+ def test__put_file_shell_tilde_spaces(self, mock_open):
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile", "~/file with space",
+ 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > ~/"file with space"&& chmod -- 042 ~/"file with space"',
+ stdin=mock_open.return_value.__enter__.return_value)
@mock.patch("yardstick.ssh.os.stat")
def test__put_file_sftp(self, mock_stat):
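[Editorial note] The updated assertions pin down the quoting rule for remote paths: a leading `~/` is left outside the quotes so the remote shell still expands it, and the rest of the path is double-quoted so spaces survive. A minimal sketch of that rule (a hypothetical helper; the real logic lives inside yardstick.ssh and may differ in detail):

    def quote_remote_path(path):
        """Quote a remote path, keeping a leading ~/ unquoted for expansion."""
        if path.startswith('~/'):
            return '~/"%s"' % path[2:]
        return '"%s"' % path

    assert quote_remote_path('remotefile') == '"remotefile"'
    assert quote_remote_path('~/file with space') == '~/"file with space"'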
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index fcbe825d6..166ca40c8 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -9,6 +9,7 @@
import os
import sys
+import uuid
import pkg_resources
import paramiko
@@ -40,8 +41,11 @@ class HeatContext(Context):
self._user = None
self.template_file = None
self.heat_parameters = None
+        # generate a UUID to identify yardstick_key;
+        # the first 8 characters of the UUID are used
+ self.key_uuid = uuid.uuid4()
self.key_filename = YARDSTICK_ROOT_PATH + \
- 'yardstick/resources/files/yardstick_key'
+ 'yardstick/resources/files/yardstick_key-' + str(self.key_uuid)[:8]
super(self.__class__, self).__init__()
def init(self, attrs):
@@ -79,16 +83,12 @@ class HeatContext(Context):
self.servers.append(server)
self._server_map[server.dn] = server
- print "Generating RSA host key ..."
rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
- print "Writing yardstick_key ..."
rsa_key.write_private_key_file(self.key_filename)
- print "Writing yardstick_key.pub ..."
open(self.key_filename + ".pub", "w").write("%s %s\n" %
(rsa_key.get_name(),
rsa_key.get_base64()))
del rsa_key
- print "... done!"
@property
def image(self):
@@ -107,7 +107,7 @@ class HeatContext(Context):
def _add_resources_to_template(self, template):
'''add to the template the resources represented by this context'''
- template.add_keypair(self.keypair_name)
+ template.add_keypair(self.keypair_name, self.key_uuid)
template.add_security_group(self.secgroup_name)
for network in self.networks:
@@ -243,7 +243,8 @@ class HeatContext(Context):
with attribute name mapping when using external heat templates
'''
key_filename = pkg_resources.resource_filename(
- 'yardstick.resources', 'files/yardstick_key')
+ 'yardstick.resources', 'files/yardstick_key-{:.{width}}'.format(
+ self.key_uuid, width=8))
if type(attr_name) is dict:
cname = attr_name["name"].split(".")[1]
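[Editorial note] Both call sites must derive the same 8-character suffix from `key_uuid`: `str(self.key_uuid)[:8]` in the constructor and `'{:.{width}}'.format(..., width=8)` in `_get_server`. Under Python 2, which this code targets, formatting the UUID object falls back to its string form, so the two expressions agree; an illustrative check using the explicit string form:

    import uuid

    key_uuid = uuid.uuid4()
    assert str(key_uuid)[:8] == '{:.{width}}'.format(str(key_uuid), width=8)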
diff --git a/yardstick/benchmark/core/__init__.py b/yardstick/benchmark/core/__init__.py
new file mode 100644
index 000000000..12c83f87e
--- /dev/null
+++ b/yardstick/benchmark/core/__init__.py
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import print_function
+
+
+class Param(object):
+
+ def __init__(self, kwargs):
+ # list
+ self.inputfile = kwargs.get('inputfile')
+ self.task_args = kwargs.get('task-args')
+ self.task_args_file = kwargs.get('task-args-file')
+ self.keep_deploy = kwargs.get('keep-deploy')
+ self.parse_only = kwargs.get('parse-only')
+ self.output_file = kwargs.get('output-file', '/tmp/yardstick.out')
+ self.suite = kwargs.get('suite')
+
+ # list
+ self.input_file = kwargs.get('input_file')
+
+ # list
+ self.casename = kwargs.get('casename')
+
+ # list
+ self.type = kwargs.get('type')
+
+
+def print_hbar(barlen):
+    '''print to stdout a horizontal bar'''
+    print("+", end='')
+    print("-" * barlen, end='')
+    print("+")
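[Editorial note] `Param` turns the dash-separated CLI option names into plain attributes with defaults, so the core classes can be driven with ordinary dicts. A small usage sketch (the file path is illustrative):

    from yardstick.benchmark.core import Param, print_hbar

    param = Param({'inputfile': ['samples/ping.yaml']})
    print(param.inputfile)     # ['samples/ping.yaml']
    print(param.output_file)   # '/tmp/yardstick.out' by default
    print_hbar(10)             # prints "+----------+"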
diff --git a/yardstick/benchmark/core/plugin.py b/yardstick/benchmark/core/plugin.py
new file mode 100644
index 000000000..da12ce438
--- /dev/null
+++ b/yardstick/benchmark/core/plugin.py
@@ -0,0 +1,212 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'plugin' """
+
+from __future__ import print_function
+import os
+import sys
+import yaml
+import time
+import logging
+import pkg_resources
+import yardstick.ssh as ssh
+
+from yardstick.common.task_template import TaskTemplate
+
+LOG = logging.getLogger(__name__)
+
+
+class Plugin(object):
+ """Plugin commands.
+
+ Set of commands to manage plugins.
+ """
+
+ def install(self, args):
+ """Install a plugin."""
+
+ total_start_time = time.time()
+ parser = PluginParser(args.input_file[0])
+
+ plugins, deployment = parser.parse_plugin()
+ plugin_name = plugins.get("name")
+ print("Installing plugin: %s" % plugin_name)
+
+ LOG.info("Executing _install_setup()")
+ self._install_setup(plugin_name, deployment)
+
+ LOG.info("Executing _run()")
+ self._run(plugin_name)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+ print("Done, exiting")
+
+ def remove(self, args):
+ """Remove a plugin."""
+
+ total_start_time = time.time()
+ parser = PluginParser(args.input_file[0])
+
+ plugins, deployment = parser.parse_plugin()
+ plugin_name = plugins.get("name")
+ print("Removing plugin: %s" % plugin_name)
+
+ LOG.info("Executing _remove_setup()")
+ self._remove_setup(plugin_name, deployment)
+
+ LOG.info("Executing _run()")
+ self._run(plugin_name)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+ print("Done, exiting")
+
+ def _install_setup(self, plugin_name, deployment):
+ """Deployment environment setup"""
+ target_script = plugin_name + ".bash"
+ self.script = pkg_resources.resource_filename(
+ 'yardstick.resources', 'scripts/install/' + target_script)
+
+ deployment_user = deployment.get("user")
+ deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
+ deployment_ip = deployment.get("ip", None)
+ deployment_password = deployment.get("password", None)
+ deployment_key_filename = deployment.get("key_filename",
+ "/root/.ssh/id_rsa")
+
+ if deployment_ip == "local":
+ installer_ip = os.environ.get("INSTALLER_IP", None)
+
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, installer_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, installer_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+ else:
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, deployment_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, deployment_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+ # copy script to host
+ remotepath = '~/%s.sh' % plugin_name
+
+ LOG.info("copying script to host: %s", remotepath)
+ self.client._put_file_shell(self.script, remotepath)
+
+ def _remove_setup(self, plugin_name, deployment):
+ """Deployment environment setup"""
+ target_script = plugin_name + ".bash"
+ self.script = pkg_resources.resource_filename(
+ 'yardstick.resources', 'scripts/remove/' + target_script)
+
+ deployment_user = deployment.get("user")
+ deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
+ deployment_ip = deployment.get("ip", None)
+ deployment_password = deployment.get("password", None)
+ deployment_key_filename = deployment.get("key_filename",
+ "/root/.ssh/id_rsa")
+
+ if deployment_ip == "local":
+ installer_ip = os.environ.get("INSTALLER_IP", None)
+
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, installer_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, installer_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+ else:
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, deployment_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, deployment_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+
+ # copy script to host
+ remotepath = '~/%s.sh' % plugin_name
+
+ LOG.info("copying script to host: %s", remotepath)
+ self.client._put_file_shell(self.script, remotepath)
+
+ def _login_via_password(self, user, ip, password, ssh_port):
+ LOG.info("Log in via pw, user:%s, host:%s", user, ip)
+ self.client = ssh.SSH(user, ip, password=password, port=ssh_port)
+ self.client.wait(timeout=600)
+
+ def _login_via_key(self, user, ip, key_filename, ssh_port):
+ LOG.info("Log in via key, user:%s, host:%s", user, ip)
+ self.client = ssh.SSH(user, ip, key_filename=key_filename,
+ port=ssh_port)
+ self.client.wait(timeout=600)
+
+ def _run(self, plugin_name):
+ """Run installation script """
+ cmd = "sudo bash %s" % plugin_name + ".sh"
+
+ LOG.info("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+
+class PluginParser(object):
+ """Parser for plugin configration files in yaml format"""
+
+ def __init__(self, path):
+ self.path = path
+
+ def parse_plugin(self):
+ """parses the plugin file and return a plugins instance
+ and a deployment instance
+ """
+
+ print ("Parsing plugin config:", self.path)
+
+ try:
+ kw = {}
+ with open(self.path) as f:
+ try:
+ input_plugin = f.read()
+ rendered_plugin = TaskTemplate.render(input_plugin, **kw)
+ except Exception as e:
+ print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
+ % {"plugin": input_plugin, "err": e})
+ raise e
+ print(("Input plugin is:\n%s\n") % rendered_plugin)
+
+ cfg = yaml.load(rendered_plugin)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "plugin")
+
+ return cfg["plugins"], cfg["deployment"]
+
+ def _check_schema(self, cfg_schema, schema_type):
+ """Check if configration file is using the correct schema type"""
+
+ if cfg_schema != "yardstick:" + schema_type + ":0.1":
+ sys.exit("error: file %s has unknown schema %s" % (self.path,
+ cfg_schema))
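[Editorial note] With the logic moved out of the CLI layer, `Plugin` can be driven programmatically; the only contract is an object whose `input_file` attribute is a one-element sequence, exactly as the `Arg` helper in the unit tests above models it. A hedged sketch (the config path is hypothetical):

    from yardstick.benchmark.core.plugin import Plugin

    class Args(object):
        def __init__(self):
            # any config with schema "yardstick:plugin:0.1" works here
            self.input_file = ('plugin/sample_config.yaml',)

    Plugin().install(Args())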
diff --git a/yardstick/benchmark/core/runner.py b/yardstick/benchmark/core/runner.py
new file mode 100644
index 000000000..e8dd21a12
--- /dev/null
+++ b/yardstick/benchmark/core/runner.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'runner' """
+
+from yardstick.benchmark.runners.base import Runner
+from yardstick.benchmark.core import print_hbar
+
+
+class Runners(object):
+ '''Runner commands.
+
+ Set of commands to discover and display runner types.
+ '''
+
+ def list_all(self, args):
+ '''List existing runner types'''
+ types = Runner.get_types()
+ print_hbar(78)
+ print("| %-16s | %-60s" % ("Type", "Description"))
+ print_hbar(78)
+ for rtype in types:
+ print "| %-16s | %-60s" % (rtype.__execution_type__,
+ rtype.__doc__.split("\n")[0])
+ print_hbar(78)
+
+ def show(self, args):
+ '''Show details of a specific runner type'''
+ rtype = Runner.get_cls(args.type[0])
+        print(rtype.__doc__)
diff --git a/yardstick/benchmark/core/scenario.py b/yardstick/benchmark/core/scenario.py
new file mode 100644
index 000000000..e228054ee
--- /dev/null
+++ b/yardstick/benchmark/core/scenario.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'scenario' """
+
+from yardstick.benchmark.scenarios.base import Scenario
+from yardstick.benchmark.core import print_hbar
+
+
+class Scenarios(object):
+ '''Scenario commands.
+
+ Set of commands to discover and display scenario types.
+ '''
+
+ def list_all(self, args):
+ '''List existing scenario types'''
+ types = Scenario.get_types()
+ print_hbar(78)
+ print("| %-16s | %-60s" % ("Type", "Description"))
+ print_hbar(78)
+ for stype in types:
+ print("| %-16s | %-60s" % (stype.__scenario_type__,
+ stype.__doc__.split("\n")[0]))
+ print_hbar(78)
+
+ def show(self, args):
+ '''Show details of a specific scenario type'''
+ stype = Scenario.get_cls(args.type[0])
+        print(stype.__doc__)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
new file mode 100644
index 000000000..8fb117771
--- /dev/null
+++ b/yardstick/benchmark/core/task.py
@@ -0,0 +1,511 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'task' """
+
+import sys
+import os
+import yaml
+import atexit
+import ipaddress
+import time
+import logging
+import uuid
+import errno
+from itertools import ifilter
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark.runners import base as base_runner
+from yardstick.common.task_template import TaskTemplate
+from yardstick.common.utils import source_env
+from yardstick.common import constants
+
+output_file_default = "/tmp/yardstick.out"
+test_cases_dir_default = "tests/opnfv/test_cases/"
+LOG = logging.getLogger(__name__)
+
+
+class Task(object): # pragma: no cover
+ '''Task commands.
+
+ Set of commands to manage benchmark tasks.
+ '''
+
+ def start(self, args, **kwargs):
+ '''Start a benchmark scenario.'''
+
+ atexit.register(atexit_handler)
+
+ self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
+
+ check_environment()
+
+ total_start_time = time.time()
+ parser = TaskParser(args.inputfile[0])
+
+ if args.suite:
+ # 1.parse suite, return suite_params info
+ task_files, task_args, task_args_fnames = \
+ parser.parse_suite()
+ else:
+ task_files = [parser.path]
+ task_args = [args.task_args]
+ task_args_fnames = [args.task_args_file]
+
+ LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
+ task_files, task_args, task_args_fnames)
+
+ if args.parse_only:
+ sys.exit(0)
+
+ if os.path.isfile(args.output_file):
+ os.remove(args.output_file)
+ # parse task_files
+ for i in range(0, len(task_files)):
+ one_task_start_time = time.time()
+ parser.path = task_files[i]
+ scenarios, run_in_parallel, meet_precondition = parser.parse_task(
+ self.task_id, task_args[i], task_args_fnames[i])
+
+ if not meet_precondition:
+ LOG.info("meet_precondition is %s, please check envrionment",
+ meet_precondition)
+ continue
+
+ self._run(scenarios, run_in_parallel, args.output_file)
+
+ if args.keep_deploy:
+ # keep deployment, forget about stack
+ # (hide it for exit handler)
+ Context.list = []
+ else:
+ for context in Context.list:
+ context.undeploy()
+ Context.list = []
+ one_task_end_time = time.time()
+ LOG.info("task %s finished in %d secs", task_files[i],
+ one_task_end_time - one_task_start_time)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+ print "Done, exiting"
+
+ def _run(self, scenarios, run_in_parallel, output_file):
+ '''Deploys context and calls runners'''
+ for context in Context.list:
+ context.deploy()
+
+ background_runners = []
+
+ # Start all background scenarios
+ for scenario in ifilter(_is_background_scenario, scenarios):
+ scenario["runner"] = dict(type="Duration", duration=1000000000)
+ runner = run_one_scenario(scenario, output_file)
+ background_runners.append(runner)
+
+ runners = []
+ if run_in_parallel:
+ for scenario in scenarios:
+ if not _is_background_scenario(scenario):
+ runner = run_one_scenario(scenario, output_file)
+ runners.append(runner)
+
+ # Wait for runners to finish
+ for runner in runners:
+ runner_join(runner)
+ print "Runner ended, output in", output_file
+ else:
+ # run serially
+ for scenario in scenarios:
+ if not _is_background_scenario(scenario):
+ runner = run_one_scenario(scenario, output_file)
+ runner_join(runner)
+ print "Runner ended, output in", output_file
+
+ # Abort background runners
+ for runner in background_runners:
+ runner.abort()
+
+ # Wait for background runners to finish
+ for runner in background_runners:
+ if runner.join(timeout=60) is None:
+ # Nuke if it did not stop nicely
+ base_runner.Runner.terminate(runner)
+ runner_join(runner)
+ else:
+ base_runner.Runner.release(runner)
+ print "Background task ended"
+
+
+# TODO: Move stuff below into TaskCommands class !?
+
+
+class TaskParser(object): # pragma: no cover
+ '''Parser for task config files in yaml format'''
+ def __init__(self, path):
+ self.path = path
+
+ def _meet_constraint(self, task, cur_pod, cur_installer):
+ if "constraint" in task:
+ constraint = task.get('constraint', None)
+ if constraint is not None:
+ tc_fit_pod = constraint.get('pod', None)
+ tc_fit_installer = constraint.get('installer', None)
+ LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
+ cur_pod, cur_installer, constraint)
+ if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+ return False
+ if cur_installer and tc_fit_installer and \
+ cur_installer not in tc_fit_installer:
+ return False
+ return True
+
+ def _get_task_para(self, task, cur_pod):
+ task_args = task.get('task_args', None)
+ if task_args is not None:
+ task_args = task_args.get(cur_pod, None)
+ task_args_fnames = task.get('task_args_fnames', None)
+ if task_args_fnames is not None:
+ task_args_fnames = task_args_fnames.get(cur_pod, None)
+ return task_args, task_args_fnames
+
+ def parse_suite(self):
+ '''parse the suite file and return a list of task config file paths
+ and lists of optional parameters if present'''
+ LOG.info("\nParsing suite file:%s", self.path)
+
+ try:
+ with open(self.path) as stream:
+ cfg = yaml.load(stream)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "suite")
+ LOG.info("\nStarting scenario:%s", cfg["name"])
+
+ test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
+ if test_cases_dir[-1] != os.sep:
+ test_cases_dir += os.sep
+
+ cur_pod = os.environ.get('NODE_NAME', None)
+ cur_installer = os.environ.get('INSTALLER_TYPE', None)
+
+ valid_task_files = []
+ valid_task_args = []
+ valid_task_args_fnames = []
+
+ for task in cfg["test_cases"]:
+ # 1.check file_name
+ if "file_name" in task:
+ task_fname = task.get('file_name', None)
+ if task_fname is None:
+ continue
+ else:
+ continue
+ # 2.check constraint
+ if self._meet_constraint(task, cur_pod, cur_installer):
+ valid_task_files.append(test_cases_dir + task_fname)
+ else:
+ continue
+ # 3.fetch task parameters
+ task_args, task_args_fnames = self._get_task_para(task, cur_pod)
+ valid_task_args.append(task_args)
+ valid_task_args_fnames.append(task_args_fnames)
+
+ return valid_task_files, valid_task_args, valid_task_args_fnames
+
+ def parse_task(self, task_id, task_args=None, task_args_file=None):
+        '''parse the task file and return context and scenario instances'''
+        print("Parsing task config: %s" % self.path)
+
+ try:
+ kw = {}
+ if task_args_file:
+ with open(task_args_file) as f:
+ kw.update(parse_task_args("task_args_file", f.read()))
+ kw.update(parse_task_args("task_args", task_args))
+ except TypeError:
+ raise TypeError()
+
+ try:
+ with open(self.path) as f:
+ try:
+ input_task = f.read()
+ rendered_task = TaskTemplate.render(input_task, **kw)
+ except Exception as e:
+ print(("Failed to render template:\n%(task)s\n%(err)s\n")
+ % {"task": input_task, "err": e})
+ raise e
+ print(("Input task is:\n%s\n") % rendered_task)
+
+ cfg = yaml.load(rendered_task)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "task")
+ meet_precondition = self._check_precondition(cfg)
+
+        # TODO: support one or many contexts? Many would be simpler and
+        # more precise
+ # TODO: support hybrid context type
+ if "context" in cfg:
+ context_cfgs = [cfg["context"]]
+ elif "contexts" in cfg:
+ context_cfgs = cfg["contexts"]
+ else:
+ context_cfgs = [{"type": "Dummy"}]
+
+ name_suffix = '-{}'.format(task_id[:8])
+ for cfg_attrs in context_cfgs:
+ cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'], name_suffix)
+ context_type = cfg_attrs.get("type", "Heat")
+ if "Heat" == context_type and "networks" in cfg_attrs:
+                # bugfix: if there is more than one network,
+                # only add "external_network" to the first one.
+                # network names are expected to follow this pattern:
+                # test, test2, test3 ...
+                # so sorting the names puts the first network first
+ sorted_networks = sorted(cfg_attrs["networks"])
+ # config external_network based on env var
+ cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
+ = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
+
+ context = Context.get(context_type)
+ context.init(cfg_attrs)
+
+ run_in_parallel = cfg.get("run_in_parallel", False)
+
+ # add tc and task id for influxdb extended tags
+ for scenario in cfg["scenarios"]:
+ task_name = os.path.splitext(os.path.basename(self.path))[0]
+ scenario["tc"] = task_name
+ scenario["task_id"] = task_id
+
+ change_server_name(scenario, name_suffix)
+
+ try:
+ change_server_name(scenario['nodes'], name_suffix)
+ except KeyError:
+ pass
+
+ # TODO we need something better here, a class that represent the file
+ return cfg["scenarios"], run_in_parallel, meet_precondition
+
+ def _check_schema(self, cfg_schema, schema_type):
+ '''Check if config file is using the correct schema type'''
+
+ if cfg_schema != "yardstick:" + schema_type + ":0.1":
+ sys.exit("error: file %s has unknown schema %s" % (self.path,
+ cfg_schema))
+
+ def _check_precondition(self, cfg):
+        '''Check if the environment meets the precondition'''
+
+ if "precondition" in cfg:
+ precondition = cfg["precondition"]
+ installer_type = precondition.get("installer_type", None)
+ deploy_scenarios = precondition.get("deploy_scenarios", None)
+ tc_fit_pods = precondition.get("pod_name", None)
+            installer_type_env = os.environ.get('INSTALLER_TYPE', None)
+ deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
+ pod_name_env = os.environ.get('NODE_NAME', None)
+
+ LOG.info("installer_type: %s, installer_type_env: %s",
+ installer_type, installer_type_env)
+ LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
+ deploy_scenarios, deploy_scenario_env)
+ LOG.info("tc_fit_pods: %s, pod_name_env: %s",
+ tc_fit_pods, pod_name_env)
+ if installer_type and installer_type_env:
+ if installer_type_env not in installer_type:
+ return False
+ if deploy_scenarios and deploy_scenario_env:
+ deploy_scenarios_list = deploy_scenarios.split(',')
+ for deploy_scenario in deploy_scenarios_list:
+ if deploy_scenario_env.startswith(deploy_scenario):
+ return True
+ return False
+ if tc_fit_pods and pod_name_env:
+ if pod_name_env not in tc_fit_pods:
+ return False
+ return True
+
+
+def atexit_handler():
+ '''handler for process termination'''
+ base_runner.Runner.terminate_all()
+
+ if len(Context.list) > 0:
+ print "Undeploying all contexts"
+ for context in Context.list:
+ context.undeploy()
+
+
+def is_ip_addr(addr):
+ '''check if string addr is an IP address'''
+ try:
+ ipaddress.ip_address(unicode(addr))
+ return True
+ except ValueError:
+ return False
+
+
+def _is_same_heat_context(host_attr, target_attr):
+ '''check if two servers are in the same heat context
+ host_attr: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ target_attr: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ '''
+ host = None
+ target = None
+ for context in Context.list:
+ if context.__context_type__ != "Heat":
+ continue
+
+ host = context._get_server(host_attr)
+ if host is None:
+ continue
+
+ target = context._get_server(target_attr)
+ if target is None:
+ return False
+
+        # Both host and target are not None, so they are in the
+        # same heat context.
+ return True
+
+ return False
+
+
+def _is_background_scenario(scenario):
+ if "run_in_background" in scenario:
+ return scenario["run_in_background"]
+ else:
+ return False
+
+
+def run_one_scenario(scenario_cfg, output_file):
+ '''run one scenario using context'''
+ runner_cfg = scenario_cfg["runner"]
+ runner_cfg['output_filename'] = output_file
+
+ # TODO support get multi hosts/vms info
+ context_cfg = {}
+ if "host" in scenario_cfg:
+ context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+
+ if "target" in scenario_cfg:
+ if is_ip_addr(scenario_cfg["target"]):
+ context_cfg['target'] = {}
+ context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+ else:
+ context_cfg['target'] = Context.get_server(scenario_cfg["target"])
+ if _is_same_heat_context(scenario_cfg["host"],
+ scenario_cfg["target"]):
+ context_cfg["target"]["ipaddr"] = \
+ context_cfg["target"]["private_ip"]
+ else:
+ context_cfg["target"]["ipaddr"] = \
+ context_cfg["target"]["ip"]
+
+ if "targets" in scenario_cfg:
+ ip_list = []
+ for target in scenario_cfg["targets"]:
+ if is_ip_addr(target):
+ ip_list.append(target)
+ context_cfg['target'] = {}
+ else:
+ context_cfg['target'] = Context.get_server(target)
+ if _is_same_heat_context(scenario_cfg["host"], target):
+ ip_list.append(context_cfg["target"]["private_ip"])
+ else:
+ ip_list.append(context_cfg["target"]["ip"])
+ context_cfg['target']['ipaddr'] = ','.join(ip_list)
+
+ if "nodes" in scenario_cfg:
+ context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+ runner = base_runner.Runner.get(runner_cfg)
+
+ print "Starting runner of type '%s'" % runner_cfg["type"]
+ runner.run(scenario_cfg, context_cfg)
+
+ return runner
+
+
+def parse_nodes_with_context(scenario_cfg):
+    '''parse the 'nodes' fields in the scenario'''
+ nodes = scenario_cfg["nodes"]
+
+ nodes_cfg = {}
+ for nodename in nodes:
+ nodes_cfg[nodename] = Context.get_server(nodes[nodename])
+
+ return nodes_cfg
+
+
+def runner_join(runner):
+ '''join (wait for) a runner, exit process at runner failure'''
+ status = runner.join()
+ base_runner.Runner.release(runner)
+ if status != 0:
+ sys.exit("Runner failed")
+
+
+def print_invalid_header(source_name, args):
+ print(("Invalid %(source)s passed:\n\n %(args)s\n")
+ % {"source": source_name, "args": args})
+
+
+def parse_task_args(src_name, args):
+ try:
+ kw = args and yaml.safe_load(args)
+ kw = {} if kw is None else kw
+ except yaml.parser.ParserError as e:
+ print_invalid_header(src_name, args)
+ print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
+ % {"source": src_name, "err": e})
+ raise TypeError()
+
+ if not isinstance(kw, dict):
+ print_invalid_header(src_name, args)
+ print(("%(src)s had to be dict, actually %(src_type)s\n")
+ % {"src": src_name, "src_type": type(kw)})
+ raise TypeError()
+ return kw
+
+
+def check_environment():
+ auth_url = os.environ.get('OS_AUTH_URL', None)
+ if not auth_url:
+ try:
+ source_env(constants.OPENSTACK_RC_FILE)
+ except IOError as e:
+            if e.errno != errno.ENOENT:
+ raise
+ LOG.debug('OPENRC file not found')
+
+
+def change_server_name(scenario, suffix):
+ try:
+ scenario['host'] += suffix
+ except KeyError:
+ pass
+
+ try:
+ scenario['target'] += suffix
+ except KeyError:
+ pass
+
+ try:
+ key = 'targets'
+ scenario[key] = ['{}{}'.format(a, suffix) for a in scenario[key]]
+ except KeyError:
+ pass
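[Editorial note] The `parse_task_args` helper accepts anything `yaml.safe_load` turns into a mapping and rejects the rest, which is what lets `--task-args` take inline JSON/YAML on the command line. Its contract, illustrated:

    from yardstick.benchmark.core.task import parse_task_args

    parse_task_args("task_args", None)                    # -> {}
    parse_task_args("task_args", '{"host": "node1.LF"}')  # -> {'host': 'node1.LF'}
    try:
        parse_task_args("task_args", '[1, 2, 3]')         # a list, not a mapping
    except TypeError:
        pass                                              # rejected with TypeError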
diff --git a/yardstick/benchmark/core/testcase.py b/yardstick/benchmark/core/testcase.py
new file mode 100644
index 000000000..d292ad2d7
--- /dev/null
+++ b/yardstick/benchmark/core/testcase.py
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'testcase' """
+import os
+import yaml
+import sys
+
+from yardstick.benchmark.core import print_hbar
+from yardstick.common.task_template import TaskTemplate
+from yardstick.definitions import YARDSTICK_ROOT_PATH
+
+
+class Testcase(object):
+ '''Testcase commands.
+
+ Set of commands to discover and display test cases.
+ '''
+ def __init__(self):
+ self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
+ self.testcase_list = []
+
+ def list_all(self, args):
+ '''List existing test cases'''
+
+ try:
+ testcase_files = os.listdir(self.test_case_path)
+ except Exception as e:
+ print(("Failed to list dir:\n%(path)s\n%(err)s\n")
+ % {"path": self.test_case_path, "err": e})
+ raise e
+ testcase_files.sort()
+
+ for testcase_file in testcase_files:
+ record = self._get_record(testcase_file)
+ self.testcase_list.append(record)
+
+ self._format_print(self.testcase_list)
+ return True
+
+ def show(self, args):
+ '''Show details of a specific test case'''
+ testcase_name = args.casename[0]
+ testcase_path = self.test_case_path + testcase_name + ".yaml"
+ try:
+ with open(testcase_path) as f:
+ try:
+ testcase_info = f.read()
+                    print(testcase_info)
+
+ except Exception as e:
+ print(("Failed to load test cases:"
+ "\n%(testcase_file)s\n%(err)s\n")
+ % {"testcase_file": testcase_path, "err": e})
+ raise e
+ except IOError as ioerror:
+ sys.exit(ioerror)
+ return True
+
+ def _get_record(self, testcase_file):
+
+ try:
+ with open(self.test_case_path + testcase_file) as f:
+ try:
+ testcase_info = f.read()
+ except Exception as e:
+ print(("Failed to load test cases:"
+ "\n%(testcase_file)s\n%(err)s\n")
+ % {"testcase_file": testcase_file, "err": e})
+ raise e
+ description, installer, deploy_scenarios = \
+ self._parse_testcase(testcase_info)
+
+ record = {'Name': testcase_file.split(".")[0],
+ 'Description': description,
+ 'installer': installer,
+ 'deploy_scenarios': deploy_scenarios}
+ return record
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ def _parse_testcase(self, testcase_info):
+
+ kw = {}
+ rendered_testcase = TaskTemplate.render(testcase_info, **kw)
+ testcase_cfg = yaml.load(rendered_testcase)
+ test_precondition = testcase_cfg.get('precondition', None)
+ installer_type = 'all'
+ deploy_scenarios = 'all'
+ if test_precondition is not None:
+ installer_type = test_precondition.get('installer_type', 'all')
+ deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
+
+ description = testcase_info.split("\n")[2][1:].strip()
+ return description, installer_type, deploy_scenarios
+
+ def _format_print(self, testcase_list):
+ '''format output'''
+
+ print_hbar(88)
+ print("| %-21s | %-60s" % ("Testcase Name", "Description"))
+ print_hbar(88)
+ for testcase_record in testcase_list:
+ print "| %-16s | %-60s" % (testcase_record['Name'],
+ testcase_record['Description'])
+ print_hbar(88)
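[Editorial note] `_parse_testcase` assumes the description sits on the third line of the test case file, as a comment whose leading `#` is stripped. Illustrated on a minimal, made-up file body:

    testcase_info = ('##############\n'
                     '# Test case for TCXXX\n'
                     '# measure network latency\n'
                     'schema: "yardstick:task:0.1"\n')
    description = testcase_info.split("\n")[2][1:].strip()
    # -> 'measure network latency'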
diff --git a/yardstick/benchmark/scenarios/parser/parser.py b/yardstick/benchmark/scenarios/parser/parser.py
index 006258d05..bb16e7c89 100644
--- a/yardstick/benchmark/scenarios/parser/parser.py
+++ b/yardstick/benchmark/scenarios/parser/parser.py
@@ -58,10 +58,12 @@ class Parser(base.Scenario):
cmd1 = "%s %s %s" % (self.parser_script, yangfile, toscafile)
cmd2 = "chmod 777 %s" % (self.parser_script)
subprocess.call(cmd2, shell=True)
- output = subprocess.call(cmd1, shell=True, stdout=subprocess.PIPE)
+ p = subprocess.Popen(cmd1, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ p.communicate()
print "yangtotosca finished"
- result['yangtotosca'] = "success" if output == 0 else "fail"
+ result['yangtotosca'] = "success" if p.returncode == 0 else "fail"
def teardown(self):
''' for scenario teardown remove parser and pyang '''
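[Editorial note] The old line passed `stdout=subprocess.PIPE` to `subprocess.call`, which never drains the pipe (and can deadlock on enough output); `call` did return the exit code, but the replacement is the safe pattern: `communicate()` drains both streams, then `returncode` is inspected. The same pattern in isolation:

    import subprocess

    p = subprocess.Popen("echo hello", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()   # blocks until the process exits
    result = "success" if p.returncode == 0 else "fail"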
diff --git a/yardstick/cmd/commands/__init__.py b/yardstick/cmd/commands/__init__.py
index e69de29bb..ba229d481 100644
--- a/yardstick/cmd/commands/__init__.py
+++ b/yardstick/cmd/commands/__init__.py
@@ -0,0 +1,9 @@
+from yardstick.benchmark.core import Param
+
+
+def change_osloobj_to_paras(args):
+ param = Param({})
+ for k in param.__dict__:
+ if hasattr(args, k):
+ setattr(param, k, getattr(args, k))
+ return param
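[Editorial note] `change_osloobj_to_paras` narrows an oslo/argparse-style namespace down to just the attributes `Param` declares, so unknown CLI attributes never leak into the core classes. An illustrative run (the file name is made up):

    import argparse
    from yardstick.cmd.commands import change_osloobj_to_paras

    args = argparse.Namespace(inputfile=['task.yaml'], suite=False,
                              extra='ignored')
    param = change_osloobj_to_paras(args)
    print(param.inputfile)   # ['task.yaml']; 'extra' is dropped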
diff --git a/yardstick/cmd/commands/env.py b/yardstick/cmd/commands/env.py
index 098379ae1..d0fc75dd3 100644
--- a/yardstick/cmd/commands/env.py
+++ b/yardstick/cmd/commands/env.py
@@ -6,13 +6,13 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import logging
+from __future__ import print_function
+import time
+import os
+import sys
from yardstick.common.httpClient import HttpClient
-from yardstick.common import constants
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
+from yardstick.common import constants as consts
class EnvCommand(object):
@@ -21,19 +21,63 @@ class EnvCommand(object):
Set of commands to prepare environment
'''
def do_influxdb(self, args):
- url = constants.YARDSTICK_ENV_ACTION_API
data = {'action': 'createInfluxDBContainer'}
- HttpClient().post(url, data)
- logger.debug('Now creating and configing influxdb')
+ task_id = self._start_async_task(data)
+
+ start = '* creating influxDB'
+ self._check_status(task_id, start)
def do_grafana(self, args):
- url = constants.YARDSTICK_ENV_ACTION_API
data = {'action': 'createGrafanaContainer'}
- HttpClient().post(url, data)
- logger.debug('Now creating and configing grafana')
+ task_id = self._start_async_task(data)
+
+ start = '* creating grafana'
+ self._check_status(task_id, start)
def do_prepare(self, args):
- url = constants.YARDSTICK_ENV_ACTION_API
data = {'action': 'prepareYardstickEnv'}
- HttpClient().post(url, data)
- logger.debug('Now preparing environment')
+ task_id = self._start_async_task(data)
+
+ start = '* preparing yardstick environment'
+ self._check_status(task_id, start)
+
+ def _start_async_task(self, data):
+ url = consts.ENV_ACTION_API
+ return HttpClient().post(url, data)['result']['task_id']
+
+ def _check_status(self, task_id, start):
+ self._print_status(start, '[]\r')
+ url = '{}?task_id={}'.format(consts.ASYNC_TASK_API, task_id)
+
+ CHECK_STATUS_RETRY = 20
+ CHECK_STATUS_DELAY = 5
+
+ for retry in xrange(CHECK_STATUS_RETRY):
+ response = HttpClient().get(url)
+ status = response['status']
+
+ if status:
+ break
+
+ # wait until the async task finished
+ time.sleep(CHECK_STATUS_DELAY * (retry + 1))
+
+ switcher = {
+ 0: 'Timeout',
+ 1: 'Finished',
+ 2: 'Error'
+ }
+ self._print_status(start, '[{}]'.format(switcher[status]))
+ if status == 2:
+ print(response['result'])
+ sys.stdout.flush()
+ return status
+
+ def _print_status(self, s, e):
+ try:
+ columns = int(os.popen('stty size', 'r').read().split()[1])
+ word = '{}{}{}'.format(s, ' ' * (columns - len(s) - len(e)), e)
+ sys.stdout.write(word)
+ sys.stdout.flush()
+ except IndexError:
+ pass
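[Editorial note] `_check_status` backs off linearly, so with the constants above (20 retries, 5 s base delay) the worst case waits 5 + 10 + ... + 100 = 1050 seconds before printing `Timeout`:

    CHECK_STATUS_RETRY = 20
    CHECK_STATUS_DELAY = 5
    total = sum(CHECK_STATUS_DELAY * (retry + 1)
                for retry in range(CHECK_STATUS_RETRY))
    print(total)   # 1050 seconds, i.e. 17.5 minutes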
diff --git a/yardstick/cmd/commands/plugin.py b/yardstick/cmd/commands/plugin.py
index 10e5cdfbe..94095665a 100644
--- a/yardstick/cmd/commands/plugin.py
+++ b/yardstick/cmd/commands/plugin.py
@@ -9,18 +9,9 @@
""" Handler for yardstick command 'plugin' """
-import os
-import sys
-import yaml
-import time
-import logging
-import pkg_resources
-import yardstick.ssh as ssh
-
+from yardstick.benchmark.core.plugin import Plugin
from yardstick.common.utils import cliargs
-from yardstick.common.task_template import TaskTemplate
-
-LOG = logging.getLogger(__name__)
+from yardstick.cmd.commands import change_osloobj_to_paras
class PluginCommands(object):
@@ -33,158 +24,12 @@ class PluginCommands(object):
nargs=1)
def do_install(self, args):
'''Install a plugin.'''
-
- total_start_time = time.time()
- parser = PluginParser(args.input_file[0])
-
- plugins, deployment = parser.parse_plugin()
- plugin_name = plugins.get("name")
- print("Installing plugin: %s" % plugin_name)
-
- LOG.info("Executing _install_setup()")
- self._install_setup(plugin_name, deployment)
-
- LOG.info("Executing _run()")
- self._run(plugin_name)
-
- total_end_time = time.time()
- LOG.info("total finished in %d secs",
- total_end_time - total_start_time)
-
- print("Done, exiting")
+ param = change_osloobj_to_paras(args)
+ Plugin().install(param)
@cliargs("input_file", type=str, help="path to plugin configuration file",
nargs=1)
def do_remove(self, args):
'''Remove a plugin.'''
-
- total_start_time = time.time()
- parser = PluginParser(args.input_file[0])
-
- plugins, deployment = parser.parse_plugin()
- plugin_name = plugins.get("name")
- print("Removing plugin: %s" % plugin_name)
-
- LOG.info("Executing _remove_setup()")
- self._remove_setup(plugin_name, deployment)
-
- LOG.info("Executing _run()")
- self._run(plugin_name)
-
- total_end_time = time.time()
- LOG.info("total finished in %d secs",
- total_end_time - total_start_time)
-
- print("Done, exiting")
-
- def _install_setup(self, plugin_name, deployment):
- '''Deployment environment setup'''
- target_script = plugin_name + ".bash"
- self.script = pkg_resources.resource_filename(
- 'yardstick.resources', 'scripts/install/' + target_script)
-
- deployment_user = deployment.get("user")
- deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
- deployment_ip = deployment.get("ip")
- deployment_password = deployment.get("password")
-
- if deployment_ip == "local":
- installer_ip = os.environ.get("INSTALLER_IP", None)
-
- LOG.info("user:%s, host:%s", deployment_user, installer_ip)
- self.client = ssh.SSH(deployment_user, installer_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
- else:
- LOG.info("user:%s, host:%s", deployment_user, deployment_ip)
- self.client = ssh.SSH(deployment_user, deployment_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
-
- # copy script to host
- cmd = "cat > ~/%s.sh" % plugin_name
-
- LOG.info("copying script to host: %s", cmd)
- self.client.run(cmd, stdin=open(self.script, 'rb'))
-
- def _remove_setup(self, plugin_name, deployment):
- '''Deployment environment setup'''
- target_script = plugin_name + ".bash"
- self.script = pkg_resources.resource_filename(
- 'yardstick.resources', 'scripts/remove/' + target_script)
-
- deployment_user = deployment.get("user")
- deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
- deployment_ip = deployment.get("ip")
- deployment_password = deployment.get("password")
-
- if deployment_ip == "local":
- installer_ip = os.environ.get("INSTALLER_IP", None)
-
- LOG.info("user:%s, host:%s", deployment_user, installer_ip)
- self.client = ssh.SSH(deployment_user, installer_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
- else:
- LOG.info("user:%s, host:%s", deployment_user, deployment_ip)
- self.client = ssh.SSH(deployment_user, deployment_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
-
- # copy script to host
- cmd = "cat > ~/%s.sh" % plugin_name
-
- LOG.info("copying script to host: %s", cmd)
- self.client.run(cmd, stdin=open(self.script, 'rb'))
-
- def _run(self, plugin_name):
- '''Run installation script '''
- cmd = "sudo bash %s" % plugin_name + ".sh"
-
- LOG.info("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
-
-class PluginParser(object):
- '''Parser for plugin configration files in yaml format'''
-
- def __init__(self, path):
- self.path = path
-
- def parse_plugin(self):
- '''parses the plugin file and return a plugins instance
- and a deployment instance
- '''
-
- print "Parsing plugin config:", self.path
-
- try:
- kw = {}
- with open(self.path) as f:
- try:
- input_plugin = f.read()
- rendered_plugin = TaskTemplate.render(input_plugin, **kw)
- except Exception as e:
- print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
- % {"plugin": input_plugin, "err": e})
- raise e
- print(("Input plugin is:\n%s\n") % rendered_plugin)
-
- cfg = yaml.load(rendered_plugin)
- except IOError as ioerror:
- sys.exit(ioerror)
-
- self._check_schema(cfg["schema"], "plugin")
-
- return cfg["plugins"], cfg["deployment"]
-
- def _check_schema(self, cfg_schema, schema_type):
- '''Check if configration file is using the correct schema type'''
-
- if cfg_schema != "yardstick:" + schema_type + ":0.1":
- sys.exit("error: file %s has unknown schema %s" % (self.path,
- cfg_schema))
+ param = change_osloobj_to_paras(args)
+ Plugin().remove(param)
diff --git a/yardstick/cmd/commands/runner.py b/yardstick/cmd/commands/runner.py
index 84bc3c6cf..62a2082c4 100644
--- a/yardstick/cmd/commands/runner.py
+++ b/yardstick/cmd/commands/runner.py
@@ -9,9 +9,9 @@
""" Handler for yardstick command 'runner' """
-from yardstick.benchmark.runners.base import Runner
+from yardstick.benchmark.core.runner import Runners
from yardstick.common.utils import cliargs
-from yardstick.cmd import print_hbar
+from yardstick.cmd.commands import change_osloobj_to_paras
class RunnerCommands(object):
@@ -22,17 +22,11 @@ class RunnerCommands(object):
def do_list(self, args):
'''List existing runner types'''
- types = Runner.get_types()
- print_hbar(78)
- print("| %-16s | %-60s" % ("Type", "Description"))
- print_hbar(78)
- for rtype in types:
- print "| %-16s | %-60s" % (rtype.__execution_type__,
- rtype.__doc__.split("\n")[0])
- print_hbar(78)
+ param = change_osloobj_to_paras(args)
+ Runners().list_all(param)
@cliargs("type", type=str, help="runner type", nargs=1)
def do_show(self, args):
'''Show details of a specific runner type'''
- rtype = Runner.get_cls(args.type[0])
- print rtype.__doc__
+ param = change_osloobj_to_paras(args)
+ Runners().show(param)
diff --git a/yardstick/cmd/commands/scenario.py b/yardstick/cmd/commands/scenario.py
index 00d46cf11..6aa3a451a 100644
--- a/yardstick/cmd/commands/scenario.py
+++ b/yardstick/cmd/commands/scenario.py
@@ -9,9 +9,9 @@
""" Handler for yardstick command 'scenario' """
-from yardstick.benchmark.scenarios.base import Scenario
+from yardstick.benchmark.core.scenario import Scenarios
from yardstick.common.utils import cliargs
-from yardstick.cmd import print_hbar
+from yardstick.cmd.commands import change_osloobj_to_paras
class ScenarioCommands(object):
@@ -22,17 +22,11 @@ class ScenarioCommands(object):
def do_list(self, args):
'''List existing scenario types'''
- types = Scenario.get_types()
- print_hbar(78)
- print("| %-16s | %-60s" % ("Type", "Description"))
- print_hbar(78)
- for stype in types:
- print("| %-16s | %-60s" % (stype.__scenario_type__,
- stype.__doc__.split("\n")[0]))
- print_hbar(78)
+ param = change_osloobj_to_paras(args)
+ Scenarios().list_all(param)
@cliargs("type", type=str, help="runner type", nargs=1)
def do_show(self, args):
'''Show details of a specific scenario type'''
- stype = Scenario.get_cls(args.type[0])
- print stype.__doc__
+ param = change_osloobj_to_paras(args)
+ Scenarios().show(param)
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index 9524778ba..bd018bcab 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -8,28 +8,12 @@
##############################################################################
""" Handler for yardstick command 'task' """
-
-import sys
-import os
-import yaml
-import atexit
-import ipaddress
-import time
-import logging
-import uuid
-import errno
-from itertools import ifilter
-
-from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.runners import base as base_runner
-from yardstick.common.task_template import TaskTemplate
+from yardstick.benchmark.core.task import Task
from yardstick.common.utils import cliargs
-from yardstick.common.utils import source_env
-from yardstick.common import constants
+from yardstick.cmd.commands import change_osloobj_to_paras
+
output_file_default = "/tmp/yardstick.out"
-test_cases_dir_default = "tests/opnfv/test_cases/"
-LOG = logging.getLogger(__name__)
class TaskCommands(object):
@@ -55,447 +39,5 @@ class TaskCommands(object):
@cliargs("--suite", help="process test suite file instead of a task file",
action="store_true")
def do_start(self, args, **kwargs):
- '''Start a benchmark scenario.'''
-
- atexit.register(atexit_handler)
-
- self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
-
- check_environment()
-
- total_start_time = time.time()
- parser = TaskParser(args.inputfile[0])
-
- if args.suite:
- # 1.parse suite, return suite_params info
- task_files, task_args, task_args_fnames = \
- parser.parse_suite()
- else:
- task_files = [parser.path]
- task_args = [args.task_args]
- task_args_fnames = [args.task_args_file]
-
- LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
- task_files, task_args, task_args_fnames)
-
- if args.parse_only:
- sys.exit(0)
-
- if os.path.isfile(args.output_file):
- os.remove(args.output_file)
- # parse task_files
- for i in range(0, len(task_files)):
- one_task_start_time = time.time()
- parser.path = task_files[i]
- scenarios, run_in_parallel, meet_precondition = parser.parse_task(
- self.task_id, task_args[i], task_args_fnames[i])
-
- if not meet_precondition:
-            LOG.info("meet_precondition is %s, please check environment",
- meet_precondition)
- continue
-
- self._run(scenarios, run_in_parallel, args.output_file)
-
- if args.keep_deploy:
- # keep deployment, forget about stack
- # (hide it for exit handler)
- Context.list = []
- else:
- for context in Context.list:
- context.undeploy()
- Context.list = []
- one_task_end_time = time.time()
- LOG.info("task %s finished in %d secs", task_files[i],
- one_task_end_time - one_task_start_time)
-
- total_end_time = time.time()
- LOG.info("total finished in %d secs",
- total_end_time - total_start_time)
-
- print "Done, exiting"
-
- def _run(self, scenarios, run_in_parallel, output_file):
- '''Deploys context and calls runners'''
- for context in Context.list:
- context.deploy()
-
- background_runners = []
-
- # Start all background scenarios
- for scenario in ifilter(_is_background_scenario, scenarios):
- scenario["runner"] = dict(type="Duration", duration=1000000000)
- runner = run_one_scenario(scenario, output_file)
- background_runners.append(runner)
-
- runners = []
- if run_in_parallel:
- for scenario in scenarios:
- if not _is_background_scenario(scenario):
- runner = run_one_scenario(scenario, output_file)
- runners.append(runner)
-
- # Wait for runners to finish
- for runner in runners:
- runner_join(runner)
- print "Runner ended, output in", output_file
- else:
- # run serially
- for scenario in scenarios:
- if not _is_background_scenario(scenario):
- runner = run_one_scenario(scenario, output_file)
- runner_join(runner)
- print "Runner ended, output in", output_file
-
- # Abort background runners
- for runner in background_runners:
- runner.abort()
-
- # Wait for background runners to finish
- for runner in background_runners:
- if runner.join(timeout=60) is None:
- # Nuke if it did not stop nicely
- base_runner.Runner.terminate(runner)
- runner_join(runner)
- else:
- base_runner.Runner.release(runner)
- print "Background task ended"
-
-
-# TODO: Move stuff below into TaskCommands class !?
-
-
-class TaskParser(object):
- '''Parser for task config files in yaml format'''
- def __init__(self, path):
- self.path = path
-
- def _meet_constraint(self, task, cur_pod, cur_installer):
- if "constraint" in task:
- constraint = task.get('constraint', None)
- if constraint is not None:
- tc_fit_pod = constraint.get('pod', None)
- tc_fit_installer = constraint.get('installer', None)
-                LOG.info("cur_pod:%s, cur_installer:%s, tc_constraints:%s",
- cur_pod, cur_installer, constraint)
- if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
- return False
- if cur_installer and tc_fit_installer and \
- cur_installer not in tc_fit_installer:
- return False
- return True
-
- def _get_task_para(self, task, cur_pod):
- task_args = task.get('task_args', None)
- if task_args is not None:
- task_args = task_args.get(cur_pod, None)
- task_args_fnames = task.get('task_args_fnames', None)
- if task_args_fnames is not None:
- task_args_fnames = task_args_fnames.get(cur_pod, None)
- return task_args, task_args_fnames
-
- def parse_suite(self):
- '''parse the suite file and return a list of task config file paths
- and lists of optional parameters if present'''
- LOG.info("\nParsing suite file:%s", self.path)
-
- try:
- with open(self.path) as stream:
- cfg = yaml.load(stream)
- except IOError as ioerror:
- sys.exit(ioerror)
-
- self._check_schema(cfg["schema"], "suite")
- LOG.info("\nStarting scenario:%s", cfg["name"])
-
- test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
- if test_cases_dir[-1] != os.sep:
- test_cases_dir += os.sep
-
- cur_pod = os.environ.get('NODE_NAME', None)
- cur_installer = os.environ.get('INSTALLER_TYPE', None)
-
- valid_task_files = []
- valid_task_args = []
- valid_task_args_fnames = []
-
- for task in cfg["test_cases"]:
- # 1.check file_name
- if "file_name" in task:
- task_fname = task.get('file_name', None)
- if task_fname is None:
- continue
- else:
- continue
- # 2.check constraint
- if self._meet_constraint(task, cur_pod, cur_installer):
- valid_task_files.append(test_cases_dir + task_fname)
- else:
- continue
- # 3.fetch task parameters
- task_args, task_args_fnames = self._get_task_para(task, cur_pod)
- valid_task_args.append(task_args)
- valid_task_args_fnames.append(task_args_fnames)
-
- return valid_task_files, valid_task_args, valid_task_args_fnames
-
- def parse_task(self, task_id, task_args=None, task_args_file=None):
-        '''parse the task file and return context and scenario instances'''
- print "Parsing task config:", self.path
-
- try:
- kw = {}
- if task_args_file:
- with open(task_args_file) as f:
- kw.update(parse_task_args("task_args_file", f.read()))
- kw.update(parse_task_args("task_args", task_args))
- except TypeError:
- raise TypeError()
-
- try:
- with open(self.path) as f:
- try:
- input_task = f.read()
- rendered_task = TaskTemplate.render(input_task, **kw)
- except Exception as e:
- print(("Failed to render template:\n%(task)s\n%(err)s\n")
- % {"task": input_task, "err": e})
- raise e
- print(("Input task is:\n%s\n") % rendered_task)
-
- cfg = yaml.load(rendered_task)
- except IOError as ioerror:
- sys.exit(ioerror)
-
- self._check_schema(cfg["schema"], "task")
- meet_precondition = self._check_precondition(cfg)
-
-        # TODO: support one or many contexts? Many would be simpler and more precise
- # TODO: support hybrid context type
- if "context" in cfg:
- context_cfgs = [cfg["context"]]
- elif "contexts" in cfg:
- context_cfgs = cfg["contexts"]
- else:
- context_cfgs = [{"type": "Dummy"}]
-
- for cfg_attrs in context_cfgs:
- context_type = cfg_attrs.get("type", "Heat")
- if "Heat" == context_type and "networks" in cfg_attrs:
-                # bugfix: if there is more than one network,
-                # only add "external_network" to the first one.
-                # the name of the network should follow this rule:
-                # test, test2, test3 ...
-                # sort networks by name
- sorted_networks = sorted(cfg_attrs["networks"].keys())
- # config external_network based on env var
- cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
- = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
-
- context = Context.get(context_type)
- context.init(cfg_attrs)
-
- run_in_parallel = cfg.get("run_in_parallel", False)
-
- # add tc and task id for influxdb extended tags
- for scenario in cfg["scenarios"]:
- task_name = os.path.splitext(os.path.basename(self.path))[0]
- scenario["tc"] = task_name
- scenario["task_id"] = task_id
-
-        # TODO we need something better here, a class that represents the file
- return cfg["scenarios"], run_in_parallel, meet_precondition
-
- def _check_schema(self, cfg_schema, schema_type):
- '''Check if config file is using the correct schema type'''
-
- if cfg_schema != "yardstick:" + schema_type + ":0.1":
- sys.exit("error: file %s has unknown schema %s" % (self.path,
- cfg_schema))
-
- def _check_precondition(self, cfg):
-        '''Check if the environment meets the precondition'''
-
- if "precondition" in cfg:
- precondition = cfg["precondition"]
- installer_type = precondition.get("installer_type", None)
- deploy_scenarios = precondition.get("deploy_scenarios", None)
- tc_fit_pods = precondition.get("pod_name", None)
- installer_type_env = os.environ.get('INSTALL_TYPE', None)
- deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
- pod_name_env = os.environ.get('NODE_NAME', None)
-
- LOG.info("installer_type: %s, installer_type_env: %s",
- installer_type, installer_type_env)
- LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
- deploy_scenarios, deploy_scenario_env)
- LOG.info("tc_fit_pods: %s, pod_name_env: %s",
- tc_fit_pods, pod_name_env)
- if installer_type and installer_type_env:
- if installer_type_env not in installer_type:
- return False
- if deploy_scenarios and deploy_scenario_env:
- deploy_scenarios_list = deploy_scenarios.split(',')
- for deploy_scenario in deploy_scenarios_list:
- if deploy_scenario_env.startswith(deploy_scenario):
- return True
- return False
- if tc_fit_pods and pod_name_env:
- if pod_name_env not in tc_fit_pods:
- return False
- return True
-
-
-def atexit_handler():
- '''handler for process termination'''
- base_runner.Runner.terminate_all()
-
- if len(Context.list) > 0:
- print "Undeploying all contexts"
- for context in Context.list:
- context.undeploy()
-
-
-def is_ip_addr(addr):
- '''check if string addr is an IP address'''
- try:
- ipaddress.ip_address(unicode(addr))
- return True
- except ValueError:
- return False
-
-
-def _is_same_heat_context(host_attr, target_attr):
- '''check if two servers are in the same heat context
- host_attr: either a name for a server created by yardstick or a dict
- with attribute name mapping when using external heat templates
- target_attr: either a name for a server created by yardstick or a dict
- with attribute name mapping when using external heat templates
- '''
- host = None
- target = None
- for context in Context.list:
- if context.__context_type__ != "Heat":
- continue
-
- host = context._get_server(host_attr)
- if host is None:
- continue
-
- target = context._get_server(target_attr)
- if target is None:
- return False
-
-            # Both host and target are not None, so they are in the
-            # same heat context.
- return True
-
- return False
-
-
-def _is_background_scenario(scenario):
- if "run_in_background" in scenario:
- return scenario["run_in_background"]
- else:
- return False
-
-
-def run_one_scenario(scenario_cfg, output_file):
- '''run one scenario using context'''
- runner_cfg = scenario_cfg["runner"]
- runner_cfg['output_filename'] = output_file
-
- # TODO support get multi hosts/vms info
- context_cfg = {}
- if "host" in scenario_cfg:
- context_cfg['host'] = Context.get_server(scenario_cfg["host"])
-
- if "target" in scenario_cfg:
- if is_ip_addr(scenario_cfg["target"]):
- context_cfg['target'] = {}
- context_cfg['target']["ipaddr"] = scenario_cfg["target"]
- else:
- context_cfg['target'] = Context.get_server(scenario_cfg["target"])
- if _is_same_heat_context(scenario_cfg["host"],
- scenario_cfg["target"]):
- context_cfg["target"]["ipaddr"] = \
- context_cfg["target"]["private_ip"]
- else:
- context_cfg["target"]["ipaddr"] = \
- context_cfg["target"]["ip"]
-
- if "targets" in scenario_cfg:
- ip_list = []
- for target in scenario_cfg["targets"]:
- if is_ip_addr(target):
- ip_list.append(target)
- context_cfg['target'] = {}
- else:
- context_cfg['target'] = Context.get_server(target)
- if _is_same_heat_context(scenario_cfg["host"], target):
- ip_list.append(context_cfg["target"]["private_ip"])
- else:
- ip_list.append(context_cfg["target"]["ip"])
- context_cfg['target']['ipaddr'] = ','.join(ip_list)
-
- if "nodes" in scenario_cfg:
- context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
- runner = base_runner.Runner.get(runner_cfg)
-
- print "Starting runner of type '%s'" % runner_cfg["type"]
- runner.run(scenario_cfg, context_cfg)
-
- return runner
-
-
-def parse_nodes_with_context(scenario_cfg):
-    '''parse the 'nodes' fields in the scenario'''
- nodes = scenario_cfg["nodes"]
-
- nodes_cfg = {}
- for nodename in nodes:
- nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
- return nodes_cfg
-
-
-def runner_join(runner):
- '''join (wait for) a runner, exit process at runner failure'''
- status = runner.join()
- base_runner.Runner.release(runner)
- if status != 0:
- sys.exit("Runner failed")
-
-
-def print_invalid_header(source_name, args):
- print(("Invalid %(source)s passed:\n\n %(args)s\n")
- % {"source": source_name, "args": args})
-
-
-def parse_task_args(src_name, args):
- try:
- kw = args and yaml.safe_load(args)
- kw = {} if kw is None else kw
- except yaml.parser.ParserError as e:
- print_invalid_header(src_name, args)
- print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
- % {"source": src_name, "err": e})
- raise TypeError()
-
- if not isinstance(kw, dict):
- print_invalid_header(src_name, args)
-        print(("%(src)s has to be a dict, actually %(src_type)s\n")
- % {"src": src_name, "src_type": type(kw)})
- raise TypeError()
- return kw
-
-
-def check_environment():
- auth_url = os.environ.get('OS_AUTH_URL', None)
- if not auth_url:
- try:
- source_env(constants.OPENSTACK_RC_FILE)
- except IOError as e:
- if e.errno != errno.EEXIST:
- raise
- LOG.debug('OPENRC file not found')
+ param = change_osloobj_to_paras(args)
+ Task().start(param)
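
Most of the 450 lines deleted here reappear in yardstick/benchmark/core/task.py (see the diffstat), so the suite semantics are unchanged: each entry in test_cases may carry a constraint block naming the pods and installers it fits. A standalone restatement of that check, with invented example values:

    def meets_constraint(task, cur_pod, cur_installer):
        '''same logic as the deleted TaskParser._meet_constraint'''
        constraint = task.get('constraint')
        if constraint is None:
            return True
        tc_fit_pod = constraint.get('pod')
        tc_fit_installer = constraint.get('installer')
        if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
            return False
        if cur_installer and tc_fit_installer and \
                cur_installer not in tc_fit_installer:
            return False
        return True


    task = {'file_name': 'opnfv_yardstick_tc043.yaml',
            'constraint': {'installer': 'compass', 'pod': 'huawei-pod1'}}
    print(meets_constraint(task, 'huawei-pod1', 'compass'))  # True
    print(meets_constraint(task, 'lf-pod2', 'compass'))      # False
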
diff --git a/yardstick/cmd/commands/testcase.py b/yardstick/cmd/commands/testcase.py
index cb76c7ae3..6ff796238 100644
--- a/yardstick/cmd/commands/testcase.py
+++ b/yardstick/cmd/commands/testcase.py
@@ -8,14 +8,9 @@
##############################################################################
""" Handler for yardstick command 'testcase' """
-import os
-import yaml
-import sys
-
-from yardstick.cmd import print_hbar
-from yardstick.common.task_template import TaskTemplate
+from yardstick.benchmark.core.testcase import Testcase
from yardstick.common.utils import cliargs
-from yardstick.definitions import YARDSTICK_ROOT_PATH
+from yardstick.cmd.commands import change_osloobj_to_paras
class TestcaseCommands(object):
@@ -23,92 +18,14 @@ class TestcaseCommands(object):
Set of commands to discover and display test cases.
'''
- def __init__(self):
- self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
- self.testcase_list = []
def do_list(self, args):
'''List existing test cases'''
-
- try:
- testcase_files = os.listdir(self.test_case_path)
- except Exception as e:
- print(("Failed to list dir:\n%(path)s\n%(err)s\n")
- % {"path": self.test_case_path, "err": e})
- raise e
- testcase_files.sort()
-
- for testcase_file in testcase_files:
- record = self._get_record(testcase_file)
- self.testcase_list.append(record)
-
- self._format_print(self.testcase_list)
- return True
+ param = change_osloobj_to_paras(args)
+ Testcase().list_all(param)
@cliargs("casename", type=str, help="test case name", nargs=1)
def do_show(self, args):
'''Show details of a specific test case'''
- testcase_name = args.casename[0]
- testcase_path = self.test_case_path + testcase_name + ".yaml"
- try:
- with open(testcase_path) as f:
- try:
- testcase_info = f.read()
- print testcase_info
-
- except Exception as e:
- print(("Failed to load test cases:"
- "\n%(testcase_file)s\n%(err)s\n")
- % {"testcase_file": testcase_path, "err": e})
- raise e
- except IOError as ioerror:
- sys.exit(ioerror)
- return True
-
- def _get_record(self, testcase_file):
-
- try:
- with open(self.test_case_path + testcase_file) as f:
- try:
- testcase_info = f.read()
- except Exception as e:
- print(("Failed to load test cases:"
- "\n%(testcase_file)s\n%(err)s\n")
- % {"testcase_file": testcase_file, "err": e})
- raise e
- description, installer, deploy_scenarios = \
- self._parse_testcase(testcase_info)
-
- record = {'Name': testcase_file.split(".")[0],
- 'Description': description,
- 'installer': installer,
- 'deploy_scenarios': deploy_scenarios}
- return record
- except IOError as ioerror:
- sys.exit(ioerror)
-
- def _parse_testcase(self, testcase_info):
-
- kw = {}
- rendered_testcase = TaskTemplate.render(testcase_info, **kw)
- testcase_cfg = yaml.load(rendered_testcase)
- test_precondition = testcase_cfg.get('precondition', None)
- installer_type = 'all'
- deploy_scenarios = 'all'
- if test_precondition is not None:
- installer_type = test_precondition.get('installer_type', 'all')
- deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
-
- description = testcase_info.split("\n")[2][1:].strip()
- return description, installer_type, deploy_scenarios
-
- def _format_print(self, testcase_list):
- '''format output'''
-
- print_hbar(88)
- print("| %-21s | %-60s" % ("Testcase Name", "Description"))
- print_hbar(88)
- for testcase_record in testcase_list:
- print "| %-16s | %-60s" % (testcase_record['Name'],
- testcase_record['Description'])
- print_hbar(88)
+ param = change_osloobj_to_paras(args)
+ Testcase().show(param)
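
As with the other commands, this listing logic moves to yardstick/benchmark/core/testcase.py. Worth noting is the convention the deleted _parse_testcase depends on: the third line of every test case file must be a comment carrying its one-line description. A toy illustration (the input string is invented):

    testcase_info = '\n'.join([
        '---',
        '# Sample test case header',
        '# measure network latency using ping',
        'schema: "yardstick:task:0.1"',
    ])

    # line index 2, drop the leading '#', trim whitespace
    description = testcase_info.split("\n")[2][1:].strip()
    print(description)  # measure network latency using ping
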
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 443b3e810..174d39bfe 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import os
DOCKER_URL = 'unix://var/run/docker.sock'
@@ -15,6 +23,7 @@ GRAFANA_TAGS = '3.1.1'
dirname = os.path.dirname
abspath = os.path.abspath
+join = os.path.join
sep = os.path.sep
INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
@@ -25,14 +34,23 @@ YARDSTICK_REPOS_DIR = '/home/opnfv/repos/yardstick'
YARDSTICK_CONFIG_DIR = '/etc/yardstick/'
-YARDSTICK_CONFIG_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'yardstick.conf')
+YARDSTICK_CONFIG_FILE = join(YARDSTICK_CONFIG_DIR, 'yardstick.conf')
+
+YARDSTICK_CONFIG_SAMPLE_DIR = join(YARDSTICK_ROOT_PATH, 'etc/yardstick/')
+
+YARDSTICK_CONFIG_SAMPLE_FILE = join(YARDSTICK_CONFIG_SAMPLE_DIR,
+ 'yardstick.conf.sample')
RELENG_DIR = '/home/opnfv/repos/releng'
OS_FETCH_SCRIPT = 'utils/fetch_os_creds.sh'
+CLEAN_IMAGES_SCRIPT = 'tests/ci/clean_images.sh'
+
LOAD_IMAGES_SCRIPT = 'tests/ci/load_images.sh'
-OPENSTACK_RC_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'openstack.creds')
+OPENSTACK_RC_FILE = join(YARDSTICK_CONFIG_DIR, 'openstack.creds')
-YARDSTICK_ENV_ACTION_API = 'http://localhost:5000/yardstick/env/action'
+BASE_URL = 'http://localhost:5000'
+ENV_ACTION_API = BASE_URL + '/yardstick/env/action'
+ASYNC_TASK_API = BASE_URL + '/yardstick/asynctask'
diff --git a/yardstick/common/httpClient.py b/yardstick/common/httpClient.py
index ab2e9a379..6acd0303d 100644
--- a/yardstick/common/httpClient.py
+++ b/yardstick/common/httpClient.py
@@ -28,3 +28,7 @@ class HttpClient(object):
except Exception as e:
logger.debug('Failed: %s', e)
raise
+
+ def get(self, url):
+ response = requests.get(url)
+ return response.json()
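
The new constants and the new HttpClient.get are meant to be used together; a hedged usage sketch follows (the task_id query parameter is an assumption for illustration). Note that get() as added above sets no timeout and does no error handling, so requests exceptions propagate to the caller:

    from yardstick.common import constants as consts
    from yardstick.common.httpClient import HttpClient

    task_id = '0123abcd-0000-0000-0000-000000000000'  # placeholder value
    client = HttpClient()
    # poll the async-task endpoint for the status of a submitted task
    status = client.get(consts.ASYNC_TASK_API + '?task_id=' + task_id)
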
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 25dcffadd..d8dc61ef6 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -5,7 +5,6 @@
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-# yardstick: this file is copied from rally and slightly modified
##############################################################################
import os
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 4839455e1..f1104d625 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -17,6 +17,7 @@ import logging
import pkg_resources
import json
+from oslo_utils import encodeutils
import heatclient
from yardstick.common import template_format
@@ -297,15 +298,19 @@ class HeatTemplate(HeatObject):
}
}
- def add_keypair(self, name):
+ def add_keypair(self, name, key_uuid):
'''add to the template a Nova KeyPair'''
log.debug("adding Nova::KeyPair '%s'", name)
self.resources[name] = {
'type': 'OS::Nova::KeyPair',
'properties': {
'name': name,
- 'public_key': pkg_resources.resource_string(
- 'yardstick.resources', 'files/yardstick_key.pub')
+ 'public_key': encodeutils.safe_decode(
+ pkg_resources.resource_string(
+ 'yardstick.resources',
+ 'files/yardstick_key-{:.{width}}.pub'.format(
+ key_uuid, width=8)),
+ 'utf-8')
}
}
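
The key file name now embeds the first eight characters of the task's key UUID through precision formatting ('{:.{width}}' truncates a string argument); the encodeutils.safe_decode wrapper is needed because pkg_resources.resource_string returns bytes while the template wants text. A quick standalone check of the truncation:

    import uuid

    key_uuid = str(uuid.uuid4())
    name = 'files/yardstick_key-{:.{width}}.pub'.format(key_uuid, width=8)
    print(name)  # e.g. files/yardstick_key-1b415b54.pub
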
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 3081001b6..927ca94db 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -66,6 +66,7 @@ import os
import select
import socket
import time
+import re
import logging
import paramiko
@@ -252,7 +253,7 @@ class SSH(object):
raise SSHError("Socket error.")
exit_status = session.recv_exit_status()
- if 0 != exit_status and raise_on_error:
+ if exit_status != 0 and raise_on_error:
fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
details = fmt % {"cmd": cmd, "status": exit_status}
if stderr_data:
@@ -311,17 +312,21 @@ class SSH(object):
mode = 0o777 & os.stat(localpath).st_mode
sftp.chmod(remotepath, mode)
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
def _put_file_shell(self, localpath, remotepath, mode=None):
# quote to stop word splitting
- cmd = ['cat > %s' % remotepath]
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
if mode is not None:
# use '--' so the path cannot be parsed as options
- cmd.append('chmod -- 0%o %s' % (mode, remotepath))
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
with open(localpath, "rb") as localfile:
# only chmod on successful cat
- cmd = " && ".join(cmd)
- self.run(cmd, stdin=localfile)
+ self.run("&& ".join(cmd), stdin=localfile)
def put_file(self, localpath, remotepath, mode=None):
"""Copy specified local file to the server.
@@ -330,7 +335,6 @@ class SSH(object):
:param remotepath: Remote filename.
:param mode: Permissions to set after upload
"""
- import socket
try:
self._put_file_sftp(localpath, remotepath, mode=mode)
except (paramiko.SSHException, socket.error):