-rwxr-xr-x  api/api-prepare.sh | 8
-rw-r--r--  api/base.py | 55
-rw-r--r--  api/database/__init__.py | 29
-rw-r--r--  api/database/models.py | 25
-rw-r--r--  api/resources/__init__.py (renamed from api/actions/__init__.py) | 0
-rw-r--r--  api/resources/env_action.py (renamed from api/actions/env.py) | 40
-rw-r--r--  api/resources/release_action.py (renamed from api/actions/test.py) | 0
-rw-r--r--  api/resources/results.py (renamed from api/actions/result.py) | 4
-rw-r--r--  api/resources/samples_action.py (renamed from api/actions/samples.py) | 0
-rw-r--r--  api/server.py | 9
-rw-r--r--  api/urls.py | 6
-rw-r--r--  api/views.py | 53
-rw-r--r--  api/yardstick.ini | 4
-rw-r--r--  api/yardstick.sock | 0
-rwxr-xr-x  docs/userguide/03-architecture.rst | 2
-rw-r--r--  docs/userguide/opnfv_yardstick_tc001.rst | 108
-rw-r--r--  docs/userguide/opnfv_yardstick_tc002.rst | 97
-rw-r--r--  docs/userguide/opnfv_yardstick_tc004.rst | 81
-rw-r--r--  docs/userguide/opnfv_yardstick_tc005.rst | 102
-rw-r--r--  docs/userguide/opnfv_yardstick_tc010.rst | 116
-rw-r--r--  docs/userguide/opnfv_yardstick_tc011.rst | 81
-rw-r--r--  docs/userguide/opnfv_yardstick_tc012.rst | 101
-rw-r--r--  docs/userguide/opnfv_yardstick_tc014.rst | 92
-rw-r--r--  docs/userguide/opnfv_yardstick_tc037.rst | 153
-rw-r--r--  docs/userguide/opnfv_yardstick_tc043.rst | 74
-rw-r--r--  etc/yardstick/yardstick.conf.sample | 25
-rwxr-xr-x  fuel-plugin/deployment_scripts/install.sh | 5
-rw-r--r--  fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp | 2
-rw-r--r--  plugin/storperf.yaml | 1
-rw-r--r--  requirements.txt | 1
-rw-r--r--  samples/ping_load.yaml | 65
-rwxr-xr-x  setup.py | 3
-rwxr-xr-x  tests/ci/clean_images.sh | 15
-rwxr-xr-x  tests/ci/load_images.sh | 47
-rwxr-xr-x  tests/ci/prepare_storperf_admin-rc.sh | 5
-rw-r--r--  tests/ci/scp_storperf_admin-rc.sh | 11
-rwxr-xr-x  tests/ci/yardstick-verify | 8
-rw-r--r--  tests/opnfv/test_suites/opnfv_components.yaml | 16
-rw-r--r--  tests/opnfv/test_suites/opnfv_features.yaml | 52
-rw-r--r--  tests/opnfv/test_suites/opnfv_performance.yaml | 62
-rw-r--r--  tests/opnfv/test_suites/opnfv_smoke.yaml | 14
-rw-r--r--  tests/unit/benchmark/core/__init__.py (renamed from tests/unit/cmd/commands/__init__.py) | 0
-rw-r--r--  tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml) | 0
-rw-r--r--  tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml) | 0
-rw-r--r--  tests/unit/benchmark/core/test_plugin.py (renamed from tests/unit/cmd/commands/test_plugin.py) | 32
-rw-r--r--  tests/unit/benchmark/core/test_task.py (renamed from tests/unit/cmd/commands/test_task.py) | 88
-rw-r--r--  tests/unit/benchmark/core/test_testcase.py (renamed from tests/unit/cmd/commands/test_testcase.py) | 29
-rw-r--r--  tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml) | 0
-rw-r--r--  tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml (renamed from tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml) | 0
-rw-r--r--  tests/unit/test_ssh.py | 36
-rwxr-xr-x  tools/yardstick-img-modify | 8
-rw-r--r--  yardstick/benchmark/core/__init__.py | 38
-rw-r--r--  yardstick/benchmark/core/plugin.py | 212
-rw-r--r--  yardstick/benchmark/core/runner.py | 36
-rw-r--r--  yardstick/benchmark/core/scenario.py | 36
-rw-r--r--  yardstick/benchmark/core/task.py | 484
-rw-r--r--  yardstick/benchmark/core/testcase.py | 112
-rw-r--r--  yardstick/cmd/commands/__init__.py | 9
-rw-r--r--  yardstick/cmd/commands/plugin.py | 167
-rw-r--r--  yardstick/cmd/commands/runner.py | 18
-rw-r--r--  yardstick/cmd/commands/scenario.py | 18
-rw-r--r--  yardstick/cmd/commands/task.py | 468
-rw-r--r--  yardstick/cmd/commands/testcase.py | 95
-rw-r--r--  yardstick/common/constants.py | 20
-rw-r--r--  yardstick/common/openstack_utils.py | 1
-rw-r--r--  yardstick/ssh.py | 16
66 files changed, 2294 insertions, 1201 deletions
diff --git a/api/api-prepare.sh b/api/api-prepare.sh
index fade8ccc6..5cc65c959 100755
--- a/api/api-prepare.sh
+++ b/api/api-prepare.sh
@@ -20,7 +20,7 @@ server {
index index.htm index.html;
location / {
include uwsgi_params;
- uwsgi_pass unix:///home/opnfv/repos/yardstick/api/yardstick.sock;
+ uwsgi_pass unix:///var/run/yardstick.sock;
}
}
EOF
@@ -47,3 +47,9 @@ command = uwsgi -i yardstick.ini
autorestart = true
EOF
fi
+
+# create api log directory
+mkdir -p /var/log/yardstick
+
+# create yardstick.sock for communication
+touch /var/run/yardstick.sock
diff --git a/api/base.py b/api/base.py
new file mode 100644
index 000000000..7671527d4
--- /dev/null
+++ b/api/base.py
@@ -0,0 +1,55 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import re
+import importlib
+import logging
+
+from flask import request
+from flask_restful import Resource
+
+from api.utils import common as common_utils
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class ApiResource(Resource):
+
+ def _post_args(self):
+ params = common_utils.translate_to_str(request.json)
+ action = params.get('action', '')
+ args = params.get('args', {})
+ logger.debug('Input args is: action: %s, args: %s', action, args)
+
+ return action, args
+
+ def _get_args(self):
+ args = common_utils.translate_to_str(request.args)
+ logger.debug('Input args is: args: %s', args)
+
+ return args
+
+ def _dispatch_post(self):
+ action, args = self._post_args()
+ return self._dispatch(args, action)
+
+ def _dispatch_get(self):
+ args = self._get_args()
+ return self._dispatch(args)
+
+ def _dispatch(self, args, action='default'):
+ module_name = re.sub(r'([A-Z][a-z]*)', r'_\1',
+ self.__class__.__name__)[1:].lower()
+
+ module_name = 'api.resources.%s' % module_name
+ resources = importlib.import_module(module_name)
+ try:
+ return getattr(resources, action)(args)
+ except AttributeError:
+ return common_utils.error_handler('Wrong action')
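The CamelCase-to-snake_case translation in _dispatch maps each view class to a module under api/resources/; a minimal sketch of what it computes (class names taken from urls.py further down):

    import re

    def to_module_name(class_name):
        # 'ReleaseAction' -> '_Release_Action' -> 'release_action'
        return re.sub(r'([A-Z][a-z]*)', r'_\1', class_name)[1:].lower()

    assert to_module_name('ReleaseAction') == 'release_action'
    assert to_module_name('SamplesAction') == 'samples_action'
    assert to_module_name('Results') == 'results'
    assert to_module_name('EnvAction') == 'env_action'

The requested 'action' then names a function inside that module (see the default() helper added to api/resources/results.py below).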
diff --git a/api/database/__init__.py b/api/database/__init__.py
new file mode 100644
index 000000000..bc2708bc7
--- /dev/null
+++ b/api/database/__init__.py
@@ -0,0 +1,29 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import scoped_session, sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+engine = create_engine('sqlite:////tmp/yardstick.db', convert_unicode=True)
+db_session = scoped_session(sessionmaker(autocommit=False,
+ autoflush=False,
+ bind=engine))
+Base = declarative_base()
+Base.query = db_session.query_property()
+
+
+def init_db():
+ subclasses = [subclass.__name__ for subclass in Base.__subclasses__()]
+ logger.debug('Import models: %s', subclasses)
+ Base.metadata.create_all(bind=engine)
diff --git a/api/database/models.py b/api/database/models.py
new file mode 100644
index 000000000..25e323842
--- /dev/null
+++ b/api/database/models.py
@@ -0,0 +1,25 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import String
+
+from api.database import Base
+
+
+class Tasks(Base):
+ __tablename__ = 'tasks'
+ id = Column(Integer, primary_key=True)
+ task_id = Column(String(30))
+ status = Column(Integer)
+ error = Column(String(120))
+ details = Column(String(120))
+
+ def __repr__(self):
+ return '<Task %r>' % self.task_id
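A hypothetical usage sketch of the new database layer; note that init_db() only creates the 'tasks' table if the model has been imported first, since create_all() works off Base's registered subclasses:

    from api.database import db_session, init_db
    from api.database.models import Tasks

    init_db()  # creates the tables in sqlite:////tmp/yardstick.db

    task = Tasks(task_id='example-id', status=0)
    db_session.add(task)
    db_session.commit()

    # Base.query is bound to the scoped session, so models query directly
    print(Tasks.query.filter_by(task_id='example-id').first())
    db_session.remove()  # done per request by the teardown hook in server.py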
diff --git a/api/actions/__init__.py b/api/resources/__init__.py
index e69de29bb..e69de29bb 100644
--- a/api/actions/__init__.py
+++ b/api/resources/__init__.py
diff --git a/api/actions/env.py b/api/resources/env_action.py
index fa0f95d90..59a1692a1 100644
--- a/api/actions/env.py
+++ b/api/resources/env_action.py
@@ -13,6 +13,7 @@ import time
import json
import os
import errno
+import ConfigParser
from docker import Client
@@ -104,7 +105,7 @@ def _create_influxdb():
client = Client(base_url=config.DOCKER_URL)
try:
- _config_output_file()
+ _change_output_to_influxdb()
if not _check_image_exist(client, '%s:%s' % (config.INFLUXDB_IMAGE,
config.INFLUXDB_TAG)):
@@ -144,29 +145,18 @@ def _config_influxdb():
logger.debug('Failed to config influxDB: %s', e)
-def _config_output_file():
+def _change_output_to_influxdb():
yardstick_utils.makedirs(config.YARDSTICK_CONFIG_DIR)
- with open(config.YARDSTICK_CONFIG_FILE, 'w') as f:
- f.write("""\
-[DEFAULT]
-debug = False
-dispatcher = influxdb
-[dispatcher_file]
-file_path = /tmp/yardstick.out
+ parser = ConfigParser.ConfigParser()
+ parser.read(config.YARDSTICK_CONFIG_SAMPLE_FILE)
-[dispatcher_http]
-timeout = 5
-# target = http://127.0.0.1:8000/results
+ parser.set('DEFAULT', 'dispatcher', 'influxdb')
+ parser.set('dispatcher_influxdb', 'target',
+ 'http://%s:8086' % api_conf.GATEWAY_IP)
-[dispatcher_influxdb]
-timeout = 5
-target = http://%s:8086
-db_name = yardstick
-username = root
-password = root
-"""
- % api_conf.GATEWAY_IP)
+ with open(config.YARDSTICK_CONFIG_FILE, 'w') as f:
+ parser.write(f)
def prepareYardstickEnv(args):
@@ -195,6 +185,8 @@ def _prepare_env_daemon():
# update the external_network
_source_file(rc_file)
+ _clean_images()
+
_load_images()
@@ -251,6 +243,14 @@ def _append_external_network(rc_file):
raise
+def _clean_images():
+ cmd = [config.CLEAN_IMAGES_SCRIPT]
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cwd=config.YARDSTICK_REPOS_DIR)
+ output = p.communicate()[0]
+ logger.debug('The result is: %s', output)
+
+
def _load_images():
cmd = [config.LOAD_IMAGES_SCRIPT]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
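Judging by the heredoc removed above, the rewritten yardstick.conf should come out roughly as follows after the ConfigParser pass (the gateway IP is substituted at runtime, and the sample file's other sections are carried over unchanged):

    [DEFAULT]
    debug = False
    dispatcher = influxdb

    [dispatcher_influxdb]
    timeout = 5
    target = http://<GATEWAY_IP>:8086
    db_name = yardstick
    username = root
    password = root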
diff --git a/api/actions/test.py b/api/resources/release_action.py
index fda0ffd32..fda0ffd32 100644
--- a/api/actions/test.py
+++ b/api/resources/release_action.py
diff --git a/api/actions/result.py b/api/resources/results.py
index 1f200fbcc..3de09fdc9 100644
--- a/api/actions/result.py
+++ b/api/resources/results.py
@@ -17,6 +17,10 @@ from api import conf
logger = logging.getLogger(__name__)
+def default(args):
+ return getResult(args)
+
+
def getResult(args):
try:
measurement = args['measurement']
diff --git a/api/actions/samples.py b/api/resources/samples_action.py
index 545447aec..545447aec 100644
--- a/api/actions/samples.py
+++ b/api/resources/samples_action.py
diff --git a/api/server.py b/api/server.py
index 64a2b4f96..fac821b00 100644
--- a/api/server.py
+++ b/api/server.py
@@ -12,6 +12,8 @@ from flask import Flask
from flask_restful import Api
from flasgger import Swagger
+from api.database import init_db
+from api.database import db_session
from api.urls import urlpatterns
from yardstick import _init_logging
@@ -19,11 +21,18 @@ logger = logging.getLogger(__name__)
app = Flask(__name__)
+init_db()
+
Swagger(app)
api = Api(app)
+@app.teardown_request
+def shutdown_session(exception=None):
+ db_session.remove()
+
+
reduce(lambda a, b: a.add_resource(b.resource, b.url,
endpoint=b.endpoint) or a, urlpatterns, api)
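The reduce() call is a fold that registers every URL pattern on the Api object; a plain loop expressing the same thing:

    for pattern in urlpatterns:
        api.add_resource(pattern.resource, pattern.url,
                         endpoint=pattern.endpoint)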
diff --git a/api/urls.py b/api/urls.py
index 50be91ead..0fffd12db 100644
--- a/api/urls.py
+++ b/api/urls.py
@@ -11,8 +11,8 @@ from api.utils.common import Url
urlpatterns = [
- Url('/yardstick/testcases/release/action', views.Release, 'release'),
- Url('/yardstick/testcases/samples/action', views.Samples, 'samples'),
+ Url('/yardstick/testcases/release/action', views.ReleaseAction, 'release'),
+ Url('/yardstick/testcases/samples/action', views.SamplesAction, 'samples'),
Url('/yardstick/results', views.Results, 'results'),
- Url('/yardstick/env/action', views.Env, 'env')
+ Url('/yardstick/env/action', views.EnvAction, 'env')
]
diff --git a/api/views.py b/api/views.py
index 928d8e9eb..ee13b47a9 100644
--- a/api/views.py
+++ b/api/views.py
@@ -9,18 +9,13 @@
import logging
import os
-from flask import request
-from flask_restful import Resource
from flasgger.utils import swag_from
-from api.utils import common as common_utils
+from api.base import ApiResource
from api.swagger import models
-from api.actions import test as test_action
-from api.actions import samples as samples_action
-from api.actions import result as result_action
-from api.actions import env as env_action
logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
TestCaseActionModel = models.TestCaseActionModel
@@ -29,54 +24,26 @@ TestCaseActionArgsOptsModel = models.TestCaseActionArgsOptsModel
TestCaseActionArgsOptsTaskArgModel = models.TestCaseActionArgsOptsTaskArgModel
-class Release(Resource):
+class ReleaseAction(ApiResource):
@swag_from(os.getcwd() + '/swagger/docs/testcases.yaml')
def post(self):
- action = common_utils.translate_to_str(request.json.get('action', ''))
- args = common_utils.translate_to_str(request.json.get('args', {}))
- logger.debug('Input args is: action: %s, args: %s', action, args)
+ return self._dispatch_post()
- try:
- return getattr(test_action, action)(args)
- except AttributeError:
- return common_utils.error_handler('Wrong action')
-
-class Samples(Resource):
+class SamplesAction(ApiResource):
def post(self):
- action = common_utils.translate_to_str(request.json.get('action', ''))
- args = common_utils.translate_to_str(request.json.get('args', {}))
- logger.debug('Input args is: action: %s, args: %s', action, args)
-
- try:
- return getattr(samples_action, action)(args)
- except AttributeError:
- return common_utils.error_handler('Wrong action')
+ return self._dispatch_post()
ResultModel = models.ResultModel
-class Results(Resource):
+class Results(ApiResource):
@swag_from(os.getcwd() + '/swagger/docs/results.yaml')
def get(self):
- args = common_utils.translate_to_str(request.args)
- action = args.get('action', '')
- logger.debug('Input args is: action: %s, args: %s', action, args)
+ return self._dispatch_get()
- try:
- return getattr(result_action, action)(args)
- except AttributeError:
- return common_utils.error_handler('Wrong action')
-
-class Env(Resource):
+class EnvAction(ApiResource):
def post(self):
- action = common_utils.translate_to_str(request.json.get('action', ''))
- args = common_utils.translate_to_str(request.json.get('args', {}))
- logger.debug('Input args is: action: %s, args: %s', action, args)
-
- try:
- return getattr(env_action, action)(args)
- except AttributeError:
- return common_utils.error_handler('Wrong action')
+ return self._dispatch_post()
diff --git a/api/yardstick.ini b/api/yardstick.ini
index 01025c2ef..2ba881fc1 100644
--- a/api/yardstick.ini
+++ b/api/yardstick.ini
@@ -12,5 +12,5 @@ chmod-socket = 666
callable = app
enable-threads = true
close-on-exec = 1
-daemonize=/home/opnfv/repos/yardstick/api/uwsgi.log
-socket = /home/opnfv/repos/yardstick/api/yardstick.sock
+daemonize = /var/log/yardstick/uwsgi.log
+socket = /var/run/yardstick.sock
diff --git a/api/yardstick.sock b/api/yardstick.sock
deleted file mode 100644
index e69de29bb..000000000
--- a/api/yardstick.sock
+++ /dev/null
diff --git a/docs/userguide/03-architecture.rst b/docs/userguide/03-architecture.rst
index ace3117c2..03bf00f58 100755
--- a/docs/userguide/03-architecture.rst
+++ b/docs/userguide/03-architecture.rst
@@ -175,7 +175,7 @@ LmBench, ...)
TaskCommands is the "yardstick task" subcommand's main entry. It takes yaml
file (e.g. test.yaml) as input, and uses HeatContext to convert the yaml
-file's context section to HOT. After Openstacik heat stack is deployed by
+file's context section to HOT. After Openstack heat stack is deployed by
HeatContext with the converted HOT, TaskCommands uses Runner to run the
specified TestScenario. During the first runner initialization, it will
create the output process. The output process uses Dispatcher to push test results. The Runner
diff --git a/docs/userguide/opnfv_yardstick_tc001.rst b/docs/userguide/opnfv_yardstick_tc001.rst
index fac375d50..b53c508a6 100644
--- a/docs/userguide/opnfv_yardstick_tc001.rst
+++ b/docs/userguide/opnfv_yardstick_tc001.rst
@@ -1,4 +1,4 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. This work is licensed under a Creative Commons Attribution 4.0 International
.. License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Ericsson AB and others.
@@ -13,38 +13,40 @@ Yardstick Test Case Description TC001
|Network Performance |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC001_NW PERF |
+|test case id | OPNFV_YARDSTICK_TC001_NETWORK PERFORMANCE |
| | |
+--------------+--------------------------------------------------------------+
|metric | Number of flows and throughput |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS network performance with regards to |
-| | flows and throughput, such as if and how different amounts |
-| | of flows matter for the throughput between hosts on |
-| | different compute blades. Typically e.g. the performance of |
-| | a vSwitch depends on the number of flows running through it. |
-| | Also performance of other equipment or entities can depend |
-| | on the number of flows or the packet sizes used. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs ans similar shall be stored for comparison reasons |
-| | and product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
-| | |
-+--------------+--------------------------------------------------------------+
-|configuration | file: opnfv_yardstick_tc001.yaml |
-| | |
-| | Packet size: 60 bytes |
-| | Number of ports: 10, 50, 100, 500 and 1000, where each |
-| | runs for 20 seconds. The whole sequence is run |
-| | twice. The client and server are distributed on different |
-| | HW. |
-| | For SLA max_ppm is set to 1000. The amount of configured |
-| | ports map to between 110 up to 1001000 flows, respectively. |
+|test purpose | The purpose of TC001 is to evaluate the IaaS network |
+| | performance with regards to flows and throughput, such as if |
+| | and how different amounts of flows matter for the throughput |
+| | between hosts on different compute blades. Typically e.g. |
+| | the performance of a vSwitch depends on the number of flows |
+| | running through it. Also performance of other equipment or |
+| | entities can depend on the number of flows or the packet |
+| | sizes used. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | pktgen |
| | |
+| | Linux packet generator is a tool to generate packets at very |
+| | high speed in the kernel. pktgen is mainly used to test |
+| | network drivers and LAN equipment. pktgen supports multi |
+| | threading and can generate UDP packets with random MAC |
+| | addresses, IP addresses and port numbers, using multiple CPU |
+| | processors and NICs on different PCI buses (PCI, PCIe). |
+| | pktgen performance depends on hardware parameters such as |
+| | CPU processing speed, memory delay and PCI bus speed; the |
+| | transmit data rate can exceed 10 GBit/s, which satisfies |
+| | most NIC test requirements. |
+| | |
| | (Pktgen is not always part of a Linux distribution, hence it |
| | needs to be installed. It is part of the Yardstick Docker |
| | image. |
@@ -52,18 +54,47 @@ Yardstick Test Case Description TC001
| | to generate a Linux image with pktgen included.) |
| | |
+--------------+--------------------------------------------------------------+
-|references | pktgen_ |
+|test | This test case uses Pktgen to generate packet flow between |
+|description | two hosts for simulating network workloads on the SUT. |
| | |
-| | ETSI-NFV-TST001 |
++--------------+--------------------------------------------------------------+
+|traffic | An iptables rule is set up on the server to monitor |
+|profile | received packets. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc001.yaml |
+| | |
+| | Packet size is set to 60 bytes. |
+| | Number of ports: 10, 50, 100, 500 and 1000, where each |
+| | runs for 20 seconds. The whole sequence is run twice. |
+| | The client and server are distributed on different hardware. |
+| | |
+| | For SLA max_ppm is set to 1000. The amount of configured |
+| | ports map to between 110 up to 1001000 flows, respectively. |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different packet sizes, amount |
-| | of flows and test duration. Default values exist. |
+|applicability | Test can be configured with different: |
+| | |
+| | * packet sizes; |
+| | * amount of flows; |
+| | * test duration. |
+| | |
+| | Default values exist. |
| | |
| | SLA (optional): max_ppm: The number of packets per million |
| | packets sent that are acceptable to lose, i.e. not received. |
| | |
+--------------+--------------------------------------------------------------+
+|usability | This test case is used for generating high network |
+| | throughput to simulate certain workloads on the SUT. Hence |
+| | it can be run together with other test cases. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with pktgen included in it. |
| | |
@@ -73,12 +104,29 @@ Yardstick Test Case Description TC001
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. pktgen is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs are booted, as server and client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the server VM by using ssh. |
+| | 'pktgen_benchmark' bash script is copied from Jump Host to |
+| | the server VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | An iptables rule is set up on the server to monitor |
+| | received packets. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | pktgen is invoked to generate packet flow between the server |
+| | and client VMs to simulate network workloads on the SUT. |
+| | Results are processed and checked against the SLA. Logs are |
+| | produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 5 | Two host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
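The quoted flow range (110 flows for 10 ports up to 1001000 flows for 1000 ports) is consistent with a flows = ports * (ports + 1) mapping; a purely illustrative check:

    for ports in (10, 50, 100, 500, 1000):
        print('%4d ports -> %7d flows' % (ports, ports * (ports + 1)))
    # 10 ports -> 110 flows ... 1000 ports -> 1001000 flows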
diff --git a/docs/userguide/opnfv_yardstick_tc002.rst b/docs/userguide/opnfv_yardstick_tc002.rst
index 193fc531f..c98780fd5 100644
--- a/docs/userguide/opnfv_yardstick_tc002.rst
+++ b/docs/userguide/opnfv_yardstick_tc002.rst
@@ -8,34 +8,37 @@ Yardstick Test Case Description TC002
*************************************
.. _cirros-image: https://download.cirros-cloud.net
+.. _Ping: https://linux.die.net/man/8/ping
+-----------------------------------------------------------------------------+
|Network Latency |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC002_NW LATENCY |
+|test case id | OPNFV_YARDSTICK_TC002_NETWORK LATENCY |
| | |
+--------------+--------------------------------------------------------------+
-|metric | RTT, Round Trip Time |
+|metric | RTT (Round Trip Time) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To do a basic verification that network latency is within |
-| | acceptable boundaries when packets travel between hosts |
-| | located on same or different compute blades. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs and similar shall be stored for comparison reasons and|
-| | product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC002 is to do a basic verification that |
+| | network latency is within acceptable boundaries when packets |
+| | travel between hosts located on same or different compute |
+| | blades. |
| | |
-+--------------+--------------------------------------------------------------+
-|configuration | file: opnfv_yardstick_tc002.yaml |
-| | |
-| | Packet size 100 bytes. Total test duration 600 seconds. |
-| | One ping each 10 seconds. SLA RTT is set to maximum 10 ms. |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | ping |
| | |
+| | Ping is a computer network administration software utility |
+| | used to test the reachability of a host on an Internet |
+| | Protocol (IP) network. It measures the round-trip time for |
+| | packet sent from the originating host to a destination |
+| | computer that are echoed back to the source. |
+| | |
| | Ping is normally part of any Linux distribution, hence it |
| | doesn't need to be installed. It is also part of the |
| | Yardstick Docker image. |
@@ -43,27 +46,55 @@ Yardstick Test Case Description TC002
| | cirros-image_, it includes ping) |
| | |
+--------------+--------------------------------------------------------------+
-|references | Ping man page |
+|test topology | Ping packets (ICMP protocol's mandatory ECHO_REQUEST |
+| | datagram) are sent from host VM to target VM(s) to elicit |
+| | ICMP ECHO_RESPONSE. |
| | |
-| | ETSI-NFV-TST001 |
+| | For one host VM there can be multiple target VMs. |
+| | Host VM and target VM(s) can be on same or different compute |
+| | blades. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc002.yaml |
+| | |
+| | Packet size 100 bytes. Test duration 60 seconds. |
+| | One ping every 10 seconds. The test is iterated two times. |
+| | SLA RTT is set to maximum 10 ms. |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test case can be configured with different packet sizes, |
-| | burst sizes, ping intervals and test duration. |
+|applicability | This test case can be configured with different: |
+| | |
+| | * packet sizes; |
+| | * burst sizes; |
+| | * ping intervals; |
+| | * test durations; |
+| | * test iterations. |
+| | |
+| | Default values exist. |
+| | |
| | SLA is optional. The SLA in this test case serves as an |
-| | example. Considerably lower RTT is expected, and |
-| | also normal to achieve in balanced L2 environments. However, |
-| | to cover most configurations, both bare metal and fully |
-| | virtualized ones, this value should be possible to achieve |
-| | and acceptable for black box testing. Many real time |
+| | example. Considerably lower RTT is expected, and also normal |
+| | to achieve in balanced L2 environments. However, to cover |
+| | most configurations, both bare metal and fully virtualized |
+| | ones, this value should be possible to achieve and |
+| | acceptable for black box testing. Many real time |
| | applications start to suffer badly if the RTT time is higher |
| | than this. Some may suffer bad also close to this RTT, while |
| | others may not suffer at all. It is a compromise that may |
| | have to be tuned for different configuration purposes. |
| | |
+--------------+--------------------------------------------------------------+
-|pre-test | The test case image needs to be installed into Glance |
-|conditions | with ping included in it. |
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Ping_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image (cirros-image) needs to be installed |
+|conditions | into Glance with ping included in it. |
| | |
| | No POD specific requirements have been identified. |
| | |
@@ -71,12 +102,24 @@ Yardstick Test Case Description TC002
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. Ping is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs are booted, as server and client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the server VM by using ssh. |
+| | 'ping_benchmark' bash script is copied from Jump Host to |
+| | the server VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Ping is invoked. Ping packets are sent from server VM to |
+| | client VM. RTT results are calculated and checked against |
+| | the SLA. Logs are produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | Two host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test should not PASS if any RTT is above the optional SLA |
| | value, or if there is a test case execution problem. |
| | |
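The test verdict reduces to a simple predicate over the measured RTT samples; a minimal sketch using the 10 ms SLA quoted above:

    def sla_passed(rtt_samples_ms, max_rtt=10.0):
        # Test should not PASS if any RTT is above the SLA value
        return all(rtt <= max_rtt for rtt in rtt_samples_ms)

    print(sla_passed([0.8, 1.2, 2.5]))  # True
    print(sla_passed([0.8, 12.0]))      # False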
diff --git a/docs/userguide/opnfv_yardstick_tc004.rst b/docs/userguide/opnfv_yardstick_tc004.rst
index 301286126..3554b3826 100644
--- a/docs/userguide/opnfv_yardstick_tc004.rst
+++ b/docs/userguide/opnfv_yardstick_tc004.rst
@@ -13,39 +13,52 @@ Yardstick Test Case Description TC004
|Cache Utilization |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC004_Cache Utilization |
+|test case id | OPNFV_YARDSTICK_TC004_CACHE UTILIZATION |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Cache Utilization |
+|metric | cache hit, cache miss, hit/miss ratio, buffer size and page |
+| | cache size |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS compute capability with regards to |
-| | cache utilization.This test case should be run in parallel |
-| | to other Yardstick test cases and not run as a stand-alone |
-| | test case. |
-| | Measure the cache usage statistics including cache hit, |
-| | cache miss, hit ratio, page cache size and page cache size. |
-| | Both average and maximun values are obtained. |
-| | The purpose is also to be able to spot trends. |
+|test purpose | The purpose of TC004 is to evaluate the IaaS compute |
+| | capability with regards to cache utilization.This test case |
+| | should be run in parallel with other Yardstick test cases |
+| | and not run as a stand-alone test case. |
+| | |
+| | This test case measures cache usage statistics, including |
+| | cache hit, cache miss, hit ratio, buffer cache size and page |
+| | cache size, with workloads running on the infrastructure. |
+| | Both average and maximum values are collected. |
+| | |
+| | The purpose is also to be able to spot the trends. |
| | Test results, graphs and similar shall be stored for |
| | comparison reasons and product evolution understanding |
| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
-|configuration | File: cachestat.yaml (in the 'samples' directory) |
+|test tool | cachestat |
| | |
-| | * interval: 1 - repeat, pausing every 1 seconds in-between. |
+| | cachestat is a tool using Linux ftrace capabilities for |
+| | showing Linux page cache hit/miss statistics. |
| | |
-+--------------+--------------------------------------------------------------+
-|test tool | cachestat |
+| | (cachestat is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with cachestat included.) |
| | |
-| | cachestat is not always part of a Linux distribution, hence |
-| | it needs to be installed. |
++--------------+--------------------------------------------------------------+
+|test | cachestat is invoked in a host VM on a compute blade. The |
+|description | test requires some other test cases running in the host to |
+| | stimulate workload. |
| | |
+--------------+--------------------------------------------------------------+
-|references | cachestat_ |
+|configuration | File: cachestat.yaml (in the 'samples' directory) |
| | |
-| | ETSI-NFV-TST001 |
+| | Interval is set to 1, i.e. the test repeats with a pause of |
+| | 1 second in between. |
+| | Test duration is set to 60 seconds. |
+| | |
+| | SLA is not available in this test case. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -53,8 +66,16 @@ Yardstick Test Case Description TC004
| | * interval; |
| | * runner Duration. |
| | |
-| | There are default values for each above-mentioned option. |
-| | Run in background with other test cases. |
+| | Default values exist. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | cachestat_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
@@ -66,12 +87,24 @@ Yardstick Test Case Description TC004
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed as client. The related TC, or TCs, is |
-| | invoked and cachestat logs are produced and stored. |
+|step 1 | A host VM with cachestat installed is booted. |
| | |
-| | Result: logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | 'cache_stat' bash script is copied from Jump Host to |
+| | the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'cache_stat' script is invoked. Raw cache usage statistics |
+| | are collected and filtered. Average and maximum values are |
+| | calculated and recorded. Logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
| | |
+--------------+--------------------------------------------------------------+
-|test verdict | None. Cache utilization results are fetched and stored. |
+|test verdict | None. Cache utilization results are collected and stored. |
| | |
+--------------+--------------------------------------------------------------+
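The hit ratio metric plus the average/maximum aggregation described in step 3 amount to something like this (a sketch over hypothetical samples):

    samples = [{'hits': 950, 'misses': 50}, {'hits': 800, 'misses': 200}]

    ratios = [s['hits'] / float(s['hits'] + s['misses']) for s in samples]
    print('average hit ratio: %.3f' % (sum(ratios) / len(ratios)))
    print('maximum hit ratio: %.3f' % max(ratios))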
diff --git a/docs/userguide/opnfv_yardstick_tc005.rst b/docs/userguide/opnfv_yardstick_tc005.rst
index a181aa9f7..1c2d71d81 100644
--- a/docs/userguide/opnfv_yardstick_tc005.rst
+++ b/docs/userguide/opnfv_yardstick_tc005.rst
@@ -1,4 +1,4 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. This work is licensed under a Creative Commons Attribution 4.0 International
.. License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
@@ -7,53 +7,88 @@
Yardstick Test Case Description TC005
*************************************
-.. _fio: http://www.bluestop.org/fio/HOWTO.txt
+.. _fio: http://bluestop.org/files/fio/HOWTO.txt
+-----------------------------------------------------------------------------+
|Storage Performance |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC005_Storage Performance |
+|test case id | OPNFV_YARDSTICK_TC005_STORAGE PERFORMANCE |
| | |
+--------------+--------------------------------------------------------------+
-|metric | IOPS, throughput and latency |
+|metric | IOPS (Average IOs performed per second), |
+| | Throughput (Average disk read/write bandwidth rate), |
+| | Latency (Average disk read/write latency) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS storage performance with regards to |
-| | IOPS, throughput and latency. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs and similar shall be stored for comparison reasons |
-| | and product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC005 is to evaluate the IaaS storage |
+| | performance with regards to IOPS, throughput and latency. |
| | |
-+--------------+--------------------------------------------------------------+
-|configuration | file: opnfv_yardstick_tc005.yaml |
-| | |
-| | IO types: read, write, randwrite, randread, rw |
-| | IO block size: 4KB, 64KB, 1024KB, where each |
-| | runs for 30 seconds(10 for ramp time, 20 for runtime). |
-| | |
-| | For SLA minimum read/write iops is set to 100, minimum |
-| | read/write throughput is set to 400 KB/s, and maximum |
-| | read/write latency is set to 20000 usec. |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | fio |
| | |
+| | fio is an I/O tool meant to be used both for benchmark and |
+| | stress/hardware verification. It has support for 19 |
+| | different types of I/O engines (sync, mmap, libaio, |
+| | posixaio, SG v3, splice, null, network, syslet, guasi, |
+| | solarisaio, and more), I/O priorities (for newer Linux |
+| | kernels), rate I/O, forked or threaded jobs, and much more. |
+| | |
| | (fio is not always part of a Linux distribution, hence it |
| | needs to be installed. As an example see the |
| | /yardstick/tools/ directory for how to generate a Linux |
| | image with fio included.) |
| | |
+--------------+--------------------------------------------------------------+
-|references | fio_ |
+|test | fio is invoked in a host VM on a compute blade. A job file |
+|description | as well as parameters are passed to fio, and fio starts |
+| | doing what the job file tells it to do. |
| | |
-| | ETSI-NFV-TST001 |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc005.yaml |
+| | |
+| | IO types are set to read, write, randwrite, randread and rw. |
+| | IO block size is set to 4KB, 64KB, 1024KB. |
+| | fio is run for each IO type and IO block size scheme, |
+| | each iteration runs for 30 seconds (10 for ramp time, 20 for |
+| | runtime). |
+| | |
+| | For SLA, minimum read/write iops is set to 100, |
+| | minimum read/write throughput is set to 400 KB/s, |
+| | and maximum read/write latency is set to 20000 usec. |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different read/write types, IO |
-| | block size, IO depth, ramp time (runtime required for stable |
-| | results) and test duration. Default values exist. |
+|applicability | This test case can be configured with different: |
+| | |
+| | * IO types; |
+| | * IO block size; |
+| | * IO depth; |
+| | * ramp time; |
+| | * test duration. |
+| | |
+| | Default values exist. |
+| | |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably higher throughput and lower latency |
+| | are expected. However, to cover most configurations, both |
+| | baremetal and fully virtualized ones, this value should be |
+| | possible to achieve and acceptable for black box testing. |
+| | Many heavy IO applications start to suffer badly if the |
+| | read/write bandwidths are lower than this. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | fio_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
@@ -65,12 +100,25 @@ Yardstick Test Case Description TC005
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed and fio is invoked and logs are |
-| | produced and stored. |
+|step 1 | A host VM with fio installed is booted. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | 'fio_benchmark' bash script is copied from Jump Host to |
+| | the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'fio_benchmark' script is invoked. Simulated IO operations |
+| | are started. IOPS, disk read/write bandwidth and latency are |
+| | recorded and checked against the SLA. Logs are produced and |
+| | stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
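A minimal sketch of the SLA comparison in step 3, using the thresholds quoted in the configuration above (the result field names are hypothetical):

    SLA = {'min_iops': 100, 'min_bw_kbs': 400, 'max_latency_usec': 20000}

    def sla_passed(result):
        return (result['iops'] >= SLA['min_iops'] and
                result['bw_kbs'] >= SLA['min_bw_kbs'] and
                result['latency_usec'] <= SLA['max_latency_usec'])

    print(sla_passed({'iops': 250, 'bw_kbs': 900, 'latency_usec': 4000}))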
diff --git a/docs/userguide/opnfv_yardstick_tc010.rst b/docs/userguide/opnfv_yardstick_tc010.rst
index ab793de76..202307de6 100644
--- a/docs/userguide/opnfv_yardstick_tc010.rst
+++ b/docs/userguide/opnfv_yardstick_tc010.rst
@@ -7,21 +7,71 @@
Yardstick Test Case Description TC010
*************************************
-.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
+.. _lat_mem_rd: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
+-----------------------------------------------------------------------------+
|Memory Latency |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC010_Memory Latency |
+|test case id | OPNFV_YARDSTICK_TC010_MEMORY LATENCY |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Latency in nanoseconds |
+|metric | Memory read latency (nanoseconds) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Measure the memory read latency for varying memory sizes and |
-| | strides. Whole memory hierarchy is measured including all |
-| | levels of cache. |
+|test purpose | The purpose of TC010 is to evaluate the IaaS compute |
+| | performance with regards to memory read latency. |
+| | It measures the memory read latency for varying memory sizes |
+| | and strides. Whole memory hierarchy is measured. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Lmbench |
+| | |
+| | Lmbench is a suite of operating system microbenchmarks; this |
+| | test uses its lat_mem_rd tool. The suite includes: |
+| | * Context switching |
+| | * Networking: connection establishment, pipe, TCP, UDP, and |
+| | RPC hot potato |
+| | * File system creates and deletes |
+| | * Process creation |
+| | * Signal handling |
+| | * System call overhead |
+| | * Memory read latency |
+| | |
+| | (LMbench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with LMbench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | LMbench lat_mem_rd benchmark measures memory read latency |
+|description | for varying memory sizes and strides. |
+| | |
+| | The benchmark runs as two nested loops. The outer loop is |
+| | the stride size. The inner loop is the array size. For each |
+| | array size, the benchmark creates a ring of pointers that |
+| | point backward one stride. Traversing the array is done by: |
+| | |
+| | p = (char **)*p; |
+| | |
+| | in a for loop (the overhead of the for loop is not |
+| | significant; the loop is an unrolled loop 100 loads long). |
+| | The size of the array varies from 512 bytes to (typically) |
+| | eight megabytes. For the small sizes, the cache will have an |
+| | effect, and the loads will be much faster. This becomes much |
+| | more apparent when the data is plotted. |
+| | |
+| | Only data accesses are measured; the instruction cache is |
+| | not measured. |
+| | |
+| | The results are reported in nanoseconds per load and have |
+| | been verified accurate to within a few nanoseconds on an SGI |
+| | Indy. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc010.yaml |
@@ -33,20 +83,13 @@ Yardstick Test Case Description TC010
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
| | |
-+--------------+--------------------------------------------------------------+
-|test tool | Lmbench |
-| | |
-| | Lmbench is a suite of operating system microbenchmarks. This |
-| | test uses lat_mem_rd tool from that suite. |
-| | Lmbench is not always part of a Linux distribution, hence it |
-| | needs to be installed in the test image |
-| | |
-+--------------+--------------------------------------------------------------+
-|references | man-pages_ |
-| | |
-| | McVoy, Larry W.,and Carl Staelin. "lmbench: Portable Tools |
-| | for Performance Analysis." USENIX annual technical |
-| | conference 1996. |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably lower read latency is expected. |
+| | However, to cover most configurations, both baremetal and |
+| | fully virtualized ones, this value should be possible to |
+| | achieve and acceptable for black box testing. |
+| | Many heavy IO applications start to suffer badly if the |
+| | read latency is higher than this. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -55,12 +98,21 @@ Yardstick Test Case Description TC010
| | * stop_size; |
| | * iterations and intervals. |
| | |
-| | There are default values for each above-mentioned option. |
+| | Default values exist. |
| | |
| | SLA (optional) : max_latency: The maximum memory latency |
| | that is accepted. |
| | |
+--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | LMbench lat_mem_rd_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with Lmbench included in the image. |
| | |
@@ -70,12 +122,32 @@ Yardstick Test Case Description TC010
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed as client. Lmbench's lat_mem_rd tool |
+|step 1 | The host is installed as client. LMbench's lat_mem_rd tool |
| | is invoked and logs are produced and stored. |
| | |
| | Result: logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 1 | A host VM with LMbench installed is booted. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | 'lmbench_latency_benchmark' bash script is copied from Jump |
+| | Host to the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'lmbench_latency_benchmark' script is invoked. LMbench's |
+| | lat_mem_rd benchmark starts to measure memory read latency |
+| | for varying memory sizes and strides. Memory read latencies |
+| | are recorded and checked against the SLA. Logs are produced |
+| | and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory latency is above the SLA |
| | value or if there is a test case execution problem. |
| | |
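The ring-of-pointers traversal described above can be modelled in a few lines; a toy version of lat_mem_rd's backward-stride ring, using list indices in place of C pointers:

    def build_ring(n, stride):
        # each element points one stride backwards, wrapping around
        return [(i - stride) % n for i in range(n)]

    ring = build_ring(16, 3)
    p = 0
    for _ in range(10):
        p = ring[p]  # analogous to: p = (char **)*p;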
diff --git a/docs/userguide/opnfv_yardstick_tc011.rst b/docs/userguide/opnfv_yardstick_tc011.rst
index cf2fd5055..48bdef497 100644
--- a/docs/userguide/opnfv_yardstick_tc011.rst
+++ b/docs/userguide/opnfv_yardstick_tc011.rst
@@ -13,28 +13,22 @@ Yardstick Test Case Description TC011
|Packet delay variation between VMs |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC011_Packet delay variation between VMs |
+|test case id | OPNFV_YARDSTICK_TC011_PACKET DELAY VARIATION BETWEEN VMs |
| | |
+--------------+--------------------------------------------------------------+
|metric | jitter: packet delay variation (ms) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Measure the packet delay variation sending the packets from |
-| | one VM to the other. |
+|test purpose | The purpose of TC011 is to evaluate the IaaS network |
+| | performance with regards to network jitter (packet delay |
+| | variation). |
+| | It measures the packet delay variation sending the packets |
+| | from one VM to the other. |
| | |
-+--------------+--------------------------------------------------------------+
-|configuration | File: opnfv_yardstick_tc011.yaml |
-| | |
-| | * options: |
-| | protocol: udp # The protocol used by iperf3 tools |
-| | bandwidth: 20m # It will send the given number of packets |
-| | without pausing |
-| | * runner: |
-| | duration: 30 # Total test duration 30 seconds. |
-| | |
-| | * SLA (optional): |
-| | jitter: 10 (ms) # The maximum amount of jitter that is |
-| | accepted. |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
| | |
+--------------+--------------------------------------------------------------+
|test tool | iperf3 |
@@ -46,14 +40,34 @@ Yardstick Test Case Description TC011
| | |
| | (iperf3 is not always part of a Linux distribution, hence it |
| | needs to be installed. It is part of the Yardstick Docker |
-| | image. |
-| | As an example see the /yardstick/tools/ directory for how |
-| | to generate a Linux image with pktgen included.) |
+| | image. As an example see the /yardstick/tools/ directory for |
+| | how to generate a Linux image with iperf3 included.) |
| | |
+--------------+--------------------------------------------------------------+
-|references | iperf3_ |
+|test | iperf3 test is invoked between a host VM and a target VM. |
+|description | |
+| | Jitter calculations are continuously computed by the server, |
+| | as specified by RTP in RFC 1889. The client records a 64 bit |
+| | second/microsecond timestamp in the packet. The server |
+| | computes the relative transit time as (server's receive time |
+| | - client's send time). The client's and server's clocks do |
+| | not need to be synchronized; any difference is subtracted |
+| | out in the jitter calculation. Jitter is the smoothed mean |
+| | of differences between consecutive transit times. |
| | |
-| | ETSI-NFV-TST001 |
++--------------+--------------------------------------------------------------+
+|configuration | File: opnfv_yardstick_tc011.yaml |
+| | |
+| | * options: |
+| | protocol: udp # The protocol used by iperf3 tools |
+| | bandwidth: 20m # It will send the given number of packets |
+| | without pausing |
+| | * runner: |
+| | duration: 30 # Total test duration 30 seconds. |
+| | |
+| | * SLA (optional): |
+| | jitter: 10 (ms) # The maximum amount of jitter that is |
+| | accepted. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -67,6 +81,15 @@ Yardstick Test Case Description TC011
| | serves as an example. |
| | |
+--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | iperf3_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with iperf3 included in the image. |
| | |
@@ -76,12 +99,24 @@ Yardstick Test Case Description TC011
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. iperf3 is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs with iperf3 installed are booted, as server and |
+| | client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected with the host VM by using ssh. |
+| | A iperf3 server is started on the server VM via the ssh |
+| | tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | iperf3 benchmark is invoked. Jitter is calculated and |
+| | checked against the SLA. Logs are produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | The host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test should not PASS if any jitter is above the optional SLA |
| | value, or if there is a test case execution problem. |
| | |
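A minimal Python sketch of the RFC 1889 jitter estimator described in the
TC011 test description above (illustrative only, not Yardstick code;
transit times are assumed to be given in milliseconds, and the SLA value
is the 10 ms example from the configuration row)::

    def smoothed_jitter(transit_times_ms):
        """Exponentially smoothed mean of the absolute differences
        between consecutive relative transit times (RFC 1889)."""
        jitter = 0.0
        prev = None
        for transit in transit_times_ms:
            if prev is not None:
                # J = J + (|D(i-1, i)| - J) / 16, per RFC 1889
                jitter += (abs(transit - prev) - jitter) / 16.0
            prev = transit
        return jitter

    # small variations around 50 ms give a jitter well below the 10 ms SLA
    assert smoothed_jitter([50.0, 52.0, 49.0, 51.0, 50.0]) <= 10.0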
diff --git a/docs/userguide/opnfv_yardstick_tc012.rst b/docs/userguide/opnfv_yardstick_tc012.rst
index ffce06eb9..b56e829f5 100644
--- a/docs/userguide/opnfv_yardstick_tc012.rst
+++ b/docs/userguide/opnfv_yardstick_tc012.rst
@@ -7,29 +7,60 @@
Yardstick Test Case Description TC012
*************************************
-.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html
+.. _bw_mem: http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html
+-----------------------------------------------------------------------------+
|Memory Bandwidth |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC012_Memory Bandwidth |
+|test case id | OPNFV_YARDSTICK_TC012_MEMORY BANDWIDTH |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Megabyte per second (MBps) |
+|metric | Memory read/write bandwidth (MBps) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Measure the rate at which data can be read from and written |
-| | to the memory (this includes all levels of memory). |
+|test purpose | The purpose of TC012 is to evaluate the IaaS compute |
+| | performance with regards to memory throughput. |
+| | It measures the rate at which data can be read from and |
+| | written to the memory (this includes all levels of memory). |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | LMbench |
+| | |
+| | LMbench is a suite of operating system microbenchmarks. |
+| | This test uses the bw_mem tool from that suite, including: |
+| | * Cached file read |
+| | * Memory copy (bcopy) |
+| | * Memory read |
+| | * Memory write |
+| | * Pipe |
+| | * TCP |
+| | |
+| | (LMbench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with LMbench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | LMbench bw_mem benchmark allocates twice the specified |
+|description | amount of memory, zeros it, and then times the copying of |
+| | the first half to the second half. The benchmark is invoked |
+| | in a host VM on a compute blade. Results are reported in |
+| | megabytes moved per second. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc012.yaml |
| | |
| | * SLA (optional): 15000 (MBps) min_bw: The minimum amount of |
| | memory bandwidth that is accepted. |
-| | * Size: 10 240 kB - test allocates twice that size (20 480kB)|
-| | zeros it and then measures the time it takes to copy from |
-| | one side to another. |
+| | * Size: 10 240 kB - test allocates twice that size |
+| | (20 480 kB), zeros it and then measures the time it takes |
+| | to copy from one side to another. |
| | * Benchmark: rdwr - measures the time to read data into |
| | memory and then write data to the same location. |
| | * Warmup: 0 - the number of iterations to perform before |
@@ -38,20 +69,13 @@ Yardstick Test Case Description TC012
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
| | |
-+--------------+--------------------------------------------------------------+
-|test tool | Lmbench |
-| | |
-| | Lmbench is a suite of operating system microbenchmarks. This |
-| | test uses bw_mem tool from that suite. |
-| | Lmbench is not always part of a Linux distribution, hence it |
-| | needs to be installed in the test image. |
-| | |
-+--------------+--------------------------------------------------------------+
-|references | man-pages_ |
-| | |
-| | McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
-| | for Performance Analysis." USENIX annual technical |
-| | conference. 1996. |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably higher bandwidth is expected. |
+| | However, to cover most configurations, both baremetal and |
+| | fully virtualized ones, this value should be possible to |
+| | achieve and acceptable for black box testing. |
+| | Many heavy IO applications start to suffer badly if the |
+| | read/write bandwidths are lower than this. |
| | |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different: |
@@ -62,7 +86,19 @@ Yardstick Test Case Description TC012
| | * number of warmup iterations; |
| | * iterations and intervals. |
| | |
-| | There are default values for each above-mentioned option. |
+| | Default values exist. |
+| | |
+| | SLA (optional) : min_bandwidth: The minimum memory bandwidth |
+| | that is accepted. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | LMbench bw_mem_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
@@ -74,10 +110,23 @@ Yardstick Test Case Description TC012
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The host is installed as client. Lmbench's bw_mem tool is |
-| | invoked and logs are produced and stored. |
+|step 1 | A host VM with LMbench installed is booted. |
| | |
-| | Result: logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected to the host VM via ssh. The |
+| | "lmbench_bandwidth_benchmark" bash script is copied from the |
+| | Jump Host to the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | 'lmbench_bandwidth_benchmark' script is invoked. LMbench's |
+| | bw_mem benchmark starts to measure memory read/write |
+| | bandwidth. The bandwidth results are recorded and checked |
+| | against the SLA (a sketch of this check follows this test |
+| | case table). Logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory bandwidth is below the SLA |
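A hedged Python sketch of the SLA check performed in TC012 step 3 above,
invoking bw_mem directly rather than through the benchmark script (the
flags and the "<size in MB> <MBps>" stderr output format are taken from
the bw_mem man page; the SLA value is the 15000 MBps example above)::

    import subprocess

    SLA_MIN_BW_MBPS = 15000  # example SLA value from the table above

    def bw_mem_mbps(size="10240k", benchmark="rdwr", warmup=0, reps=11):
        # bw_mem reports "<size in MB> <bandwidth in MBps>" on stderr
        cmd = ["bw_mem", "-W", str(warmup), "-N", str(reps),
               size, benchmark]
        result = subprocess.run(cmd, capture_output=True, text=True,
                                check=True)
        _size_mb, mbps = (float(f) for f in result.stderr.split())
        return mbps

    bandwidth = bw_mem_mbps()
    assert bandwidth >= SLA_MIN_BW_MBPS, "SLA violated: %s MBps" % bandwidth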
diff --git a/docs/userguide/opnfv_yardstick_tc014.rst b/docs/userguide/opnfv_yardstick_tc014.rst
index 27d390ac6..1b0d7831a 100644
--- a/docs/userguide/opnfv_yardstick_tc014.rst
+++ b/docs/userguide/opnfv_yardstick_tc014.rst
@@ -13,18 +13,51 @@ Yardstick Test Case Description TC014
|Processing speed |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC014_Processing speed |
+|test case id | OPNFV_YARDSTICK_TC014_PROCESSING SPEED |
| | |
+--------------+--------------------------------------------------------------+
-|metric | score of single cpu running, score of parallel running |
+|metric | score of single cpu running, |
+| | score of parallel running |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS processing speed with regards to score |
-| | of single cpu running and parallel running |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs and similar shall be stored for comparison reasons |
-| | and product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC014 is to evaluate the IaaS compute |
+| | performance with regards to CPU processing speed. |
+| | It measures the scores of single cpu running and parallel |
+| | running. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | UnixBench |
+| | |
+| | UnixBench is a widely used CPU benchmarking software tool. |
+| | It can measure the performance of bash scripts and of CPUs |
+| | in single-threaded and multi-threaded operation, as well as |
+| | the performance of parallel tasks. Specific disk IO tests |
+| | for small and large files are also performed. It can be |
+| | used to benchmark both dedicated Linux servers and Linux |
+| | VPS servers running CentOS, Debian, Ubuntu, Fedora and |
+| | other distros. |
+| | |
+| | (UnixBench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with UnixBench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | UnixBench runs system benchmarks in a host VM on a |
+|description | compute blade, getting information on the CPUs in the |
+| | system. If the system has more than one CPU, the tests will |
+| | be run twice -- once with a single copy of each test running |
+| | at once, and once with N copies, where N is the number of |
+| | CPUs. |
+| | |
+| | UnixBench will process a set of results from a single test |
+| | by averaging the individual pass results into a single |
+| | final value. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc014.yaml |
@@ -33,15 +66,23 @@ Yardstick Test Case Description TC014
| | test_type: dhry2reg, whetstone and so on |
| | |
| | For SLA with single_score and parallel_score, both can be |
-| | set by user, default is NA |
+| | set by user, default is NA. |
| | |
+--------------+--------------------------------------------------------------+
-|test tool | unixbench |
+|applicability | Test can be configured with different: |
| | |
-| | (unixbench is not always part of a Linux distribution, hence |
-| | it needs to be installed. As an example see the |
-| | /yardstick/tools/ directory for how to generate a Linux |
-| | image with unixbench included.) |
+| | * test types; |
+| | * dhry2reg; |
+| | * whetstone. |
+| | |
+| | Default values exist. |
+| | |
+| | SLA (optional) : min_score: The minimum UnixBench score that |
+| | is accepted. |
+| | |
++--------------+--------------------------------------------------------------+
+|usability | This test case is one of Yardstick's generic test cases. |
+| | Thus it is runnable on most of the scenarios. |
| | |
+--------------+--------------------------------------------------------------+
|references | unixbench_ |
@@ -49,10 +90,6 @@ Yardstick Test Case Description TC014
| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different test types, dhry2reg, |
-| | whetstone and so on. |
-| | |
-+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with unixbench included in it. |
| | |
@@ -62,12 +99,27 @@ Yardstick Test Case Description TC014
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as a client. unixbench is |
-| | invoked and logs are produced and stored. |
+|step 1 | A host VM with UnixBench installed is booted. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected to the host VM via ssh. The |
+| | "unixbench_benchmark" bash script is copied from the Jump |
+| | Host to the host VM via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | UnixBench is invoked. All the tests are executed using the |
+| | "Run" script in the top-level UnixBench directory. |
+| | The "Run" script runs a standard "index" test and saves |
+| | the report in the "results" directory. The report is then |
+| | processed by "unixbench_benchmark" and checked against the |
+| | SLA (a sketch of this check follows this test case table). |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 4 | The host VM is deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
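A hedged sketch of the report processing in TC014 step 3 (the "System
Benchmarks Index Score" label is what the stock UnixBench "Run" report
prints; the UnixBench path and SLA numbers are placeholders, since the
SLA defaults to NA)::

    import re
    import subprocess

    SLA = {"single_score": 100.0, "parallel_score": 160.0}  # placeholders

    out = subprocess.run(["./Run", "dhry2reg", "whetstone"],
                         cwd="/opt/UnixBench", capture_output=True,
                         text=True, check=True).stdout
    # one index score for the single-copy run, a second for the N-copy run
    scores = [float(s) for s in
              re.findall(r"System Benchmarks Index Score.*?([\d.]+)\s*$",
                         out, re.MULTILINE)]
    assert scores[0] >= SLA["single_score"]
    if len(scores) > 1:
        assert scores[1] >= SLA["parallel_score"]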
diff --git a/docs/userguide/opnfv_yardstick_tc037.rst b/docs/userguide/opnfv_yardstick_tc037.rst
index 3ed1fa529..5a6e1eaae 100644
--- a/docs/userguide/opnfv_yardstick_tc037.rst
+++ b/docs/userguide/opnfv_yardstick_tc037.rst
@@ -7,84 +7,128 @@
Yardstick Test Case Description TC037
*************************************
-.. _cirros: https://download.cirros-cloud.net
+.. _cirros-image: https://download.cirros-cloud.net
+.. _Ping: https://linux.die.net/man/8/ping
.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+.. _mpstat: http://www.linuxcommand.org/man_pages/mpstat1.html
+-----------------------------------------------------------------------------+
|Latency, CPU Load, Throughput, Packet Loss |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC037_Latency,CPU Load,Throughput,Packet Loss|
+|test case id | OPNFV_YARDSTICK_TC037_LATENCY,CPU LOAD,THROUGHPUT, |
+| | PACKET LOSS |
| | |
+--------------+--------------------------------------------------------------+
-|metric | Number of flows, latency, throughput, CPU load, packet loss |
+|metric | Number of flows, latency, throughput, packet loss, |
+| | CPU utilization percentage, CPU interrupt per second |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To evaluate the IaaS network performance with regards to |
-| | flows and throughput, such as if and how different amounts |
-| | of flows matter for the throughput between hosts on different|
-| | compute blades. Typically e.g. the performance of a vSwitch |
-| | depends on the number of flows running through it. Also |
-| | performance of other equipment or entities can depend |
-| | on the number of flows or the packet sizes used. |
-| | The purpose is also to be able to spot trends. Test results, |
-| | graphs ans similar shall be stored for comparison reasons and|
-| | product evolution understanding between different OPNFV |
-| | versions and/or configurations. |
+|test purpose | The purpose of TC037 is to evaluate the IaaS compute |
+| | capacity and network performance with regards to CPU |
+| | utilization, packet flows and network throughput, such as if |
+| | and how different amounts of flows matter for the throughput |
+| | between hosts on different compute blades, and the CPU load |
+| | variation. |
+| | |
+| | Typically e.g. the performance of a vSwitch depends on the |
+| | number of flows running through it. Also performance of |
+| | other equipment or entities can depend on the number of |
+| | flows or the packet sizes used. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Ping, Pktgen, mpstat |
+| | |
+| | Ping is a computer network administration software utility |
+| | used to test the reachability of a host on an Internet |
+| | Protocol (IP) network. It measures the round-trip time for |
+| | packets sent from the originating host to a destination |
+| | computer that are echoed back to the source. |
+| | |
+| | Linux packet generator is a tool to generate packets at very |
+| | high speed in the kernel. pktgen is mainly used to test |
+| | network drivers and LAN equipment. pktgen supports multi |
+| | threading and can generate UDP packets with random MAC |
+| | addresses, IP addresses and port numbers, using multiple |
+| | CPU processors on different PCI buses (PCI, PCIe). With |
+| | Gigabit Ethernet hardware the transmit data rate can exceed |
+| | 10GBit/s, depending on CPU processing speed, memory delay |
+| | and PCI bus speed, which satisfies most NIC test |
+| | requirements. |
+| | |
+| | The mpstat command writes to standard output activities for |
+| | each available processor, processor 0 being the first one. |
+| | Global average activities among all processors are also |
+| | reported. The mpstat command can be used both on SMP and UP |
+| | machines, but in the latter, only global average activities |
+| | will be printed. |
+| | |
+| | (Ping is normally part of any Linux distribution, hence it |
+| | doesn't need to be installed. It is also part of the |
+| | Yardstick Docker image. |
+| | For example, a Cirros image can be downloaded from |
+| | cirros-image_; it includes ping. |
+| | |
+| | Pktgen and mpstat are not always part of a Linux |
+| | distribution, hence they need to be installed. They are |
+| | part of the Yardstick Docker image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen and mpstat included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|test | This test case uses Pktgen to generate packet flow between |
+|description | two hosts for simulating network workloads on the SUT. |
+| | Ping packets (ICMP protocol's mandatory ECHO_REQUEST |
+| | datagram) are sent from a host VM to the target VM(s) to |
+| | elicit ICMP ECHO_RESPONSE, meanwhile CPU activities are |
+| | monitored by mpstat. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc037.yaml |
| | |
-| | Packet size: 64 bytes |
+| | Packet size is set to 64 bytes. |
| | Number of ports: 1, 10, 50, 100, 300, 500, 750 and 1000. |
| | The amount configured ports map from 2 up to 1001000 flows, |
| | respectively. Each port amount is run two times, for 20 |
| | seconds each. Then the next port_amount is run, and so on. |
| | During the test CPU load on both client and server, and the |
| | network latency between the client and server are measured. |
-| | The client and server are distributed on different HW. |
+| | The client and server are distributed on different hardware. |
+| | mpstat monitoring interval is set to 1 second. |
+| | ping packet size is set to 100 bytes. |
| | For SLA max_ppm is set to 1000. |
| | |
+--------------+--------------------------------------------------------------+
-|test tool | pktgen |
+|applicability | Test can be configured with different: |
| | |
-| | (Pktgen is not always part of a Linux distribution, hence it |
-| | needs to be installed. It is part of the Yardstick Glance |
-| | image. |
-| | As an example see the /yardstick/tools/ directory for how |
-| | to generate a Linux image with pktgen included.) |
-| | |
-| | ping |
-| | |
-| | Ping is normally part of any Linux distribution, hence it |
-| | doesn't need to be installed. It is also part of the |
-| | Yardstick Glance image. |
-| | (For example also a cirros_ image can be downloaded, it |
-| | includes ping) |
+| | * pktgen packet sizes; |
+| | * amount of flows; |
+| | * test duration; |
+| | * ping packet size; |
+| | * mpstat monitor interval. |
| | |
-| | mpstat |
+| | Default values exist. |
| | |
-| | (Mpstat is not always part of a Linux distribution, hence it |
-| | needs to be installed. It is part of the Yardstick Glance |
-| | image. |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that it is acceptable to lose (i.e. not |
+| | receive). A sketch of this check follows this test case |
+| | table. |
| | |
+--------------+--------------------------------------------------------------+
-|references | Ping and Mpstat man pages |
+|references | Ping_ |
+| | |
+| | mpstat_ |
| | |
| | pktgen_ |
| | |
| | ETSI-NFV-TST001 |
| | |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different packet sizes, amount |
-| | of flows and test duration. Default values exist. |
-| | |
-| | SLA (optional): max_ppm: The number of packets per million |
-| | packets sent that are acceptable to loose, not received. |
-| | |
-+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
-|conditions | with pktgen included in it. |
+|conditions | with pktgen and mpstat included in it. |
| | |
| | No POD specific requirements have been identified. |
| | |
@@ -92,12 +136,31 @@ Yardstick Test Case Description TC037
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The hosts are installed, as server and client. pktgen is |
-| | invoked and logs are produced and stored. |
+|step 1 | Two host VMs are booted, as server and client. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick is connected to the server VM via ssh. The |
+| | 'pktgen_benchmark' and 'ping_benchmark' bash scripts are |
+| | copied from the Jump Host to the server VM via the ssh |
+| | tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | An IP table is set up on the server to monitor received |
+| | packets. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | pktgen is invoked to generate packet flow between the |
+| | server and client for simulating network workloads on the |
+| | SUT. Ping is invoked; ping packets are sent from the server |
+| | VM to the client VM. mpstat is invoked, recording the |
+| | activities of each available processor. Results are |
+| | processed and checked against the SLA. Logs are produced |
+| | and stored. |
| | |
| | Result: Logs are stored. |
| | |
+--------------+--------------------------------------------------------------+
+|step 5 | The two host VMs are deleted. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
| | |
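The max_ppm verdict in the TC037 SLA row reduces to simple counter
arithmetic; a minimal sketch (the sent and received counters themselves
would come from pktgen on the sending side and from the iptables rule
set up in step 3)::

    def packets_per_million_lost(sent, received):
        """ppm of the sent packets that were not received (max_ppm SLA)."""
        return (sent - received) * 1000000 // sent

    # example: 999100 of 1000000 packets received -> 900 ppm lost,
    # which passes the max_ppm = 1000 SLA used in this test case
    assert packets_per_million_lost(1000000, 999100) <= 1000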
diff --git a/docs/userguide/opnfv_yardstick_tc043.rst b/docs/userguide/opnfv_yardstick_tc043.rst
index 59d7c6993..a873696dc 100644
--- a/docs/userguide/opnfv_yardstick_tc043.rst
+++ b/docs/userguide/opnfv_yardstick_tc043.rst
@@ -8,21 +8,40 @@ Yardstick Test Case Description TC043
*************************************
.. _cirros-image: https://download.cirros-cloud.net
+.. _Ping: https://linux.die.net/man/8/ping
+-----------------------------------------------------------------------------+
|Network Latency Between NFVI Nodes |
| |
+--------------+--------------------------------------------------------------+
-|test case id | OPNFV_YARDSTICK_TC043_Latency_between_NFVI_nodes |
-| | measurements |
+|test case id | OPNFV_YARDSTICK_TC043_LATENCY_BETWEEN_NFVI_NODES |
| | |
+--------------+--------------------------------------------------------------+
-|metric | RTT, Round Trip Time |
+|metric | RTT (Round Trip Time) |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | To do a basic verification that network latency is within |
-| | acceptable boundaries when packets travel between different |
-| | nodes. |
+|test purpose | The purpose of TC043 is to do a basic verification that |
+| | network latency is within acceptable boundaries when packets |
+| | travel between different NFVI nodes. |
+| | |
+| | The purpose is also to be able to spot the trends. |
+| | Test results, graphs and similar shall be stored for |
+| | comparison reasons and product evolution understanding |
+| | between different OPNFV versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | ping |
+| | |
+| | Ping is a computer network administration software utility |
+| | used to test the reachability of a host on an Internet |
+| | Protocol (IP) network. It measures the round-trip time for |
+| | packets sent from the originating host to a destination |
+| | computer that are echoed back to the source. |
+| | |
++--------------+--------------------------------------------------------------+
+|test topology | Ping packets (ICMP protocol's mandatory ECHO_REQUEST |
+| | datagram) are sent from host node to target node to elicit |
+| | ICMP ECHO_RESPONSE. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc043.yaml |
@@ -31,32 +50,33 @@ Yardstick Test Case Description TC043
| | One ping each 10 seconds. SLA RTT is set to maximum 10 ms. |
| | |
+--------------+--------------------------------------------------------------+
-|test tool | ping |
-| | |
-| | Ping is normally part of any Linux distribution, hence it |
-| | doesn't need to be installed. It is also part of the |
-| | Yardstick Docker image. |
+|applicability | This test case can be configured with different: |
| | |
-+--------------+--------------------------------------------------------------+
-|references | Ping man page |
+| | * packet sizes; |
+| | * burst sizes; |
+| | * ping intervals; |
+| | * test durations; |
+| | * test iterations. |
| | |
-| | ETSI-NFV-TST001 |
+| | Default values exist. |
| | |
-+--------------+--------------------------------------------------------------+
-|applicability | Test case can be configured with different packet sizes, |
-| | burst sizes, ping intervals and test duration. |
| | SLA is optional. The SLA in this test case serves as an |
-| | example. Considerably lower RTT is expected, and |
-| | also normal to achieve in balanced L2 environments. However, |
-| | to cover most configurations, both bare metal and fully |
-| | virtualized ones, this value should be possible to achieve |
-| | and acceptable for black box testing. Many real time |
+| | example. Considerably lower RTT is expected, and also normal |
+| | to achieve in balanced L2 environments. However, to cover |
+| | most configurations, both bare metal and fully virtualized |
+| | ones, this value should be possible to achieve and |
+| | acceptable for black box testing. Many real time |
| | applications start to suffer badly if the RTT time is higher |
| | than this. Some may suffer bad also close to this RTT, while |
| | others may not suffer at all. It is a compromise that may |
| | have to be tuned for different configuration purposes. |
| | |
+--------------+--------------------------------------------------------------+
+|references | Ping_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
|pre_test | Each pod node must have ping included in it. |
|conditions | |
| | |
@@ -64,8 +84,14 @@ Yardstick Test Case Description TC043
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The pod is available. Two nodes as server and client. |
-| | Ping is invoked and logs are produced and stored. |
+|step 1 | Yardstick is connected to the NFVI node via ssh. The |
+| | 'ping_benchmark' bash script is copied from the Jump Host |
+| | to the NFVI node via the ssh tunnel. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Ping is invoked. Ping packets are sent from the server node |
+| | to the client node. RTT results are calculated and checked |
+| | against the SLA (a sketch of this check follows this test |
+| | case table). Logs are produced and stored. |
| | |
| | Result: Logs are stored. |
| | |
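A minimal sketch of the TC043 RTT check in step 2, assuming standard
iputils ping output ("time=<rtt> ms" per reply); the target address is a
placeholder and the SLA value is the 10 ms maximum from the
configuration row::

    import re
    import subprocess

    SLA_MAX_RTT_MS = 10.0

    out = subprocess.run(["ping", "-c", "5", "-s", "100", "192.0.2.10"],
                         capture_output=True, text=True,
                         check=True).stdout
    rtts = [float(t) for t in re.findall(r"time=([\d.]+) ms", out)]
    assert rtts and max(rtts) <= SLA_MAX_RTT_MS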
diff --git a/etc/yardstick/yardstick.conf.sample b/etc/yardstick/yardstick.conf.sample
index f4eff05d3..70cf71ade 100644
--- a/etc/yardstick/yardstick.conf.sample
+++ b/etc/yardstick/yardstick.conf.sample
@@ -8,22 +8,21 @@
##############################################################################
[DEFAULT]
-# verbose = True
-# debug = True
-# dispatcher = http
+debug = False
+dispatcher = http
[dispatcher_http]
-# timeout = 5
-# target = http://127.0.0.1:8000/results
+timeout = 5
+target = http://127.0.0.1:8000/results
[dispatcher_file]
-# file_path = /tmp/yardstick.out
-# max_bytes = 0
-# backup_count = 0
+file_path = /tmp/yardstick.out
+max_bytes = 0
+backup_count = 0
[dispatcher_influxdb]
-# timeout = 5
-# target = http://127.0.0.1:8086
-# db_name = yardstick
-# username = root
-# password = root
+timeout = 5
+target = http://127.0.0.1:8086
+db_name = yardstick
+username = root
+password = root
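With the defaults now written out explicitly, the sample above can be
read directly with the standard library; a hedged sketch (section and
option names come from the sample, but the parsing code is illustrative,
not Yardstick's own)::

    import configparser

    conf = configparser.ConfigParser()
    conf.read("/etc/yardstick/yardstick.conf")

    dispatcher = conf.get("DEFAULT", "dispatcher", fallback="file")
    if dispatcher == "http":
        target = conf.get("dispatcher_http", "target",
                          fallback="http://127.0.0.1:8000/results")
    elif dispatcher == "influxdb":
        target = conf.get("dispatcher_influxdb", "target",
                          fallback="http://127.0.0.1:8086")
    else:
        target = conf.get("dispatcher_file", "file_path",
                          fallback="/tmp/yardstick.out")
    print("dispatching results via", dispatcher, "to", target)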
diff --git a/fuel-plugin/deployment_scripts/install.sh b/fuel-plugin/deployment_scripts/install.sh
index 6882f0be2..18f4fc2c2 100755
--- a/fuel-plugin/deployment_scripts/install.sh
+++ b/fuel-plugin/deployment_scripts/install.sh
@@ -27,4 +27,7 @@ cd $BIN_HOME
curl http://$HOST:8080/plugins/fuel-plugin-yardstick-1.0/repositories/ubuntu/yardstick.tar.gz | tar xzvf -
-python setup.py develop
+# install dependencies
+pip install -r requirements.txt
+
+python setup.py install
diff --git a/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp b/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
index 82dfff387..e69371141 100644
--- a/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
+++ b/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
@@ -7,7 +7,7 @@ $admin_user = $access_hash['user']
$admin_password = $access_hash['password']
$region = hiera('region', 'RegionOne')
-$auth_api_version = 'v2.0'
+$auth_api_version = ''
$service_endpoint = hiera('service_endpoint', $management_vip)
$ssl_hash = hiera_hash('use_ssl', {})
$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http')
diff --git a/plugin/storperf.yaml b/plugin/storperf.yaml
index d08e26eb6..074a82067 100644
--- a/plugin/storperf.yaml
+++ b/plugin/storperf.yaml
@@ -10,4 +10,5 @@ plugins:
deployment:
ip: 192.168.23.2
user: root
+  # Remove 'password' if logging into the deployment location with a key file
password: root
diff --git a/requirements.txt b/requirements.txt
index 6b4edf3f0..9c037ed79 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -84,3 +84,4 @@ pyroute2==0.4.10
docker-py==1.10.6
flasgger==0.5.13
flask-restful-swagger==0.19
+SQLAlchemy==1.1.4
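SQLAlchemy is pulled in for the new api/database package listed at the
top of this change. A minimal declarative-model sketch compatible with
the pinned 1.1 release (the table and column names here are
illustrative, not the actual api/database/models.py)::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Task(Base):
        # hypothetical model, for illustration only
        __tablename__ = 'tasks'
        id = Column(Integer, primary_key=True)
        task_id = Column(String(30))
        status = Column(Integer)

    engine = create_engine('sqlite:////tmp/yardstick.db')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()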
diff --git a/samples/ping_load.yaml b/samples/ping_load.yaml
new file mode 100644
index 000000000..370916822
--- /dev/null
+++ b/samples/ping_load.yaml
@@ -0,0 +1,65 @@
+---
+# Sample benchmark task config file
+# Three scenarios run in parallel, pinging one target VM.
+# Multiple contexts are used to specify the host and target VMs.
+
+schema: "yardstick:task:0.1"
+run_in_parallel: true
+
+scenarios:
+{% for host in ['athena.demo1', 'apollo.demo1', 'kratos.demo1'] %}
+-
+ type: Ping
+ options:
+ packetsize: 100
+ host: {{host}}
+ target: hades.demo2
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+ sla:
+ max_rtt: 10
+ action: assert
+{% endfor %}
+
+contexts:
+-
+ name: demo1
+ image: cirros-0.3.3
+ flavor: yardstick-flavor
+ user: cirros
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ apollo:
+ floating_ip: true
+ placement: "pgrp1"
+ kratos:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+-
+ name: demo2
+ image: cirros-0.3.3
+ flavor: yardstick-flavor
+ user: cirros
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ hades:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
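The scenarios block above is a Jinja2 template: the {% for %} loop
expands into three Ping scenarios before the task schema is parsed. A
standalone sketch of that expansion, using jinja2 and PyYAML directly
and independent of Yardstick's own template handling::

    import jinja2
    import yaml

    with open("samples/ping_load.yaml") as f:
        rendered = jinja2.Template(f.read()).render()
    task = yaml.safe_load(rendered)

    assert len(task["scenarios"]) == 3
    print([s["host"] for s in task["scenarios"]])
    # ['athena.demo1', 'apollo.demo1', 'kratos.demo1']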
diff --git a/setup.py b/setup.py
index 54595b648..0100b4635 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,8 @@ setup(
'resources/scripts/remove/*.bash'
],
'etc': [
- 'yardstick/nodes/*/*.yaml'
+ 'yardstick/nodes/*/*.yaml',
+ 'yardstick/*.sample'
],
'tests': [
'opnfv/*/*.yaml',
diff --git a/tests/ci/clean_images.sh b/tests/ci/clean_images.sh
index b1942160b..fa4a54df6 100755
--- a/tests/ci/clean_images.sh
+++ b/tests/ci/clean_images.sh
@@ -15,15 +15,22 @@ cleanup()
echo
echo "========== Cleanup =========="
- if ! glance image-list; then
+ if ! openstack image list; then
return
fi
- for image in $(glance image-list | grep -e cirros-0.3.3 -e yardstick-image -e Ubuntu-14.04 \
+ for image in $(openstack image list | grep -e cirros-0.3.3 -e yardstick-image -e Ubuntu-14.04 \
-e yardstick-vivid-kernel | awk '{print $2}'); do
echo "Deleting image $image..."
- glance image-delete $image || true
+ openstack image delete $image || true
done
- nova flavor-delete yardstick-flavor &> /dev/null || true
+ openstack flavor delete yardstick-flavor &> /dev/null || true
}
+
+main()
+{
+ cleanup
+}
+
+main
diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 405d72076..e1d717749 100755
--- a/tests/ci/load_images.sh
+++ b/tests/ci/load_images.sh
@@ -75,11 +75,12 @@ load_yardstick_image()
if [ ! -f $VIVID_KERNEL ]; then
tar zxf $VIVID_IMAGE $(basename $VIVID_KERNEL)
fi
- create_vivid_kernel=$(glance --os-image-api-version 1 image-create \
- --name yardstick-vivid-kernel \
- --is-public true --disk-format qcow2 \
+ create_vivid_kernel=$(openstack image create \
+ --public \
+ --disk-format qcow2 \
--container-format bare \
- --file $VIVID_KERNEL)
+ --file $VIVID_KERNEL \
+ yardstick-vivid-kernel)
GLANCE_KERNEL_ID=$(echo "$create_vivid_kernel" | grep " id " | awk '{print $(NF-1)}')
if [ -z "$GLANCE_KERNEL_ID" ]; then
@@ -101,19 +102,21 @@ load_yardstick_image()
fi
if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
- output=$(eval glance --os-image-api-version 1 image-create \
- --name yardstick-image \
- --is-public true --disk-format root-tar \
+ output=$(eval openstack image create \
+ --public \
+ --disk-format root-tar \
--container-format bare \
$EXTRA_PARAMS \
- --file $RAW_IMAGE)
+ --file $RAW_IMAGE \
+ yardstick-image)
else
- output=$(eval glance --os-image-api-version 1 image-create \
- --name yardstick-image \
- --is-public true --disk-format qcow2 \
+ output=$(eval openstack image create \
+ --public \
+ --disk-format qcow2 \
--container-format bare \
$EXTRA_PARAMS \
- --file $QCOW_IMAGE)
+ --file $QCOW_IMAGE \
+ yardstick-image)
fi
echo "$output"
@@ -147,12 +150,12 @@ load_cirros_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- output=$(glance image-create \
- --name cirros-0.3.3 \
+ output=$(openstack image create \
--disk-format qcow2 \
--container-format bare \
$EXTRA_PARAMS \
- --file $image_file)
+ --file $image_file \
+ cirros-0.3.3)
echo "$output"
CIRROS_IMAGE_ID=$(echo "$output" | grep " id " | awk '{print $(NF-1)}')
@@ -177,12 +180,12 @@ load_ubuntu_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- output=$(glance image-create \
- --name Ubuntu-14.04 \
+ output=$(openstack image create \
--disk-format qcow2 \
--container-format bare \
$EXTRA_PARAMS \
- --file $ubuntu_image_file)
+ --file $ubuntu_image_file \
+ Ubuntu-14.04)
echo "$output"
UBUNTU_IMAGE_ID=$(echo "$output" | grep " id " | awk '{print $(NF-1)}')
@@ -197,18 +200,18 @@ load_ubuntu_image()
create_nova_flavor()
{
- if ! nova flavor-list | grep -q yardstick-flavor; then
+ if ! openstack flavor list | grep -q yardstick-flavor; then
echo
echo "========== Create nova flavor =========="
# Create the nova flavor used by some sample test cases
- nova flavor-create yardstick-flavor 100 512 3 1
+ openstack flavor create --id 100 --ram 512 --disk 3 --vcpus 1 yardstick-flavor
# DPDK-enabled OVS requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-ovs-"* ]]; then
- nova flavor-key yardstick-flavor set hw:mem_page_size=large
+ openstack flavor set --property hw:mem_page_size=large yardstick-flavor
fi
# VPP requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-fdio-"* ]]; then
- nova flavor-key yardstick-flavor set hw:mem_page_size=large
+ openstack flavor set --property hw:mem_page_size=large yardstick-flavor
fi
fi
}
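The id extraction used above, grep " id " | awk '{print $(NF-1)}',
depends on the table layout of the openstack client output. A hedged
Python equivalent that instead asks the client for the bare id via its
"-f value -c id" formatter options (the image name and file path are
placeholders)::

    import subprocess

    def create_image(name, disk_format, image_file):
        # "-f value -c id" prints just the id, avoiding table parsing
        cmd = ["openstack", "image", "create", "--public",
               "--disk-format", disk_format, "--container-format", "bare",
               "--file", image_file, "-f", "value", "-c", "id", name]
        return subprocess.run(cmd, capture_output=True, text=True,
                              check=True).stdout.strip()

    image_id = create_image("yardstick-image", "qcow2",
                            "/tmp/yardstick-image.img")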
diff --git a/tests/ci/prepare_storperf_admin-rc.sh b/tests/ci/prepare_storperf_admin-rc.sh
index 0401719ff..b3dc2e58e 100755
--- a/tests/ci/prepare_storperf_admin-rc.sh
+++ b/tests/ci/prepare_storperf_admin-rc.sh
@@ -9,14 +9,15 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+# Prepare storperf_admin-rc for StorPerf.
+
AUTH_URL=${OS_AUTH_URL}
USERNAME=${OS_USERNAME:-admin}
PASSWORD=${OS_PASSWORD:-console}
TENANT_NAME=${OS_TENANT_NAME:-admin}
VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-2}
PROJECT_NAME=${OS_PROJECT_NAME:-$TENANT_NAME}
-TENANT_ID=`keystone tenant-get admin|grep 'id'|awk -F '|' '{print $3}'|sed -e 's/^[[:space:]]*//'`
-
+TENANT_ID=`openstack project show admin|grep '\bid\b' |awk -F '|' '{print $3}'|sed -e 's/^[[:space:]]*//'`
rm -f ~/storperf_admin-rc
touch ~/storperf_admin-rc
diff --git a/tests/ci/scp_storperf_admin-rc.sh b/tests/ci/scp_storperf_admin-rc.sh
index af2885b01..7c3896d88 100644
--- a/tests/ci/scp_storperf_admin-rc.sh
+++ b/tests/ci/scp_storperf_admin-rc.sh
@@ -1,5 +1,16 @@
#!/bin/bash
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Copy storperf_admin-rc to deployment location.
+
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
sshpass -p root scp 2>/dev/null $ssh_options ~/storperf_admin-rc \
root@192.168.200.1:/root/ &> /dev/null
diff --git a/tests/ci/yardstick-verify b/tests/ci/yardstick-verify
index 7644c96c4..f9d98a4da 100755
--- a/tests/ci/yardstick-verify
+++ b/tests/ci/yardstick-verify
@@ -301,8 +301,8 @@ main()
# check OpenStack services
echo "Checking OpenStack services:"
- for cmd in "glance image-list" "nova list" "heat stack-list"; do
- echo " checking ${cmd/%\ */} ..."
+ for cmd in "openstack image list" "openstack server list" "openstack stack list"; do
+ echo " checking ${cmd} ..."
if ! $cmd >/dev/null; then
echo "error: command \"$cmd\" failed"
exit 1
@@ -311,7 +311,7 @@ main()
echo
echo "Checking for External network:"
- for net in $(neutron net-list --router:external True -c name -f value); do
+ for net in $(openstack network list --external -c Name -f value); do
echo " external network: $net"
done
@@ -320,8 +320,6 @@ main()
source $YARDSTICK_REPO_DIR/tests/ci/clean_images.sh
- cleanup
-
trap "error_exit" EXIT SIGTERM
source $YARDSTICK_REPO_DIR/tests/ci/load_images.sh
diff --git a/tests/opnfv/test_suites/opnfv_components.yaml b/tests/opnfv/test_suites/opnfv_components.yaml
new file mode 100644
index 000000000..ff4923e03
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_components.yaml
@@ -0,0 +1,16 @@
+---
+# Yardstick components task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_yardstick-components"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc074.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+ task_args:
+ huawei-pod1: '{"public_network": "ext-net",
+ "StorPerf_ip": "192.168.200.1"}'
diff --git a/tests/opnfv/test_suites/opnfv_features.yaml b/tests/opnfv/test_suites/opnfv_features.yaml
new file mode 100644
index 000000000..3621f1367
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_features.yaml
@@ -0,0 +1,52 @@
+---
+# Yardstick features task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_yardstick-features"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc027.yaml
+ constraint:
+ installer: compass,fuel
+ pod: huawei-pod1,lf-pod2
+ task_args:
+ huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+ lf-pod2: '{"pod_info": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
+-
+ file_name: opnfv_yardstick_tc045.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc046.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc047.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc048.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc049.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc050.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc051.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc052.yaml
+ constraint:
+ installer: fuel
+-
+ file_name: opnfv_yardstick_tc053.yaml
+ constraint:
+ installer: fuel
diff --git a/tests/opnfv/test_suites/opnfv_performance.yaml b/tests/opnfv/test_suites/opnfv_performance.yaml
new file mode 100644
index 000000000..71b1e2ef9
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_performance.yaml
@@ -0,0 +1,62 @@
+---
+# Yardstick performance task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_yardstick-performance"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc010.yaml
+-
+ file_name: opnfv_yardstick_tc011.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc014.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
+-
+ file_name: opnfv_yardstick_tc043.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+ task_args:
+ huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+ "host": "node4.LF","target": "node5.LF"}'
+-
+ file_name: opnfv_yardstick_tc055.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+ task_args:
+ huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+ "host": "node5.yardstick-TC055"}'
+-
+ file_name: opnfv_yardstick_tc063.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+ task_args:
+ huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+ "host": "node5.yardstick-TC063"}'
+-
+ file_name: opnfv_yardstick_tc069.yaml
+-
+ file_name: opnfv_yardstick_tc070.yaml
+-
+ file_name: opnfv_yardstick_tc071.yaml
+-
+ file_name: opnfv_yardstick_tc072.yaml
+-
+ file_name: opnfv_yardstick_tc075.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+ task_args:
+ huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+ "host": "node1.LF"}'
diff --git a/tests/opnfv/test_suites/opnfv_smoke.yaml b/tests/opnfv/test_suites/opnfv_smoke.yaml
new file mode 100644
index 000000000..f773bec87
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_smoke.yaml
@@ -0,0 +1,14 @@
+---
+# Yardstick smoke task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_yardstick-smoke"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
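Across these suites, a test case is selected only when its constraint
matches the deployment. A simplified sketch of that filtering (the
INSTALLER_TYPE and NODE_NAME environment variable names are an
assumption, inferred from how the task parser is mocked in the unit
tests further below)::

    import os
    import yaml

    def runnable_cases(suite_file):
        with open(suite_file) as f:
            suite = yaml.safe_load(f)
        installer = os.environ.get("INSTALLER_TYPE")
        pod = os.environ.get("NODE_NAME")
        for case in suite["test_cases"]:
            constraint = case.get("constraint", {})
            if "installer" in constraint and \
                    installer not in constraint["installer"].split(","):
                continue
            if "pod" in constraint and \
                    pod not in constraint["pod"].split(","):
                continue
            yield suite["test_cases_dir"] + case["file_name"]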
diff --git a/tests/unit/cmd/commands/__init__.py b/tests/unit/benchmark/core/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/cmd/commands/__init__.py
+++ b/tests/unit/benchmark/core/__init__.py
diff --git a/tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml b/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
index 4933b93ae..4933b93ae 100644
--- a/tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml b/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
index f39df7346..f39df7346 100644
--- a/tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/test_plugin.py b/tests/unit/benchmark/core/test_plugin.py
index 2e823fdae..441116a25 100644
--- a/tests/unit/cmd/commands/test_plugin.py
+++ b/tests/unit/benchmark/core/test_plugin.py
@@ -9,12 +9,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.plugin
+# Unittest for yardstick.benchmark.core.plugin
import mock
import unittest
-from yardstick.cmd.commands import plugin
+from yardstick.benchmark.core import plugin
class Arg(object):
@@ -22,30 +22,30 @@ class Arg(object):
self.input_file = ('plugin/sample_config.yaml',)
-@mock.patch('yardstick.cmd.commands.plugin.ssh')
-class pluginCommandsTestCase(unittest.TestCase):
+@mock.patch('yardstick.benchmark.core.plugin.ssh')
+class pluginTestCase(unittest.TestCase):
def setUp(self):
self.result = {}
- def test_do_install(self, mock_ssh):
- p = plugin.PluginCommands()
+ def test_install(self, mock_ssh):
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
input_file = Arg()
- p.do_install(input_file)
+ p.install(input_file)
expected_result = {}
self.assertEqual(self.result, expected_result)
- def test_do_remove(self, mock_ssh):
- p = plugin.PluginCommands()
+ def test_remove(self, mock_ssh):
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
input_file = Arg()
- p.do_remove(input_file)
+ p.remove(input_file)
expected_result = {}
self.assertEqual(self.result, expected_result)
def test_install_setup_run(self, mock_ssh):
- p = plugin.PluginCommands()
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
plugins = {
"name": "sample"
@@ -64,7 +64,7 @@ class pluginCommandsTestCase(unittest.TestCase):
self.assertEqual(self.result, expected_result)
def test_remove_setup_run(self, mock_ssh):
- p = plugin.PluginCommands()
+ p = plugin.Plugin()
mock_ssh.SSH().execute.return_value = (0, '', '')
plugins = {
"name": "sample"
@@ -81,3 +81,11 @@ class pluginCommandsTestCase(unittest.TestCase):
p._run(plugin_name)
expected_result = {}
self.assertEqual(self.result, expected_result)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/cmd/commands/test_task.py b/tests/unit/benchmark/core/test_task.py
index 0177fd08a..463c43e1f 100644
--- a/tests/unit/cmd/commands/test_task.py
+++ b/tests/unit/benchmark/core/test_task.py
@@ -9,18 +9,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.task
+# Unittest for yardstick.benchmark.core.task
import os
import mock
import unittest
-from yardstick.cmd.commands import task
+from yardstick.benchmark.core import task
-class TaskCommandsTestCase(unittest.TestCase):
+class TaskTestCase(unittest.TestCase):
- @mock.patch('yardstick.cmd.commands.task.Context')
+ @mock.patch('yardstick.benchmark.core.task.Context')
def test_parse_nodes_host_target_same_context(self, mock_context):
nodes = {
"host": "node1.LF",
@@ -38,42 +38,45 @@ class TaskCommandsTestCase(unittest.TestCase):
self.assertEqual(context_cfg["host"], server_info)
self.assertEqual(context_cfg["target"], server_info)
- @mock.patch('yardstick.cmd.commands.task.Context')
- @mock.patch('yardstick.cmd.commands.task.base_runner')
+ @mock.patch('yardstick.benchmark.core.task.Context')
+ @mock.patch('yardstick.benchmark.core.task.base_runner')
def test_run(self, mock_base_runner, mock_ctx):
- scenario = \
- {'host': 'athena.demo',
- 'target': 'ares.demo',
- 'runner':
- {'duration': 60,
- 'interval': 1,
- 'type': 'Duration'
- },
- 'type': 'Ping'}
-
- t = task.TaskCommands()
+ scenario = {
+ 'host': 'athena.demo',
+ 'target': 'ares.demo',
+ 'runner': {
+ 'duration': 60,
+ 'interval': 1,
+ 'type': 'Duration'
+ },
+ 'type': 'Ping'
+ }
+
+ t = task.Task()
runner = mock.Mock()
runner.join.return_value = 0
mock_base_runner.Runner.get.return_value = runner
t._run([scenario], False, "yardstick.out")
self.assertTrue(runner.run.called)
- @mock.patch('yardstick.cmd.commands.task.os')
+ @mock.patch('yardstick.benchmark.core.task.os')
def test_check_precondition(self, mock_os):
- cfg = \
- {'precondition':
- {'installer_type': 'compass',
- 'deploy_scenarios': 'os-nosdn',
- 'pod_name': 'huawei-pod1'
- }
+ cfg = {
+ 'precondition': {
+ 'installer_type': 'compass',
+ 'deploy_scenarios': 'os-nosdn',
+ 'pod_name': 'huawei-pod1'
}
+ }
t = task.TaskParser('/opt')
- mock_os.environ.get.side_effect = ['compass', 'os-nosdn', 'huawei-pod1']
+ mock_os.environ.get.side_effect = ['compass',
+ 'os-nosdn',
+ 'huawei-pod1']
result = t._check_precondition(cfg)
self.assertTrue(result)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_no_constraint_no_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "no_constraint_no_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -82,15 +85,15 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1], None)
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_no_constraint_with_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "no_constraint_with_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -99,16 +102,16 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1],
- '{"host": "node1.LF","target": "node2.LF"}')
+ '{"host": "node1.LF","target": "node2.LF"}')
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_with_constraint_no_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "with_constraint_no_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -117,15 +120,15 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1], None)
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.cmd.commands.task.os.environ')
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
def test_parse_suite_with_constraint_with_args(self, mock_environ):
SAMPLE_SCENARIO_PATH = "with_constraint_with_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -134,12 +137,12 @@ class TaskCommandsTestCase(unittest.TestCase):
print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
task_args_fnames))
self.assertEqual(task_files[0],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
self.assertEqual(task_files[1],
- 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
self.assertEqual(task_args[0], None)
self.assertEqual(task_args[1],
- '{"host": "node1.LF","target": "node2.LF"}')
+ '{"host": "node1.LF","target": "node2.LF"}')
self.assertEqual(task_args_fnames[0], None)
self.assertEqual(task_args_fnames[1], None)
@@ -148,3 +151,10 @@ class TaskCommandsTestCase(unittest.TestCase):
file_path = os.path.join(curr_path, filename)
return file_path
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/cmd/commands/test_testcase.py b/tests/unit/benchmark/core/test_testcase.py
index c55c367d0..6e0473cc1 100644
--- a/tests/unit/cmd/commands/test_testcase.py
+++ b/tests/unit/benchmark/core/test_testcase.py
@@ -11,26 +11,33 @@
# Unittest for yardstick.cmd.commands.testcase
-import mock
import unittest
-from yardstick.cmd.commands import testcase
-from yardstick.cmd.commands.testcase import TestcaseCommands
+from yardstick.benchmark.core import testcase
+
class Arg(object):
def __init__(self):
- self.casename=('opnfv_yardstick_tc001',)
+ self.casename = ('opnfv_yardstick_tc001',)
+
-class TestcaseCommandsUT(unittest.TestCase):
+class TestcaseUT(unittest.TestCase):
- def test_do_list(self):
- t = testcase.TestcaseCommands()
- result = t.do_list("")
+ def test_list_all(self):
+ t = testcase.Testcase()
+ result = t.list_all("")
self.assertEqual(result, True)
- def test_do_show(self):
- t = testcase.TestcaseCommands()
+ def test_show(self):
+ t = testcase.Testcase()
casename = Arg()
- result = t.do_show(casename)
+ result = t.show(casename)
self.assertEqual(result, True)
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml b/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
index 8194a2361..8194a2361 100644
--- a/tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
diff --git a/tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml b/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
index 86c9b2800..86c9b2800 100644
--- a/tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml
+++ b/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
diff --git a/tests/unit/test_ssh.py b/tests/unit/test_ssh.py
index 88638a0a8..045ac0f1b 100644
--- a/tests/unit/test_ssh.py
+++ b/tests/unit/test_ssh.py
@@ -310,12 +310,38 @@ class SSHRunTestCase(unittest.TestCase):
@mock.patch("yardstick.ssh.open", create=True)
def test__put_file_shell(self, mock_open):
- self.test_client.run = mock.Mock()
- self.test_client._put_file_shell("localfile", "remotefile", 0o42)
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile", "remotefile", 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > "remotefile"&& chmod -- 042 "remotefile"',
+ stdin=mock_open.return_value.__enter__.return_value)
- self.test_client.run.assert_called_once_with(
- 'cat > "remotefile"&& chmod -- 042 "remotefile"',
- stdin=mock_open.return_value.__enter__.return_value)
+ @mock.patch("yardstick.ssh.open", create=True)
+ def test__put_file_shell_space(self, mock_open):
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile",
+ "filename with space", 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > "filename with space"&& chmod -- 042 "filename with '
+ 'space"',
+ stdin=mock_open.return_value.__enter__.return_value)
+
+ @mock.patch("yardstick.ssh.open", create=True)
+ def test__put_file_shell_tilde(self, mock_open):
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile", "~/remotefile", 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > ~/"remotefile"&& chmod -- 042 ~/"remotefile"',
+ stdin=mock_open.return_value.__enter__.return_value)
+
+ @mock.patch("yardstick.ssh.open", create=True)
+ def test__put_file_shell_tilde_spaces(self, mock_open):
+ with mock.patch.object(self.test_client, "run") as run_mock:
+ self.test_client._put_file_shell("localfile", "~/file with space",
+ 0o42)
+ run_mock.assert_called_once_with(
+ 'cat > ~/"file with space"&& chmod -- 042 ~/"file with space"',
+ stdin=mock_open.return_value.__enter__.return_value)
@mock.patch("yardstick.ssh.os.stat")
def test__put_file_sftp(self, mock_stat):
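
The four new tests above pin down the tilde-aware quoting introduced in yardstick/ssh.py at the end of this patch. A standalone sketch of the splitting logic they exercise:

    import re

    # same pattern as SSH.TILDE_EXPANSIONS_RE below: an optional leading
    # ~user/ prefix stays outside the quotes so the remote shell expands it
    TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")

    tilde, path = TILDE_EXPANSIONS_RE.match("~/file with space").groups()
    cmd = 'cat > %s"%s"' % (tilde or '', path)
    # cmd == 'cat > ~/"file with space"'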
diff --git a/tools/yardstick-img-modify b/tools/yardstick-img-modify
index 0033383ef..68ce6e223 100755
--- a/tools/yardstick-img-modify
+++ b/tools/yardstick-img-modify
@@ -152,9 +152,15 @@ cleanup() {
mount | grep $mountdir/proc && umount $mountdir/proc
mount | grep $mountdir && umount $mountdir
mount | grep "/mnt/vivid" && umount "/mnt/vivid"
+
if [ -f $raw_imgfile ]; then
- kpartx -dv $raw_imgfile
+        # kpartx -dv $raw_imgfile sometimes fails; don't let that abort cleanup.
+ #if [ -z "$(kpartx -l $raw_imgfile | grep 'loop deleted')" ]; then
+ # kpartx -dv $raw_imgfile
+ #fi
+ kpartx -dv $raw_imgfile || true
fi
+
rm -f $raw_imgfile
rm -rf $mountdir
}
diff --git a/yardstick/benchmark/core/__init__.py b/yardstick/benchmark/core/__init__.py
new file mode 100644
index 000000000..12c83f87e
--- /dev/null
+++ b/yardstick/benchmark/core/__init__.py
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import print_function
+
+
+class Param(object):
+
+ def __init__(self, kwargs):
+        # options for the 'task' command
+ self.inputfile = kwargs.get('inputfile')
+ self.task_args = kwargs.get('task-args')
+ self.task_args_file = kwargs.get('task-args-file')
+ self.keep_deploy = kwargs.get('keep-deploy')
+ self.parse_only = kwargs.get('parse-only')
+ self.output_file = kwargs.get('output-file', '/tmp/yardstick.out')
+ self.suite = kwargs.get('suite')
+
+        # options for the 'plugin' command
+ self.input_file = kwargs.get('input_file')
+
+        # options for the 'testcase' command
+ self.casename = kwargs.get('casename')
+
+        # options for the 'runner' and 'scenario' commands
+ self.type = kwargs.get('type')
+
+
+def print_hbar(barlen):
+ '''print to stdout a horizontal bar'''
+    print("+" + "-" * barlen + "+")
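
A minimal usage sketch of the helpers above; the input file path is hypothetical, and the hyphenated keys are the ones Param actually reads:

    from yardstick.benchmark.core import Param, print_hbar

    param = Param({'inputfile': ['samples/ping.yaml'],
                   'task-args': None})
    # param.output_file defaults to '/tmp/yardstick.out'
    print_hbar(78)   # prints '+' followed by 78 dashes and '+'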
diff --git a/yardstick/benchmark/core/plugin.py b/yardstick/benchmark/core/plugin.py
new file mode 100644
index 000000000..da12ce438
--- /dev/null
+++ b/yardstick/benchmark/core/plugin.py
@@ -0,0 +1,212 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'plugin' """
+
+from __future__ import print_function
+import os
+import sys
+import yaml
+import time
+import logging
+import pkg_resources
+import yardstick.ssh as ssh
+
+from yardstick.common.task_template import TaskTemplate
+
+LOG = logging.getLogger(__name__)
+
+
+class Plugin(object):
+ """Plugin commands.
+
+ Set of commands to manage plugins.
+ """
+
+ def install(self, args):
+ """Install a plugin."""
+
+ total_start_time = time.time()
+ parser = PluginParser(args.input_file[0])
+
+ plugins, deployment = parser.parse_plugin()
+ plugin_name = plugins.get("name")
+ print("Installing plugin: %s" % plugin_name)
+
+ LOG.info("Executing _install_setup()")
+ self._install_setup(plugin_name, deployment)
+
+ LOG.info("Executing _run()")
+ self._run(plugin_name)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+ print("Done, exiting")
+
+ def remove(self, args):
+ """Remove a plugin."""
+
+ total_start_time = time.time()
+ parser = PluginParser(args.input_file[0])
+
+ plugins, deployment = parser.parse_plugin()
+ plugin_name = plugins.get("name")
+ print("Removing plugin: %s" % plugin_name)
+
+ LOG.info("Executing _remove_setup()")
+ self._remove_setup(plugin_name, deployment)
+
+ LOG.info("Executing _run()")
+ self._run(plugin_name)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+ print("Done, exiting")
+
+ def _install_setup(self, plugin_name, deployment):
+ """Deployment environment setup"""
+ target_script = plugin_name + ".bash"
+ self.script = pkg_resources.resource_filename(
+ 'yardstick.resources', 'scripts/install/' + target_script)
+
+ deployment_user = deployment.get("user")
+ deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
+ deployment_ip = deployment.get("ip", None)
+ deployment_password = deployment.get("password", None)
+ deployment_key_filename = deployment.get("key_filename",
+ "/root/.ssh/id_rsa")
+
+ if deployment_ip == "local":
+ installer_ip = os.environ.get("INSTALLER_IP", None)
+
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, installer_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, installer_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+ else:
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, deployment_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, deployment_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+
+ # copy script to host
+ remotepath = '~/%s.sh' % plugin_name
+
+ LOG.info("copying script to host: %s", remotepath)
+ self.client._put_file_shell(self.script, remotepath)
+
+ def _remove_setup(self, plugin_name, deployment):
+ """Deployment environment setup"""
+ target_script = plugin_name + ".bash"
+ self.script = pkg_resources.resource_filename(
+ 'yardstick.resources', 'scripts/remove/' + target_script)
+
+ deployment_user = deployment.get("user")
+ deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
+ deployment_ip = deployment.get("ip", None)
+ deployment_password = deployment.get("password", None)
+ deployment_key_filename = deployment.get("key_filename",
+ "/root/.ssh/id_rsa")
+
+ if deployment_ip == "local":
+ installer_ip = os.environ.get("INSTALLER_IP", None)
+
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, installer_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, installer_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+ else:
+ if deployment_password is not None:
+ self._login_via_password(deployment_user, deployment_ip,
+ deployment_password,
+ deployment_ssh_port)
+ else:
+                self._login_via_key(deployment_user, deployment_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+
+ # copy script to host
+ remotepath = '~/%s.sh' % plugin_name
+
+ LOG.info("copying script to host: %s", remotepath)
+ self.client._put_file_shell(self.script, remotepath)
+
+ def _login_via_password(self, user, ip, password, ssh_port):
+ LOG.info("Log in via pw, user:%s, host:%s", user, ip)
+ self.client = ssh.SSH(user, ip, password=password, port=ssh_port)
+ self.client.wait(timeout=600)
+
+ def _login_via_key(self, user, ip, key_filename, ssh_port):
+ LOG.info("Log in via key, user:%s, host:%s", user, ip)
+ self.client = ssh.SSH(user, ip, key_filename=key_filename,
+ port=ssh_port)
+ self.client.wait(timeout=600)
+
+ def _run(self, plugin_name):
+ """Run installation script """
+        cmd = "sudo bash %s.sh" % plugin_name
+
+ LOG.info("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+
+class PluginParser(object):
+    """Parser for plugin configuration files in yaml format"""
+
+ def __init__(self, path):
+ self.path = path
+
+    def parse_plugin(self):
+        """parse the plugin file and return the plugins and
+        deployment sections
+ """
+
+        print("Parsing plugin config:", self.path)
+
+ try:
+ kw = {}
+ with open(self.path) as f:
+ try:
+ input_plugin = f.read()
+ rendered_plugin = TaskTemplate.render(input_plugin, **kw)
+ except Exception as e:
+ print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
+ % {"plugin": input_plugin, "err": e})
+ raise e
+ print(("Input plugin is:\n%s\n") % rendered_plugin)
+
+ cfg = yaml.load(rendered_plugin)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "plugin")
+
+ return cfg["plugins"], cfg["deployment"]
+
+    def _check_schema(self, cfg_schema, schema_type):
+        """Check if configuration file is using the correct schema type"""
+
+ if cfg_schema != "yardstick:" + schema_type + ":0.1":
+ sys.exit("error: file %s has unknown schema %s" % (self.path,
+ cfg_schema))
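
PluginParser.parse_plugin() expects a config using the yardstick:plugin:0.1 schema with top-level plugins and deployment sections; a minimal sketch, with a hypothetical plugin name and credentials:

    import yaml

    # minimal config accepted by PluginParser.parse_plugin(); the plugin
    # name and deployment values here are assumptions
    PLUGIN_YAML = ("schema: \"yardstick:plugin:0.1\"\n"
                   "plugins:\n"
                   "  name: sample_plugin\n"
                   "deployment:\n"
                   "  ip: local\n"
                   "  user: root\n"
                   "  password: root\n")

    cfg = yaml.safe_load(PLUGIN_YAML)
    plugins, deployment = cfg["plugins"], cfg["deployment"]
    # Plugin.install() would then push and run
    # yardstick/resources/scripts/install/sample_plugin.bash on the host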
diff --git a/yardstick/benchmark/core/runner.py b/yardstick/benchmark/core/runner.py
new file mode 100644
index 000000000..e8dd21a12
--- /dev/null
+++ b/yardstick/benchmark/core/runner.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'runner' """
+
+from yardstick.benchmark.runners.base import Runner
+from yardstick.benchmark.core import print_hbar
+
+
+class Runners(object):
+ '''Runner commands.
+
+ Set of commands to discover and display runner types.
+ '''
+
+ def list_all(self, args):
+ '''List existing runner types'''
+ types = Runner.get_types()
+ print_hbar(78)
+ print("| %-16s | %-60s" % ("Type", "Description"))
+ print_hbar(78)
+ for rtype in types:
+            print("| %-16s | %-60s" % (rtype.__execution_type__,
+                                       rtype.__doc__.split("\n")[0]))
+ print_hbar(78)
+
+ def show(self, args):
+ '''Show details of a specific runner type'''
+ rtype = Runner.get_cls(args.type[0])
+        print(rtype.__doc__)
diff --git a/yardstick/benchmark/core/scenario.py b/yardstick/benchmark/core/scenario.py
new file mode 100644
index 000000000..e228054ee
--- /dev/null
+++ b/yardstick/benchmark/core/scenario.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'scenario' """
+
+from yardstick.benchmark.scenarios.base import Scenario
+from yardstick.benchmark.core import print_hbar
+
+
+class Scenarios(object):
+ '''Scenario commands.
+
+ Set of commands to discover and display scenario types.
+ '''
+
+ def list_all(self, args):
+ '''List existing scenario types'''
+ types = Scenario.get_types()
+ print_hbar(78)
+ print("| %-16s | %-60s" % ("Type", "Description"))
+ print_hbar(78)
+ for stype in types:
+ print("| %-16s | %-60s" % (stype.__scenario_type__,
+ stype.__doc__.split("\n")[0]))
+ print_hbar(78)
+
+ def show(self, args):
+ '''Show details of a specific scenario type'''
+ stype = Scenario.get_cls(args.type[0])
+        print(stype.__doc__)
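
Runners and Scenarios expose the same two-method discovery pattern, so either can be driven directly; a sketch (the 'Ping' type name is an assumption, not guaranteed to be registered):

    from yardstick.benchmark.core import Param
    from yardstick.benchmark.core.scenario import Scenarios

    Scenarios().list_all(None)                   # table of registered types
    Scenarios().show(Param({'type': ['Ping']}))  # docstring of one type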
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
new file mode 100644
index 000000000..397ba00b0
--- /dev/null
+++ b/yardstick/benchmark/core/task.py
@@ -0,0 +1,484 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'task' """
+
+from __future__ import print_function
+
+import sys
+import os
+import yaml
+import atexit
+import ipaddress
+import time
+import logging
+import uuid
+import errno
+from itertools import ifilter
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark.runners import base as base_runner
+from yardstick.common.task_template import TaskTemplate
+from yardstick.common.utils import source_env
+from yardstick.common import constants
+
+output_file_default = "/tmp/yardstick.out"
+test_cases_dir_default = "tests/opnfv/test_cases/"
+LOG = logging.getLogger(__name__)
+
+
+class Task(object): # pragma: no cover
+ '''Task commands.
+
+ Set of commands to manage benchmark tasks.
+ '''
+
+ def start(self, args, **kwargs):
+ '''Start a benchmark scenario.'''
+
+ atexit.register(atexit_handler)
+
+ self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
+
+ check_environment()
+
+ total_start_time = time.time()
+ parser = TaskParser(args.inputfile[0])
+
+ if args.suite:
+ # 1.parse suite, return suite_params info
+ task_files, task_args, task_args_fnames = \
+ parser.parse_suite()
+ else:
+ task_files = [parser.path]
+ task_args = [args.task_args]
+ task_args_fnames = [args.task_args_file]
+
+ LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
+ task_files, task_args, task_args_fnames)
+
+ if args.parse_only:
+ sys.exit(0)
+
+ if os.path.isfile(args.output_file):
+ os.remove(args.output_file)
+ # parse task_files
+ for i in range(0, len(task_files)):
+ one_task_start_time = time.time()
+ parser.path = task_files[i]
+ scenarios, run_in_parallel, meet_precondition = parser.parse_task(
+ self.task_id, task_args[i], task_args_fnames[i])
+
+ if not meet_precondition:
+                LOG.info("meet_precondition is %s, please check environment",
+ meet_precondition)
+ continue
+
+ self._run(scenarios, run_in_parallel, args.output_file)
+
+ if args.keep_deploy:
+ # keep deployment, forget about stack
+ # (hide it for exit handler)
+ Context.list = []
+ else:
+ for context in Context.list:
+ context.undeploy()
+ Context.list = []
+ one_task_end_time = time.time()
+ LOG.info("task %s finished in %d secs", task_files[i],
+ one_task_end_time - one_task_start_time)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+        print("Done, exiting")
+
+ def _run(self, scenarios, run_in_parallel, output_file):
+ '''Deploys context and calls runners'''
+ for context in Context.list:
+ context.deploy()
+
+ background_runners = []
+
+ # Start all background scenarios
+ for scenario in ifilter(_is_background_scenario, scenarios):
+ scenario["runner"] = dict(type="Duration", duration=1000000000)
+ runner = run_one_scenario(scenario, output_file)
+ background_runners.append(runner)
+
+ runners = []
+ if run_in_parallel:
+ for scenario in scenarios:
+ if not _is_background_scenario(scenario):
+ runner = run_one_scenario(scenario, output_file)
+ runners.append(runner)
+
+ # Wait for runners to finish
+ for runner in runners:
+ runner_join(runner)
+                print("Runner ended, output in", output_file)
+ else:
+ # run serially
+ for scenario in scenarios:
+ if not _is_background_scenario(scenario):
+ runner = run_one_scenario(scenario, output_file)
+ runner_join(runner)
+                    print("Runner ended, output in", output_file)
+
+ # Abort background runners
+ for runner in background_runners:
+ runner.abort()
+
+ # Wait for background runners to finish
+ for runner in background_runners:
+ if runner.join(timeout=60) is None:
+ # Nuke if it did not stop nicely
+ base_runner.Runner.terminate(runner)
+ runner_join(runner)
+ else:
+ base_runner.Runner.release(runner)
+                print("Background task ended")
+
+
+# TODO: move the functions below into the Task class?
+
+
+class TaskParser(object): # pragma: no cover
+ '''Parser for task config files in yaml format'''
+ def __init__(self, path):
+ self.path = path
+
+ def _meet_constraint(self, task, cur_pod, cur_installer):
+ if "constraint" in task:
+ constraint = task.get('constraint', None)
+ if constraint is not None:
+ tc_fit_pod = constraint.get('pod', None)
+ tc_fit_installer = constraint.get('installer', None)
+ LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
+ cur_pod, cur_installer, constraint)
+ if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+ return False
+ if cur_installer and tc_fit_installer and \
+ cur_installer not in tc_fit_installer:
+ return False
+ return True
+
+ def _get_task_para(self, task, cur_pod):
+ task_args = task.get('task_args', None)
+ if task_args is not None:
+ task_args = task_args.get(cur_pod, None)
+ task_args_fnames = task.get('task_args_fnames', None)
+ if task_args_fnames is not None:
+ task_args_fnames = task_args_fnames.get(cur_pod, None)
+ return task_args, task_args_fnames
+
+ def parse_suite(self):
+ '''parse the suite file and return a list of task config file paths
+ and lists of optional parameters if present'''
+ LOG.info("\nParsing suite file:%s", self.path)
+
+ try:
+ with open(self.path) as stream:
+ cfg = yaml.load(stream)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "suite")
+ LOG.info("\nStarting scenario:%s", cfg["name"])
+
+ test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
+ if test_cases_dir[-1] != os.sep:
+ test_cases_dir += os.sep
+
+ cur_pod = os.environ.get('NODE_NAME', None)
+ cur_installer = os.environ.get('INSTALLER_TYPE', None)
+
+ valid_task_files = []
+ valid_task_args = []
+ valid_task_args_fnames = []
+
+ for task in cfg["test_cases"]:
+ # 1.check file_name
+ if "file_name" in task:
+ task_fname = task.get('file_name', None)
+ if task_fname is None:
+ continue
+ else:
+ continue
+ # 2.check constraint
+ if self._meet_constraint(task, cur_pod, cur_installer):
+ valid_task_files.append(test_cases_dir + task_fname)
+ else:
+ continue
+ # 3.fetch task parameters
+ task_args, task_args_fnames = self._get_task_para(task, cur_pod)
+ valid_task_args.append(task_args)
+ valid_task_args_fnames.append(task_args_fnames)
+
+ return valid_task_files, valid_task_args, valid_task_args_fnames
+
+ def parse_task(self, task_id, task_args=None, task_args_file=None):
+        '''parse the task file and return context and scenario instances'''
+        print("Parsing task config:", self.path)
+
+ try:
+ kw = {}
+ if task_args_file:
+ with open(task_args_file) as f:
+ kw.update(parse_task_args("task_args_file", f.read()))
+ kw.update(parse_task_args("task_args", task_args))
+        except TypeError:
+            raise
+
+ try:
+ with open(self.path) as f:
+ try:
+ input_task = f.read()
+ rendered_task = TaskTemplate.render(input_task, **kw)
+ except Exception as e:
+ print(("Failed to render template:\n%(task)s\n%(err)s\n")
+ % {"task": input_task, "err": e})
+ raise e
+ print(("Input task is:\n%s\n") % rendered_task)
+
+ cfg = yaml.load(rendered_task)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "task")
+ meet_precondition = self._check_precondition(cfg)
+
+        # TODO: support one or many contexts? Many would be simpler and more precise
+ # TODO: support hybrid context type
+ if "context" in cfg:
+ context_cfgs = [cfg["context"]]
+ elif "contexts" in cfg:
+ context_cfgs = cfg["contexts"]
+ else:
+ context_cfgs = [{"type": "Dummy"}]
+
+ for cfg_attrs in context_cfgs:
+ context_type = cfg_attrs.get("type", "Heat")
+ if "Heat" == context_type and "networks" in cfg_attrs:
+                # bugfix: if there is more than one network,
+                # add "external_network" only to the first one.
+                # network names should follow this pattern:
+                # test, test2, test3 ...
+                # sort the networks by name
+ sorted_networks = sorted(cfg_attrs["networks"].keys())
+ # config external_network based on env var
+ cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
+ = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
+
+ context = Context.get(context_type)
+ context.init(cfg_attrs)
+
+ run_in_parallel = cfg.get("run_in_parallel", False)
+
+ # add tc and task id for influxdb extended tags
+ for scenario in cfg["scenarios"]:
+ task_name = os.path.splitext(os.path.basename(self.path))[0]
+ scenario["tc"] = task_name
+ scenario["task_id"] = task_id
+
+        # TODO we need something better here, a class that represents the file
+ return cfg["scenarios"], run_in_parallel, meet_precondition
+
+ def _check_schema(self, cfg_schema, schema_type):
+ '''Check if config file is using the correct schema type'''
+
+ if cfg_schema != "yardstick:" + schema_type + ":0.1":
+ sys.exit("error: file %s has unknown schema %s" % (self.path,
+ cfg_schema))
+
+ def _check_precondition(self, cfg):
+        '''Check if the environment meets the precondition'''
+
+ if "precondition" in cfg:
+ precondition = cfg["precondition"]
+ installer_type = precondition.get("installer_type", None)
+ deploy_scenarios = precondition.get("deploy_scenarios", None)
+ tc_fit_pods = precondition.get("pod_name", None)
+ installer_type_env = os.environ.get('INSTALL_TYPE', None)
+ deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
+ pod_name_env = os.environ.get('NODE_NAME', None)
+
+ LOG.info("installer_type: %s, installer_type_env: %s",
+ installer_type, installer_type_env)
+ LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
+ deploy_scenarios, deploy_scenario_env)
+ LOG.info("tc_fit_pods: %s, pod_name_env: %s",
+ tc_fit_pods, pod_name_env)
+ if installer_type and installer_type_env:
+ if installer_type_env not in installer_type:
+ return False
+ if deploy_scenarios and deploy_scenario_env:
+ deploy_scenarios_list = deploy_scenarios.split(',')
+ for deploy_scenario in deploy_scenarios_list:
+ if deploy_scenario_env.startswith(deploy_scenario):
+ return True
+ return False
+ if tc_fit_pods and pod_name_env:
+ if pod_name_env not in tc_fit_pods:
+ return False
+ return True
+
+
+def atexit_handler():
+ '''handler for process termination'''
+ base_runner.Runner.terminate_all()
+
+ if len(Context.list) > 0:
+        print("Undeploying all contexts")
+ for context in Context.list:
+ context.undeploy()
+
+
+def is_ip_addr(addr):
+ '''check if string addr is an IP address'''
+ try:
+ ipaddress.ip_address(unicode(addr))
+ return True
+ except ValueError:
+ return False
+
+
+def _is_same_heat_context(host_attr, target_attr):
+ '''check if two servers are in the same heat context
+ host_attr: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ target_attr: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ '''
+ host = None
+ target = None
+ for context in Context.list:
+ if context.__context_type__ != "Heat":
+ continue
+
+ host = context._get_server(host_attr)
+ if host is None:
+ continue
+
+ target = context._get_server(target_attr)
+ if target is None:
+ return False
+
+        # Both host and target are not None, so they are in the
+        # same heat context.
+ return True
+
+ return False
+
+
+def _is_background_scenario(scenario):
+ if "run_in_background" in scenario:
+ return scenario["run_in_background"]
+ else:
+ return False
+
+
+def run_one_scenario(scenario_cfg, output_file):
+ '''run one scenario using context'''
+ runner_cfg = scenario_cfg["runner"]
+ runner_cfg['output_filename'] = output_file
+
+ # TODO support get multi hosts/vms info
+ context_cfg = {}
+ if "host" in scenario_cfg:
+ context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+
+ if "target" in scenario_cfg:
+ if is_ip_addr(scenario_cfg["target"]):
+ context_cfg['target'] = {}
+ context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+ else:
+ context_cfg['target'] = Context.get_server(scenario_cfg["target"])
+ if _is_same_heat_context(scenario_cfg["host"],
+ scenario_cfg["target"]):
+ context_cfg["target"]["ipaddr"] = \
+ context_cfg["target"]["private_ip"]
+ else:
+ context_cfg["target"]["ipaddr"] = \
+ context_cfg["target"]["ip"]
+
+ if "targets" in scenario_cfg:
+ ip_list = []
+ for target in scenario_cfg["targets"]:
+ if is_ip_addr(target):
+ ip_list.append(target)
+ context_cfg['target'] = {}
+ else:
+ context_cfg['target'] = Context.get_server(target)
+ if _is_same_heat_context(scenario_cfg["host"], target):
+ ip_list.append(context_cfg["target"]["private_ip"])
+ else:
+ ip_list.append(context_cfg["target"]["ip"])
+ context_cfg['target']['ipaddr'] = ','.join(ip_list)
+
+ if "nodes" in scenario_cfg:
+ context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+ runner = base_runner.Runner.get(runner_cfg)
+
+    print("Starting runner of type '%s'" % runner_cfg["type"])
+ runner.run(scenario_cfg, context_cfg)
+
+ return runner
+
+
+def parse_nodes_with_context(scenario_cfg):
+    '''parse the 'nodes' field in the scenario'''
+ nodes = scenario_cfg["nodes"]
+
+ nodes_cfg = {}
+ for nodename in nodes:
+ nodes_cfg[nodename] = Context.get_server(nodes[nodename])
+
+ return nodes_cfg
+
+
+def runner_join(runner):
+ '''join (wait for) a runner, exit process at runner failure'''
+ status = runner.join()
+ base_runner.Runner.release(runner)
+ if status != 0:
+ sys.exit("Runner failed")
+
+
+def print_invalid_header(source_name, args):
+ print(("Invalid %(source)s passed:\n\n %(args)s\n")
+ % {"source": source_name, "args": args})
+
+
+def parse_task_args(src_name, args):
+ try:
+ kw = args and yaml.safe_load(args)
+ kw = {} if kw is None else kw
+ except yaml.parser.ParserError as e:
+ print_invalid_header(src_name, args)
+ print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
+ % {"source": src_name, "err": e})
+ raise TypeError()
+
+ if not isinstance(kw, dict):
+ print_invalid_header(src_name, args)
+        print(("%(src)s has to be a dict, actually %(src_type)s\n")
+ % {"src": src_name, "src_type": type(kw)})
+ raise TypeError()
+ return kw
+
+
+def check_environment():
+ auth_url = os.environ.get('OS_AUTH_URL', None)
+ if not auth_url:
+ try:
+ source_env(constants.OPENSTACK_RC_FILE)
+ except IOError as e:
+            if e.errno != errno.ENOENT:
+ raise
+ LOG.debug('OPENRC file not found')
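
Two of the module-level helpers above are easy to sanity-check in isolation; a sketch using the same JSON-style task args seen in the unit tests earlier in this patch:

    from yardstick.benchmark.core.task import is_ip_addr, parse_task_args

    kw = parse_task_args("task_args",
                         '{"host": "node1.LF","target": "node2.LF"}')
    # kw == {'host': 'node1.LF', 'target': 'node2.LF'}

    is_ip_addr("10.0.0.1")   # True  -> used verbatim as the target ipaddr
    is_ip_addr("node2.LF")   # False -> resolved via Context.get_server()

    try:
        parse_task_args("task_args", "- just\n- a\n- list")
    except TypeError:
        pass  # anything that is not a YAML mapping is rejected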
diff --git a/yardstick/benchmark/core/testcase.py b/yardstick/benchmark/core/testcase.py
new file mode 100644
index 000000000..d292ad2d7
--- /dev/null
+++ b/yardstick/benchmark/core/testcase.py
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'testcase' """
+import os
+import yaml
+import sys
+
+from yardstick.benchmark.core import print_hbar
+from yardstick.common.task_template import TaskTemplate
+from yardstick.definitions import YARDSTICK_ROOT_PATH
+
+
+class Testcase(object):
+ '''Testcase commands.
+
+ Set of commands to discover and display test cases.
+ '''
+ def __init__(self):
+ self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
+ self.testcase_list = []
+
+ def list_all(self, args):
+ '''List existing test cases'''
+
+ try:
+ testcase_files = os.listdir(self.test_case_path)
+ except Exception as e:
+ print(("Failed to list dir:\n%(path)s\n%(err)s\n")
+ % {"path": self.test_case_path, "err": e})
+ raise e
+ testcase_files.sort()
+
+ for testcase_file in testcase_files:
+ record = self._get_record(testcase_file)
+ self.testcase_list.append(record)
+
+ self._format_print(self.testcase_list)
+ return True
+
+ def show(self, args):
+ '''Show details of a specific test case'''
+ testcase_name = args.casename[0]
+ testcase_path = self.test_case_path + testcase_name + ".yaml"
+ try:
+ with open(testcase_path) as f:
+ try:
+ testcase_info = f.read()
+                print(testcase_info)
+
+ except Exception as e:
+ print(("Failed to load test cases:"
+ "\n%(testcase_file)s\n%(err)s\n")
+ % {"testcase_file": testcase_path, "err": e})
+ raise e
+ except IOError as ioerror:
+ sys.exit(ioerror)
+ return True
+
+ def _get_record(self, testcase_file):
+
+ try:
+ with open(self.test_case_path + testcase_file) as f:
+ try:
+ testcase_info = f.read()
+ except Exception as e:
+ print(("Failed to load test cases:"
+ "\n%(testcase_file)s\n%(err)s\n")
+ % {"testcase_file": testcase_file, "err": e})
+ raise e
+ description, installer, deploy_scenarios = \
+ self._parse_testcase(testcase_info)
+
+ record = {'Name': testcase_file.split(".")[0],
+ 'Description': description,
+ 'installer': installer,
+ 'deploy_scenarios': deploy_scenarios}
+ return record
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ def _parse_testcase(self, testcase_info):
+
+ kw = {}
+ rendered_testcase = TaskTemplate.render(testcase_info, **kw)
+ testcase_cfg = yaml.load(rendered_testcase)
+ test_precondition = testcase_cfg.get('precondition', None)
+ installer_type = 'all'
+ deploy_scenarios = 'all'
+ if test_precondition is not None:
+ installer_type = test_precondition.get('installer_type', 'all')
+ deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
+
+ description = testcase_info.split("\n")[2][1:].strip()
+ return description, installer_type, deploy_scenarios
+
+ def _format_print(self, testcase_list):
+ '''format output'''
+
+ print_hbar(88)
+ print("| %-21s | %-60s" % ("Testcase Name", "Description"))
+ print_hbar(88)
+ for testcase_record in testcase_list:
+            print("| %-21s | %-60s" % (testcase_record['Name'],
+                                       testcase_record['Description']))
+ print_hbar(88)
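
Testcase._parse_testcase() takes the description from the third line of the raw file (index 2, with the leading comment character stripped), which assumes every test case yaml starts with a comment header; a sketch with a hypothetical header:

    # hypothetical test case file header
    testcase_info = ("#####################\n"
                     "# Test case sample\n"
                     "# measure network latency using ping\n")
    description = testcase_info.split("\n")[2][1:].strip()
    # description == 'measure network latency using ping'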
diff --git a/yardstick/cmd/commands/__init__.py b/yardstick/cmd/commands/__init__.py
index e69de29bb..ba229d481 100644
--- a/yardstick/cmd/commands/__init__.py
+++ b/yardstick/cmd/commands/__init__.py
@@ -0,0 +1,9 @@
+from yardstick.benchmark.core import Param
+
+
+def change_osloobj_to_paras(args):
+ param = Param({})
+ for k in param.__dict__:
+ if hasattr(args, k):
+ setattr(param, k, getattr(args, k))
+ return param
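
change_osloobj_to_paras() copies whichever Param attribute names also exist on the parsed args object, so the cmd layer stays a thin shim over the core classes; a sketch using argparse.Namespace as a stand-in for the oslo args, with a hypothetical input file:

    import argparse

    from yardstick.cmd.commands import change_osloobj_to_paras

    args = argparse.Namespace(inputfile=['samples/ping.yaml'], suite=False)
    param = change_osloobj_to_paras(args)
    # param.inputfile == ['samples/ping.yaml'], param.suite is False;
    # attributes missing from args keep the Param({}) defaults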
diff --git a/yardstick/cmd/commands/plugin.py b/yardstick/cmd/commands/plugin.py
index 10e5cdfbe..94095665a 100644
--- a/yardstick/cmd/commands/plugin.py
+++ b/yardstick/cmd/commands/plugin.py
@@ -9,18 +9,9 @@
""" Handler for yardstick command 'plugin' """
-import os
-import sys
-import yaml
-import time
-import logging
-import pkg_resources
-import yardstick.ssh as ssh
-
+from yardstick.benchmark.core.plugin import Plugin
from yardstick.common.utils import cliargs
-from yardstick.common.task_template import TaskTemplate
-
-LOG = logging.getLogger(__name__)
+from yardstick.cmd.commands import change_osloobj_to_paras
class PluginCommands(object):
@@ -33,158 +24,12 @@ class PluginCommands(object):
nargs=1)
def do_install(self, args):
'''Install a plugin.'''
-
- total_start_time = time.time()
- parser = PluginParser(args.input_file[0])
-
- plugins, deployment = parser.parse_plugin()
- plugin_name = plugins.get("name")
- print("Installing plugin: %s" % plugin_name)
-
- LOG.info("Executing _install_setup()")
- self._install_setup(plugin_name, deployment)
-
- LOG.info("Executing _run()")
- self._run(plugin_name)
-
- total_end_time = time.time()
- LOG.info("total finished in %d secs",
- total_end_time - total_start_time)
-
- print("Done, exiting")
+ param = change_osloobj_to_paras(args)
+ Plugin().install(param)
@cliargs("input_file", type=str, help="path to plugin configuration file",
nargs=1)
def do_remove(self, args):
'''Remove a plugin.'''
-
- total_start_time = time.time()
- parser = PluginParser(args.input_file[0])
-
- plugins, deployment = parser.parse_plugin()
- plugin_name = plugins.get("name")
- print("Removing plugin: %s" % plugin_name)
-
- LOG.info("Executing _remove_setup()")
- self._remove_setup(plugin_name, deployment)
-
- LOG.info("Executing _run()")
- self._run(plugin_name)
-
- total_end_time = time.time()
- LOG.info("total finished in %d secs",
- total_end_time - total_start_time)
-
- print("Done, exiting")
-
- def _install_setup(self, plugin_name, deployment):
- '''Deployment environment setup'''
- target_script = plugin_name + ".bash"
- self.script = pkg_resources.resource_filename(
- 'yardstick.resources', 'scripts/install/' + target_script)
-
- deployment_user = deployment.get("user")
- deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
- deployment_ip = deployment.get("ip")
- deployment_password = deployment.get("password")
-
- if deployment_ip == "local":
- installer_ip = os.environ.get("INSTALLER_IP", None)
-
- LOG.info("user:%s, host:%s", deployment_user, installer_ip)
- self.client = ssh.SSH(deployment_user, installer_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
- else:
- LOG.info("user:%s, host:%s", deployment_user, deployment_ip)
- self.client = ssh.SSH(deployment_user, deployment_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
-
- # copy script to host
- cmd = "cat > ~/%s.sh" % plugin_name
-
- LOG.info("copying script to host: %s", cmd)
- self.client.run(cmd, stdin=open(self.script, 'rb'))
-
- def _remove_setup(self, plugin_name, deployment):
- '''Deployment environment setup'''
- target_script = plugin_name + ".bash"
- self.script = pkg_resources.resource_filename(
- 'yardstick.resources', 'scripts/remove/' + target_script)
-
- deployment_user = deployment.get("user")
- deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
- deployment_ip = deployment.get("ip")
- deployment_password = deployment.get("password")
-
- if deployment_ip == "local":
- installer_ip = os.environ.get("INSTALLER_IP", None)
-
- LOG.info("user:%s, host:%s", deployment_user, installer_ip)
- self.client = ssh.SSH(deployment_user, installer_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
- else:
- LOG.info("user:%s, host:%s", deployment_user, deployment_ip)
- self.client = ssh.SSH(deployment_user, deployment_ip,
- password=deployment_password,
- port=deployment_ssh_port)
- self.client.wait(timeout=600)
-
- # copy script to host
- cmd = "cat > ~/%s.sh" % plugin_name
-
- LOG.info("copying script to host: %s", cmd)
- self.client.run(cmd, stdin=open(self.script, 'rb'))
-
- def _run(self, plugin_name):
- '''Run installation script '''
- cmd = "sudo bash %s" % plugin_name + ".sh"
-
- LOG.info("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
-
-class PluginParser(object):
- '''Parser for plugin configration files in yaml format'''
-
- def __init__(self, path):
- self.path = path
-
- def parse_plugin(self):
- '''parses the plugin file and return a plugins instance
- and a deployment instance
- '''
-
- print "Parsing plugin config:", self.path
-
- try:
- kw = {}
- with open(self.path) as f:
- try:
- input_plugin = f.read()
- rendered_plugin = TaskTemplate.render(input_plugin, **kw)
- except Exception as e:
- print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
- % {"plugin": input_plugin, "err": e})
- raise e
- print(("Input plugin is:\n%s\n") % rendered_plugin)
-
- cfg = yaml.load(rendered_plugin)
- except IOError as ioerror:
- sys.exit(ioerror)
-
- self._check_schema(cfg["schema"], "plugin")
-
- return cfg["plugins"], cfg["deployment"]
-
- def _check_schema(self, cfg_schema, schema_type):
- '''Check if configration file is using the correct schema type'''
-
- if cfg_schema != "yardstick:" + schema_type + ":0.1":
- sys.exit("error: file %s has unknown schema %s" % (self.path,
- cfg_schema))
+ param = change_osloobj_to_paras(args)
+ Plugin().remove(param)
diff --git a/yardstick/cmd/commands/runner.py b/yardstick/cmd/commands/runner.py
index 84bc3c6cf..62a2082c4 100644
--- a/yardstick/cmd/commands/runner.py
+++ b/yardstick/cmd/commands/runner.py
@@ -9,9 +9,9 @@
""" Handler for yardstick command 'runner' """
-from yardstick.benchmark.runners.base import Runner
+from yardstick.benchmark.core.runner import Runners
from yardstick.common.utils import cliargs
-from yardstick.cmd import print_hbar
+from yardstick.cmd.commands import change_osloobj_to_paras
class RunnerCommands(object):
@@ -22,17 +22,11 @@ class RunnerCommands(object):
def do_list(self, args):
'''List existing runner types'''
- types = Runner.get_types()
- print_hbar(78)
- print("| %-16s | %-60s" % ("Type", "Description"))
- print_hbar(78)
- for rtype in types:
- print "| %-16s | %-60s" % (rtype.__execution_type__,
- rtype.__doc__.split("\n")[0])
- print_hbar(78)
+ param = change_osloobj_to_paras(args)
+ Runners().list_all(param)
@cliargs("type", type=str, help="runner type", nargs=1)
def do_show(self, args):
'''Show details of a specific runner type'''
- rtype = Runner.get_cls(args.type[0])
- print rtype.__doc__
+ param = change_osloobj_to_paras(args)
+ Runners().show(param)
diff --git a/yardstick/cmd/commands/scenario.py b/yardstick/cmd/commands/scenario.py
index 00d46cf11..6aa3a451a 100644
--- a/yardstick/cmd/commands/scenario.py
+++ b/yardstick/cmd/commands/scenario.py
@@ -9,9 +9,9 @@
""" Handler for yardstick command 'scenario' """
-from yardstick.benchmark.scenarios.base import Scenario
+from yardstick.benchmark.core.scenario import Scenarios
from yardstick.common.utils import cliargs
-from yardstick.cmd import print_hbar
+from yardstick.cmd.commands import change_osloobj_to_paras
class ScenarioCommands(object):
@@ -22,17 +22,11 @@ class ScenarioCommands(object):
def do_list(self, args):
'''List existing scenario types'''
- types = Scenario.get_types()
- print_hbar(78)
- print("| %-16s | %-60s" % ("Type", "Description"))
- print_hbar(78)
- for stype in types:
- print("| %-16s | %-60s" % (stype.__scenario_type__,
- stype.__doc__.split("\n")[0]))
- print_hbar(78)
+ param = change_osloobj_to_paras(args)
+ Scenarios().list_all(param)
-    @cliargs("type", type=str, help="runner type", nargs=1)
+    @cliargs("type", type=str, help="scenario type", nargs=1)
def do_show(self, args):
'''Show details of a specific scenario type'''
- stype = Scenario.get_cls(args.type[0])
- print stype.__doc__
+ param = change_osloobj_to_paras(args)
+ Scenarios().show(param)
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index 9524778ba..bd018bcab 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -8,28 +8,12 @@
##############################################################################
""" Handler for yardstick command 'task' """
-
-import sys
-import os
-import yaml
-import atexit
-import ipaddress
-import time
-import logging
-import uuid
-import errno
-from itertools import ifilter
-
-from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.runners import base as base_runner
-from yardstick.common.task_template import TaskTemplate
+from yardstick.benchmark.core.task import Task
from yardstick.common.utils import cliargs
-from yardstick.common.utils import source_env
-from yardstick.common import constants
+from yardstick.cmd.commands import change_osloobj_to_paras
+
output_file_default = "/tmp/yardstick.out"
-test_cases_dir_default = "tests/opnfv/test_cases/"
-LOG = logging.getLogger(__name__)
class TaskCommands(object):
@@ -55,447 +39,5 @@ class TaskCommands(object):
@cliargs("--suite", help="process test suite file instead of a task file",
action="store_true")
def do_start(self, args, **kwargs):
- '''Start a benchmark scenario.'''
-
- atexit.register(atexit_handler)
-
- self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
-
- check_environment()
-
- total_start_time = time.time()
- parser = TaskParser(args.inputfile[0])
-
- if args.suite:
- # 1.parse suite, return suite_params info
- task_files, task_args, task_args_fnames = \
- parser.parse_suite()
- else:
- task_files = [parser.path]
- task_args = [args.task_args]
- task_args_fnames = [args.task_args_file]
-
- LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
- task_files, task_args, task_args_fnames)
-
- if args.parse_only:
- sys.exit(0)
-
- if os.path.isfile(args.output_file):
- os.remove(args.output_file)
- # parse task_files
- for i in range(0, len(task_files)):
- one_task_start_time = time.time()
- parser.path = task_files[i]
- scenarios, run_in_parallel, meet_precondition = parser.parse_task(
- self.task_id, task_args[i], task_args_fnames[i])
-
- if not meet_precondition:
- LOG.info("meet_precondition is %s, please check envrionment",
- meet_precondition)
- continue
-
- self._run(scenarios, run_in_parallel, args.output_file)
-
- if args.keep_deploy:
- # keep deployment, forget about stack
- # (hide it for exit handler)
- Context.list = []
- else:
- for context in Context.list:
- context.undeploy()
- Context.list = []
- one_task_end_time = time.time()
- LOG.info("task %s finished in %d secs", task_files[i],
- one_task_end_time - one_task_start_time)
-
- total_end_time = time.time()
- LOG.info("total finished in %d secs",
- total_end_time - total_start_time)
-
- print "Done, exiting"
-
- def _run(self, scenarios, run_in_parallel, output_file):
- '''Deploys context and calls runners'''
- for context in Context.list:
- context.deploy()
-
- background_runners = []
-
- # Start all background scenarios
- for scenario in ifilter(_is_background_scenario, scenarios):
- scenario["runner"] = dict(type="Duration", duration=1000000000)
- runner = run_one_scenario(scenario, output_file)
- background_runners.append(runner)
-
- runners = []
- if run_in_parallel:
- for scenario in scenarios:
- if not _is_background_scenario(scenario):
- runner = run_one_scenario(scenario, output_file)
- runners.append(runner)
-
- # Wait for runners to finish
- for runner in runners:
- runner_join(runner)
- print "Runner ended, output in", output_file
- else:
- # run serially
- for scenario in scenarios:
- if not _is_background_scenario(scenario):
- runner = run_one_scenario(scenario, output_file)
- runner_join(runner)
- print "Runner ended, output in", output_file
-
- # Abort background runners
- for runner in background_runners:
- runner.abort()
-
- # Wait for background runners to finish
- for runner in background_runners:
- if runner.join(timeout=60) is None:
- # Nuke if it did not stop nicely
- base_runner.Runner.terminate(runner)
- runner_join(runner)
- else:
- base_runner.Runner.release(runner)
- print "Background task ended"
-
-
-# TODO: Move stuff below into TaskCommands class !?
-
-
-class TaskParser(object):
- '''Parser for task config files in yaml format'''
- def __init__(self, path):
- self.path = path
-
- def _meet_constraint(self, task, cur_pod, cur_installer):
- if "constraint" in task:
- constraint = task.get('constraint', None)
- if constraint is not None:
- tc_fit_pod = constraint.get('pod', None)
- tc_fit_installer = constraint.get('installer', None)
- LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
- cur_pod, cur_installer, constraint)
- if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
- return False
- if cur_installer and tc_fit_installer and \
- cur_installer not in tc_fit_installer:
- return False
- return True
-
- def _get_task_para(self, task, cur_pod):
- task_args = task.get('task_args', None)
- if task_args is not None:
- task_args = task_args.get(cur_pod, None)
- task_args_fnames = task.get('task_args_fnames', None)
- if task_args_fnames is not None:
- task_args_fnames = task_args_fnames.get(cur_pod, None)
- return task_args, task_args_fnames
-
- def parse_suite(self):
- '''parse the suite file and return a list of task config file paths
- and lists of optional parameters if present'''
- LOG.info("\nParsing suite file:%s", self.path)
-
- try:
- with open(self.path) as stream:
- cfg = yaml.load(stream)
- except IOError as ioerror:
- sys.exit(ioerror)
-
- self._check_schema(cfg["schema"], "suite")
- LOG.info("\nStarting scenario:%s", cfg["name"])
-
- test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
- if test_cases_dir[-1] != os.sep:
- test_cases_dir += os.sep
-
- cur_pod = os.environ.get('NODE_NAME', None)
- cur_installer = os.environ.get('INSTALLER_TYPE', None)
-
- valid_task_files = []
- valid_task_args = []
- valid_task_args_fnames = []
-
- for task in cfg["test_cases"]:
- # 1.check file_name
- if "file_name" in task:
- task_fname = task.get('file_name', None)
- if task_fname is None:
- continue
- else:
- continue
- # 2.check constraint
- if self._meet_constraint(task, cur_pod, cur_installer):
- valid_task_files.append(test_cases_dir + task_fname)
- else:
- continue
- # 3.fetch task parameters
- task_args, task_args_fnames = self._get_task_para(task, cur_pod)
- valid_task_args.append(task_args)
- valid_task_args_fnames.append(task_args_fnames)
-
- return valid_task_files, valid_task_args, valid_task_args_fnames
-
- def parse_task(self, task_id, task_args=None, task_args_file=None):
- '''parses the task file and return an context and scenario instances'''
- print "Parsing task config:", self.path
-
- try:
- kw = {}
- if task_args_file:
- with open(task_args_file) as f:
- kw.update(parse_task_args("task_args_file", f.read()))
- kw.update(parse_task_args("task_args", task_args))
- except TypeError:
- raise TypeError()
-
- try:
- with open(self.path) as f:
- try:
- input_task = f.read()
- rendered_task = TaskTemplate.render(input_task, **kw)
- except Exception as e:
- print(("Failed to render template:\n%(task)s\n%(err)s\n")
- % {"task": input_task, "err": e})
- raise e
- print(("Input task is:\n%s\n") % rendered_task)
-
- cfg = yaml.load(rendered_task)
- except IOError as ioerror:
- sys.exit(ioerror)
-
- self._check_schema(cfg["schema"], "task")
- meet_precondition = self._check_precondition(cfg)
-
- # TODO: support one or many contexts? Many would simpler and precise
- # TODO: support hybrid context type
- if "context" in cfg:
- context_cfgs = [cfg["context"]]
- elif "contexts" in cfg:
- context_cfgs = cfg["contexts"]
- else:
- context_cfgs = [{"type": "Dummy"}]
-
- for cfg_attrs in context_cfgs:
- context_type = cfg_attrs.get("type", "Heat")
- if "Heat" == context_type and "networks" in cfg_attrs:
- # bugfix: if there are more than one network,
- # only add "external_network" on first one.
- # the name of netwrok should follow this rule:
- # test, test2, test3 ...
- # sort network with the length of network's name
- sorted_networks = sorted(cfg_attrs["networks"].keys())
- # config external_network based on env var
- cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
- = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
-
- context = Context.get(context_type)
- context.init(cfg_attrs)
-
- run_in_parallel = cfg.get("run_in_parallel", False)
-
- # add tc and task id for influxdb extended tags
- for scenario in cfg["scenarios"]:
- task_name = os.path.splitext(os.path.basename(self.path))[0]
- scenario["tc"] = task_name
- scenario["task_id"] = task_id
-
- # TODO we need something better here, a class that represent the file
- return cfg["scenarios"], run_in_parallel, meet_precondition
-
- def _check_schema(self, cfg_schema, schema_type):
- '''Check if config file is using the correct schema type'''
-
- if cfg_schema != "yardstick:" + schema_type + ":0.1":
- sys.exit("error: file %s has unknown schema %s" % (self.path,
- cfg_schema))
-
- def _check_precondition(self, cfg):
- '''Check if the envrionment meet the preconditon'''
-
- if "precondition" in cfg:
- precondition = cfg["precondition"]
- installer_type = precondition.get("installer_type", None)
- deploy_scenarios = precondition.get("deploy_scenarios", None)
- tc_fit_pods = precondition.get("pod_name", None)
- installer_type_env = os.environ.get('INSTALL_TYPE', None)
- deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
- pod_name_env = os.environ.get('NODE_NAME', None)
-
- LOG.info("installer_type: %s, installer_type_env: %s",
- installer_type, installer_type_env)
- LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
- deploy_scenarios, deploy_scenario_env)
- LOG.info("tc_fit_pods: %s, pod_name_env: %s",
- tc_fit_pods, pod_name_env)
- if installer_type and installer_type_env:
- if installer_type_env not in installer_type:
- return False
- if deploy_scenarios and deploy_scenario_env:
- deploy_scenarios_list = deploy_scenarios.split(',')
- for deploy_scenario in deploy_scenarios_list:
- if deploy_scenario_env.startswith(deploy_scenario):
- return True
- return False
- if tc_fit_pods and pod_name_env:
- if pod_name_env not in tc_fit_pods:
- return False
- return True
-
-
-def atexit_handler():
- '''handler for process termination'''
- base_runner.Runner.terminate_all()
-
- if len(Context.list) > 0:
- print "Undeploying all contexts"
- for context in Context.list:
- context.undeploy()
-
-
-def is_ip_addr(addr):
- '''check if string addr is an IP address'''
- try:
- ipaddress.ip_address(unicode(addr))
- return True
- except ValueError:
- return False
-
-
-def _is_same_heat_context(host_attr, target_attr):
- '''check if two servers are in the same heat context
- host_attr: either a name for a server created by yardstick or a dict
- with attribute name mapping when using external heat templates
- target_attr: either a name for a server created by yardstick or a dict
- with attribute name mapping when using external heat templates
- '''
- host = None
- target = None
- for context in Context.list:
- if context.__context_type__ != "Heat":
- continue
-
- host = context._get_server(host_attr)
- if host is None:
- continue
-
- target = context._get_server(target_attr)
- if target is None:
- return False
-
- # Both host and target is not None, then they are in the
- # same heat context.
- return True
-
- return False
-
-
-def _is_background_scenario(scenario):
- if "run_in_background" in scenario:
- return scenario["run_in_background"]
- else:
- return False
-
-
-def run_one_scenario(scenario_cfg, output_file):
- '''run one scenario using context'''
- runner_cfg = scenario_cfg["runner"]
- runner_cfg['output_filename'] = output_file
-
- # TODO support get multi hosts/vms info
- context_cfg = {}
- if "host" in scenario_cfg:
- context_cfg['host'] = Context.get_server(scenario_cfg["host"])
-
- if "target" in scenario_cfg:
- if is_ip_addr(scenario_cfg["target"]):
- context_cfg['target'] = {}
- context_cfg['target']["ipaddr"] = scenario_cfg["target"]
- else:
- context_cfg['target'] = Context.get_server(scenario_cfg["target"])
- if _is_same_heat_context(scenario_cfg["host"],
- scenario_cfg["target"]):
- context_cfg["target"]["ipaddr"] = \
- context_cfg["target"]["private_ip"]
- else:
- context_cfg["target"]["ipaddr"] = \
- context_cfg["target"]["ip"]
-
- if "targets" in scenario_cfg:
- ip_list = []
- for target in scenario_cfg["targets"]:
- if is_ip_addr(target):
- ip_list.append(target)
- context_cfg['target'] = {}
- else:
- context_cfg['target'] = Context.get_server(target)
- if _is_same_heat_context(scenario_cfg["host"], target):
- ip_list.append(context_cfg["target"]["private_ip"])
- else:
- ip_list.append(context_cfg["target"]["ip"])
- context_cfg['target']['ipaddr'] = ','.join(ip_list)
-
- if "nodes" in scenario_cfg:
- context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
- runner = base_runner.Runner.get(runner_cfg)
-
- print "Starting runner of type '%s'" % runner_cfg["type"]
- runner.run(scenario_cfg, context_cfg)
-
- return runner
-
-
-def parse_nodes_with_context(scenario_cfg):
- '''paras the 'nodes' fields in scenario '''
- nodes = scenario_cfg["nodes"]
-
- nodes_cfg = {}
- for nodename in nodes:
- nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
- return nodes_cfg
-
-
-def runner_join(runner):
- '''join (wait for) a runner, exit process at runner failure'''
- status = runner.join()
- base_runner.Runner.release(runner)
- if status != 0:
- sys.exit("Runner failed")
-
-
-def print_invalid_header(source_name, args):
- print(("Invalid %(source)s passed:\n\n %(args)s\n")
- % {"source": source_name, "args": args})
-
-
-def parse_task_args(src_name, args):
- try:
- kw = args and yaml.safe_load(args)
- kw = {} if kw is None else kw
- except yaml.parser.ParserError as e:
- print_invalid_header(src_name, args)
- print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
- % {"source": src_name, "err": e})
- raise TypeError()
-
- if not isinstance(kw, dict):
- print_invalid_header(src_name, args)
- print(("%(src)s had to be dict, actually %(src_type)s\n")
- % {"src": src_name, "src_type": type(kw)})
- raise TypeError()
- return kw
-
-
-def check_environment():
- auth_url = os.environ.get('OS_AUTH_URL', None)
- if not auth_url:
- try:
- source_env(constants.OPENSTACK_RC_FILE)
- except IOError as e:
- if e.errno != errno.EEXIST:
- raise
- LOG.debug('OPENRC file not found')
+ param = change_osloobj_to_paras(args)
+ Task().start(param)
diff --git a/yardstick/cmd/commands/testcase.py b/yardstick/cmd/commands/testcase.py
index cb76c7ae3..6ff796238 100644
--- a/yardstick/cmd/commands/testcase.py
+++ b/yardstick/cmd/commands/testcase.py
@@ -8,14 +8,9 @@
##############################################################################
""" Handler for yardstick command 'testcase' """
-import os
-import yaml
-import sys
-
-from yardstick.cmd import print_hbar
-from yardstick.common.task_template import TaskTemplate
+from yardstick.benchmark.core.testcase import Testcase
from yardstick.common.utils import cliargs
-from yardstick.definitions import YARDSTICK_ROOT_PATH
+from yardstick.cmd.commands import change_osloobj_to_paras
class TestcaseCommands(object):
@@ -23,92 +18,14 @@ class TestcaseCommands(object):
Set of commands to discover and display test cases.
'''
- def __init__(self):
- self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
- self.testcase_list = []
def do_list(self, args):
'''List existing test cases'''
-
- try:
- testcase_files = os.listdir(self.test_case_path)
- except Exception as e:
- print(("Failed to list dir:\n%(path)s\n%(err)s\n")
- % {"path": self.test_case_path, "err": e})
- raise e
- testcase_files.sort()
-
- for testcase_file in testcase_files:
- record = self._get_record(testcase_file)
- self.testcase_list.append(record)
-
- self._format_print(self.testcase_list)
- return True
+ param = change_osloobj_to_paras(args)
+ Testcase().list_all(param)
@cliargs("casename", type=str, help="test case name", nargs=1)
def do_show(self, args):
'''Show details of a specific test case'''
- testcase_name = args.casename[0]
- testcase_path = self.test_case_path + testcase_name + ".yaml"
- try:
- with open(testcase_path) as f:
- try:
- testcase_info = f.read()
- print testcase_info
-
- except Exception as e:
- print(("Failed to load test cases:"
- "\n%(testcase_file)s\n%(err)s\n")
- % {"testcase_file": testcase_path, "err": e})
- raise e
- except IOError as ioerror:
- sys.exit(ioerror)
- return True
-
- def _get_record(self, testcase_file):
-
- try:
- with open(self.test_case_path + testcase_file) as f:
- try:
- testcase_info = f.read()
- except Exception as e:
- print(("Failed to load test cases:"
- "\n%(testcase_file)s\n%(err)s\n")
- % {"testcase_file": testcase_file, "err": e})
- raise e
- description, installer, deploy_scenarios = \
- self._parse_testcase(testcase_info)
-
- record = {'Name': testcase_file.split(".")[0],
- 'Description': description,
- 'installer': installer,
- 'deploy_scenarios': deploy_scenarios}
- return record
- except IOError as ioerror:
- sys.exit(ioerror)
-
- def _parse_testcase(self, testcase_info):
-
- kw = {}
- rendered_testcase = TaskTemplate.render(testcase_info, **kw)
- testcase_cfg = yaml.load(rendered_testcase)
- test_precondition = testcase_cfg.get('precondition', None)
- installer_type = 'all'
- deploy_scenarios = 'all'
- if test_precondition is not None:
- installer_type = test_precondition.get('installer_type', 'all')
- deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
-
- description = testcase_info.split("\n")[2][1:].strip()
- return description, installer_type, deploy_scenarios
-
- def _format_print(self, testcase_list):
- '''format output'''
-
- print_hbar(88)
- print("| %-21s | %-60s" % ("Testcase Name", "Description"))
- print_hbar(88)
- for testcase_record in testcase_list:
- print "| %-16s | %-60s" % (testcase_record['Name'],
- testcase_record['Description'])
- print_hbar(88)
+ param = change_osloobj_to_paras(args)
+ Testcase().show(param)
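The listing and parsing logic removed above moves into yardstick/benchmark/core/testcase.py (112 lines in the diffstat). Assuming the precondition handling carries over as-is, its core reads like this in isolation, with yaml.safe_load swapped in for the original yaml.load:

    import yaml

    def parse_precondition(testcase_info):
        # a testcase without a precondition block runs on all installers
        # and all deploy scenarios
        testcase_cfg = yaml.safe_load(testcase_info)
        precondition = testcase_cfg.get('precondition') or {}
        installer_type = precondition.get('installer_type', 'all')
        deploy_scenarios = precondition.get('deploy_scenarios', 'all')
        return installer_type, deploy_scenarios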
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 443b3e810..705e1ad87 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import os
DOCKER_URL = 'unix://var/run/docker.sock'
@@ -15,6 +23,7 @@ GRAFANA_TAGS = '3.1.1'
dirname = os.path.dirname
abspath = os.path.abspath
+join = os.path.join
sep = os.path.sep
INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
@@ -25,14 +34,21 @@ YARDSTICK_REPOS_DIR = '/home/opnfv/repos/yardstick'
YARDSTICK_CONFIG_DIR = '/etc/yardstick/'
-YARDSTICK_CONFIG_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'yardstick.conf')
+YARDSTICK_CONFIG_FILE = join(YARDSTICK_CONFIG_DIR, 'yardstick.conf')
+
+YARDSTICK_CONFIG_SAMPLE_DIR = join(YARDSTICK_ROOT_PATH, 'etc/yardstick/')
+
+YARDSTICK_CONFIG_SAMPLE_FILE = join(YARDSTICK_CONFIG_SAMPLE_DIR,
+ 'yardstick.conf.sample')
RELENG_DIR = '/home/opnfv/repos/releng'
OS_FETCH_SCRIPT = 'utils/fetch_os_creds.sh'
+CLEAN_IMAGES_SCRIPT = 'tests/ci/clean_images.sh'
+
LOAD_IMAGES_SCRIPT = 'tests/ci/load_images.sh'
-OPENSTACK_RC_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'openstack.creds')
+OPENSTACK_RC_FILE = join(YARDSTICK_CONFIG_DIR, 'openstack.creds')
YARDSTICK_ENV_ACTION_API = 'http://localhost:5000/yardstick/env/action'
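The join alias is safe even with the trailing slash the directory constants keep: os.path.join absorbs the duplicate separator rather than doubling it.

    from os.path import join

    # trailing separator on the first argument is absorbed, not doubled
    assert join('/etc/yardstick/', 'yardstick.conf') == '/etc/yardstick/yardstick.conf'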
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 25dcffadd..d8dc61ef6 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -5,7 +5,6 @@
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-# yardstick: this file is copied from rally and slightly modified
##############################################################################
import os
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 2ba6de92e..927ca94db 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -66,6 +66,7 @@ import os
import select
import socket
import time
+import re
import logging
import paramiko
@@ -252,7 +253,7 @@ class SSH(object):
raise SSHError("Socket error.")
exit_status = session.recv_exit_status()
- if 0 != exit_status and raise_on_error:
+ if exit_status != 0 and raise_on_error:
fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
details = fmt % {"cmd": cmd, "status": exit_status}
if stderr_data:
@@ -311,17 +312,21 @@ class SSH(object):
mode = 0o777 & os.stat(localpath).st_mode
sftp.chmod(remotepath, mode)
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
def _put_file_shell(self, localpath, remotepath, mode=None):
# quote to stop word splitting
- cmd = ['cat > "%s"' % remotepath]
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
if mode is not None:
# use -- so no options
- cmd.append('chmod -- 0%o "%s"' % (mode, remotepath))
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
with open(localpath, "rb") as localfile:
# only chmod on successful cat
- cmd = "&& ".join(cmd)
- self.run(cmd, stdin=localfile)
+ self.run("&& ".join(cmd), stdin=localfile)
def put_file(self, localpath, remotepath, mode=None):
"""Copy specified local file to the server.
@@ -330,7 +335,6 @@ class SSH(object):
:param remotepath: Remote filename.
:param mode: Permissions to set after upload
"""
- import socket
try:
self._put_file_sftp(localpath, remotepath, mode=mode)
except (paramiko.SSHException, socket.error):
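The tilde handling is the behavioural change in this file: the new regex splits an optional ~ or ~user/ prefix off the remote path and leaves that prefix outside the quotes, so the remote shell can still expand it while the rest of the path stays quoted against word splitting. A standalone illustration; the sample paths are invented for the example:

    import re

    TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")

    for path in ("~/out/result.log", "~jenkins/out/result.log", "/tmp/result.log"):
        tilde, rest = TILDE_EXPANSIONS_RE.match(path).groups()
        tilde = tilde or ''  # group 1 is None when there is no tilde prefix
        print('cat > %s"%s"' % (tilde, rest))

    # cat > ~/"out/result.log"         (shell expands the unquoted ~/)
    # cat > ~jenkins/"out/result.log"
    # cat > "/tmp/result.log"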