-rw-r--r--   yardstick/benchmark/core/__init__.py                 1
-rw-r--r--   yardstick/benchmark/core/task.py                   129
-rw-r--r--   yardstick/cmd/commands/task.py                       9
-rw-r--r--   yardstick/common/exceptions.py                      12
-rw-r--r--   yardstick/tests/unit/benchmark/core/test_task.py   138
5 files changed, 235 insertions, 54 deletions
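
This change adds a --render-only option to the 'task' command: instead of executing the scenarios, Yardstick renders each Jinja2 task template, writes the results into the given directory and exits, e.g. 'yardstick task start ping.yaml --render-only /tmp/rendered' (the task file name is illustrative). To support this, TaskParser.parse_task() now returns a dict that includes the rendered text, the per-file parsing loop moves into a new Task._parse_tasks() helper, and three dedicated exceptions replace the previous TypeError/sys.exit() error handling.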
diff --git a/yardstick/benchmark/core/__init__.py b/yardstick/benchmark/core/__init__.py
index 3e3aa99a1..3914e3237 100644
--- a/yardstick/benchmark/core/__init__.py
+++ b/yardstick/benchmark/core/__init__.py
@@ -23,6 +23,7 @@ class Param(object):
self.task_args_file = kwargs.get('task-args-file')
self.keep_deploy = kwargs.get('keep-deploy')
self.parse_only = kwargs.get('parse-only')
+ self.render_only = kwargs.get('render-only')
self.output_file = kwargs.get('output-file', '/tmp/yardstick.out')
self.suite = kwargs.get('suite')
self.task_id = kwargs.get('task_id')
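
Param maps the dash-separated CLI keys onto underscore attributes, so the new flag surfaces as param.render_only. A minimal illustration (the directory value is hypothetical):

    from yardstick.benchmark.core import Param

    # Param takes the raw kwargs dict produced by the CLI layer;
    # keys that were not passed simply come back as None.
    param = Param({'render-only': '/tmp/rendered'})
    print(param.render_only)      # -> /tmp/rendered
    print(Param({}).render_only)  # -> None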
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 4993d2a7e..ba6c9c830 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -7,7 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
import sys
import os
from collections import OrderedDict
@@ -28,10 +27,10 @@ from yardstick.benchmark.runners import base as base_runner
from yardstick.common.constants import CONF_FILE
from yardstick.common.yaml_loader import yaml_load
from yardstick.dispatcher.base import Base as DispatcherBase
-from yardstick.common.task_template import TaskTemplate
-from yardstick.common import utils
from yardstick.common import constants
-from yardstick.common import exceptions
+from yardstick.common import exceptions as y_exc
+from yardstick.common import task_template
+from yardstick.common import utils
from yardstick.common.html_template import report_template
output_file_default = "/tmp/yardstick.out"
@@ -55,7 +54,7 @@ class Task(object): # pragma: no cover
out_types = [s.strip() for s in dispatchers.split(',')]
output_config['DEFAULT']['dispatcher'] = out_types
- def start(self, args):
+ def start(self, args, **kwargs): # pylint: disable=unused-argument
"""Start a benchmark scenario."""
atexit.register(self.atexit_handler)
@@ -87,8 +86,7 @@ class Task(object): # pragma: no cover
if args.suite:
# 1.parse suite, return suite_params info
- task_files, task_args, task_args_fnames = \
- parser.parse_suite()
+ task_files, task_args, task_args_fnames = parser.parse_suite()
else:
task_files = [parser.path]
task_args = [args.task_args]
@@ -101,32 +99,33 @@ class Task(object): # pragma: no cover
sys.exit(0)
testcases = {}
- # parse task_files
- for i in range(0, len(task_files)):
- one_task_start_time = time.time()
- parser.path = task_files[i]
- scenarios, run_in_parallel, meet_precondition, contexts = \
- parser.parse_task(self.task_id, task_args[i],
- task_args_fnames[i])
-
- self.contexts.extend(contexts)
+ tasks = self._parse_tasks(parser, task_files, args, task_args,
+ task_args_fnames)
- if not meet_precondition:
- LOG.info("meet_precondition is %s, please check envrionment",
- meet_precondition)
+ # Execute task files.
+ for i, _ in enumerate(task_files):
+ one_task_start_time = time.time()
+ self.contexts.extend(tasks[i]['contexts'])
+ if not tasks[i]['meet_precondition']:
+ LOG.info('"meet_precondition" is %s, please check environment',
+ tasks[i]['meet_precondition'])
continue
- case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
try:
- data = self._run(scenarios, run_in_parallel, output_config)
+ data = self._run(tasks[i]['scenarios'],
+ tasks[i]['run_in_parallel'],
+ output_config)
except KeyboardInterrupt:
raise
except Exception: # pylint: disable=broad-except
- LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
- testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+ LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+ exc_info=True)
+ testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+ 'tc_data': []}
else:
- LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
- testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
+ LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+ testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+ 'tc_data': data}
if args.keep_deploy:
# keep deployment, forget about stack
@@ -149,9 +148,8 @@ class Task(object): # pragma: no cover
LOG.info("Total finished in %d secs",
total_end_time - total_start_time)
- scenario = scenarios[0]
- LOG.info("To generate report, execute command "
- "'yardstick report generate %(task_id)s %(tc)s'", scenario)
+ LOG.info('To generate report, execute command "yardstick report '
+ 'generate %(task_id)s <yaml_name>s"', self.task_id)
LOG.info("Task ALL DONE, exiting")
return result
@@ -312,6 +310,30 @@ class Task(object): # pragma: no cover
else:
return op
+ def _parse_tasks(self, parser, task_files, args, task_args,
+ task_args_fnames):
+ tasks = []
+
+ # Parse task_files.
+ for i, _ in enumerate(task_files):
+ parser.path = task_files[i]
+ tasks.append(parser.parse_task(self.task_id, task_args[i],
+ task_args_fnames[i]))
+ tasks[i]['case_name'] = os.path.splitext(
+ os.path.basename(task_files[i]))[0]
+
+ if args.render_only:
+ utils.makedirs(args.render_only)
+ for idx, task in enumerate(tasks):
+ output_file_name = os.path.abspath(os.path.join(
+ args.render_only,
+ '{0:03d}-{1}.yml'.format(idx, task['case_name'])))
+ utils.write_file(output_file_name, task['rendered'])
+
+ sys.exit(0)
+
+ return tasks
+
def run_one_scenario(self, scenario_cfg, output_config):
"""run one scenario using context"""
runner_cfg = scenario_cfg["runner"]
@@ -477,33 +499,42 @@ class TaskParser(object): # pragma: no cover
return valid_task_files, valid_task_args, valid_task_args_fnames
- def parse_task(self, task_id, task_args=None, task_args_file=None):
- """parses the task file and return an context and scenario instances"""
- LOG.info("Parsing task config: %s", self.path)
+ def _render_task(self, task_args, task_args_file):
+ """Render the input task with the given arguments
+ :param task_args: (dict) arguments to render the task
+ :param task_args_file: (str) file containing the arguments to render
+ the task
+ :return: (str) task file rendered
+ """
try:
kw = {}
if task_args_file:
with open(task_args_file) as f:
- kw.update(parse_task_args("task_args_file", f.read()))
- kw.update(parse_task_args("task_args", task_args))
+ kw.update(parse_task_args('task_args_file', f.read()))
+ kw.update(parse_task_args('task_args', task_args))
except TypeError:
- raise TypeError()
+ raise y_exc.TaskRenderArgumentError()
+ input_task = None
try:
with open(self.path) as f:
- try:
- input_task = f.read()
- rendered_task = TaskTemplate.render(input_task, **kw)
- except Exception as e:
- LOG.exception('Failed to render template:\n%s\n', input_task)
- raise e
- LOG.debug("Input task is:\n%s\n", rendered_task)
-
- cfg = yaml_load(rendered_task)
- except IOError as ioerror:
- sys.exit(ioerror)
+ input_task = f.read()
+ rendered_task = task_template.TaskTemplate.render(input_task, **kw)
+ LOG.debug('Input task is:\n%s', rendered_task)
+ parsed_task = yaml_load(rendered_task)
+ except (IOError, OSError):
+ raise y_exc.TaskReadError(task_file=self.path)
+ except Exception:
+ raise y_exc.TaskRenderError(input_task=input_task)
+
+ return parsed_task, rendered_task
+
+ def parse_task(self, task_id, task_args=None, task_args_file=None):
+ """parses the task file and return an context and scenario instances"""
+ LOG.info("Parsing task config: %s", self.path)
+ cfg, rendered = self._render_task(task_args, task_args_file)
self._check_schema(cfg["schema"], "task")
meet_precondition = self._check_precondition(cfg)
@@ -540,7 +571,11 @@ class TaskParser(object): # pragma: no cover
self._change_node_names(scenario, contexts)
# TODO we need something better here, a class that represent the file
- return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
+ return {'scenarios': cfg['scenarios'],
+ 'run_in_parallel': run_in_parallel,
+ 'meet_precondition': meet_precondition,
+ 'contexts': contexts,
+ 'rendered': rendered}
@staticmethod
def _change_node_names(scenario, contexts):
@@ -581,7 +616,7 @@ class TaskParser(object): # pragma: no cover
ctx = next((context for context in contexts
if context.assigned_name == context_name))
except StopIteration:
- raise exceptions.ScenarioConfigContextNameNotFound(
+ raise y_exc.ScenarioConfigContextNameNotFound(
context_name=context_name)
return '{}.{}'.format(node_name, ctx.name)
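
The heart of the refactor is the new TaskParser._render_task(), which concentrates the template handling that parse_task() previously inlined, and maps failures to the new exception classes instead of a bare TypeError or sys.exit(). Stripped of that error handling, the argument precedence and rendering order look roughly like this (a standalone sketch, not the module's verbatim code; file names are assumptions):

    from yardstick.common import task_template
    from yardstick.common.yaml_loader import yaml_load

    def render_task(path, task_args=None, task_args_file=None):
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(yaml_load(f.read()))  # arguments file: lowest precedence
        if task_args:
            kw.update(yaml_load(task_args))     # CLI string overrides the file
        with open(path) as f:
            rendered = task_template.TaskTemplate.render(f.read(), **kw)
        return yaml_load(rendered), rendered    # (parsed_task, rendered_task)

parse_task() then consumes the parsed dict as before, while Task._parse_tasks() keeps the rendered text around so that --render-only can dump it to disk.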
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index e2e8bf67d..a3488a23d 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -7,10 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-""" Handler for yardstick command 'task' """
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.core.task import Task
@@ -42,6 +38,8 @@ class TaskCommands(object): # pragma: no cover
action="store_true")
@cliargs("--parse-only", help="parse the config file and exit",
action="store_true")
+ @cliargs("--render-only", help="Render the tasks files, store the result "
+ "in the directory given and exit", type=str, dest="render_only")
@cliargs("--output-file", help="file where output is stored, default %s" %
output_file_default, default=output_file_default)
@cliargs("--suite", help="process test suite file instead of a task file",
@@ -54,9 +52,8 @@ class TaskCommands(object): # pragma: no cover
LOG.info('Task START')
try:
result = Task().start(param, **kwargs)
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
self._write_error_data(e)
- LOG.exception("")
if result.get('result', {}).get('criteria') == 'PASS':
LOG.info('Task SUCCESS')
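
On the command-line side, --render-only takes the output directory as its value and travels through Param (see the __init__.py hunk above) into Task._parse_tasks(). For example, 'yardstick task start ping.yaml --render-only /tmp/rendered' (task file name illustrative) would write /tmp/rendered/000-ping.yml with the Jinja2 variables substituted, then exit with status 0 before any context is deployed. The broad except is now explicitly whitelisted for pylint, and the empty LOG.exception("") call is dropped.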
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 41d7b8830..68f9995a2 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -83,3 +83,15 @@ class ScenarioConfigContextNameNotFound(YardstickException):
class StackCreationInterrupt(YardstickException):
message = 'Stack create interrupted.'
+
+
+class TaskRenderArgumentError(YardstickException):
+ message = 'Error reading the task input arguments'
+
+
+class TaskReadError(YardstickException):
+ message = 'Failed to read task %(task_file)s'
+
+
+class TaskRenderError(YardstickException):
+ message = 'Failed to render template:\n%(input_task)s'
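
All three new classes rely on the formatting behaviour of the YardstickException base class, whose message attribute is a %-style format string filled from the keyword arguments passed at raise time, the same pattern already used by ScenarioConfigContextNameNotFound above. A minimal illustration:

    from yardstick.common import exceptions as y_exc

    try:
        raise y_exc.TaskReadError(task_file='/tmp/missing.yml')
    except y_exc.TaskReadError as exc:
        # The base class interpolates the kwargs into 'message'.
        print(str(exc))  # -> Failed to read task /tmp/missing.yml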
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 25a7a64b3..253e9d8aa 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -8,15 +8,21 @@
##############################################################################
import copy
+import io
import os
+import sys
import mock
+import six
import unittest
+import uuid
from yardstick.benchmark.contexts import dummy
from yardstick.benchmark.core import task
from yardstick.common import constants as consts
from yardstick.common import exceptions
+from yardstick.common import task_template
+from yardstick.common import utils
class TaskTestCase(unittest.TestCase):
@@ -177,7 +183,6 @@ class TaskTestCase(unittest.TestCase):
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml'))
self.assertEqual(task_files[1], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml'))
-
self.assertIsNone(task_args[0])
self.assertIsNone(task_args[1])
self.assertIsNone(task_args_fnames[0])
@@ -279,6 +284,27 @@ class TaskTestCase(unittest.TestCase):
class TaskParserTestCase(unittest.TestCase):
+ TASK = """
+{% set value1 = value1 or 'var1' %}
+{% set value2 = value2 or 'var2' %}
+key1: {{ value1 }}
+key2:
+ - {{ value2 }}"""
+
+ TASK_RENDERED_1 = u"""
+
+
+key1: var1
+key2:
+ - var2"""
+
+ TASK_RENDERED_2 = u"""
+
+
+key1: var3
+key2:
+ - var4"""
+
def setUp(self):
self.parser = task.TaskParser('fake/path')
self.scenario = {
@@ -362,3 +388,113 @@ class TaskParserTestCase(unittest.TestCase):
self.parser._change_node_names(scenario, [my_context])
self.assertEqual(scenario, expected_scenario)
+
+ def test__parse_tasks(self):
+ task_obj = task.Task()
+ _uuid = uuid.uuid4()
+ task_obj.task_id = _uuid
+ task_files = ['/directory/task_file_name.yml']
+ mock_parser = mock.Mock()
+ mock_parser.parse_task.return_value = {'rendered': 'File content'}
+ mock_args = mock.Mock()
+ mock_args.render_only = False
+
+ tasks = task_obj._parse_tasks(mock_parser, task_files, mock_args,
+ ['arg1'], ['file_arg1'])
+ self.assertEqual(
+ [{'rendered': 'File content', 'case_name': 'task_file_name'}],
+ tasks)
+ mock_parser.parse_task.assert_called_once_with(
+ _uuid, 'arg1', 'file_arg1')
+
+ @mock.patch.object(sys, 'exit')
+ @mock.patch.object(utils, 'write_file')
+ @mock.patch.object(utils, 'makedirs')
+ def test__parse_tasks_render_only(self, mock_makedirs, mock_write_file,
+ mock_exit):
+ task_obj = task.Task()
+ _uuid = uuid.uuid4()
+ task_obj.task_id = _uuid
+ task_files = ['/directory/task_file_name.yml']
+ mock_parser = mock.Mock()
+ mock_parser.parse_task.return_value = {'rendered': 'File content'}
+ mock_args = mock.Mock()
+ mock_args.render_only = '/output_directory'
+
+ task_obj._parse_tasks(mock_parser, task_files, mock_args,
+ ['arg1'], ['file_arg1'])
+ mock_makedirs.assert_called_once_with('/output_directory')
+ mock_write_file.assert_called_once_with(
+ '/output_directory/000-task_file_name.yml', 'File content')
+ mock_exit.assert_called_once_with(0)
+
+ def test__render_task_no_args(self):
+ task_parser = task.TaskParser('task_file')
+ task_str = io.StringIO(six.text_type(self.TASK))
+ with mock.patch.object(six.moves.builtins, 'open',
+ return_value=task_str) as mock_open:
+ parsed, rendered = task_parser._render_task(None, None)
+
+ self.assertEqual(self.TASK_RENDERED_1, rendered)
+ self.assertEqual({'key1': 'var1', 'key2': ['var2']}, parsed)
+ mock_open.assert_called_once_with('task_file')
+
+ def test__render_task_arguments(self):
+ task_parser = task.TaskParser('task_file')
+ task_str = io.StringIO(six.text_type(self.TASK))
+ with mock.patch.object(six.moves.builtins, 'open',
+ return_value=task_str) as mock_open:
+ parsed, rendered = task_parser._render_task('value1: "var1"', None)
+
+ self.assertEqual(self.TASK_RENDERED_1, rendered)
+ self.assertEqual({'key1': 'var1', 'key2': ['var2']}, parsed)
+ mock_open.assert_called_once_with('task_file')
+
+ def test__render_task_file_arguments(self):
+ task_parser = task.TaskParser('task_file')
+ with mock.patch.object(six.moves.builtins, 'open') as mock_open:
+ mock_open.side_effect = (
+ io.StringIO(six.text_type('value2: var4')),
+ io.StringIO(six.text_type(self.TASK))
+ )
+ parsed, rendered = task_parser._render_task('value1: "var3"',
+ 'args_file')
+
+ self.assertEqual(self.TASK_RENDERED_2, rendered)
+ self.assertEqual({'key1': 'var3', 'key2': ['var4']}, parsed)
+ mock_open.assert_has_calls([mock.call('args_file'),
+ mock.call('task_file')])
+
+ def test__render_task_error_arguments(self):
+ with self.assertRaises(exceptions.TaskRenderArgumentError):
+ task.TaskParser('task_file')._render_task('value1="var3"', None)
+
+ def test__render_task_error_task_file(self):
+ task_parser = task.TaskParser('task_file')
+ with mock.patch.object(six.moves.builtins, 'open') as mock_open:
+ mock_open.side_effect = (
+ io.StringIO(six.text_type('value2: var4')),
+ IOError()
+ )
+ with self.assertRaises(exceptions.TaskReadError):
+ task_parser._render_task('value1: "var3"', 'args_file')
+
+ mock_open.assert_has_calls([mock.call('args_file'),
+ mock.call('task_file')])
+
+ def test__render_task_render_error(self):
+ task_parser = task.TaskParser('task_file')
+ with mock.patch.object(six.moves.builtins, 'open') as mock_open, \
+ mock.patch.object(task_template.TaskTemplate, 'render',
+ side_effect=TypeError) as mock_render:
+ mock_open.side_effect = (
+ io.StringIO(six.text_type('value2: var4')),
+ io.StringIO(six.text_type(self.TASK))
+ )
+ with self.assertRaises(exceptions.TaskRenderError):
+ task_parser._render_task('value1: "var3"', 'args_file')
+
+ mock_open.assert_has_calls([mock.call('args_file'),
+ mock.call('task_file')])
+ mock_render.assert_has_calls(
+ [mock.call(self.TASK, value1='var3', value2='var4')])
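
A pattern worth calling out in these tests: handing mock_open.side_effect a sequence of StringIO objects makes successive open() calls return successive in-memory "files", which is how the arguments file and the task file are served in order. Reduced to its essentials (a standalone sketch, not part of the patch):

    import io

    import mock
    import six

    with mock.patch.object(six.moves.builtins, 'open') as mock_open:
        mock_open.side_effect = (io.StringIO(u'args file'),
                                 io.StringIO(u'task file'))
        assert open('first').read() == u'args file'   # first open() call
        assert open('second').read() == u'task file'  # second open() call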