Diffstat (limited to 'yardstick/benchmark/core/task.py')
-rw-r--r--  yardstick/benchmark/core/task.py  119
1 file changed, 72 insertions(+), 47 deletions(-)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 955b8cae2..bcca3558f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -11,6 +11,7 @@ import sys
import os
from collections import OrderedDict
+import six
import yaml
import atexit
import ipaddress
@@ -22,7 +23,8 @@ import collections
from six.moves import filter
from jinja2 import Environment
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base as base_context
from yardstick.benchmark.runners import base as base_runner
from yardstick.common.constants import CONF_FILE
from yardstick.common.yaml_loader import yaml_load
@@ -112,9 +114,9 @@ class Task(object): # pragma: no cover
continue
try:
- data = self._run(tasks[i]['scenarios'],
- tasks[i]['run_in_parallel'],
- output_config)
+ success, data = self._run(tasks[i]['scenarios'],
+ tasks[i]['run_in_parallel'],
+ output_config)
except KeyboardInterrupt:
raise
except Exception: # pylint: disable=broad-except
@@ -123,9 +125,15 @@ class Task(object): # pragma: no cover
testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
'tc_data': []}
else:
- LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
- testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
- 'tc_data': data}
+ if success:
+ LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+ testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+ 'tc_data': data}
+ else:
+ LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+ exc_info=True)
+ testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+ 'tc_data': data}
if args.keep_deploy:
# keep deployment, forget about stack
@@ -240,6 +248,7 @@ class Task(object): # pragma: no cover
background_runners = []
+ task_success = True
result = []
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
@@ -258,8 +267,8 @@ class Task(object): # pragma: no cover
for runner in runners:
status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
- raise RuntimeError(
- "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.error("%s runner status %s", runner.__execution_type__, status)
+ task_success = False
LOG.info("Runner ended")
else:
# run serially
@@ -271,8 +280,8 @@ class Task(object): # pragma: no cover
LOG.error('Scenario NO.%s: "%s" ERROR!',
scenarios.index(scenario) + 1,
scenario.get('type'))
- raise RuntimeError(
- "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.error("%s runner status %s", runner.__execution_type__, status)
+ task_success = False
LOG.info("Runner ended")
# Abort background runners
@@ -289,7 +298,7 @@ class Task(object): # pragma: no cover
base_runner.Runner.release(runner)
print("Background task ended")
- return result
+ return task_success, result
def atexit_handler(self):
"""handler for process termination"""
@@ -305,7 +314,7 @@ class Task(object): # pragma: no cover
return {k: self._parse_options(v) for k, v in op.items()}
elif isinstance(op, list):
return [self._parse_options(v) for v in op]
- elif isinstance(op, str):
+ elif isinstance(op, six.string_types):
return self.outputs.get(op[1:]) if op.startswith('$') else op
else:
return op
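The isinstance check moves to six.string_types because option values loaded from YAML can be unicode objects under Python 2, which are not instances of str, so '$'-prefixed output references would previously not be resolved there. A minimal illustration (not part of the patch):

    import six

    value = u'$host_ip'                   # a unicode option value, as may come from YAML on Python 2
    isinstance(value, str)                # False on Python 2, True on Python 3
    isinstance(value, six.string_types)   # True on both interpreters
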
@@ -352,7 +361,7 @@ class Task(object): # pragma: no cover
if is_ip_addr(target):
context_cfg['target'] = {"ipaddr": target}
else:
- context_cfg['target'] = Context.get_server(target)
+ context_cfg['target'] = base_context.Context.get_server(target)
if self._is_same_context(cfg["host"], target):
context_cfg['target']["ipaddr"] = context_cfg['target']["private_ip"]
else:
@@ -360,7 +369,7 @@ class Task(object): # pragma: no cover
host_name = server_name.get('host', scenario_cfg.get('host'))
if host_name:
- context_cfg['host'] = Context.get_server(host_name)
+ context_cfg['host'] = base_context.Context.get_server(host_name)
for item in [server_name, scenario_cfg]:
try:
@@ -377,7 +386,8 @@ class Task(object): # pragma: no cover
ip_list.append(target)
context_cfg['target'] = {}
else:
- context_cfg['target'] = Context.get_server(target)
+ context_cfg['target'] = (
+ base_context.Context.get_server(target))
if self._is_same_context(scenario_cfg["host"],
target):
ip_list.append(context_cfg["target"]["private_ip"])
@@ -405,7 +415,8 @@ class Task(object): # pragma: no cover
with attribute name mapping when using external heat templates
"""
for context in self.contexts:
- if context.__context_type__ not in {"Heat", "Kubernetes"}:
+ if context.__context_type__ not in {contexts.CONTEXT_HEAT,
+ contexts.CONTEXT_KUBERNETES}:
continue
host = context._get_server(host_attr)
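The hard-coded context type strings are replaced with constants imported from yardstick.benchmark.contexts. A sketch of what those module-level constants are assumed to look like (illustrative; the actual definitions live in the contexts package):

    CONTEXT_DUMMY = "Dummy"
    CONTEXT_HEAT = "Heat"
    CONTEXT_KUBERNETES = "Kubernetes"
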
@@ -546,19 +557,19 @@ class TaskParser(object): # pragma: no cover
elif "contexts" in cfg:
context_cfgs = cfg["contexts"]
else:
- context_cfgs = [{"type": "Dummy"}]
+ context_cfgs = [{"type": contexts.CONTEXT_DUMMY}]
- contexts = []
+ _contexts = []
for cfg_attrs in context_cfgs:
cfg_attrs['task_id'] = task_id
# default to Heat context because we are testing OpenStack
- context_type = cfg_attrs.get("type", "Heat")
- context = Context.get(context_type)
+ context_type = cfg_attrs.get("type", contexts.CONTEXT_HEAT)
+ context = base_context.Context.get(context_type)
context.init(cfg_attrs)
# Update the name in case the context has used the name_suffix
cfg_attrs['name'] = context.name
- contexts.append(context)
+ _contexts.append(context)
run_in_parallel = cfg.get("run_in_parallel", False)
@@ -571,17 +582,17 @@ class TaskParser(object): # pragma: no cover
# relative to task path
scenario["task_path"] = os.path.dirname(self.path)
- self._change_node_names(scenario, contexts)
+ self._change_node_names(scenario, _contexts)
# TODO we need something better here, a class that represent the file
return {'scenarios': cfg['scenarios'],
'run_in_parallel': run_in_parallel,
'meet_precondition': meet_precondition,
- 'contexts': contexts,
+ 'contexts': _contexts,
'rendered': rendered}
@staticmethod
- def _change_node_names(scenario, contexts):
+ def _change_node_names(scenario, _contexts):
"""Change the node names in a scenario, depending on the context config
The nodes (VMs or physical servers) are referred in the context section
@@ -610,29 +621,34 @@ class TaskParser(object): # pragma: no cover
scenario:
nodes:
- tg__0: tg_0.yardstick
+ tg__0: trafficgen_0.yardstick
vnf__0: vnf_0.yardstick
+
+ scenario:
+ nodes:
+ tg__0:
+ name: trafficgen_0.yardstick
+ public_ip_attr: "server1_public_ip"
+ private_ip_attr: "server1_private_ip"
+ vnf__0:
+ name: vnf_0.yardstick
+ public_ip_attr: "server2_public_ip"
+ private_ip_attr: "server2_private_ip"
+ NOTE: in Kubernetes context, the separator character between the server
+ name and the context name is "-":
+ scenario:
+ host: host-k8s
+ target: target-k8s
"""
def qualified_name(name):
- try:
- # for openstack
- node_name, context_name = name.split('.')
- sep = '.'
- except ValueError:
- # for kubernetes, some kubernetes resources don't support
- # name format like 'xxx.xxx', so we use '-' instead
- # need unified later
- node_name, context_name = name.split('-')
- sep = '-'
+ for context in _contexts:
+ host_name, ctx_name = context.split_host_name(name)
+ if context.assigned_name == ctx_name:
+ return '{}{}{}'.format(host_name,
+ context.host_name_separator,
+ context.name)
- try:
- ctx = next((context for context in contexts
- if context.assigned_name == context_name))
- except StopIteration:
- raise y_exc.ScenarioConfigContextNameNotFound(
- context_name=context_name)
-
- return '{}{}{}'.format(node_name, sep, ctx.name)
+ raise y_exc.ScenarioConfigContextNameNotFound(host_name=name)
if 'host' in scenario:
scenario['host'] = qualified_name(scenario['host'])
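The rewritten qualified_name() no longer guesses the separator by catching ValueError; it asks each configured context to split the name and matches on assigned_name, so OpenStack ('.') and Kubernetes ('-') naming is handled by the contexts themselves. A hypothetical sketch of the Context-side helpers this relies on (the real implementations live in yardstick.benchmark.contexts.base and its subclasses):

    class Context(object):
        host_name_separator = '.'   # the Kubernetes context overrides this with '-'

        def split_host_name(self, name):
            # Return (host_name, context_name) when the separator is present,
            # otherwise (None, None) so qualified_name() tries the next context.
            if self.host_name_separator in name:
                return tuple(name.split(self.host_name_separator, 1))
            return None, None
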
@@ -649,7 +665,15 @@ class TaskParser(object): # pragma: no cover
scenario['targets'][idx] = qualified_name(target)
if 'nodes' in scenario:
for scenario_node, target in scenario['nodes'].items():
- scenario['nodes'][scenario_node] = qualified_name(target)
+ if isinstance(target, collections.Mapping):
+ # Update node info on scenario with context info
+ # Just update the node name with context
+ # Append context information
+ target['name'] = qualified_name(target['name'])
+ # Then update node
+ scenario['nodes'][scenario_node] = target
+ else:
+ scenario['nodes'][scenario_node] = qualified_name(target)
def _check_schema(self, cfg_schema, schema_type):
"""Check if config file is using the correct schema type"""
@@ -716,7 +740,8 @@ def _is_background_scenario(scenario):
def parse_nodes_with_context(scenario_cfg):
"""parse the 'nodes' fields in scenario """
# ensure consistency in node instantiation order
- return OrderedDict((nodename, Context.get_server(scenario_cfg["nodes"][nodename]))
+ return OrderedDict((nodename, base_context.Context.get_server(
+ scenario_cfg["nodes"][nodename]))
for nodename in sorted(scenario_cfg["nodes"]))
@@ -732,7 +757,7 @@ def get_networks_from_nodes(nodes):
network_name = interface.get('network_name')
if not network_name:
continue
- network = Context.get_network(network_name)
+ network = base_context.Context.get_network(network_name)
if network:
networks[network['name']] = network
return networks