Diffstat (limited to 'yardstick/benchmark')
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/__init__.py  24
-rw-r--r--  yardstick/benchmark/scenarios/availability/actionplayers.py  54
-rw-r--r--  yardstick/benchmark/scenarios/availability/actionrollbackers.py  45
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py  106
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/__init__.py  0
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py  88
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py  107
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker_conf.yaml  11
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py  68
-rw-r--r--  yardstick/benchmark/scenarios/compute/cpuload.py  71
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py  229
11 files changed, 789 insertions, 14 deletions
diff --git a/yardstick/benchmark/scenarios/availability/__init__.py b/yardstick/benchmark/scenarios/availability/__init__.py
index e69de29bb..c3b3aae30 100755
--- a/yardstick/benchmark/scenarios/availability/__init__.py
+++ b/yardstick/benchmark/scenarios/availability/__init__.py
@@ -0,0 +1,24 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ActionType:
+ ATTACKER = "attacker"
+ MONITOR = "monitor"
+ RESULTCHECKER = "resultchecker"
+ OPERATION = "operation"
+
+
+class Condition:
+ EQUAL = "eq"
+ GREATERTHAN = "gt"
+ GREATERTHANEQUAL = "gt_eq"
+ LESSTHAN = "lt"
+ LESSTHANEQUAL = "lt_eq"
+ IN = "in"
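For illustration, each Condition value above corresponds to one comparison. A
minimal sketch of dispatching on them (the CONDITION_OPS table and check()
helper are illustrative, not part of this patch; GeneralResultChecker later in
this diff implements the same dispatch with an if/elif chain):

    import operator

    # Keys match the Condition constants defined above.
    CONDITION_OPS = {
        "eq": operator.eq,
        "gt": operator.gt,
        "gt_eq": operator.ge,
        "lt": operator.lt,
        "lt_eq": operator.le,
        "in": lambda actual, expected: expected in actual,
    }

    def check(condition, actual, expected):
        """Return True if actual satisfies condition against expected."""
        return CONDITION_OPS[condition](actual, expected)

    assert check("gt_eq", 3, 3)
    assert check("in", "nova-api is running", "running")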
diff --git a/yardstick/benchmark/scenarios/availability/actionplayers.py b/yardstick/benchmark/scenarios/availability/actionplayers.py
new file mode 100644
index 000000000..420626413
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/actionplayers.py
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ActionPlayer(object):
+ """
+ Abstract the action functions of attacker,
+    monitor, operation, resultchecker and maybe others in the future
+ """
+
+ def action(self):
+ pass
+
+
+class AttackerPlayer(ActionPlayer):
+
+ def __init__(self, attacker):
+ self.underlyingAttacker = attacker
+
+ def action(self):
+ self.underlyingAttacker.inject_fault()
+
+
+class OperationPlayer(ActionPlayer):
+
+ def __init__(self, operation):
+ self.underlyingOperation = operation
+
+ def action(self):
+ self.underlyingOperation.run()
+
+
+class MonitorPlayer(ActionPlayer):
+
+ def __init__(self, monitor):
+ self.underlyingmonitor = monitor
+
+ def action(self):
+ self.underlyingmonitor.start_monitor()
+
+
+class ResultCheckerPlayer(ActionPlayer):
+
+ def __init__(self, resultChecker):
+ self.underlyingresultChecker = resultChecker
+
+ def action(self):
+ self.underlyingresultChecker.verify()
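The players above are thin adapters: each wraps one configured entity and
exposes the single action() call the Director drives. A usage sketch (the
DummyMonitor class is a stand-in, not part of this patch):

    class DummyMonitor(object):
        def start_monitor(self):
            print("monitoring started")

    # The Director wraps each configured entity in its matching player and
    # from then on only calls the uniform action() interface.
    player = MonitorPlayer(DummyMonitor())
    player.action()  # prints "monitoring started"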
diff --git a/yardstick/benchmark/scenarios/availability/actionrollbackers.py b/yardstick/benchmark/scenarios/availability/actionrollbackers.py
new file mode 100644
index 000000000..4b732a10c
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/actionrollbackers.py
@@ -0,0 +1,45 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+class ActionRollbacker(object):
+ """
+ Abstract the rollback functions of attacker, operation
+    and maybe others in the future
+ """
+
+ def rollback(self):
+ pass
+
+
+class AttackerRollbacker(ActionRollbacker):
+
+ def __init__(self, attacker):
+ self.underlyingAttacker = attacker
+
+ def rollback(self):
+ LOG.debug(
+ "\033[93m recovering attacker %s \033[0m"
+ % (self.underlyingAttacker.key))
+ self.underlyingAttacker.recover()
+
+
+class OperationRollbacker(ActionRollbacker):
+
+ def __init__(self, operation):
+ self.underlyingOperation = operation
+
+ def rollback(self):
+ LOG.debug(
+ "\033[93m rollback operation %s \033[0m"
+ % (self.underlyingOperation.key))
+ self.underlyingOperation.rollback()
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
new file mode 100644
index 000000000..267933dd0
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -0,0 +1,106 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+from yardstick.benchmark.scenarios.availability.operation import baseoperation
+from yardstick.benchmark.scenarios.availability.result_checker \
+ import baseresultchecker
+from yardstick.benchmark.scenarios.availability import ActionType
+from yardstick.benchmark.scenarios.availability import actionplayers
+from yardstick.benchmark.scenarios.availability import actionrollbackers
+
+LOG = logging.getLogger(__name__)
+
+
+class Director(object):
+ """
+    Director is used to direct a test scenario,
+ including the creation of action players, test result verification
+ and rollback of actions.
+ """
+
+ def __init__(self, scenario_cfg, context_cfg):
+
+        # A stack storing Rollbackers that will be called after
+        # all action players finish.
+ self.executionSteps = []
+
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ nodes = self.context_cfg.get("nodes", None)
+ # setup attackers
+ if "attackers" in self.scenario_cfg["options"]:
+ LOG.debug("start init attackers...")
+ attacker_cfgs = self.scenario_cfg["options"]["attackers"]
+ self.attackerMgr = baseattacker.AttackerMgr()
+ self.attackerMgr.init_attackers(attacker_cfgs, nodes)
+ # setup monitors
+ if "monitors" in self.scenario_cfg["options"]:
+ LOG.debug("start init monitors...")
+ monitor_cfgs = self.scenario_cfg["options"]["monitors"]
+ self.monitorMgr = basemonitor.MonitorMgr()
+ self.monitorMgr.init_monitors(monitor_cfgs, nodes)
+ # setup operations
+ if "operations" in self.scenario_cfg["options"]:
+ LOG.debug("start init operations...")
+ operation_cfgs = self.scenario_cfg["options"]["operations"]
+ self.operationMgr = baseoperation.OperationMgr()
+ self.operationMgr.init_operations(operation_cfgs, nodes)
+ # setup result checker
+ if "resultCheckers" in self.scenario_cfg["options"]:
+ LOG.debug("start init resultCheckers...")
+ result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"]
+ self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
+ self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
+
+ def createActionPlayer(self, type, key):
+ LOG.debug(
+ "the type of current action is %s, the key is %s" % (type, key))
+ if type == ActionType.ATTACKER:
+ return actionplayers.AttackerPlayer(self.attackerMgr[key])
+ if type == ActionType.MONITOR:
+ return actionplayers.MonitorPlayer(self.monitorMgr[key])
+ if type == ActionType.RESULTCHECKER:
+ return actionplayers.ResultCheckerPlayer(
+ self.resultCheckerMgr[key])
+ if type == ActionType.OPERATION:
+ return actionplayers.OperationPlayer(self.operationMgr[key])
+ LOG.debug("something run when creatactionplayer")
+
+ def createActionRollbacker(self, type, key):
+ LOG.debug(
+ "the type of current action is %s, the key is %s" % (type, key))
+ if type == ActionType.ATTACKER:
+ return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
+ if type == ActionType.OPERATION:
+ return actionrollbackers.OperationRollbacker(
+ self.operationMgr[key])
+ LOG.debug("no rollbacker created for %s" % (key))
+
+ def verify(self):
+ result = True
+ if hasattr(self, 'monitorMgr'):
+ result &= self.monitorMgr.verify_SLA()
+ if hasattr(self, 'resultCheckerMgr'):
+ result &= self.resultCheckerMgr.verify()
+ if result:
+ LOG.debug("monitors are passed")
+ return result
+
+ def stopMonitors(self):
+ if "monitors" in self.scenario_cfg["options"]:
+ self.monitorMgr.wait_monitors()
+
+ def knockoff(self):
+ LOG.debug("knock off ....")
+ while self.executionSteps:
+ singleStep = self.executionSteps.pop()
+ singleStep.rollback()
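executionSteps behaves as a LIFO stack: a rollbacker is appended after each
step succeeds, and knockoff() pops them so recovery runs in reverse order of
execution. A small sketch of that contract (the Recorder class is illustrative
only):

    class Recorder(object):
        def __init__(self, name, log):
            self.name, self.log = name, log

        def rollback(self):
            self.log.append(self.name)

    log = []
    steps = [Recorder("attack-1", log), Recorder("operation-2", log)]
    # knockoff() pops from the end, so rollback order is the reverse
    # of execution order:
    while steps:
        steps.pop().rollback()
    assert log == ["operation-2", "attack-1"]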
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/__init__.py b/yardstick/benchmark/scenarios/availability/result_checker/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
new file mode 100644
index 000000000..1bdb9f2c2
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
@@ -0,0 +1,88 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import yaml
+import logging
+import os
+
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+resultchecker_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "result_checker_conf.yaml")
+
+
+class ResultCheckerMgr(object):
+
+ def __init__(self):
+ self._result_checker_list = []
+
+ def init_ResultChecker(self, resultchecker_cfgs, context):
+ LOG.debug("resultcheckerMgr confg: %s" % resultchecker_cfgs)
+
+ for cfg in resultchecker_cfgs:
+ resultchecker_type = cfg['checker_type']
+ resultchecker_cls = BaseResultChecker.get_resultchecker_cls(
+ resultchecker_type)
+ resultchecker_ins = resultchecker_cls(cfg, context)
+ resultchecker_ins.key = cfg['key']
+ resultchecker_ins.setup()
+ self._result_checker_list.append(resultchecker_ins)
+
+ def __getitem__(self, item):
+ for obj in self._result_checker_list:
+            if obj.key == item:
+ return obj
+ raise KeyError("No such result checker instance of key - %s" % item)
+
+ def verify(self):
+ result = True
+ for obj in self._result_checker_list:
+ result &= obj.success
+ return result
+
+
+class BaseResultChecker(object):
+
+ resultchecker_cfgs = {}
+
+ def __init__(self, config, context):
+ if not BaseResultChecker.resultchecker_cfgs:
+ with open(resultchecker_conf_path) as stream:
+ BaseResultChecker.resultchecker_cfgs = yaml.load(stream)
+ self.actualResult = object()
+ self.expectedResult = object()
+ self.success = False
+
+ self._config = config
+ self._context = context
+ self.setup_done = False
+
+ @staticmethod
+ def get_resultchecker_cls(type):
+        '''return resultchecker class of specified type'''
+ resultchecker_type = type
+ for checker_cls in utils.itersubclasses(BaseResultChecker):
+ if resultchecker_type == checker_cls.__result_checker__type__:
+ return checker_cls
+        raise RuntimeError("No such checker type %s" % resultchecker_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(resultchecker_conf_path)
+ return os.path.join(base_path, path)
+
+ def setup(self):
+ pass
+
+ def verify(self):
+        if self.actualResult == self.expectedResult:
+ self.success = True
+ return self.success
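get_resultchecker_cls() resolves a checker type string to a class by walking
the subclasses of BaseResultChecker and matching __result_checker__type__. A
self-contained sketch of that registry pattern (the local itersubclasses below
stands in for yardstick.common.utils.itersubclasses, whose implementation is
not part of this diff):

    def itersubclasses(cls):
        # Depth-first walk over all (transitive) subclasses.
        for sub in cls.__subclasses__():
            yield sub
            for subsub in itersubclasses(sub):
                yield subsub

    class Base(object):
        pass

    class GeneralChecker(Base):
        __result_checker__type__ = "general-result-checker"

    def get_cls(wanted):
        for cls in itersubclasses(Base):
            if getattr(cls, "__result_checker__type__", None) == wanted:
                return cls
        raise RuntimeError("No such checker type %s" % wanted)

    assert get_cls("general-result-checker") is GeneralChecker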
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
new file mode 100644
index 000000000..70bf9aea6
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from baseresultchecker import BaseResultChecker
+from yardstick.benchmark.scenarios.availability import Condition
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios.availability.util import buildshellparams
+
+LOG = logging.getLogger(__name__)
+
+
+class GeneralResultChecker(BaseResultChecker):
+
+ __result_checker__type__ = "general-result-checker"
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.key = self._config['key']
+ self.type = self._config['checker_type']
+ self.condition = self._config['condition']
+ self.expectedResult = self._config['expectedValue']
+ self.actualResult = object()
+
+        if "parameter" in self._config:
+            parameter = self._config['parameter']
+            format_str = buildshellparams(parameter)
+            parameter_values = list(parameter.values())
+            self.shell_cmd = format_str.format(*parameter_values)
+
+ self.resultchecker_cfgs = BaseResultChecker.resultchecker_cfgs.get(
+ self.key)
+ self.verify_script = self.get_script_fullpath(
+ self.resultchecker_cfgs['verify_script'])
+
+ def verify(self):
+ if "parameter" in self._config:
+ exit_status, stdout, stderr = self.connection.execute(
+ self.shell_cmd,
+ stdin=open(self.verify_script, "r"))
+ LOG.debug("action script of the operation is: {0}"
+ .format(self.verify_script))
+ LOG.debug("action parameter the of operation is: {0}"
+ .format(self.shell_cmd))
+ else:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/bash -s ",
+ stdin=open(self.verify_script, "r"))
+ LOG.debug("action script of the operation is: {0}"
+ .format(self.verify_script))
+
+ LOG.debug("exit_status ,stdout : {0} ,{1}".format(exit_status, stdout))
+ if exit_status == 0 and stdout:
+ self.actualResult = stdout
+ LOG.debug("verifying resultchecker: {0}".format(self.key))
+ LOG.debug("verifying resultchecker,expected: {0}"
+ .format(self.expectedResult))
+ LOG.debug("verifying resultchecker,actual: {0}"
+ .format(self.actualResult))
+ LOG.debug("verifying resultchecker,condition: {0}"
+ .format(self.condition))
+            if isinstance(self.expectedResult, int):
+ self.actualResult = int(self.actualResult)
+ if self.condition == Condition.EQUAL:
+ self.success = self.actualResult == self.expectedResult
+ elif self.condition == Condition.GREATERTHAN:
+ self.success = self.actualResult > self.expectedResult
+ elif self.condition == Condition.GREATERTHANEQUAL:
+ self.success = self.actualResult >= self.expectedResult
+ elif self.condition == Condition.LESSTHANEQUAL:
+ self.success = self.actualResult <= self.expectedResult
+ elif self.condition == Condition.LESSTHAN:
+ self.success = self.actualResult < self.expectedResult
+ elif self.condition == Condition.IN:
+ self.success = self.expectedResult in self.actualResult
+ else:
+ self.success = False
+                LOG.debug(
+                    "error for resultchecker {0}: invalid condition"
+                    .format(self.key))
+ else:
+ self.success = False
+            LOG.debug(
+                "error while resultchecker {0} was verifying the result"
+                .format(self.key))
+ LOG.error(stderr)
+
+        LOG.debug(
+            "verifying resultchecker: {0}, the result is: {1}"
+            .format(self.key, self.success))
+ return self.success
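The shell_cmd built in setup() assumes buildshellparams() (imported from
availability/util.py, which is not shown in this diff) returns a format string
with one positional placeholder per parameter value. A hypothetical sketch of
that assumed contract:

    # Hypothetical stand-in for availability/util.py's buildshellparams().
    def buildshellparams(param):
        return '/bin/bash -s -- ' + \
            ' '.join('{%d}' % i for i in range(len(param)))

    parameter = {"process_name": "nova-api"}
    shell_cmd = buildshellparams(parameter).format(*parameter.values())
    # shell_cmd == "/bin/bash -s -- nova-api"; verify() then feeds the
    # verify script to this command on stdin.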
diff --git a/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
new file mode 100644
index 000000000..638c39a6e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
@@ -0,0 +1,11 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+process-checker:
+ verify_script: ha_tools/check_process_python.bash
+service-checker:
+ verify_script: ha_tools/check_service.bash
+nova-instance-checker:
+  verify_script: ha_tools/nova/show_instances.bash
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
new file mode 100644
index 000000000..0a128aa09
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import traceback
+
+from yardstick.benchmark.scenarios import base
+from yardstick.benchmark.scenarios.availability.director import Director
+
+LOG = logging.getLogger(__name__)
+
+
+class ScenarioGeneral(base.Scenario):
+ """Support orchestrating general HA test scenarios."""
+
+ __scenario_type__ = "GeneralHA"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ LOG.debug(
+ "scenario_cfg:%s context_cfg:%s" % (scenario_cfg, context_cfg))
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ def setup(self):
+ self.director = Director(self.scenario_cfg, self.context_cfg)
+
+ def run(self, args):
+ steps = self.scenario_cfg["options"]["steps"]
+ orderedSteps = sorted(steps, key=lambda x: x['index'])
+ for step in orderedSteps:
+ LOG.debug(
+ "\033[94m running step: {0} .... \033[0m"
+ .format(orderedSteps.index(step)+1))
+ try:
+ actionPlayer = self.director.createActionPlayer(
+ step['actionType'], step['actionKey'])
+ actionPlayer.action()
+ actionRollbacker = self.director.createActionRollbacker(
+ step['actionType'], step['actionKey'])
+ if actionRollbacker:
+ self.director.executionSteps.append(actionRollbacker)
+            except Exception as e:
+                LOG.debug(str(e))
+ traceback.print_exc()
+                LOG.debug(
+                    "\033[91m exception when running step: {0} .... \033[0m"
+                    .format(orderedSteps.index(step) + 1))
+ break
+ finally:
+ pass
+
+ self.director.stopMonitors()
+ if self.director.verify():
+            LOG.debug(
+                "\033[92m congratulations, "
+                "the test case scenario passed! \033[0m")
+ else:
+            LOG.debug(
+                "\033[91m oh, the test case scenario failed, "
+                "please check the detailed debug information! \033[0m")
+
+ def teardown(self):
+ self.director.knockoff()
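ScenarioGeneral expects scenario_cfg["options"] to define the entities by key
and an indexed list of steps referencing them. An illustrative shape of that
dictionary (the key names follow the code above; the concrete values are made
up):

    options = {
        "attackers": [
            {"fault_type": "kill-process", "key": "kill-nova-api"}],
        "monitors": [
            {"monitor_type": "openstack-cmd", "key": "list-flavors"}],
        "steps": [
            # played in ascending 'index' order by run()
            {"index": 1, "actionType": "monitor",
             "actionKey": "list-flavors"},
            {"index": 2, "actionType": "attacker",
             "actionKey": "kill-nova-api"},
        ],
    }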
diff --git a/yardstick/benchmark/scenarios/compute/cpuload.py b/yardstick/benchmark/scenarios/compute/cpuload.py
index d11bec5c3..f45313e91 100644
--- a/yardstick/benchmark/scenarios/compute/cpuload.py
+++ b/yardstick/benchmark/scenarios/compute/cpuload.py
@@ -36,13 +36,17 @@ class CPULoad(base.Scenario):
on the Linux host.
Parameters
- interval - Time interval to measure CPU usage. A value of 0
- indicates that processors statistics are to be
- reported for the time since system startup (boot)
+ interval - Time interval to measure CPU usage.
type: [int]
unit: seconds
- default: 0
+ default: 1
+
+    count (for mpstat only) - Number of CPU usage measurements.
+
+ type: [int]
+ unit: N/A
+ default: 1
"""
@@ -56,6 +60,7 @@ class CPULoad(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.has_mpstat = False
+ self.has_count = False
def setup(self):
"""Scenario setup."""
@@ -77,10 +82,13 @@ class CPULoad(base.Scenario):
LOG.info("MPSTAT is installed")
self.has_mpstat = True
- if 'options' in self.scenario_cfg:
- self.interval = self.scenario_cfg['options'].get("interval", 0)
+ options = self.scenario_cfg['options']
+ self.interval = options.get("interval", 1)
+ if 'count' in options:
+ self.count = options.get("count", 1)
+ self.has_count = True
else:
- self.interval = 0
+ self.has_count = False
self.setup_done = True
@@ -99,15 +107,17 @@ class CPULoad(base.Scenario):
def _get_cpu_usage_mpstat(self):
"""Get processor usage using mpstat."""
- if self.interval > 0:
- cmd = "mpstat -P ON %s 1" % self.interval
+ if self.interval > 0 and self.has_count:
+ cmd = "mpstat -P ON %s %s" % (self.interval, self.count)
else:
- cmd = "mpstat -P ON"
+ cmd = "mpstat -P ON %s 1" % self.interval
result = self._execute_command(cmd)
fields = []
- mpstat = {}
+ maximum = {}
+ minimum = {}
+ average = {}
time_marker = re.compile("^([0-9]+):([0-9]+):([0-9]+)$")
ampm_marker = re.compile("(AM|PM)$")
@@ -117,7 +127,6 @@ class CPULoad(base.Scenario):
line = row.split()
if line and re.match(time_marker, line[0]):
-
if re.match(ampm_marker, line[1]):
del line[:2]
else:
@@ -134,11 +143,45 @@ class CPULoad(base.Scenario):
cpu = 'cpu' if line[0] == 'all' else 'cpu' + line[0]
values = line[1:]
if values and len(values) == len(fields):
- mpstat[cpu] = dict(zip(fields, values))
+ temp_dict = dict(zip(fields, values))
+ if cpu not in maximum:
+                            maximum[cpu] = temp_dict.copy()
+ else:
+ for item in temp_dict:
+ if float(maximum[cpu][item]) <\
+ float(temp_dict[item]):
+ maximum[cpu][item] = temp_dict[item]
+
+ if cpu not in minimum:
+                            minimum[cpu] = temp_dict.copy()
+ else:
+ for item in temp_dict:
+ if float(minimum[cpu][item]) >\
+ float(temp_dict[item]):
+ minimum[cpu][item] = temp_dict[item]
else:
raise RuntimeError("mpstat: parse error", fields, line)
- return {'mpstat': mpstat}
+ elif line and line[0] == 'Average:':
+ del line[:1]
+ if line[0] == 'CPU':
+ # header fields
+ fields = line[1:]
+ if len(fields) != CPULoad.MPSTAT_FIELD_SIZE:
+ raise RuntimeError("mpstat average: unexpected field\
+ size", fields)
+ else:
+ # value fields
+ cpu = 'cpu' if line[0] == 'all' else 'cpu' + line[0]
+ values = line[1:]
+ if values and len(values) == len(fields):
+ average[cpu] = dict(zip(fields, values))
+ else:
+ raise RuntimeError("mpstat average: parse error",
+ fields, line)
+
+        return {'mpstat_maximum': maximum, 'mpstat_minimum': minimum,
+                'mpstat_average': average}
def _get_cpu_usage(self):
"""Get processor usage from /proc/stat."""
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
new file mode 100644
index 000000000..d3123083a
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -0,0 +1,229 @@
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Vsperf specific scenario definition """
+
+import logging
+import os
+import subprocess
+import csv
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Vsperf(base.Scenario):
+ """Execute vsperf with defined parameters
+
+ Parameters:
+    traffic_type - the type of traffic executed by the traffic generator;
+ the valid values are "rfc2544", "continuous", "back2back"
+ type: string
+ default: "rfc2544"
+    pkt_sizes - the packet size for which the test should be executed;
+    multiple packet sizes can be tested by modifying the Sequence runner
+    section inside the TC YAML definition.
+ type: string
+ default: "64"
+ duration - sets duration for which traffic will be generated
+ type: int
+ default: 30
+    bidirectional - specifies whether traffic will be unidirectional (False)
+    or bidirectional (True)
+ type: string
+ default: False
+ iload - specifies frame rate
+ type: string
+ default: 100
+ rfc2544_trials - the number of trials performed for each packet size
+ type: string
+ default: NA
+ multistream - the number of simulated streams
+ type: string
+ default: 0 (disabled)
+ stream_type - specifies network layer used for multistream simulation
+ the valid values are "L4", "L3" and "L2"
+ type: string
+ default: "L4"
+ conf-file - path to the vsperf configuration file, which will be uploaded
+ to the VM
+ type: string
+ default: NA
+ setup-script - path to the setup script, which will be executed during
+ setup and teardown phases
+ type: string
+ default: NA
+ trafficgen_port1 - specifies device name of 1st interface connected to
+ the trafficgen
+ type: string
+ default: NA
+ trafficgen_port2 - specifies device name of 2nd interface connected to
+ the trafficgen
+ type: string
+ default: NA
+ external_bridge - specifies name of external bridge configured in OVS
+ type: string
+ default: "br-ex"
+
+ """
+ __scenario_type__ = "Vsperf"
+
+ VSPERF_CONF = '~/vsperf-yardstick.conf'
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+ self.client = None
+ self.tg_port1 = self.scenario_cfg['options'].get('trafficgen_port1',
+ None)
+ self.tg_port2 = self.scenario_cfg['options'].get('trafficgen_port2',
+ None)
+ self.br_ex = self.scenario_cfg['options'].get('external_bridge',
+ 'br-ex')
+ self.vsperf_conf = os.path.expanduser(
+ self.scenario_cfg['options'].get('conf-file', Vsperf.VSPERF_CONF))
+ self.setup_script = self.scenario_cfg['options'].get('setup-script',
+ None)
+ if self.setup_script:
+ self.setup_script = os.path.expanduser(self.setup_script)
+
+ def setup(self):
+ '''scenario setup'''
+ vsperf = self.context_cfg['host']
+ vsperf_user = vsperf.get('user', 'ubuntu')
+ vsperf_password = vsperf.get('password', 'ubuntu')
+ vsperf_ip = vsperf.get('ip', None)
+
+ # add trafficgen interfaces to the external bridge
+ if self.tg_port1:
+ subprocess.call('sudo bash -c "ovs-vsctl add-port %s %s"' %
+ (self.br_ex, self.tg_port1), shell=True)
+ if self.tg_port2:
+ subprocess.call('sudo bash -c "ovs-vsctl add-port %s %s"' %
+ (self.br_ex, self.tg_port2), shell=True)
+
+        # ssh to the host running vsperf
+ LOG.info("user:%s, host:%s", vsperf_user, vsperf_ip)
+ self.client = ssh.SSH(vsperf_user, vsperf_ip,
+ password=vsperf_password)
+ # traffic generation could last long
+ self.client.wait(timeout=1800)
+
+        # copy vsperf configuration file to the host
+ self.client.run("cat > ~/vsperf.conf",
+ stdin=open(self.vsperf_conf, "rb"))
+
+ # execute external setup script
+ if self.setup_script:
+ cmd = "%s setup" % (self.setup_script)
+ LOG.info("Execute setup script \"%s\"", cmd)
+ subprocess.call(cmd, shell=True)
+
+ self.setup_done = True
+
+ def run(self, result):
+ """ execute the vsperf benchmark and return test results
+ within result dictionary
+ """
+ def add_test_params(options, option, default_value):
+ """return parameter and its value as a string to be passed
+ to the VSPERF inside --test-params argument
+
+ Parameters:
+ options - dictionary with scenario options
+ option - a name of option to be added to the string
+ default_value - value to be used in case that option
+ is not defined inside scenario options
+ """
+ if option in options:
+ return "%s=%s" % (option, options[option])
+ elif default_value is not None:
+ return "%s=%s" % (option, default_value)
+ else:
+ return None
+
+ if not self.setup_done:
+ self.setup()
+
+ # remove results from previous tests
+ self.client.execute("rm -rf /tmp/results*")
+
+ # get vsperf options
+ options = self.scenario_cfg['options']
+ test_params = []
+ test_params.append(add_test_params(options, "traffic_type", "rfc2544"))
+ test_params.append(add_test_params(options, "pkt_sizes", "64"))
+ test_params.append(add_test_params(options, "duration", None))
+ test_params.append(add_test_params(options, "bidirectional", "False"))
+ test_params.append(add_test_params(options, "iload", 100))
+ test_params.append(add_test_params(options, "rfc2544_trials", None))
+ test_params.append(add_test_params(options, "multistream", None))
+ test_params.append(add_test_params(options, "stream_type", None))
+
+ # execute vsperf
+ cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
+ cmd += "./vsperf --mode trafficgen --conf-file ~/vsperf.conf "
+ cmd += "--test-params=\"%s\"" % (';'.join(filter(None, test_params)))
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ # get test results
+ cmd = "cat /tmp/results*/result.csv"
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ # convert result.csv to JSON format
+ reader = csv.DictReader(stdout.split('\r\n'))
+        result.update(next(reader))
+
+        # SLA check; go through all defined SLAs and check if values measured
+        # by VSPERF are higher than those defined by the SLAs
+ if 'sla' in self.scenario_cfg and \
+ 'metrics' in self.scenario_cfg['sla']:
+ for metric in self.scenario_cfg['sla']['metrics'].split(','):
+ assert metric in result, \
+ '%s is not collected by VSPERF' % (metric)
+ assert metric in self.scenario_cfg['sla'], \
+ '%s is not defined in SLA' % (metric)
+ vs_res = float(result[metric])
+ sla_res = float(self.scenario_cfg['sla'][metric])
+ assert vs_res >= sla_res, \
+ 'VSPERF_%s(%f) < SLA_%s(%f)' % \
+ (metric, vs_res, metric, sla_res)
+
+ def teardown(self):
+ """cleanup after the test execution"""
+ # remove trafficgen interfaces from the external bridge
+ if self.tg_port1:
+ subprocess.call('sudo bash -c "ovs-vsctl del-port %s %s"' %
+ (self.br_ex, self.tg_port1), shell=True)
+ if self.tg_port2:
+ subprocess.call('sudo bash -c "ovs-vsctl del-port %s %s"' %
+ (self.br_ex, self.tg_port2), shell=True)
+
+        # execute external teardown script
+        if self.setup_script:
+            cmd = "%s teardown" % (self.setup_script)
+            LOG.info("Execute teardown script \"%s\"", cmd)
+ subprocess.call(cmd, shell=True)
+
+ self.setup_done = False
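The nested add_test_params() helper turns scenario options into the
semicolon-separated string passed to vsperf via --test-params. A standalone
sketch of the same contract (the opts values here are made up):

    def add_test_params(options, option, default_value):
        # Explicit option wins, then the default; None omits the parameter.
        if option in options:
            return "%s=%s" % (option, options[option])
        elif default_value is not None:
            return "%s=%s" % (option, default_value)
        return None

    opts = {"traffic_type": "back2back", "duration": 60}
    params = [add_test_params(opts, "traffic_type", "rfc2544"),
              add_test_params(opts, "duration", None),
              add_test_params(opts, "multistream", None)]
    print(";".join(filter(None, params)))
    # -> traffic_type=back2back;duration=60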