Diffstat (limited to 'yardstick')
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/__init__.py                                25
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/__init__.py                       0
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/baseoperation.py                  81
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/operation_general.py              77
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation_conf.yaml                         16
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/__init__.py                   0
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py         88
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py   107
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker_conf.yaml                    11
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py                                       14
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py                                    229
11 files changed, 644 insertions(+), 4 deletions(-)
diff --git a/yardstick/benchmark/scenarios/availability/__init__.py b/yardstick/benchmark/scenarios/availability/__init__.py
index e69de29bb..fdad0fe95 100755
--- a/yardstick/benchmark/scenarios/availability/__init__.py
+++ b/yardstick/benchmark/scenarios/availability/__init__.py
@@ -0,0 +1,25 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_ qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ActionType:
+ ATTACKER = "attacker"
+ MONITOR = "monitor"
+ RESULTCHECKER = "resultchecker"
+ RESULTCOMPARER = "comparer"
+ OPERATION = "operation"
+
+
+class Condition:
+ EQUAL = "eq"
+ GREATERTHAN = "gt"
+ GREATERTHANEQUAL = "gt_eq"
+ LESSTHAN = "lt"
+ LESSTHANEQUAL = "lt_eq"
+ IN = "in"
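The Condition constants above are the comparison keywords that GeneralResultChecker (added later in this patch) dispatches on. A minimal illustrative sketch of the equivalent mapping onto Python's operator module (not part of the patch):

import operator

# Condition keyword -> comparison; op(actual, expected) must be True to pass.
CONDITION_OPS = {
    "eq": operator.eq,
    "gt": operator.gt,
    "gt_eq": operator.ge,
    "lt": operator.lt,
    "lt_eq": operator.le,
    "in": lambda actual, expected: expected in actual,
}


def check(condition, actual, expected):
    """Return True when 'actual' satisfies 'condition' against 'expected'."""
    return CONDITION_OPS[condition](actual, expected)
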
diff --git a/yardstick/benchmark/scenarios/availability/operation/__init__.py b/yardstick/benchmark/scenarios/availability/operation/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/operation/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
new file mode 100644
index 000000000..e776e87ae
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
@@ -0,0 +1,81 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_ qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import yaml
+import logging
+import os
+
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+operation_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "operation_conf.yaml")
+
+
+class OperationMgr(object):
+
+ def __init__(self):
+ self._operation_list = []
+
+ def init_operations(self, operation_cfgs, context):
+ LOG.debug("operationMgr confg: %s" % operation_cfgs)
+ for cfg in operation_cfgs:
+ operation_type = cfg['operation_type']
+ operation_cls = BaseOperation.get_operation_cls(operation_type)
+ operation_ins = operation_cls(cfg, context)
+ operation_ins.key = cfg['key']
+ operation_ins.setup()
+ self._operation_list.append(operation_ins)
+
+ def __getitem__(self, item):
+ for obj in self._operation_list:
+ if(obj.key == item):
+ return obj
+ raise KeyError("No such operation instance of key - %s" % item)
+
+ def rollback(self):
+ for _instance in self._operation_list:
+ _instance.rollback()
+
+
+class BaseOperation(object):
+
+ operation_cfgs = {}
+
+ def __init__(self, config, context):
+ if not BaseOperation.operation_cfgs:
+ with open(operation_conf_path) as stream:
+ BaseOperation.operation_cfgs = yaml.load(stream)
+ self.key = ''
+ self._config = config
+ self._context = context
+
+ @staticmethod
+ def get_operation_cls(type):
+ '''return the operation class of the specified type'''
+ operation_type = type
+ for operation_cls in utils.itersubclasses(BaseOperation):
+ if operation_type == operation_cls.__operation__type__:
+ return operation_cls
+ raise RuntimeError("No such runner_type %s" % operation_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(operation_conf_path)
+ return os.path.join(base_path, path)
+
+ def setup(self):
+ pass
+
+ def run(self):
+ pass
+
+ def rollback(self):
+ pass
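Any subclass of BaseOperation is discovered through utils.itersubclasses() and selected by its __operation__type__ string, so a new operation type needs no registration beyond being importable. A minimal sketch, assuming the yardstick package from this patch is installed; the subclass name and type string are hypothetical:

from yardstick.benchmark.scenarios.availability.operation.baseoperation import (
    BaseOperation)


class ExampleOperation(BaseOperation):           # hypothetical subclass

    __operation__type__ = "example-operation"    # hypothetical type string

    def setup(self):
        pass

    def run(self):
        pass

    def rollback(self):
        pass


# OperationMgr.init_operations() maps each entry's "operation_type" to a class:
cls = BaseOperation.get_operation_cls("example-operation")
assert cls is ExampleOperation
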
diff --git a/yardstick/benchmark/scenarios/availability/operation/operation_general.py b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
new file mode 100644
index 000000000..d41371629
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
@@ -0,0 +1,77 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_ qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+from baseoperation import BaseOperation
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios.availability.util import buildshellparams
+
+LOG = logging.getLogger(__name__)
+
+
+class GeneralOperaion(BaseOperation):
+
+ __operation__type__ = "general-operation"
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.key = self._config['key']
+
+ if "action_parameter" in self._config:
+ actionParameter = self._config['action_parameter']
+ str = buildshellparams(actionParameter)
+ l = list(item for item in actionParameter.values())
+ self.action_param = str.format(*l)
+
+ if "rollback_parameter" in self._config:
+ rollbackParameter = self._config['rollback_parameter']
+ str = buildshellparams(rollbackParameter)
+ l = list(item for item in rollbackParameter.values())
+ self.rollback_param = str.format(*l)
+
+ self.operation_cfgs = BaseOperation.operation_cfgs.get(self.key)
+ self.action_script = self.get_script_fullpath(
+ self.operation_cfgs['action_script'])
+ self.rollback_script = self.get_script_fullpath(
+ self.operation_cfgs['rollback_script'])
+
+ def run(self):
+ if "action_parameter" in self._config:
+ exit_status, stdout, stderr = self.connection.execute(
+ self.action_param,
+ stdin=open(self.action_script, "r"))
+ else:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s ",
+ stdin=open(self.action_script, "r"))
+
+ if exit_status == 0:
+ LOG.debug("success,the operation's output is: {0}".format(stdout))
+ else:
+ LOG.error(
+ "the operation's error, stdout:%s, stderr:%s" %
+ (stdout, stderr))
+
+ def rollback(self):
+ if "rollback_parameter" in self._config:
+ exit_status, stdout, stderr = self.connection.execute(
+ self.rollback_param,
+ stdin=open(self.rollback_script, "r"))
+ else:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s ",
+ stdin=open(self.rollback_script, "r"))
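OperationMgr drives GeneralOperaion through the setup/run/rollback lifecycle using the per-entry config keys read in setup() above. A sketch of such an entry, assuming the module paths from this patch; the host name and parameter values are hypothetical, while "nova-create-instance" is a key defined in operation_conf.yaml below:

from yardstick.benchmark.scenarios.availability.operation.baseoperation import (
    OperationMgr)

operation_cfgs = [{
    "operation_type": "general-operation",
    "key": "nova-create-instance",                       # key from operation_conf.yaml
    "host": "node1",                                     # hypothetical context node
    "action_parameter": {"serverconfig": "server.json"},    # hypothetical
    "rollback_parameter": {"serverconfig": "server.json"},  # hypothetical
}]
context = {"node1": {"ip": "10.0.0.5", "user": "root",
                     "key_filename": "~/.ssh/id_rsa"}}   # hypothetical host entry

mgr = OperationMgr()
mgr.init_operations(operation_cfgs, context)  # SSHes to node1, resolves the scripts
mgr["nova-create-instance"].run()             # pipes action_script into the remote shell
mgr.rollback()                                # pipes rollback_script for every operation
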
diff --git a/yardstick/benchmark/scenarios/availability/operation_conf.yaml b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
new file mode 100644
index 000000000..78c996d05
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
@@ -0,0 +1,16 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+nova-create-instance:
+ action_script: ha_tools/nova/create_instance_from_image.bash
+ rollback_script: ha_tools/nova/delete_instance.bash
+
+swift_upload_file:
+ action_script: ha_tools/swift/upload.bash
+ rollback_script: ha_tools/swift/delete.bash
+
+swift_download_file:
+ action_script: ha_tools/swift/download.bash
+ rollback_script: ha_tools/file/remove_file.bash
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/__init__.py b/yardstick/benchmark/scenarios/availability/result_checker/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
new file mode 100644
index 000000000..1bdb9f2c2
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
@@ -0,0 +1,88 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_ qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import yaml
+import logging
+import os
+
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+resultchecker_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "result_checker_conf.yaml")
+
+
+class ResultCheckerMgr(object):
+
+ def __init__(self):
+ self._result_checker_list = []
+
+ def init_ResultChecker(self, resultchecker_cfgs, context):
+ LOG.debug("resultcheckerMgr confg: %s" % resultchecker_cfgs)
+
+ for cfg in resultchecker_cfgs:
+ resultchecker_type = cfg['checker_type']
+ resultchecker_cls = BaseResultChecker.get_resultchecker_cls(
+ resultchecker_type)
+ resultchecker_ins = resultchecker_cls(cfg, context)
+ resultchecker_ins.key = cfg['key']
+ resultchecker_ins.setup()
+ self._result_checker_list.append(resultchecker_ins)
+
+ def __getitem__(self, item):
+ for obj in self._result_checker_list:
+ if(obj.key == item):
+ return obj
+ raise KeyError("No such result checker instance of key - %s" % item)
+
+ def verify(self):
+ result = True
+ for obj in self._result_checker_list:
+ result &= obj.success
+ return result
+
+
+class BaseResultChecker(object):
+
+ resultchecker_cfgs = {}
+
+ def __init__(self, config, context):
+ if not BaseResultChecker.resultchecker_cfgs:
+ with open(resultchecker_conf_path) as stream:
+ BaseResultChecker.resultchecker_cfgs = yaml.load(stream)
+ self.actualResult = object()
+ self.expectedResult = object()
+ self.success = False
+
+ self._config = config
+ self._context = context
+ self.setup_done = False
+
+ @staticmethod
+ def get_resultchecker_cls(type):
+ '''return the result checker class of the specified type'''
+ resultchecker_type = type
+ for checker_cls in utils.itersubclasses(BaseResultChecker):
+ if resultchecker_type == checker_cls.__result_checker__type__:
+ return checker_cls
+ raise RuntimeError("No such runner_type %s" % resultchecker_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(resultchecker_conf_path)
+ return os.path.join(base_path, path)
+
+ def setup(self):
+ pass
+
+ def verify(self):
+ if(self.actualResult == self.expectedResult):
+ self.success = True
+ return self.success
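ResultCheckerMgr.verify() AND-s the per-checker success flags, so a single failing checker fails the whole verification. A stand-alone rendering of that aggregation (the Checker namedtuple is only a stand-in for illustration, not yardstick code):

from collections import namedtuple

Checker = namedtuple("Checker", ["key", "success"])      # stand-in type

checkers = [Checker("service-checker", True),
            Checker("process-checker", False)]           # one failed check

result = True
for obj in checkers:
    result &= obj.success                                # same operation as verify()

print(result)                                            # -> False
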
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
new file mode 100644
index 000000000..70bf9aea6
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2016 Juan Qiu and others
+# juan_ qiu@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from baseresultchecker import BaseResultChecker
+from yardstick.benchmark.scenarios.availability import Condition
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios.availability.util import buildshellparams
+
+LOG = logging.getLogger(__name__)
+
+
+class GeneralResultChecker(BaseResultChecker):
+
+ __result_checker__type__ = "general-result-checker"
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.key = self._config['key']
+ self.type = self._config['checker_type']
+ self.condition = self._config['condition']
+ self.expectedResult = self._config['expectedValue']
+ self.actualResult = object()
+
+ self.key = self._config['key']
+ if "parameter" in self._config:
+ parameter = self._config['parameter']
+ str = buildshellparams(parameter)
+ l = list(item for item in parameter.values())
+ self.shell_cmd = str.format(*l)
+
+ self.resultchecker_cfgs = BaseResultChecker.resultchecker_cfgs.get(
+ self.key)
+ self.verify_script = self.get_script_fullpath(
+ self.resultchecker_cfgs['verify_script'])
+
+ def verify(self):
+ if "parameter" in self._config:
+ exit_status, stdout, stderr = self.connection.execute(
+ self.shell_cmd,
+ stdin=open(self.verify_script, "r"))
+ LOG.debug("action script of the operation is: {0}"
+ .format(self.verify_script))
+ LOG.debug("action parameter the of operation is: {0}"
+ .format(self.shell_cmd))
+ else:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/bash -s ",
+ stdin=open(self.verify_script, "r"))
+ LOG.debug("action script of the operation is: {0}"
+ .format(self.verify_script))
+
+ LOG.debug("exit_status ,stdout : {0} ,{1}".format(exit_status, stdout))
+ if exit_status == 0 and stdout:
+ self.actualResult = stdout
+ LOG.debug("verifying resultchecker: {0}".format(self.key))
+ LOG.debug("verifying resultchecker,expected: {0}"
+ .format(self.expectedResult))
+ LOG.debug("verifying resultchecker,actual: {0}"
+ .format(self.actualResult))
+ LOG.debug("verifying resultchecker,condition: {0}"
+ .format(self.condition))
+ if (type(self.expectedResult) is int):
+ self.actualResult = int(self.actualResult)
+ if self.condition == Condition.EQUAL:
+ self.success = self.actualResult == self.expectedResult
+ elif self.condition == Condition.GREATERTHAN:
+ self.success = self.actualResult > self.expectedResult
+ elif self.condition == Condition.GREATERTHANEQUAL:
+ self.success = self.actualResult >= self.expectedResult
+ elif self.condition == Condition.LESSTHANEQUAL:
+ self.success = self.actualResult <= self.expectedResult
+ elif self.condition == Condition.LESSTHAN:
+ self.success = self.actualResult < self.expectedResult
+ elif self.condition == Condition.IN:
+ self.success = self.expectedResult in self.actualResult
+ else:
+ self.success = False
+ LOG.debug(
+ "error happened when resultchecker: {0} Invalid condition"
+ .format(self.key))
+ else:
+ self.success = False
+ LOG.debug(
+ "error happened when resultchecker: {0} verifying the result"
+ .format(self.key))
+ LOG.error(stderr)
+
+ LOG.debug(
+ "verifying resultchecker: {0},the result is : {1}"
+ .format(self.key, self.success))
+ return self.success
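For reference, a sketch of a "general-result-checker" config entry as consumed by setup() and verify() above; the keys are the ones read by the code, while the concrete values (host, parameter, expected count) are hypothetical, and "process-checker" is a key from result_checker_conf.yaml below:

checker_cfg = {
    "checker_type": "general-result-checker",
    "key": "process-checker",           # selects verify_script from result_checker_conf.yaml
    "host": "node1",                    # looked up in the scenario context
    "condition": "gt_eq",               # Condition.GREATERTHANEQUAL
    "expectedValue": 1,                 # int, so stdout is cast with int() before comparing
    "parameter": {"process_name": "nova-api"},   # hypothetical; formatted into the shell command
}
# verify() pipes the resolved verify_script into the remote shell, stores stdout
# as actualResult and then evaluates: actualResult >= expectedValue.
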
diff --git a/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
new file mode 100644
index 000000000..638c39a6e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
@@ -0,0 +1,11 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+process-checker:
+ verify_script: ha_tools/check_process_python.bash
+service-checker:
+ verify_script: ha_tools/check_service.bash
+nova-instance-checker:
+ verify_script: ha_tools/nova/show_instances.bash
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index 3af354850..08755a08b 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -41,11 +41,17 @@ class Ping(base.Scenario):
user = host.get('user', 'ubuntu')
ip = host.get('ip', None)
key_filename = host.get('key_filename', '/root/.ssh/id_rsa')
- password = host.get('password', 'root')
+ password = host.get('password', None)
+
+ if password is not None:
+ LOG.info("Log in via pw, user:%s, host:%s, pw:%s",
+ user, ip, password)
+ self.connection = ssh.SSH(user, ip, password=password)
+ else:
+ LOG.info("Log in via key, user:%s, host:%s, key_filename:%s",
+ user, ip, key_filename)
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
- LOG.info("user:%s, host:%s, key_filename:%s", user, ip, key_filename)
- self.connection = ssh.SSH(user, ip, key_filename=key_filename,
- password=password)
self.connection.wait()
def run(self, result):
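The change above makes password authentication take precedence when a password is present in the host context, and falls back to key-based login otherwise. A small sketch of the two host shapes and the selection logic; addresses and credentials are hypothetical:

host_with_password = {"ip": "10.0.0.10", "user": "ubuntu",
                      "password": "secret"}              # -> ssh.SSH(user, ip, password=...)
host_with_key = {"ip": "10.0.0.11", "user": "ubuntu",
                 "key_filename": "/root/.ssh/id_rsa"}    # -> ssh.SSH(user, ip, key_filename=...)


def pick_auth(host):
    """Mirror the selection added to Ping.setup()."""
    if host.get("password") is not None:
        return {"password": host["password"]}
    return {"key_filename": host.get("key_filename", "/root/.ssh/id_rsa")}


assert pick_auth(host_with_password) == {"password": "secret"}
assert pick_auth(host_with_key) == {"key_filename": "/root/.ssh/id_rsa"}
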
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
new file mode 100644
index 000000000..d3123083a
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -0,0 +1,229 @@
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Vsperf specific scenario definition """
+
+import logging
+import os
+import subprocess
+import csv
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Vsperf(base.Scenario):
+ """Execute vsperf with defined parameters
+
+ Parameters:
+ traffic_type - specifies the type of traffic executed by the traffic generator;
+ the valid values are "rfc2544", "continuous", "back2back"
+ type: string
+ default: "rfc2544"
+ pkt_sizes - a packet size for which the test should be executed;
+ multiple packet sizes can be tested by modifying the Sequence runner
+ section inside the TC YAML definition.
+ type: string
+ default: "64"
+ duration - sets duration for which traffic will be generated
+ type: int
+ default: 30
+ bidirectional - specifies whether traffic will be uni-directional (False)
+ or bi-directional (True)
+ type: string
+ default: False
+ iload - specifies frame rate
+ type: string
+ default: 100
+ rfc2544_trials - the number of trials performed for each packet size
+ type: string
+ default: NA
+ multistream - the number of simulated streams
+ type: string
+ default: 0 (disabled)
+ stream_type - specifies network layer used for multistream simulation
+ the valid values are "L4", "L3" and "L2"
+ type: string
+ default: "L4"
+ conf-file - path to the vsperf configuration file, which will be uploaded
+ to the VM
+ type: string
+ default: NA
+ setup-script - path to the setup script, which will be executed during
+ setup and teardown phases
+ type: string
+ default: NA
+ trafficgen_port1 - specifies device name of 1st interface connected to
+ the trafficgen
+ type: string
+ default: NA
+ trafficgen_port2 - specifies device name of 2nd interface connected to
+ the trafficgen
+ type: string
+ default: NA
+ external_bridge - specifies name of external bridge configured in OVS
+ type: string
+ default: "br-ex"
+
+ """
+ __scenario_type__ = "Vsperf"
+
+ VSPERF_CONF = '~/vsperf-yardstick.conf'
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+ self.client = None
+ self.tg_port1 = self.scenario_cfg['options'].get('trafficgen_port1',
+ None)
+ self.tg_port2 = self.scenario_cfg['options'].get('trafficgen_port2',
+ None)
+ self.br_ex = self.scenario_cfg['options'].get('external_bridge',
+ 'br-ex')
+ self.vsperf_conf = os.path.expanduser(
+ self.scenario_cfg['options'].get('conf-file', Vsperf.VSPERF_CONF))
+ self.setup_script = self.scenario_cfg['options'].get('setup-script',
+ None)
+ if self.setup_script:
+ self.setup_script = os.path.expanduser(self.setup_script)
+
+ def setup(self):
+ '''scenario setup'''
+ vsperf = self.context_cfg['host']
+ vsperf_user = vsperf.get('user', 'ubuntu')
+ vsperf_password = vsperf.get('password', 'ubuntu')
+ vsperf_ip = vsperf.get('ip', None)
+
+ # add trafficgen interfaces to the external bridge
+ if self.tg_port1:
+ subprocess.call('sudo bash -c "ovs-vsctl add-port %s %s"' %
+ (self.br_ex, self.tg_port1), shell=True)
+ if self.tg_port2:
+ subprocess.call('sudo bash -c "ovs-vsctl add-port %s %s"' %
+ (self.br_ex, self.tg_port2), shell=True)
+
+ # copy vsperf conf to VM
+ LOG.info("user:%s, host:%s", vsperf_user, vsperf_ip)
+ self.client = ssh.SSH(vsperf_user, vsperf_ip,
+ password=vsperf_password)
+ # traffic generation could last long
+ self.client.wait(timeout=1800)
+
+ # copy script to host
+ self.client.run("cat > ~/vsperf.conf",
+ stdin=open(self.vsperf_conf, "rb"))
+
+ # execute external setup script
+ if self.setup_script:
+ cmd = "%s setup" % (self.setup_script)
+ LOG.info("Execute setup script \"%s\"", cmd)
+ subprocess.call(cmd, shell=True)
+
+ self.setup_done = True
+
+ def run(self, result):
+ """ execute the vsperf benchmark and return test results
+ within result dictionary
+ """
+ def add_test_params(options, option, default_value):
+ """return parameter and its value as a string to be passed
+ to the VSPERF inside --test-params argument
+
+ Parameters:
+ options - dictionary with scenario options
+ option - a name of option to be added to the string
+ default_value - value to be used in case that option
+ is not defined inside scenario options
+ """
+ if option in options:
+ return "%s=%s" % (option, options[option])
+ elif default_value is not None:
+ return "%s=%s" % (option, default_value)
+ else:
+ return None
+
+ if not self.setup_done:
+ self.setup()
+
+ # remove results from previous tests
+ self.client.execute("rm -rf /tmp/results*")
+
+ # get vsperf options
+ options = self.scenario_cfg['options']
+ test_params = []
+ test_params.append(add_test_params(options, "traffic_type", "rfc2544"))
+ test_params.append(add_test_params(options, "pkt_sizes", "64"))
+ test_params.append(add_test_params(options, "duration", None))
+ test_params.append(add_test_params(options, "bidirectional", "False"))
+ test_params.append(add_test_params(options, "iload", 100))
+ test_params.append(add_test_params(options, "rfc2544_trials", None))
+ test_params.append(add_test_params(options, "multistream", None))
+ test_params.append(add_test_params(options, "stream_type", None))
+
+ # execute vsperf
+ cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
+ cmd += "./vsperf --mode trafficgen --conf-file ~/vsperf.conf "
+ cmd += "--test-params=\"%s\"" % (';'.join(filter(None, test_params)))
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ # get test results
+ cmd = "cat /tmp/results*/result.csv"
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ # convert result.csv to JSON format
+ reader = csv.DictReader(stdout.split('\r\n'))
+ result.update(reader.next())
+
+ # sla check; go through all defined SLAs and check if values measured
+ # by VSPERF are higher than those defined by SLAs
+ if 'sla' in self.scenario_cfg and \
+ 'metrics' in self.scenario_cfg['sla']:
+ for metric in self.scenario_cfg['sla']['metrics'].split(','):
+ assert metric in result, \
+ '%s is not collected by VSPERF' % (metric)
+ assert metric in self.scenario_cfg['sla'], \
+ '%s is not defined in SLA' % (metric)
+ vs_res = float(result[metric])
+ sla_res = float(self.scenario_cfg['sla'][metric])
+ assert vs_res >= sla_res, \
+ 'VSPERF_%s(%f) < SLA_%s(%f)' % \
+ (metric, vs_res, metric, sla_res)
+
+ def teardown(self):
+ """cleanup after the test execution"""
+ # remove trafficgen interfaces from the external bridge
+ if self.tg_port1:
+ subprocess.call('sudo bash -c "ovs-vsctl del-port %s %s"' %
+ (self.br_ex, self.tg_port1), shell=True)
+ if self.tg_port2:
+ subprocess.call('sudo bash -c "ovs-vsctl del-port %s %s"' %
+ (self.br_ex, self.tg_port2), shell=True)
+
+ # run the external setup script with the "teardown" argument
+ if self.setup_script:
+ cmd = "%s teardown" % (self.setup_script)
+ LOG.info("Execute teardown command \"%s\"", cmd)
+ subprocess.call(cmd, shell=True)
+
+ self.setup_done = False
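
For reference, a sketch of how Vsperf.run() above assembles the --test-params string and the final command line for a hypothetical scenario options section, mirroring the add_test_params() defaults (traffic_type=rfc2544, pkt_sizes=64, bidirectional=False, iload=100):

options = {"traffic_type": "rfc2544", "pkt_sizes": "128",
           "duration": 30, "multistream": 1000}          # hypothetical values

defaults = [("traffic_type", "rfc2544"), ("pkt_sizes", "64"),
            ("duration", None), ("bidirectional", "False"),
            ("iload", 100), ("rfc2544_trials", None),
            ("multistream", None), ("stream_type", None)]

test_params = ["%s=%s" % (opt, options.get(opt, dflt))
               for opt, dflt in defaults
               if opt in options or dflt is not None]

cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
cmd += "./vsperf --mode trafficgen --conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % ';'.join(test_params)
print(cmd)
# ... ./vsperf --mode trafficgen --conf-file ~/vsperf.conf
#     --test-params="traffic_type=rfc2544;pkt_sizes=128;duration=30;bidirectional=False;iload=100;multistream=1000"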