Diffstat (limited to 'yardstick/benchmark')
-rw-r--r--   yardstick/benchmark/core/task.py                            |  19
-rwxr-xr-x   yardstick/benchmark/runners/base.py                         |  24
-rw-r--r--   yardstick/benchmark/scenarios/networking/sfc_openstack.py   |  16
-rw-r--r--   yardstick/benchmark/scenarios/networking/vnf_generic.py     | 274
4 files changed, 90 insertions(+), 243 deletions(-)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 2c3edfe13..a4514f5f1 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -118,7 +118,7 @@ class Task(object): # pragma: no cover
case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
try:
- data = self._run(scenarios, run_in_parallel, args.output_file)
+ data = self._run(scenarios, run_in_parallel, output_config)
except KeyboardInterrupt:
raise
except Exception: # pylint: disable=broad-except
@@ -230,11 +230,12 @@ class Task(object): # pragma: no cover
def _do_output(self, output_config, result):
dispatchers = DispatcherBase.get(output_config)
+ dispatchers = (d for d in dispatchers if d.__dispatcher_type__ != 'Influxdb')
for dispatcher in dispatchers:
dispatcher.flush_result_data(result)
- def _run(self, scenarios, run_in_parallel, output_file):
+ def _run(self, scenarios, run_in_parallel, output_config):
"""Deploys context and calls runners"""
for context in self.contexts:
context.deploy()
@@ -245,14 +246,14 @@ class Task(object): # pragma: no cover
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
scenario["runner"] = dict(type="Duration", duration=1000000000)
- runner = self.run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_config)
background_runners.append(runner)
runners = []
if run_in_parallel:
for scenario in scenarios:
if not _is_background_scenario(scenario):
- runner = self.run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_config)
runners.append(runner)
# Wait for runners to finish
@@ -261,12 +262,12 @@ class Task(object): # pragma: no cover
if status != 0:
raise RuntimeError(
"{0} runner status {1}".format(runner.__execution_type__, status))
- LOG.info("Runner ended, output in %s", output_file)
+ LOG.info("Runner ended")
else:
# run serially
for scenario in scenarios:
if not _is_background_scenario(scenario):
- runner = self.run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_config)
status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
LOG.error('Scenario NO.%s: "%s" ERROR!',
@@ -274,7 +275,7 @@ class Task(object): # pragma: no cover
scenario.get('type'))
raise RuntimeError(
"{0} runner status {1}".format(runner.__execution_type__, status))
- LOG.info("Runner ended, output in %s", output_file)
+ LOG.info("Runner ended")
# Abort background runners
for runner in background_runners:
@@ -311,10 +312,10 @@ class Task(object): # pragma: no cover
else:
return op
- def run_one_scenario(self, scenario_cfg, output_file):
+ def run_one_scenario(self, scenario_cfg, output_config):
"""run one scenario using context"""
runner_cfg = scenario_cfg["runner"]
- runner_cfg['output_filename'] = output_file
+ runner_cfg['output_config'] = output_config
options = scenario_cfg.get('options', {})
scenario_cfg['options'] = self._parse_options(options)
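
The task.py hunks above replace the single output_file argument with the full output_config dictionary, so each runner knows which dispatchers are configured; _do_output then skips the Influxdb dispatcher because those records are now uploaded one by one from the runner. A minimal sketch of that filtering, assuming an output_config laid out like the one read from yardstick.conf (the example dictionary and function name are illustrative, not part of the patch):

    from yardstick.dispatcher.base import Base as DispatcherBase

    # Assumed shape of output_config; the patch only relies on
    # output_config['DEFAULT']['dispatcher'] naming 'influxdb' or not.
    example_output_config = {
        'DEFAULT': {'dispatcher': ['influxdb', 'file']},
    }

    def flush_to_non_influxdb_dispatchers(output_config, result):
        dispatchers = DispatcherBase.get(output_config)
        # Influxdb results are uploaded per record by the runner instead.
        dispatchers = (d for d in dispatchers
                       if d.__dispatcher_type__ != 'Influxdb')
        for dispatcher in dispatchers:
            dispatcher.flush_result_data(result)
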
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index a887fa5b3..99386a440 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -23,6 +23,7 @@ import multiprocessing
import subprocess
import time
import traceback
+from subprocess import CalledProcessError
import importlib
@@ -30,6 +31,7 @@ from six.moves.queue import Empty
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.dispatcher.base import Base as DispatcherBase
log = logging.getLogger(__name__)
@@ -39,7 +41,7 @@ def _execute_shell_command(command):
exitcode = 0
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except CalledProcessError:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n ", command)
@@ -137,6 +139,8 @@ class Runner(object):
Runner.release(runner)
def __init__(self, config):
+ self.task_id = None
+ self.case_name = None
self.config = config
self.periodic_action_process = None
self.output_queue = multiprocessing.Queue()
@@ -170,6 +174,8 @@ class Runner(object):
cls = getattr(module, path_split[-1])
self.config['object'] = class_name
+ self.case_name = scenario_cfg['tc']
+ self.task_id = scenario_cfg['task_id']
self.aborted.clear()
# run a potentially configured pre-start action
@@ -245,10 +251,24 @@ class Runner(object):
def get_result(self):
result = []
+
+ dispatcher = self.config['output_config']['DEFAULT']['dispatcher']
+ output_in_influxdb = 'influxdb' in dispatcher
+
while not self.result_queue.empty():
log.debug("result_queue size %s", self.result_queue.qsize())
try:
- result.append(self.result_queue.get(True, 1))
+ one_record = self.result_queue.get(True, 1)
except Empty:
pass
+ else:
+ if output_in_influxdb:
+ self._output_to_influxdb(one_record)
+
+ result.append(one_record)
return result
+
+ def _output_to_influxdb(self, record):
+ dispatchers = DispatcherBase.get(self.config['output_config'])
+ dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
+ dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
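
Runner.get_result() now drains the result queue one record at a time and, when the configured dispatcher list names influxdb, pushes each record straight to the Influxdb dispatcher. A standalone sketch of that drain loop under the same assumptions (upload_record is a hypothetical stand-in for the dispatcher.upload_one_record call shown above):

    from six.moves.queue import Empty

    def drain_results(result_queue, output_in_influxdb, upload_record):
        """Collect queued records, optionally uploading each as it arrives."""
        result = []
        while not result_queue.empty():
            try:
                # Block for at most one second per record, as in the patch.
                one_record = result_queue.get(True, 1)
            except Empty:
                pass
            else:
                if output_in_influxdb:
                    upload_record(one_record)
                result.append(one_record)
        return result
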
diff --git a/yardstick/benchmark/scenarios/networking/sfc_openstack.py b/yardstick/benchmark/scenarios/networking/sfc_openstack.py
index d5feabbbe..aaab2131a 100644
--- a/yardstick/benchmark/scenarios/networking/sfc_openstack.py
+++ b/yardstick/benchmark/scenarios/networking/sfc_openstack.py
@@ -34,11 +34,13 @@ def get_credentials(service): # pragma: no cover
# The most common way to pass this information to the script is through
# environment variables.
+ # NOTE(ralonsoh): OS_TENANT_NAME is deprecated.
+ project_name = os.environ.get('OS_PROJECT_NAME', 'admin')
creds.update({
"username": os.environ.get('OS_USERNAME', "admin"),
password: os.environ.get("OS_PASSWORD", 'admin'),
"auth_url": os.environ.get("OS_AUTH_URL"),
- tenant: os.environ.get("OS_TENANT_NAME", "admin"),
+ tenant: os.environ.get("OS_TENANT_NAME", project_name),
})
cacert = os.environ.get("OS_CACERT")
if cacert is not None:
@@ -59,7 +61,7 @@ def get_instances(nova_client): # pragma: no cover
try:
instances = nova_client.servers.list(search_opts={'all_tenants': 1})
return instances
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
print("Error [get_instances(nova_client)]:", e)
return None
@@ -72,7 +74,7 @@ def get_SFs(nova_client): # pragma: no cover
if "sfc_test" not in instance.name:
SFs.append(instance)
return SFs
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
print("Error [get_SFs(nova_client)]:", e)
return None
@@ -93,7 +95,7 @@ def create_floating_ips(neutron_client): # pragma: no cover
ip_json = neutron_client.create_floatingip({'floatingip': props})
fip_addr = ip_json['floatingip']['floating_ip_address']
ips.append(fip_addr)
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
print("Error [create_floating_ip(neutron_client)]:", e)
return None
return ips
@@ -106,7 +108,7 @@ def floatIPtoSFs(SFs, floatips): # pragma: no cover
SF.add_floating_ip(floatips[i])
i = i + 1
return True
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
print(("Error [add_floating_ip(nova_client, '%s', '%s')]:" %
(SF, floatips[i]), e))
return False
@@ -122,7 +124,3 @@ def get_an_IP(): # pragma: no cover
floatips = create_floating_ips(neutron_client)
floatIPtoSFs(SFs, floatips)
return floatips
-
-
-if __name__ == '__main__': # pragma: no cover
- get_an_IP()
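
The credentials hunk keeps OS_TENANT_NAME working but marks it as deprecated, falling back to OS_PROJECT_NAME and finally to "admin". A minimal sketch of that lookup order (resolve_tenant_name is an illustrative helper, not something the module defines):

    import os

    def resolve_tenant_name(default="admin"):
        # Same precedence as the patch: a set OS_TENANT_NAME (deprecated)
        # still wins, otherwise OS_PROJECT_NAME, otherwise the default.
        project_name = os.environ.get('OS_PROJECT_NAME', default)
        return os.environ.get('OS_TENANT_NAME', project_name)
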
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index b94bfc9ab..0e4785294 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -11,115 +11,38 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" NSPerf specific scenario definition """
-
-from __future__ import absolute_import
+import copy
import logging
-import errno
import ipaddress
-
-import copy
+from itertools import chain
import os
import sys
-import re
-from itertools import chain
import six
import yaml
-from collections import defaultdict
-from yardstick.benchmark.scenarios import base
+from yardstick.benchmark.scenarios import base as scenario_base
+from yardstick.error import IncorrectConfig
from yardstick.common.constants import LOG_DIR
from yardstick.common.process import terminate_children
-from yardstick.common.utils import import_modules_from_package, itersubclasses
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
-from yardstick.network_services.traffic_profile.base import TrafficProfile
+from yardstick.network_services import traffic_profile
+from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh
-
-LOG = logging.getLogger(__name__)
-
-
-class SSHError(Exception):
- """Class handles ssh connection error exception"""
- pass
-
-
-class SSHTimeout(SSHError):
- """Class handles ssh connection timeout exception"""
- pass
-
-
-class IncorrectConfig(Exception):
- """Class handles incorrect configuration during setup"""
- pass
-
-
-class IncorrectSetup(Exception):
- """Class handles incorrect setup during setup"""
- pass
+traffic_profile.register_modules()
-class SshManager(object):
- def __init__(self, node, timeout=120):
- super(SshManager, self).__init__()
- self.node = node
- self.conn = None
- self.timeout = timeout
-
- def __enter__(self):
- """
- args -> network device mappings
- returns -> ssh connection ready to be used
- """
- try:
- self.conn = ssh.SSH.from_node(self.node)
- self.conn.wait(timeout=self.timeout)
- except SSHError as error:
- LOG.info("connect failed to %s, due to %s", self.node["ip"], error)
- # self.conn defaults to None
- return self.conn
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- if self.conn:
- self.conn.close()
-
-
-def find_relative_file(path, task_path):
- """
- Find file in one of places: in abs of path or
- relative to TC scenario file. In this order.
-
- :param path:
- :param task_path:
- :return str: full path to file
- """
- # fixme: create schema to validate all fields have been provided
- for lookup in [os.path.abspath(path), os.path.join(task_path, path)]:
- try:
- with open(lookup):
- return lookup
- except IOError:
- pass
- raise IOError(errno.ENOENT, 'Unable to find {} file'.format(path))
-
-
-def open_relative_file(path, task_path):
- try:
- return open(path)
- except IOError as e:
- if e.errno == errno.ENOENT:
- return open(os.path.join(task_path, path))
- raise
+LOG = logging.getLogger(__name__)
-class NetworkServiceTestCase(base.Scenario):
+class NetworkServiceTestCase(scenario_base.Scenario):
"""Class handles Generic framework to do pre-deployment VNF &
Network service testing """
@@ -130,16 +53,12 @@ class NetworkServiceTestCase(base.Scenario):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- # fixme: create schema to validate all fields have been provided
- with open_relative_file(scenario_cfg["topology"],
- scenario_cfg['task_path']) as stream:
- topology_yaml = yaml_load(stream)
-
- self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
+ self._render_topology()
self.vnfs = []
self.collector = None
self.traffic_profile = None
self.node_netdevs = {}
+ self.bin_path = get_nsb_option('bin_path', '')
def _get_ip_flow_range(self, ip_start_range):
@@ -211,37 +130,47 @@ class NetworkServiceTestCase(base.Scenario):
def _get_traffic_profile(self):
profile = self.scenario_cfg["traffic_profile"]
path = self.scenario_cfg["task_path"]
- with open_relative_file(profile, path) as infile:
+ with utils.open_relative_file(profile, path) as infile:
+ return infile.read()
+
+ def _get_topology(self):
+ topology = self.scenario_cfg["topology"]
+ path = self.scenario_cfg["task_path"]
+ with utils.open_relative_file(topology, path) as infile:
return infile.read()
def _fill_traffic_profile(self):
- traffic_mapping = self._get_traffic_profile()
- traffic_map_data = {
+ tprofile = self._get_traffic_profile()
+ extra_args = self.scenario_cfg.get('extra_args', {})
+ tprofile_data = {
'flow': self._get_traffic_flow(),
'imix': self._get_traffic_imix(),
- TrafficProfile.UPLINK: {},
- TrafficProfile.DOWNLINK: {},
+ tprofile_base.TrafficProfile.UPLINK: {},
+ tprofile_base.TrafficProfile.DOWNLINK: {},
+ 'extra_args': extra_args
}
- traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
- self.traffic_profile = TrafficProfile.get(traffic_vnfd)
- return self.traffic_profile
+ traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
+ self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
+
+ def _render_topology(self):
+ topology = self._get_topology()
+ topology_args = self.scenario_cfg.get('extra_args', {})
+ topology_data = {
+ 'extra_args': topology_args
+ }
+ topology_yaml = vnfdgen.generate_vnfd(topology, topology_data)
+ self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
def _find_vnf_name_from_id(self, vnf_id):
return next((vnfd["vnfd-id-ref"]
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
- @staticmethod
- def get_vld_networks(networks):
- # network name is vld_id
- vld_map = {}
- for name, n in networks.items():
- try:
- vld_map[n['vld_id']] = n
- except KeyError:
- vld_map[name] = n
- return vld_map
+ def _find_vnfd_from_vnf_idx(self, vnf_id):
+ return next((vnfd
+ for vnfd in self.topology["constituent-vnfd"]
+ if vnf_id == vnfd["member-vnf-index"]), None)
@staticmethod
def find_node_if(nodes, name, if_name, vld_id):
@@ -293,7 +222,9 @@ class NetworkServiceTestCase(base.Scenario):
node1_if["peer_ifname"] = node0_if_name
# just load the network
- vld_networks = self.get_vld_networks(self.context_cfg["networks"])
+ vld_networks = {n.get('vld_id', name): n for name, n in
+ self.context_cfg["networks"].items()}
+
node0_if["network"] = vld_networks.get(vld["id"], {})
node1_if["network"] = vld_networks.get(vld["id"], {})
@@ -332,10 +263,6 @@ class NetworkServiceTestCase(base.Scenario):
node0_if["peer_intf"] = node1_copy
node1_if["peer_intf"] = node0_copy
- def _find_vnfd_from_vnf_idx(self, vnf_idx):
- return next((vnfd for vnfd in self.topology["constituent-vnfd"]
- if vnf_idx == vnfd["member-vnf-index"]), None)
-
def _update_context_with_topology(self):
for vnfd in self.topology["constituent-vnfd"]:
vnf_idx = vnfd["member-vnf-index"]
@@ -343,43 +270,6 @@ class NetworkServiceTestCase(base.Scenario):
vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
self.context_cfg["nodes"][vnf_name].update(vnfd)
- def _probe_netdevs(self, node, node_dict, timeout=120):
- try:
- return self.node_netdevs[node]
- except KeyError:
- pass
-
- netdevs = {}
- cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
-
- with SshManager(node_dict, timeout=timeout) as conn:
- if conn:
- exit_status = conn.execute(cmd)[0]
- if exit_status != 0:
- raise IncorrectSetup("Node's %s lacks ip tool." % node)
- exit_status, stdout, _ = conn.execute(
- self.FIND_NETDEVICE_STRING)
- if exit_status != 0:
- raise IncorrectSetup(
- "Cannot find netdev info in sysfs" % node)
- netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
-
- self.node_netdevs[node] = netdevs
- return netdevs
-
- @classmethod
- def _probe_missing_values(cls, netdevs, network):
-
- mac_lower = network['local_mac'].lower()
- for netdev in netdevs.values():
- if netdev['address'].lower() != mac_lower:
- continue
- network.update({
- 'driver': netdev['driver'],
- 'vpci': netdev['pci_bus_id'],
- 'ifindex': netdev['ifindex'],
- })
-
def _generate_pod_yaml(self):
context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
# convert OrderedDict to a list
@@ -405,82 +295,16 @@ class NetworkServiceTestCase(base.Scenario):
pass
return new_node
- TOPOLOGY_REQUIRED_KEYS = frozenset({
- "vpci", "local_ip", "netmask", "local_mac", "driver"})
-
def map_topology_to_infrastructure(self):
""" This method should verify if the available resources defined in pod.yaml
match the topology.yaml file.
:return: None. Side effect: context_cfg is updated
"""
- num_nodes = len(self.context_cfg["nodes"])
- # OpenStack instance creation time is probably proportional to the number
- # of instances
- timeout = 120 * num_nodes
- for node, node_dict in self.context_cfg["nodes"].items():
-
- for network in node_dict["interfaces"].values():
- missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
- if not missing:
- continue
-
- # only ssh probe if there are missing values
- # ssh probe won't work on Ixia, so we had better define all our values
- try:
- netdevs = self._probe_netdevs(node, node_dict, timeout=timeout)
- except (SSHError, SSHTimeout):
- raise IncorrectConfig(
- "Unable to probe missing interface fields '%s', on node %s "
- "SSH Error" % (', '.join(missing), node))
- try:
- self._probe_missing_values(netdevs, network)
- except KeyError:
- pass
- else:
- missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
- network)
- if missing:
- raise IncorrectConfig(
- "Require interface fields '%s' not found, topology file "
- "corrupted" % ', '.join(missing))
-
- # we have to generate pod.yaml here so we have vpci and driver
- self._generate_pod_yaml()
# 3. Use topology file to find connections & resolve dest address
self._resolve_topology()
self._update_context_with_topology()
- FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
-$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
-$1/device/subsystem_vendor $1/device/subsystem_device ; \
-printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
-' sh \{\}/* \;
-"""
- BASE_ADAPTER_RE = re.compile(
- '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
-
- @classmethod
- def parse_netdev_info(cls, stdout):
- network_devices = defaultdict(dict)
- matches = cls.BASE_ADAPTER_RE.findall(stdout)
- for bus_path, interface_name, name, value in matches:
- dirname, bus_id = os.path.split(bus_path)
- if 'virtio' in bus_id:
- # for some stupid reason VMs include virtio1/
- # in PCI device path
- bus_id = os.path.basename(dirname)
- # remove extra 'device/' from 'device/vendor,
- # device/subsystem_vendor', etc.
- if 'device/' in name:
- name = name.split('/')[1]
- network_devices[interface_name][name] = value
- network_devices[interface_name][
- 'interface_name'] = interface_name
- network_devices[interface_name]['pci_bus_id'] = bus_id
- # convert back to regular dict
- return dict(network_devices)
-
@classmethod
def get_vnf_impl(cls, vnf_model_id):
""" Find the implementing class from vnf_model["vnf"]["name"] field
@@ -488,13 +312,14 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
:param vnf_model_id: parsed vnfd model ID field
:return: subclass of GenericVNF
"""
- import_modules_from_package(
+ utils.import_modules_from_package(
"yardstick.network_services.vnf_generic.vnf")
expected_name = vnf_model_id
classes_found = []
def impl():
- for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
+ for name, class_ in ((c.__name__, c) for c in
+ utils.itersubclasses(GenericVNF)):
if name == expected_name:
yield class_
classes_found.append(name)
@@ -547,7 +372,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
context_cfg = self.context_cfg
vnfs = []
- # we assume OrderedDict for consistenct in instantiation
+ # we assume OrderedDict for consistency in instantiation
for node_name, node in context_cfg["nodes"].items():
LOG.debug(node)
try:
@@ -556,7 +381,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
LOG.debug("no model for %s, skipping", node_name)
continue
file_path = scenario_cfg['task_path']
- with open_relative_file(file_name, file_path) as stream:
+ with utils.open_relative_file(file_name, file_path) as stream:
vnf_model = stream.read()
vnfd = vnfdgen.generate_vnfd(vnf_model, node)
# TODO: here add extra context_cfg["nodes"] regardless of template
@@ -606,6 +431,9 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
vnf.terminate()
raise
+ # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
+ self._generate_pod_yaml()
+
# 3. Run experiment
# Start listeners first to avoid losing packets
for traffic_gen in traffic_runners:
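
Several hunks above drop the local open_relative_file() helper in favour of utils.open_relative_file(). Reconstructed from the removed lines, its fallback behaviour is roughly the following sketch (the helper in yardstick.common.utils may differ in detail):

    import errno
    import os

    def open_relative_file(path, task_path):
        # Try the path as given; on ENOENT retry relative to the task
        # (test case) directory.
        try:
            return open(path)
        except IOError as e:
            if e.errno == errno.ENOENT:
                return open(os.path.join(task_path, path))
            raise
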