Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/base.py | 32
-rw-r--r--  yardstick/benchmark/contexts/dummy.py | 3
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 148
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py | 140
-rw-r--r--  yardstick/benchmark/contexts/model.py | 57
-rw-r--r--  yardstick/benchmark/contexts/node.py | 30
-rw-r--r--  yardstick/benchmark/contexts/ovsdpdk.py | 369
-rw-r--r--  yardstick/benchmark/contexts/sriov.py | 431
-rw-r--r--  yardstick/benchmark/contexts/standalone.py | 122
-rw-r--r--  yardstick/benchmark/core/plugin.py | 8
-rw-r--r--  yardstick/benchmark/core/task.py | 63
-rw-r--r--  yardstick/benchmark/core/testsuite.py | 42
-rw-r--r--  yardstick/benchmark/scenarios/availability/actionplayers.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py | 5
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker_conf.yaml | 7
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py | 7
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash | 42
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash | 65
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash | 2
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash | 70
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py | 5
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor_conf.yaml | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/baseoperation.py | 1
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/operation_general.py | 20
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/util.py | 33
-rw-r--r--  yardstick/benchmark/scenarios/base.py | 12
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py | 13
-rw-r--r--  yardstick/benchmark/scenarios/compute/ramspeed.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/lib/__init__.py | 0
-rw-r--r--  yardstick/benchmark/scenarios/lib/add_memory_load.py | 57
-rw-r--r--  yardstick/benchmark/scenarios/lib/check_numa_info.py | 61
-rw-r--r--  yardstick/benchmark/scenarios/lib/check_value.py | 58
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_migrate_target_host.py | 56
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_numa_info.py | 79
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_server.py | 83
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_server_ip.py | 38
-rw-r--r--  yardstick/benchmark/scenarios/lib/migrate.py | 155
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py | 26
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py | 242
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash | 139
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 106
-rw-r--r--  yardstick/benchmark/scenarios/storage/fio.py | 32
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 7
-rwxr-xr-x  yardstick/cmd/NSBperf.py | 16
-rw-r--r--  yardstick/cmd/commands/task.py | 7
-rw-r--r--  yardstick/common/constants.py | 16
-rw-r--r--  yardstick/common/kubernetes_utils.py | 137
-rw-r--r--  yardstick/common/openstack_utils.py | 24
-rw-r--r--  yardstick/common/utils.py | 95
-rw-r--r--  yardstick/dispatcher/base.py | 6
-rw-r--r--  yardstick/dispatcher/influxdb.py | 26
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py | 1
-rw-r--r--  yardstick/network_services/vnf_generic/vnfdgen.py | 2
-rw-r--r--  yardstick/orchestrator/heat.py | 90
-rw-r--r--  yardstick/orchestrator/kubernetes.py | 130
-rw-r--r--  yardstick/resources/scripts/install/storperf.bash | 27
-rw-r--r--  yardstick/resources/scripts/remove/storperf.bash | 20
-rw-r--r--  yardstick/vTC/apexlake/tests/deployment_unit_test.py | 21
-rw-r--r--  yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py | 49
-rw-r--r--  yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py | 6
65 files changed, 3234 insertions, 335 deletions
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index 0be2eee77..e362c6a3d 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -23,7 +23,7 @@ class Context(object):
@abc.abstractmethod
def init(self, attrs):
- "Initiate context."
+ """Initiate context."""
@staticmethod
def get_cls(context_type):
@@ -56,20 +56,34 @@ class Context(object):
"""get server info by name from context
"""
+ @abc.abstractmethod
+ def _get_network(self, attr_name):
+ """get network info by name from context
+ """
+
@staticmethod
def get_server(attr_name):
"""lookup server info by name from context
attr_name: either a name for a server created by yardstick or a dict
with attribute name mapping when using external heat templates
"""
- server = None
- for context in Context.list:
- server = context._get_server(attr_name)
- if server is not None:
- break
-
- if server is None:
+ servers = (context._get_server(attr_name) for context in Context.list)
+ try:
+ return next(s for s in servers if s)
+ except StopIteration:
raise ValueError("context not found for server '%r'" %
attr_name)
- return server
+ @staticmethod
+ def get_network(attr_name):
+ """lookup server info by name from context
+ attr_name: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ """
+
+ networks = (context._get_network(attr_name) for context in Context.list)
+ try:
+ return next(n for n in networks if n)
+ except StopIteration:
+ raise ValueError("context not found for server '%r'" %
+ attr_name)
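The new lookup helpers scan every registered context and return the first non-empty answer. A minimal standalone sketch of that pattern, with made-up dictionaries standing in for Context.list (illustrative only, not part of the patch):

contexts = [{"vm-a": {"ip": "10.0.1.5"}}, {"vm-b": {"ip": "10.0.1.6"}}]

def get_server(attr_name):
    # generator over all contexts; next() yields the first truthy hit
    servers = (ctx.get(attr_name) for ctx in contexts)
    try:
        return next(s for s in servers if s)
    except StopIteration:
        raise ValueError("context not found for server '%r'" % attr_name)

print(get_server("vm-b"))  # {'ip': '10.0.1.6'}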
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index c658d3257..8ae4b65b8 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -37,3 +37,6 @@ class DummyContext(Context):
def _get_server(self, attr_name):
return None
+
+ def _get_network(self, attr_name):
+ return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index fed8fc342..d5349eab5 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -25,6 +25,7 @@ from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
+from yardstick.common.openstack_utils import get_neutron_client
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.common.constants import YARDSTICK_ROOT_PATH
@@ -54,9 +55,11 @@ class HeatContext(Context):
self._user = None
self.template_file = None
self.heat_parameters = None
+ self.neutron_client = None
# generate an uuid to identify yardstick_key
# the first 8 digits of the uuid will be used
self.key_uuid = uuid.uuid4()
+ self.heat_timeout = None
self.key_filename = ''.join(
[YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
get_short_key_uuid(self.key_uuid)])
@@ -65,15 +68,16 @@ class HeatContext(Context):
def assign_external_network(self, networks):
sorted_networks = sorted(networks.items())
external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
- have_external_network = [(name, net)
- for name, net in sorted_networks if
- net.get("external_network")]
- # no external net defined, assign it to first network usig os.environ
+
+ have_external_network = any(net.get("external_network") for net in networks.values())
if sorted_networks and not have_external_network:
+ # no external net defined, assign it to first network using os.environ
sorted_networks[0][1]["external_network"] = external_network
- return sorted_networks
- def init(self, attrs): # pragma: no cover
+ self.networks = OrderedDict((name, Network(name, self, attrs))
+ for name, attrs in sorted_networks)
+
+ def init(self, attrs):
"""initializes itself from the supplied arguments"""
self.name = attrs["name"]
@@ -103,11 +107,7 @@ class HeatContext(Context):
# we have to do this first, because we are injecting external_network
# into the dict
- sorted_networks = self.assign_external_network(attrs["networks"])
-
- self.networks = OrderedDict(
- (name, Network(name, self, netattrs)) for name, netattrs in
- sorted_networks)
+ self.assign_external_network(attrs["networks"])
for name, serverattrs in sorted(attrs["servers"].items()):
server = Server(name, self, serverattrs)
@@ -120,7 +120,6 @@ class HeatContext(Context):
with open(self.key_filename + ".pub", "w") as pubkey_file:
pubkey_file.write(
"%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
- del rsa_key
@property
def image(self):
@@ -153,9 +152,12 @@ class HeatContext(Context):
template.add_network(network.stack_name,
network.physical_network,
network.provider,
- network.segmentation_id)
+ network.segmentation_id,
+ network.port_security_enabled)
template.add_subnet(network.subnet_stack_name, network.stack_name,
- network.subnet_cidr)
+ network.subnet_cidr,
+ network.enable_dhcp,
+ network.gateway_ip)
if network.router:
template.add_router(network.router.stack_name,
@@ -194,7 +196,7 @@ class HeatContext(Context):
scheduler_hints = {}
for pg in server.placement_groups:
update_scheduler_hints(scheduler_hints, added_servers, pg)
- # workround for openstack nova bug, check JIRA: YARDSTICK-200
+ # workaround for openstack nova bug, check JIRA: YARDSTICK-200
# for details
if len(availability_servers) == 2:
if not scheduler_hints["different_host"]:
@@ -250,6 +252,20 @@ class HeatContext(Context):
list(self.networks.values()),
scheduler_hints)
+ def get_neutron_info(self):
+ if not self.neutron_client:
+ self.neutron_client = get_neutron_client()
+
+ networks = self.neutron_client.list_networks()
+ for network in self.networks.values():
+ for neutron_net in networks['networks']:
+ if neutron_net['name'] == network.stack_name:
+ network.segmentation_id = neutron_net.get('provider:segmentation_id')
+ # we already have physical_network
+ # network.physical_network = neutron_net.get('provider:physical_network')
+ network.network_type = neutron_net.get('provider:network_type')
+ network.neutron_info = neutron_net
+
def deploy(self):
"""deploys template into a stack using cloud"""
print("Deploying context '%s'" % self.name)
@@ -267,20 +283,16 @@ class HeatContext(Context):
raise SystemExit("\nStack create interrupted")
except:
LOG.exception("stack failed")
+ # let the other failures happen, we want stack trace
raise
- # let the other failures happend, we want stack trace
+
+        # TODO: use Neutron to get segmentation-id
+ self.get_neutron_info()
# copy some vital stack output into server objects
for server in self.servers:
if server.ports:
- # TODO(hafe) can only handle one internal network for now
- port = next(iter(server.ports.values()))
- server.private_ip = self.stack.outputs[port["stack_name"]]
- server.interfaces = {}
- for network_name, port in server.ports.items():
- self.make_interface_dict(network_name, port['stack_name'],
- server,
- self.stack.outputs)
+ self.add_server_port(server)
if server.floating_ip:
server.public_ip = \
@@ -288,24 +300,36 @@ class HeatContext(Context):
print("Context '%s' deployed" % self.name)
- def make_interface_dict(self, network_name, stack_name, server, outputs):
- server.interfaces[network_name] = {
- "private_ip": outputs[stack_name],
+ def add_server_port(self, server):
+ # TODO(hafe) can only handle one internal network for now
+ port = next(iter(server.ports.values()))
+ server.private_ip = self.stack.outputs[port["stack_name"]]
+ server.interfaces = {}
+ for network_name, port in server.ports.items():
+ server.interfaces[network_name] = self.make_interface_dict(
+ network_name, port['stack_name'], self.stack.outputs)
+
+ def make_interface_dict(self, network_name, stack_name, outputs):
+ private_ip = outputs[stack_name]
+ mac_addr = outputs[stack_name + "-mac_address"]
+ subnet_cidr_key = "-".join([self.name, network_name, 'subnet', 'cidr'])
+ gateway_key = "-".join([self.name, network_name, 'subnet', 'gateway_ip'])
+ subnet_cidr = outputs[subnet_cidr_key]
+ subnet_ip = ipaddress.ip_network(subnet_cidr)
+ return {
+ "private_ip": private_ip,
"subnet_id": outputs[stack_name + "-subnet_id"],
- "subnet_cidr": outputs[
- "{}-{}-subnet-cidr".format(self.name, network_name)],
- "netmask": str(ipaddress.ip_network(
- outputs["{}-{}-subnet-cidr".format(self.name,
- network_name)]).netmask),
- "gateway_ip": outputs[
- "{}-{}-subnet-gateway_ip".format(self.name, network_name)],
- "mac_address": outputs[stack_name + "-mac_address"],
+ "subnet_cidr": subnet_cidr,
+ "network": str(subnet_ip.network_address),
+ "netmask": str(subnet_ip.netmask),
+ "gateway_ip": outputs[gateway_key],
+ "mac_address": mac_addr,
"device_id": outputs[stack_name + "-device_id"],
"network_id": outputs[stack_name + "-network_id"],
"network_name": network_name,
# to match vnf_generic
- "local_mac": outputs[stack_name + "-mac_address"],
- "local_ip": outputs[stack_name],
+ "local_mac": mac_addr,
+ "local_ip": private_ip,
"vld_id": self.networks[network_name].vld_id,
}
@@ -326,6 +350,19 @@ class HeatContext(Context):
super(HeatContext, self).undeploy()
+ @staticmethod
+ def generate_routing_table(server):
+ routes = [
+ {
+ "network": intf["network"],
+ "netmask": intf["netmask"],
+ "if": name,
+ "gateway": intf["gateway_ip"],
+ }
+ for name, intf in server.interfaces.items()
+ ]
+ return routes
+
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: either a name for a server created by yardstick or a dict
@@ -335,7 +372,10 @@ class HeatContext(Context):
'yardstick.resources',
'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))
- if isinstance(attr_name, collections.Mapping):
+ if not isinstance(attr_name, collections.Mapping):
+ server = self._server_map.get(attr_name, None)
+
+ else:
cname = attr_name["name"].split(".")[1]
if cname != self.name:
return None
@@ -352,10 +392,6 @@ class HeatContext(Context):
server = Server(attr_name["name"].split(".")[0], self, {})
server.public_ip = public_ip
server.private_ip = private_ip
- else:
- if attr_name not in self._server_map:
- return None
- server = self._server_map[attr_name]
if server is None:
return None
@@ -365,9 +401,37 @@ class HeatContext(Context):
"key_filename": key_filename,
"private_ip": server.private_ip,
"interfaces": server.interfaces,
+ "routing_table": self.generate_routing_table(server),
+ # empty IPv6 routing table
+ "nd_route_tbl": [],
}
# Target server may only have private_ip
if server.public_ip:
result["ip"] = server.public_ip
return result
+
+ def _get_network(self, attr_name):
+ if not isinstance(attr_name, collections.Mapping):
+ network = self.networks.get(attr_name, None)
+
+ else:
+ # Don't generalize too much Just support vld_id
+ vld_id = attr_name.get('vld_id')
+ if vld_id is None:
+ return None
+
+ network = next((n for n in self.networks.values() if
+ getattr(n, "vld_id", None) == vld_id), None)
+
+ if network is None:
+ return None
+
+ result = {
+ "name": network.name,
+ "vld_id": network.vld_id,
+ "segmentation_id": network.segmentation_id,
+ "network_type": network.network_type,
+ "physical_network": network.physical_network,
+ }
+ return result
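generate_routing_table() turns each server interface into one IPv4 route entry. A sketch of the resulting shape, with made-up interface values (not taken from the patch):

interfaces = {
    "xe0": {"network": "10.0.1.0", "netmask": "255.255.255.0",
            "gateway_ip": "10.0.1.1"},
}
routes = [
    {"network": intf["network"], "netmask": intf["netmask"],
     "if": name, "gateway": intf["gateway_ip"]}
    for name, intf in interfaces.items()
]
print(routes)
# [{'network': '10.0.1.0', 'netmask': '255.255.255.0',
#   'if': 'xe0', 'gateway': '10.0.1.1'}]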
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
new file mode 100644
index 000000000..a39f63137
--- /dev/null
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+import time
+import pkg_resources
+
+import paramiko
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.orchestrator.kubernetes import KubernetesTemplate
+from yardstick.common import kubernetes_utils as k8s_utils
+from yardstick.common import utils
+
+LOG = logging.getLogger(__name__)
+BITS_LENGTH = 2048
+
+
+class KubernetesContext(Context):
+ """Class that handle nodes info"""
+
+ __context_type__ = "Kubernetes"
+
+ def __init__(self):
+ self.name = ''
+ self.ssh_key = ''
+ self.key_path = ''
+ self.public_key_path = ''
+ self.template = None
+
+ super(KubernetesContext, self).__init__()
+
+ def init(self, attrs):
+ self.name = attrs.get('name', '')
+
+ template_cfg = attrs.get('servers', {})
+ self.template = KubernetesTemplate(self.name, template_cfg)
+
+ self.ssh_key = '{}-key'.format(self.name)
+
+ self.key_path = self._get_key_path()
+ self.public_key_path = '{}.pub'.format(self.key_path)
+
+ def deploy(self):
+ LOG.info('Creating ssh key')
+ self._set_ssh_key()
+
+ LOG.info('Launch containers')
+ self._create_rcs()
+ time.sleep(1)
+ self.template.get_rc_pods()
+
+ self._wait_until_running()
+
+ def undeploy(self):
+ self._delete_ssh_key()
+ self._delete_rcs()
+ self._delete_pods()
+
+ super(KubernetesContext, self).undeploy()
+
+ def _wait_until_running(self):
+ while not all(self._check_pod_status(p) for p in self.template.pods):
+ time.sleep(1)
+
+ def _check_pod_status(self, pod):
+ status = k8s_utils.read_pod_status(pod)
+ LOG.debug('%s:%s', pod, status)
+ if status == 'Failed':
+ LOG.error('Pod %s status is failed', pod)
+ raise RuntimeError
+ if status != 'Running':
+ return False
+ return True
+
+ def _create_rcs(self):
+ for obj in self.template.k8s_objs:
+ self._create_rc(obj.get_template())
+
+ def _create_rc(self, template):
+ k8s_utils.create_replication_controller(template)
+
+ def _delete_rcs(self):
+ for rc in self.template.rcs:
+ self._delete_rc(rc)
+
+ def _delete_rc(self, rc):
+ k8s_utils.delete_replication_controller(rc)
+
+ def _delete_pods(self):
+ for pod in self.template.pods:
+ self._delete_pod(pod)
+
+ def _delete_pod(self, pod):
+ k8s_utils.delete_pod(pod)
+
+ def _get_key_path(self):
+ task_id = self.name.split('-')[-1]
+ k = 'files/yardstick_key-{}'.format(task_id)
+ return pkg_resources.resource_filename('yardstick.resources', k)
+
+ def _set_ssh_key(self):
+ rsa_key = paramiko.RSAKey.generate(bits=BITS_LENGTH)
+
+ LOG.info('Writing private key')
+ rsa_key.write_private_key_file(self.key_path)
+
+ LOG.info('Writing public key')
+ key = '{} {}\n'.format(rsa_key.get_name(), rsa_key.get_base64())
+ with open(self.public_key_path, 'w') as f:
+ f.write(key)
+
+ LOG.info('Create configmap for ssh key')
+ k8s_utils.create_config_map(self.ssh_key, {'authorized_keys': key})
+
+ def _delete_ssh_key(self):
+ k8s_utils.delete_config_map(self.ssh_key)
+ utils.remove_file(self.key_path)
+ utils.remove_file(self.public_key_path)
+
+ def _get_server(self, name):
+ resp = k8s_utils.get_pod_list()
+ hosts = ({'name': n.metadata.name,
+ 'ip': n.status.pod_ip,
+ 'user': 'root',
+ 'key_filename': self.key_path,
+ 'private_ip': n.status.pod_ip}
+ for n in resp.items if n.metadata.name.startswith(name))
+
+ return next(hosts, None)
+
+ def _get_network(self, attr_name):
+ return None
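The Kubernetes deploy path blocks until every pod reports Running and fails fast on a Failed pod. A decoupled sketch of that polling loop, where check_status is a stand-in for k8s_utils.read_pod_status:

import time

def wait_until_running(pods, check_status, interval=1):
    def pod_ok(pod):
        status = check_status(pod)
        if status == 'Failed':
            raise RuntimeError('pod %s failed' % pod)
        return status == 'Running'

    while not all(pod_ok(p) for p in pods):
        time.sleep(interval)

# wait_until_running(['yardstick-pod-0', 'yardstick-pod-1'], my_status_callback)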
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index 5077a9786..aed1a3f60 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -104,15 +104,30 @@ class Network(Object):
self.stack_name = context.name + "-" + self.name
self.subnet_stack_name = self.stack_name + "-subnet"
self.subnet_cidr = attrs.get('cidr', '10.0.1.0/24')
+ self.enable_dhcp = attrs.get('enable_dhcp', 'true')
self.router = None
self.physical_network = attrs.get('physical_network', 'physnet1')
- self.provider = attrs.get('provider', None)
- self.segmentation_id = attrs.get('segmentation_id', None)
+ self.provider = attrs.get('provider')
+ self.segmentation_id = attrs.get('segmentation_id')
+ self.network_type = attrs.get('network_type')
+ self.port_security_enabled = attrs.get('port_security_enabled')
+ self.vnic_type = attrs.get('vnic_type', 'normal')
+ self.allowed_address_pairs = attrs.get('allowed_address_pairs', [])
+ try:
+ # we require 'null' or '' to disable setting gateway_ip
+ self.gateway_ip = attrs['gateway_ip']
+ except KeyError:
+ # default to explicit None
+ self.gateway_ip = None
+ else:
+ # null is None in YAML, so we have to convert back to string
+ if self.gateway_ip is None:
+ self.gateway_ip = "null"
if "external_network" in attrs:
self.router = Router("router", self.name,
context, attrs["external_network"])
- self.vld_id = attrs.get("vld_id", "")
+ self.vld_id = attrs.get("vld_id")
Network.list.append(self)
@@ -170,6 +185,14 @@ class Server(Object): # pragma: no cover
self.placement_groups.append(pg)
pg.add_member(self.stack_name)
+ self.volume = None
+ if "volume" in attrs:
+ self.volume = attrs.get("volume")
+
+ self.volume_mountpoint = None
+ if "volume_mountpoint" in attrs:
+ self.volume_mountpoint = attrs.get("volume_mountpoint")
+
# support servergroup attr
self.server_group = None
sg = attrs.get("server_group")
@@ -233,10 +256,17 @@ class Server(Object): # pragma: no cover
for network in networks:
port_name = server_name + "-" + network.name + "-port"
self.ports[network.name] = {"stack_name": port_name}
- template.add_port(port_name, network.stack_name,
- network.subnet_stack_name,
- sec_group_id=self.secgroup_name,
- provider=network.provider)
+ # we can't use secgroups if port_security_enabled is False
+ if network.port_security_enabled:
+ sec_group_id = self.secgroup_name
+ else:
+ sec_group_id = None
+ # don't refactor to pass in network object, that causes JSON
+ # circular ref encode errors
+ template.add_port(port_name, network.stack_name, network.subnet_stack_name,
+ network.vnic_type, sec_group_id=sec_group_id,
+ provider=network.provider,
+ allowed_address_pairs=network.allowed_address_pairs)
port_name_list.append(port_name)
if self.floating_ip:
@@ -247,7 +277,7 @@ class Server(Object): # pragma: no cover
external_network,
port_name,
network.router.stack_if_name,
- self.secgroup_name)
+ sec_group_id)
self.floating_ip_assoc["stack_name"] = \
server_name + "-fip-assoc"
template.add_floating_ip_association(
@@ -263,6 +293,17 @@ class Server(Object): # pragma: no cover
else:
self.flavor_name = self.flavor
+ if self.volume:
+ if isinstance(self.volume, dict):
+ self.volume["name"] = \
+ self.volume.setdefault("name", server_name + "-volume")
+ template.add_volume(**self.volume)
+ template.add_volume_attachment(server_name, self.volume["name"],
+ mountpoint=self.volume_mountpoint)
+ else:
+ template.add_volume_attachment(server_name, self.volume,
+ mountpoint=self.volume_mountpoint)
+
template.add_server(server_name, self.image, flavor=self.flavor_name,
flavors=self.context.flavors,
ports=port_name_list,
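The gateway_ip handling above distinguishes a missing key (keep the Heat default) from an explicit YAML null (disable the gateway, carried onward as the string "null"). A standalone sketch of that convention, assuming this reading of the change:

def resolve_gateway_ip(attrs):
    try:
        gateway_ip = attrs['gateway_ip']
    except KeyError:
        return None                 # key absent: default behaviour
    # YAML null parses to None; convert back to the string "null"
    return "null" if gateway_ip is None else gateway_ip

print(resolve_gateway_ip({}))                          # None
print(resolve_gateway_ip({'gateway_ip': None}))        # null
print(resolve_gateway_ip({'gateway_ip': '10.0.1.1'}))  # 10.0.1.1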
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index baa1cf5d6..b3f0aca0e 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -33,6 +33,7 @@ class NodeContext(Context):
self.name = None
self.file_path = None
self.nodes = []
+ self.networks = {}
self.controllers = []
self.computes = []
self.baremetals = []
@@ -77,6 +78,9 @@ class NodeContext(Context):
self.env = attrs.get('env', {})
LOG.debug("Env: %r", self.env)
+ # add optional static network definition
+ self.networks.update(cfg.get("networks", {}))
+
def deploy(self):
config_type = self.env.get('type', '')
if config_type == 'ansible':
@@ -141,6 +145,32 @@ class NodeContext(Context):
node["name"] = attr_name
return node
+ def _get_network(self, attr_name):
+ if not isinstance(attr_name, collections.Mapping):
+ network = self.networks.get(attr_name)
+
+ else:
+ # Don't generalize too much Just support vld_id
+ vld_id = attr_name.get('vld_id')
+ if vld_id is None:
+ return None
+
+ network = next((n for n in self.networks.values() if
+ n.get("vld_id") == vld_id), None)
+
+ if network is None:
+ return None
+
+ result = {
+ # name is required
+ "name": network["name"],
+ "vld_id": network.get("vld_id"),
+ "segmentation_id": network.get("segmentation_id"),
+ "network_type": network.get("network_type"),
+ "physical_network": network.get("physical_network"),
+ }
+ return result
+
def _execute_script(self, node_name, info):
if node_name == 'local':
self._execute_local_script(info)
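_get_network() accepts either a plain network name or a {'vld_id': ...} mapping. A standalone sketch of that dual lookup with made-up network definitions:

networks = {
    "mgmt": {"name": "mgmt", "vld_id": "mgmt_net"},
    "xe0":  {"name": "xe0", "vld_id": "private_0", "segmentation_id": 100},
}

def get_network(attr_name):
    if not isinstance(attr_name, dict):
        return networks.get(attr_name)
    vld_id = attr_name.get('vld_id')
    if vld_id is None:
        return None
    return next((n for n in networks.values()
                 if n.get("vld_id") == vld_id), None)

print(get_network({"vld_id": "private_0"})["name"])  # xe0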
diff --git a/yardstick/benchmark/contexts/ovsdpdk.py b/yardstick/benchmark/contexts/ovsdpdk.py
new file mode 100644
index 000000000..cf5529d89
--- /dev/null
+++ b/yardstick/benchmark/contexts/ovsdpdk.py
@@ -0,0 +1,369 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import yaml
+import time
+import glob
+import itertools
+import logging
+from yardstick import ssh
+from yardstick.benchmark.contexts.standalone import StandaloneContext
+
+BIN_PATH = "/opt/isb_bin/"
+DPDK_NIC_BIND = "dpdk_nic_bind.py"
+
+log = logging.getLogger(__name__)
+
+VM_TEMPLATE = """
+<domain type='kvm'>
+ <name>vm1</name>
+ <uuid>18230c0c-635d-4c50-b2dc-a213d30acb34</uuid>
+ <memory unit='KiB'>20971520</memory>
+ <currentMemory unit="KiB">20971520</currentMemory>
+ <memoryBacking>
+ <hugepages/>
+ </memoryBacking>
+ <vcpu placement='static'>20</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ </features>
+ <cpu match="exact" mode='host-model'>
+ <model fallback='allow'/>
+ <topology sockets='1' cores='10' threads='2'/>
+ </cpu>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none'/>
+ <source file="{vm_image}"/>
+ <target dev='vda' bus='virtio'/>
+ <address bus="0x00" domain="0x0000"
+ function="0x0" slot="0x04" type="pci" />
+ </disk>
+ <!--disk type='dir' device='disk'>
+ <driver name='qemu' type='fat'/>
+ <source dir='/opt/isb_bin/dpdk'/>
+ <target dev='vdb' bus='virtio'/>
+ <readonly/>
+ </disk-->
+ <interface type="bridge">
+ <mac address="00:00:00:ab:cd:ef" />
+ <source bridge="br-int" />
+ </interface>
+ <interface type='vhostuser'>
+ <mac address='00:00:00:00:00:01'/>
+ <source type='unix' path='/usr/local/var/run/openvswitch/dpdkvhostuser0' mode='client'/>
+ <model type='virtio'/>
+ <driver queues='4'>
+ <host mrg_rxbuf='off'/>
+ </driver>
+ </interface>
+ <interface type='vhostuser'>
+ <mac address='00:00:00:00:00:02'/>
+ <source type='unix' path='/usr/local/var/run/openvswitch/dpdkvhostuser1' mode='client'/>
+ <model type='virtio'/>
+ <driver queues='4'>
+ <host mrg_rxbuf='off'/>
+ </driver>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <graphics autoport="yes" listen="0.0.0.0" port="1" type="vnc" />
+ </devices>
+</domain>
+"""
+
+
+class Ovsdpdk(StandaloneContext):
+ def __init__(self):
+ self.name = None
+ self.file_path = None
+ self.nodes = []
+ self.vm_deploy = False
+ self.ovs = []
+ self.first_run = True
+ self.dpdk_nic_bind = BIN_PATH + DPDK_NIC_BIND
+ self.user = ""
+ self.ssh_ip = ""
+ self.passwd = ""
+ self.ssh_port = ""
+ self.auth_type = ""
+
+ def init(self):
+ '''initializes itself'''
+ log.debug("In init")
+ self.parse_pod_and_get_data()
+
+ def parse_pod_and_get_data(self, file_path):
+ self.file_path = file_path
+ print("parsing pod file: {0}".format(self.file_path))
+ try:
+ with open(self.file_path) as stream:
+ cfg = yaml.load(stream)
+ except IOError:
+ print("File {0} does not exist".format(self.file_path))
+ raise
+
+ self.ovs.extend([node for node in cfg["nodes"]
+ if node["role"] == "Ovsdpdk"])
+ self.user = self.ovs[0]['user']
+ self.ssh_ip = self.ovs[0]['ip']
+ if self.ovs[0]['auth_type'] == "password":
+ self.passwd = self.ovs[0]['password']
+ else:
+ self.ssh_port = self.ovs[0]['ssh_port']
+ self.key_filename = self.ovs[0]['key_filename']
+
+ def ssh_remote_machine(self):
+ if self.ovs[0]['auth_type'] == "password":
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ password=self.passwd)
+ self.connection.wait()
+ else:
+ if self.ssh_port is not None:
+ ssh_port = self.ssh_port
+ else:
+ ssh_port = ssh.DEFAULT_PORT
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ port=ssh_port,
+ key_filename=self.key_filename)
+ self.connection.wait()
+
+ def get_nic_details(self):
+ nic_details = {}
+ nic_details['interface'] = {}
+ nic_details['pci'] = self.ovs[0]['phy_ports']
+ nic_details['phy_driver'] = self.ovs[0]['phy_driver']
+ nic_details['vports_mac'] = self.ovs[0]['vports_mac']
+ # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
+ for i, _ in enumerate(nic_details['pci']):
+ err, out, _ = self.connection.execute(
+ "{dpdk_nic_bind} --force -b {driver} {port}".format(
+ dpdk_nic_bind=self.dpdk_nic_bind,
+ driver=self.ovs[0]['phy_driver'],
+ port=self.ovs[0]['phy_ports'][i]))
+ err, out, _ = self.connection.execute(
+ "lshw -c network -businfo | grep '{port}'".format(
+ port=self.ovs[0]['phy_ports'][i]))
+ a = out.split()[1]
+ err, out, _ = self.connection.execute(
+ "ip -s link show {interface}".format(
+ interface=out.split()[1]))
+ nic_details['interface'][i] = str(a)
+ print("{0}".format(nic_details))
+ return nic_details
+
+ def install_req_libs(self):
+ if self.first_run:
+ err, out, _ = self.connection.execute("apt-get update")
+ print("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install qemu-kvm libvirt-bin")
+ print("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install libvirt-dev bridge-utils numactl")
+ print("{0}".format(out))
+ self.first_run = False
+
+ def setup_ovs(self, vpcis):
+ self.connection.execute("/usr/bin/chmod 0666 /dev/vfio/*")
+ self.connection.execute("/usr/bin/chmod a+x /dev/vfio")
+ self.connection.execute("pkill -9 ovs")
+ self.connection.execute("ps -ef | grep ovs | grep -v grep | "
+ "awk '{print $2}' | xargs -r kill -9")
+ self.connection.execute("killall -r 'ovs*'")
+ self.connection.execute(
+ "mkdir -p {0}/etc/openvswitch".format(self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "mkdir -p {0}/var/run/openvswitch".format(self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "rm {0}/etc/openvswitch/conf.db".format(self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "ovsdb-tool create {0}/etc/openvswitch/conf.db "
+ "{0}/share/openvswitch/"
+ "vswitch.ovsschema".format(self.ovs[0]["vpath"]))
+ self.connection.execute("modprobe vfio-pci")
+ self.connection.execute("chmod a+x /dev/vfio")
+ self.connection.execute("chmod 0666 /dev/vfio/*")
+ for vpci in vpcis:
+ self.connection.execute(
+ "/opt/isb_bin/dpdk_nic_bind.py "
+ "--bind=vfio-pci {0}".format(vpci))
+
+ def start_ovs_serverswitch(self):
+ self.connection.execute("mkdir -p /usr/local/var/run/openvswitch")
+ self.connection.execute(
+ "ovsdb-server --remote=punix:"
+ "/usr/local/var/run/openvswitch/db.sock --pidfile --detach")
+ self.connection.execute(
+ "ovs-vsctl --no-wait set "
+ "Open_vSwitch . other_config:dpdk-init=true")
+ self.connection.execute(
+ "ovs-vsctl --no-wait set "
+ "Open_vSwitch . other_config:dpdk-lcore-mask=0x3")
+ self.connection.execute(
+ "ovs-vsctl --no-wait set "
+ "Open_vSwitch . other_config:dpdk-socket-mem='2048,0'")
+ self.connection.execute(
+ "ovs-vswitchd unix:{0}/"
+ "var/run/openvswitch/db.sock --pidfile --detach "
+ "--log-file=/var/log/openvswitch/"
+ "ovs-vswitchd.log".format(
+ self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=2C")
+
+ def setup_ovs_bridge(self):
+ self.connection.execute("ovs-vsctl del-br br0")
+ self.connection.execute(
+ "rm -rf /usr/local/var/run/openvswitch/dpdkvhostuser*")
+ self.connection.execute(
+ "ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev")
+ self.connection.execute(
+ "ovs-vsctl add-port br0 dpdk0 -- set Interface dpdk0 type=dpdk")
+ self.connection.execute(
+ "ovs-vsctl add-port br0 dpdk1 -- set Interface dpdk1 type=dpdk")
+ self.connection.execute(
+ "ovs-vsctl add-port br0 dpdkvhostuser0 -- set Interface "
+ "dpdkvhostuser0 type=dpdkvhostuser")
+ self.connection.execute("ovs-vsctl add-port br0 dpdkvhostuser1 "
+ "-- set Interface dpdkvhostuser1 "
+ "type=dpdkvhostuser")
+ self.connection.execute(
+ "chmod 0777 {0}/var/run/"
+ "openvswitch/dpdkvhostuser*".format(self.ovs[0]["vpath"]))
+
+ def add_oflows(self):
+ self.connection.execute("ovs-ofctl del-flows br0")
+ for flow in self.ovs[0]["flow"]:
+ self.connection.execute(flow)
+ self.connection.execute("ovs-ofctl dump-flows br0")
+ self.connection.execute(
+ "ovs-vsctl set Interface dpdk0 options:n_rxq=4")
+ self.connection.execute(
+ "ovs-vsctl set Interface dpdk1 options:n_rxq=4")
+
+ def setup_ovs_context(self, pcis, nic_details, host_driver):
+
+ ''' 1: Setup vm_ovs.xml to launch VM.'''
+ cfg_ovs = '/tmp/vm_ovs.xml'
+ vm_ovs_xml = VM_TEMPLATE.format(vm_image=self.ovs[0]["images"])
+ with open(cfg_ovs, 'w') as f:
+ f.write(vm_ovs_xml)
+
+ ''' 2: Create and start the VM'''
+ self.connection.put(cfg_ovs, cfg_ovs)
+ time.sleep(10)
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ if out == "vm1":
+ print("VM is already present")
+ else:
+ ''' FIXME: launch through libvirt'''
+ print("virsh create ...")
+ err, out, _ = self.connection.execute(
+ "virsh create /tmp/vm_ovs.xml")
+ time.sleep(10)
+ print("err : {0}".format(err))
+ print("{0}".format(_))
+ print("out : {0}".format(out))
+
+        ''' 3: Tuning for better performance.'''
+ self.pin_vcpu(pcis)
+ self.connection.execute(
+ "echo 1 > /sys/module/kvm/parameters/"
+ "allow_unsafe_assigned_interrupts")
+ self.connection.execute(
+ "echo never > /sys/kernel/mm/transparent_hugepage/enabled")
+ print("After tuning performance ...")
+
+ ''' This is roughly compatible with check_output function in subprocess
+ module which is only available in python 2.7.'''
+ def check_output(self, cmd, stderr=None):
+ '''Run a command and capture its output'''
+ err, out, _ = self.connection.execute(cmd)
+ return err, out
+
+ def read_from_file(self, filename):
+ data = ""
+ with open(filename, 'r') as the_file:
+ data = the_file.read()
+ return data
+
+ def write_to_file(self, filename, content):
+ with open(filename, 'w') as the_file:
+ the_file.write(content)
+
+ def pin_vcpu(self, pcis):
+ nodes = self.get_numa_nodes()
+ print("{0}".format(nodes))
+ num_nodes = len(nodes)
+ for i in range(0, 10):
+ self.connection.execute(
+ "virsh vcpupin vm1 {0} {1}".format(
+ i, nodes[str(num_nodes - 1)][i % len(nodes[str(num_nodes - 1)])]))
+
+ def get_numa_nodes(self):
+ nodes_sysfs = glob.iglob("/sys/devices/system/node/node*")
+ nodes = {}
+ for node_sysfs in nodes_sysfs:
+ num = os.path.basename(node_sysfs).replace("node", "")
+ with open(os.path.join(node_sysfs, "cpulist")) as cpulist_file:
+ cpulist = cpulist_file.read().strip()
+ print("cpulist: {0}".format(cpulist))
+ nodes[num] = self.split_cpu_list(cpulist)
+ print("nodes: {0}".format(nodes))
+ return nodes
+
+ def split_cpu_list(self, cpu_list):
+ if cpu_list:
+ ranges = cpu_list.split(',')
+ bounds = ([int(b) for b in r.split('-')] for r in ranges)
+ range_objects =\
+ (range(bound[0], bound[1] + 1 if len(bound) == 2
+ else bound[0] + 1) for bound in bounds)
+
+ return sorted(itertools.chain.from_iterable(range_objects))
+ else:
+ return []
+
+ def destroy_vm(self):
+ host_driver = self.ovs[0]['phy_driver']
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ print("{0}".format(out))
+ if err == 0:
+ self.connection.execute("virsh shutdown vm1")
+ self.connection.execute("virsh destroy vm1")
+ self.check_output("rmmod {0}".format(host_driver))[1].splitlines()
+ self.check_output("modprobe {0}".format(host_driver))[
+ 1].splitlines()
+ else:
+ print("error : ", err)
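split_cpu_list() expands a sysfs cpulist string into individual CPU ids for vCPU pinning. A standalone copy of the helper with a usage example:

import itertools

def split_cpu_list(cpu_list):
    if not cpu_list:
        return []
    ranges = cpu_list.split(',')
    bounds = ([int(b) for b in r.split('-')] for r in ranges)
    range_objects = (
        range(bound[0], bound[1] + 1 if len(bound) == 2 else bound[0] + 1)
        for bound in bounds)
    return sorted(itertools.chain.from_iterable(range_objects))

print(split_cpu_list("0-3,8,10-11"))  # [0, 1, 2, 3, 8, 10, 11]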
diff --git a/yardstick/benchmark/contexts/sriov.py b/yardstick/benchmark/contexts/sriov.py
new file mode 100644
index 000000000..fe27d2579
--- /dev/null
+++ b/yardstick/benchmark/contexts/sriov.py
@@ -0,0 +1,431 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import yaml
+import re
+import time
+import glob
+import uuid
+import random
+import logging
+import itertools
+import xml.etree.ElementTree as ET
+from yardstick import ssh
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.utils import provision_tool
+from yardstick.benchmark.contexts.standalone import StandaloneContext
+
+log = logging.getLogger(__name__)
+
+VM_TEMPLATE = """
+<domain type="kvm">
+ <name>vm1</name>
+ <uuid>{random_uuid}</uuid>
+ <memory unit="KiB">102400</memory>
+ <currentMemory unit="KiB">102400</currentMemory>
+ <memoryBacking>
+ <hugepages />
+ </memoryBacking>
+ <vcpu placement="static">20</vcpu>
+ <os>
+ <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
+ <boot dev="hd" />
+ </os>
+ <features>
+ <acpi />
+ <apic />
+ <pae />
+ </features>
+ <cpu match="exact" mode="custom">
+ <model fallback="allow">SandyBridge</model>
+ <topology cores="10" sockets="1" threads="2" />
+ </cpu>
+ <clock offset="utc">
+ <timer name="rtc" tickpolicy="catchup" />
+ <timer name="pit" tickpolicy="delay" />
+ <timer name="hpet" present="no" />
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm-spice</emulator>
+ <disk device="disk" type="file">
+ <driver name="qemu" type="qcow2" />
+ <source file="{vm_image}"/>
+ <target bus="virtio" dev="vda" />
+ <address bus="0x00" domain="0x0000"
+function="0x0" slot="0x04" type="pci" />
+ </disk>
+ <controller index="0" model="ich9-ehci1" type="usb">
+ <address bus="0x00" domain="0x0000"
+function="0x7" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="ich9-uhci1" type="usb">
+ <master startport="0" />
+ <address bus="0x00" domain="0x0000" function="0x0"
+multifunction="on" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="ich9-uhci2" type="usb">
+ <master startport="2" />
+ <address bus="0x00" domain="0x0000"
+function="0x1" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="ich9-uhci3" type="usb">
+ <master startport="4" />
+ <address bus="0x00" domain="0x0000"
+function="0x2" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="pci-root" type="pci" />
+ <serial type="pty">
+ <target port="0" />
+ </serial>
+ <console type="pty">
+ <target port="0" type="serial" />
+ </console>
+ <input bus="usb" type="tablet" />
+ <input bus="ps2" type="mouse" />
+ <input bus="ps2" type="keyboard" />
+ <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
+ <video>
+ <model heads="1" type="cirrus" vram="16384" />
+ <address bus="0x00" domain="0x0000"
+function="0x0" slot="0x02" type="pci" />
+ </video>
+ <memballoon model="virtio">
+ <address bus="0x00" domain="0x0000"
+function="0x0" slot="0x06" type="pci" />
+ </memballoon>
+ <interface type="bridge">
+ <mac address="{mac_addr}" />
+ <source bridge="virbr0" />
+ </interface>
+ </devices>
+</domain>
+"""
+
+
+class Sriov(StandaloneContext):
+ def __init__(self):
+ self.name = None
+ self.file_path = None
+ self.nodes = []
+ self.vm_deploy = False
+ self.sriov = []
+ self.first_run = True
+ self.dpdk_nic_bind = ""
+ self.user = ""
+ self.ssh_ip = ""
+ self.passwd = ""
+ self.ssh_port = ""
+ self.auth_type = ""
+
+ def init(self):
+ log.debug("In init")
+ self.parse_pod_and_get_data(self.file_path)
+
+ def parse_pod_and_get_data(self, file_path):
+ self.file_path = file_path
+ log.debug("parsing pod file: {0}".format(self.file_path))
+ try:
+ with open(self.file_path) as stream:
+ cfg = yaml.load(stream)
+ except IOError:
+ log.error("File {0} does not exist".format(self.file_path))
+ raise
+
+ self.sriov.extend([node for node in cfg["nodes"]
+ if node["role"] == "Sriov"])
+ self.user = self.sriov[0]['user']
+ self.ssh_ip = self.sriov[0]['ip']
+ if self.sriov[0]['auth_type'] == "password":
+ self.passwd = self.sriov[0]['password']
+ else:
+ self.ssh_port = self.sriov[0]['ssh_port']
+ self.key_filename = self.sriov[0]['key_filename']
+
+ def ssh_remote_machine(self):
+ if self.sriov[0]['auth_type'] == "password":
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ password=self.passwd)
+ self.connection.wait()
+ else:
+ if self.ssh_port is not None:
+ ssh_port = self.ssh_port
+ else:
+ ssh_port = ssh.DEFAULT_PORT
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ port=ssh_port,
+ key_filename=self.key_filename)
+ self.connection.wait()
+ self.dpdk_nic_bind = provision_tool(
+ self.connection,
+ os.path.join(get_nsb_option("bin_path"), "dpdk_nic_bind.py"))
+
+ def get_nic_details(self):
+ nic_details = {}
+ nic_details = {
+ 'interface': {},
+ 'pci': self.sriov[0]['phy_ports'],
+ 'phy_driver': self.sriov[0]['phy_driver'],
+ 'vf_macs': self.sriov[0]['vf_macs']
+ }
+ # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
+ for i, _ in enumerate(nic_details['pci']):
+ err, out, _ = self.connection.execute(
+ "{dpdk_nic_bind} --force -b {driver} {port}".format(
+ dpdk_nic_bind=self.dpdk_nic_bind,
+ driver=self.sriov[0]['phy_driver'],
+ port=self.sriov[0]['phy_ports'][i]))
+ err, out, _ = self.connection.execute(
+ "lshw -c network -businfo | grep '{port}'".format(
+ port=self.sriov[0]['phy_ports'][i]))
+ a = out.split()[1]
+ err, out, _ = self.connection.execute(
+ "ip -s link show {interface}".format(
+ interface=out.split()[1]))
+ nic_details['interface'][i] = str(a)
+ log.info("{0}".format(nic_details))
+ return nic_details
+
+ def install_req_libs(self):
+ if self.first_run:
+ log.info("Installing required libraries...")
+ err, out, _ = self.connection.execute("apt-get update")
+ log.debug("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install qemu-kvm libvirt-bin")
+ log.debug("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install libvirt-dev bridge-utils numactl")
+ log.debug("{0}".format(out))
+ self.first_run = False
+
+ def configure_nics_for_sriov(self, host_driver, nic_details):
+ vf_pci = [[], []]
+ self.connection.execute(
+ "rmmod {0}".format(host_driver))[1].splitlines()
+ self.connection.execute(
+ "modprobe {0} num_vfs=1".format(host_driver))[1].splitlines()
+ nic_details['vf_pci'] = {}
+ for i in range(len(nic_details['pci'])):
+ self.connection.execute(
+ "echo 1 > /sys/bus/pci/devices/{0}/sriov_numvfs".format(
+ nic_details['pci'][i]))
+ err, out, _ = self.connection.execute(
+ "ip link set {interface} vf 0 mac {mac}".format(
+ interface=nic_details['interface'][i],
+ mac=nic_details['vf_macs'][i]))
+ time.sleep(3)
+ vf_pci[i] = self.get_vf_datas(
+ 'vf_pci',
+ nic_details['pci'][i],
+ nic_details['vf_macs'][i])
+ nic_details['vf_pci'][i] = vf_pci[i]
+ log.debug("NIC DETAILS : {0}".format(nic_details))
+ return nic_details
+
+ def setup_sriov_context(self, pcis, nic_details, host_driver):
+ blacklist = "/etc/modprobe.d/blacklist.conf"
+
+ # 1 : Blacklist the vf driver in /etc/modprobe.d/blacklist.conf
+ vfnic = "{0}vf".format(host_driver)
+ lines = self.read_from_file(blacklist)
+ if vfnic not in lines:
+ vfblacklist = "blacklist {vfnic}".format(vfnic=vfnic)
+ self.connection.execute(
+ "echo {vfblacklist} >> {blacklist}".format(
+ vfblacklist=vfblacklist,
+ blacklist=blacklist))
+
+ # 2 : modprobe host_driver with num_vfs
+ nic_details = self.configure_nics_for_sriov(host_driver, nic_details)
+
+ # 3: Setup vm_sriov.xml to launch VM
+ cfg_sriov = '/tmp/vm_sriov.xml'
+ mac = [0x00, 0x24, 0x81,
+ random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ mac_address = ':'.join(map(lambda x: "%02x" % x, mac))
+ vm_sriov_xml = VM_TEMPLATE.format(
+ random_uuid=uuid.uuid4(),
+ mac_addr=mac_address,
+ vm_image=self.sriov[0]["images"])
+ with open(cfg_sriov, 'w') as f:
+ f.write(vm_sriov_xml)
+
+ vf = nic_details['vf_pci']
+ for index in range(len(nic_details['vf_pci'])):
+ self.add_sriov_interface(
+ index,
+ vf[index]['vf_pci'],
+ mac_address,
+ "/tmp/vm_sriov.xml")
+ self.connection.execute(
+ "ifconfig {interface} up".format(
+ interface=nic_details['interface'][index]))
+
+ # 4: Create and start the VM
+ self.connection.put(cfg_sriov, cfg_sriov)
+ time.sleep(10)
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ try:
+ if out == "vm1":
+ log.info("VM is already present")
+ else:
+ # FIXME: launch through libvirt
+ log.info("virsh create ...")
+ err, out, _ = self.connection.execute(
+ "virsh create /tmp/vm_sriov.xml")
+ time.sleep(10)
+ log.error("err : {0}".format(err))
+ log.error("{0}".format(_))
+ log.debug("out : {0}".format(out))
+ except ValueError:
+ raise
+
+        # 5: Tuning for better performance
+ self.pin_vcpu(pcis)
+ self.connection.execute(
+ "echo 1 > /sys/module/kvm/parameters/"
+ "allow_unsafe_assigned_interrupts")
+ self.connection.execute(
+ "echo never > /sys/kernel/mm/transparent_hugepage/enabled")
+
+ def add_sriov_interface(self, index, vf_pci, vfmac, xml):
+ root = ET.parse(xml)
+ pattern = "0000:(\d+):(\d+).(\d+)"
+ m = re.search(pattern, vf_pci, re.MULTILINE)
+ device = root.find('devices')
+
+ interface = ET.SubElement(device, 'interface')
+ interface.set('managed', 'yes')
+ interface.set('type', 'hostdev')
+
+ mac = ET.SubElement(interface, 'mac')
+ mac.set('address', vfmac)
+ source = ET.SubElement(interface, 'source')
+
+ addr = ET.SubElement(source, "address")
+ addr.set('domain', "0x0")
+ addr.set('bus', "{0}".format(m.group(1)))
+ addr.set('function', "{0}".format(m.group(3)))
+ addr.set('slot', "{0}".format(m.group(2)))
+ addr.set('type', "pci")
+
+ vf_pci = ET.SubElement(interface, 'address')
+ vf_pci.set('type', 'pci')
+ vf_pci.set('domain', '0x0000')
+ vf_pci.set('bus', '0x00')
+ vf_pci.set('slot', '0x0{0}'.format(index + 7))
+ vf_pci.set('function', '0x00')
+
+ root.write(xml)
+
+ # This is roughly compatible with check_output function in subprocess
+ # module which is only available in python 2.7
+ def check_output(self, cmd, stderr=None):
+ # Run a command and capture its output
+ err, out, _ = self.connection.execute(cmd)
+ return err, out
+
+ def get_virtual_devices(self, pci):
+ pf_vfs = {}
+ err, extra_info = self.check_output(
+ "cat /sys/bus/pci/devices/{0}/virtfn0/uevent".format(pci))
+ pattern = "PCI_SLOT_NAME=(?P<name>[0-9:.\s.]+)"
+ m = re.search(pattern, extra_info, re.MULTILINE)
+
+ if m:
+ pf_vfs.update({pci: str(m.group(1).rstrip())})
+ log.info("pf_vfs : {0}".format(pf_vfs))
+ return pf_vfs
+
+ def get_vf_datas(self, key, value, vfmac):
+ vfret = {}
+ pattern = "0000:(\d+):(\d+).(\d+)"
+
+ vfret["mac"] = vfmac
+ vfs = self.get_virtual_devices(value)
+ log.info("vfs: {0}".format(vfs))
+ for k, v in vfs.items():
+ m = re.search(pattern, k, re.MULTILINE)
+ m1 = re.search(pattern, value, re.MULTILINE)
+ if m.group(1) == m1.group(1):
+ vfret["vf_pci"] = str(v)
+ break
+
+ return vfret
+
+ def read_from_file(self, filename):
+ data = ""
+ with open(filename, 'r') as the_file:
+ data = the_file.read()
+ return data
+
+ def write_to_file(self, filename, content):
+ with open(filename, 'w') as the_file:
+ the_file.write(content)
+
+ def pin_vcpu(self, pcis):
+ nodes = self.get_numa_nodes()
+ log.info("{0}".format(nodes))
+ num_nodes = len(nodes)
+ for i in range(0, 10):
+ self.connection.execute(
+ "virsh vcpupin vm1 {0} {1}".format(
+ i, nodes[str(num_nodes - 1)][i % len(nodes[str(num_nodes - 1)])]))
+
+ def get_numa_nodes(self):
+ nodes_sysfs = glob.iglob("/sys/devices/system/node/node*")
+ nodes = {}
+ for node_sysfs in nodes_sysfs:
+ num = os.path.basename(node_sysfs).replace("node", "")
+ with open(os.path.join(node_sysfs, "cpulist")) as cpulist_file:
+ cpulist = cpulist_file.read().strip()
+ nodes[num] = self.split_cpu_list(cpulist)
+ log.info("nodes: {0}".format(nodes))
+ return nodes
+
+ def split_cpu_list(self, cpu_list):
+ if cpu_list:
+ ranges = cpu_list.split(',')
+ bounds = ([int(b) for b in r.split('-')] for r in ranges)
+ range_objects =\
+ (range(bound[0], bound[1] + 1 if len(bound) == 2
+ else bound[0] + 1) for bound in bounds)
+
+ return sorted(itertools.chain.from_iterable(range_objects))
+ else:
+ return []
+
+ def destroy_vm(self):
+ host_driver = self.sriov[0]["phy_driver"]
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ log.info("{0}".format(out))
+ if err == 0:
+ self.connection.execute("virsh shutdown vm1")
+ self.connection.execute("virsh destroy vm1")
+ self.check_output("rmmod {0}".format(host_driver))[1].splitlines()
+ self.check_output("modprobe {0}".format(host_driver))[
+ 1].splitlines()
+ else:
+ log.error("error : {0}".format(err))
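add_sriov_interface() splits a VF PCI address into bus/slot/function and injects a libvirt hostdev <interface> element into the domain XML. A minimal sketch of that construction, with made-up VF PCI and MAC values:

import re
import xml.etree.ElementTree as ET

vf_pci, vf_mac = "0000:05:10.1", "00:24:81:aa:bb:cc"
m = re.search(r"0000:(\d+):(\d+).(\d+)", vf_pci)

interface = ET.Element('interface', {'managed': 'yes', 'type': 'hostdev'})
ET.SubElement(interface, 'mac', {'address': vf_mac})
source = ET.SubElement(interface, 'source')
ET.SubElement(source, 'address', {
    'type': 'pci', 'domain': '0x0',
    'bus': m.group(1), 'slot': m.group(2), 'function': m.group(3)})

print(ET.tostring(interface).decode())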
diff --git a/yardstick/benchmark/contexts/standalone.py b/yardstick/benchmark/contexts/standalone.py
index 78eaac7ee..2bc1f3755 100644
--- a/yardstick/benchmark/contexts/standalone.py
+++ b/yardstick/benchmark/contexts/standalone.py
@@ -18,9 +18,11 @@ import logging
import errno
import collections
import yaml
+import time
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import YARDSTICK_ROOT_PATH
+from yardstick.common.utils import import_modules_from_package, itersubclasses
LOG = logging.getLogger(__name__)
@@ -36,8 +38,10 @@ class StandaloneContext(Context):
self.name = None
self.file_path = None
self.nodes = []
+ self.networks = {}
self.nfvi_node = []
- super(StandaloneContext, self).__init__()
+ self.nfvi_obj = None
+ super(self.__class__, self).__init__()
def read_config_file(self):
"""Read from config file"""
@@ -47,6 +51,14 @@ class StandaloneContext(Context):
cfg = yaml.load(stream)
return cfg
+ def get_nfvi_obj(self):
+ print("{0}".format(self.nfvi_node[0]['role']))
+ context_type = self.get_context_impl(self.nfvi_node[0]['role'])
+ nfvi_obj = context_type()
+ nfvi_obj.__init__()
+ nfvi_obj.parse_pod_and_get_data(self.file_path)
+ return nfvi_obj
+
def init(self, attrs):
"""initializes itself from the supplied arguments"""
@@ -63,23 +75,70 @@ class StandaloneContext(Context):
else:
raise
- self.nodes.extend(cfg["nodes"])
- self.nfvi_node.extend([node for node in cfg["nodes"]
- if node["role"] == "nfvi_node"])
+ self.vm_deploy = attrs.get("vm_deploy", True)
+ self.nodes.extend([node for node in cfg["nodes"]
+ if str(node["role"]) != "Sriov" and
+ str(node["role"]) != "Ovsdpdk"])
+ for node in cfg["nodes"]:
+ if str(node["role"]) == "Sriov":
+ self.nfvi_node.extend([node for node in cfg["nodes"]
+ if str(node["role"]) == "Sriov"])
+ if str(node["role"]) == "Ovsdpdk":
+ self.nfvi_node.extend([node for node in cfg["nodes"]
+ if str(node["role"]) == "Ovsdpdk"])
+ LOG.info("{0}".format(node["role"]))
+ else:
+ LOG.debug("Node role is other than SRIOV and OVS")
+ self.nfvi_obj = self.get_nfvi_obj()
+ # add optional static network definition
+ self.networks.update(cfg.get("networks", {}))
+ self.nfvi_obj = self.get_nfvi_obj()
LOG.debug("Nodes: %r", self.nodes)
LOG.debug("NFVi Node: %r", self.nfvi_node)
+ LOG.debug("Networks: %r", self.networks)
def deploy(self):
"""don't need to deploy"""
# Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
- pass
+ if not self.vm_deploy:
+ return
+
+ # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
+ self.nfvi_obj.ssh_remote_machine()
+ if self.nfvi_obj.first_run is True:
+ self.nfvi_obj.install_req_libs()
+
+ nic_details = self.nfvi_obj.get_nic_details()
+ print("{0}".format(nic_details))
+
+ if self.nfvi_node[0]["role"] == "Sriov":
+ self.nfvi_obj.setup_sriov_context(
+ self.nfvi_obj.sriov[0]['phy_ports'],
+ nic_details,
+ self.nfvi_obj.sriov[0]['phy_driver'])
+ if self.nfvi_node[0]["role"] == "Ovsdpdk":
+ self.nfvi_obj.setup_ovs(self.nfvi_obj.ovs[0]["phy_ports"])
+ self.nfvi_obj.start_ovs_serverswitch()
+ time.sleep(5)
+ self.nfvi_obj.setup_ovs_bridge()
+ self.nfvi_obj.add_oflows()
+ self.nfvi_obj.setup_ovs_context(
+ self.nfvi_obj.ovs[0]['phy_ports'],
+ nic_details,
+ self.nfvi_obj.ovs[0]['phy_driver'])
+ pass
def undeploy(self):
"""don't need to undeploy"""
+ if not self.vm_deploy:
+ return
# Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
- super(StandaloneContext, self).undeploy()
+ # self.nfvi_obj = self.get_nfvi_obj()
+ self.nfvi_obj.ssh_remote_machine()
+ self.nfvi_obj.destroy_vm()
+ pass
def _get_server(self, attr_name):
"""lookup server info by name from context
@@ -87,16 +146,12 @@ class StandaloneContext(Context):
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
-
if isinstance(attr_name, collections.Mapping):
return None
-
- if self.name.split("-")[0] != attr_name.split(".")[1]:
+ if self.name != attr_name.split(".")[1]:
return None
-
node_name = attr_name.split(".")[0]
matching_nodes = (n for n in self.nodes if n["name"] == node_name)
-
try:
# A clone is created in order to avoid affecting the
# original one.
@@ -111,6 +166,49 @@ class StandaloneContext(Context):
else:
raise ValueError("Duplicate nodes!!! Nodes: %s %s",
(matching_nodes, duplicate))
-
node["name"] = attr_name
return node
+
+ def _get_network(self, attr_name):
+ if not isinstance(attr_name, collections.Mapping):
+ network = self.networks.get(attr_name)
+
+ else:
+ # Don't generalize too much Just support vld_id
+ vld_id = attr_name.get('vld_id')
+ if vld_id is None:
+ return None
+ try:
+ network = next(n for n in self.networks.values() if
+ n.get("vld_id") == vld_id)
+ except StopIteration:
+ return None
+
+ if network is None:
+ return None
+
+ result = {
+ # name is required
+ "name": network["name"],
+ "vld_id": network.get("vld_id"),
+ "segmentation_id": network.get("segmentation_id"),
+ "network_type": network.get("network_type"),
+ "physical_network": network.get("physical_network"),
+ }
+ return result
+
+ def get_context_impl(self, nfvi_type):
+ """ Find the implementing class from vnf_model["vnf"]["name"] field
+
+ :param vnf_model: dictionary containing a parsed vnfd
+ :return: subclass of GenericVNF
+ """
+ import_modules_from_package(
+ "yardstick.benchmark.contexts")
+ expected_name = nfvi_type
+ impl = [c for c in itersubclasses(StandaloneContext)
+ if c.__name__ == expected_name]
+ try:
+ return next(iter(impl))
+ except StopIteration:
+ raise ValueError("No implementation for %s", expected_name)
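get_context_impl() resolves the NFVi role name ("Sriov" or "Ovsdpdk") to the matching StandaloneContext subclass. A simplified sketch of that lookup using plain __subclasses__() instead of the yardstick import helpers:

class StandaloneContext(object):
    pass

class Sriov(StandaloneContext):
    pass

class Ovsdpdk(StandaloneContext):
    pass

def get_context_impl(nfvi_type):
    impl = (c for c in StandaloneContext.__subclasses__()
            if c.__name__ == nfvi_type)
    try:
        return next(impl)
    except StopIteration:
        raise ValueError("No implementation for %s" % nfvi_type)

print(get_context_impl("Sriov").__name__)  # Sriov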
diff --git a/yardstick/benchmark/core/plugin.py b/yardstick/benchmark/core/plugin.py
index 7f67a04b3..c8d0865d1 100644
--- a/yardstick/benchmark/core/plugin.py
+++ b/yardstick/benchmark/core/plugin.py
@@ -84,8 +84,8 @@ class Plugin(object):
if deployment_ip == "local":
self.client = ssh.SSH.from_node(deployment, overrides={
- # host can't be None, fail if no INSTALLER_IP
- 'ip': os.environ["INSTALLER_IP"],
+ # host can't be None, fail if no JUMP_HOST_IP
+ 'ip': os.environ["JUMP_HOST_IP"],
})
else:
self.client = ssh.SSH.from_node(deployment)
@@ -107,8 +107,8 @@ class Plugin(object):
if deployment_ip == "local":
self.client = ssh.SSH.from_node(deployment, overrides={
- # host can't be None, fail if no INSTALLER_IP
- 'ip': os.environ["INSTALLER_IP"],
+ # host can't be None, fail if no JUMP_HOST_IP
+ 'ip': os.environ["JUMP_HOST_IP"],
})
else:
self.client = ssh.SSH.from_node(deployment)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index a985c86ef..b2da7a2ee 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -48,6 +48,12 @@ class Task(object): # pragma: no cover
self.contexts = []
self.outputs = {}
+ def _set_dispatchers(self, output_config):
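+ # 'dispatcher' may be a comma-separated list (e.g. "file,influxdb"),
+ # so normalize it into a Python list before it is consumed elsewhere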
+ dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
+ 'file')
+ out_types = [s.strip() for s in dispatchers.split(',')]
+ output_config['DEFAULT']['dispatcher'] = out_types
+
def start(self, args, **kwargs):
"""Start a benchmark scenario."""
@@ -58,12 +64,20 @@ class Task(object): # pragma: no cover
check_environment()
- output_config = utils.parse_ini_file(config_file)
+ try:
+ output_config = utils.parse_ini_file(config_file)
+ except Exception:
+ # all errors are ignored; fall back to an empty config ({})
+ output_config = {}
+
self._init_output_config(output_config)
self._set_output_config(output_config, args.output_file)
LOG.debug('Output configuration is: %s', output_config)
- if output_config['DEFAULT'].get('dispatcher') == 'file':
+ self._set_dispatchers(output_config)
+
+ # update dispatcher list
+ if 'file' in output_config['DEFAULT']['dispatcher']:
result = {'status': 0, 'result': {}}
utils.write_json_to_file(args.output_file, result)
@@ -193,9 +207,10 @@ class Task(object): # pragma: no cover
return 'PASS'
def _do_output(self, output_config, result):
+ dispatchers = DispatcherBase.get(output_config)
- dispatcher = DispatcherBase.get(output_config)
- dispatcher.flush_result_data(result)
+ for dispatcher in dispatchers:
+ dispatcher.flush_result_data(result)
def _run(self, scenarios, run_in_parallel, output_file):
"""Deploys context and calls runners"""
@@ -255,11 +270,7 @@ class Task(object): # pragma: no cover
self.outputs.update(runner.get_output())
result.extend(runner.get_result())
-
- if status != 0:
- raise RuntimeError
print("Background task ended")
-
return result
def atexit_handler(self):
@@ -326,6 +337,8 @@ class Task(object): # pragma: no cover
if "nodes" in scenario_cfg:
context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+ context_cfg["networks"] = get_networks_from_nodes(
+ context_cfg["nodes"])
runner = base_runner.Runner.get(runner_cfg)
print("Starting runner of type '%s'" % runner_cfg["type"])
@@ -375,10 +388,10 @@ class TaskParser(object): # pragma: no cover
tc_fit_installer = constraint.get('installer', None)
LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
cur_pod, cur_installer, constraint)
- if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+ if (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod):
return False
- if cur_installer and tc_fit_installer and \
- cur_installer not in tc_fit_installer:
+ if (cur_installer is None) or (tc_fit_installer and cur_installer
+ not in tc_fit_installer):
return False
return True
@@ -522,7 +535,7 @@ class TaskParser(object): # pragma: no cover
cfg_schema))
def _check_precondition(self, cfg):
- """Check if the envrionment meet the preconditon"""
+ """Check if the environment meet the precondition"""
if "precondition" in cfg:
precondition = cfg["precondition"]
@@ -577,14 +590,26 @@ def _is_background_scenario(scenario):
def parse_nodes_with_context(scenario_cfg):
- """paras the 'nodes' fields in scenario """
+ """parse the 'nodes' fields in scenario """
nodes = scenario_cfg["nodes"]
-
- nodes_cfg = {}
- for nodename in nodes:
- nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
- return nodes_cfg
+ return {nodename: Context.get_server(node) for nodename, node in nodes.items()}
+
+
+def get_networks_from_nodes(nodes):
+ """parse the 'nodes' fields in scenario """
+ networks = {}
+ for node in nodes.values():
+ if not node:
+ continue
+ for interface in node['interfaces'].values():
+ vld_id = interface.get('vld_id')
+ # mgmt network doesn't have vld_id
+ if not vld_id:
+ continue
+ network = Context.get_network({"vld_id": vld_id})
+ if network:
+ networks[network['name']] = network
+ return networks
def runner_join(runner):
diff --git a/yardstick/benchmark/core/testsuite.py b/yardstick/benchmark/core/testsuite.py
new file mode 100644
index 000000000..e3940a0ba
--- /dev/null
+++ b/yardstick/benchmark/core/testsuite.py
@@ -0,0 +1,42 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class Testsuite(object):
+ """Testcase commands.
+
+ Set of commands to discover and display test cases.
+ """
+
+ def list_all(self, args):
+ """List existing test cases"""
+
+ testsuite_list = self._get_testsuite_file_list()
+
+ return testsuite_list
+
+ def _get_testsuite_file_list(self):
+ try:
+ testsuite_files = sorted(os.listdir(consts.TESTSUITE_DIR))
+ except OSError:
+ LOG.exception('Failed to list dir:\n%s\n', consts.TESTSUITE_DIR)
+ raise
+
+ return testsuite_files
diff --git a/yardstick/benchmark/scenarios/availability/actionplayers.py b/yardstick/benchmark/scenarios/availability/actionplayers.py
index 420626413..c5e199ba6 100644
--- a/yardstick/benchmark/scenarios/availability/actionplayers.py
+++ b/yardstick/benchmark/scenarios/availability/actionplayers.py
@@ -29,8 +29,10 @@ class AttackerPlayer(ActionPlayer):
class OperationPlayer(ActionPlayer):
- def __init__(self, operation):
+ def __init__(self, operation, intermediate_variables):
self.underlyingOperation = operation
+ self.underlyingOperation.intermediate_variables \
+ = intermediate_variables
def action(self):
self.underlyingOperation.run()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 22de0b645..50d44c1ca 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -9,7 +9,6 @@
from __future__ import absolute_import
import logging
import subprocess
-import traceback
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.availability.attacker.baseattacker import \
@@ -26,9 +25,7 @@ def _execute_shell_command(command, stdin=None):
output = subprocess.check_output(command, stdin=stdin, shell=True)
except Exception:
exitcode = -1
- output = traceback.format_exc()
- LOG.error("exec command '%s' error:\n ", command)
- LOG.error(traceback.format_exc())
+ LOG.error("exec command '%s' error:\n ", command, exc_info=True)
return exitcode, output
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index f7ab23dcd..cb171eafa 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -47,11 +47,11 @@ class ProcessAttacker(BaseAttacker):
stdin=stdin_file)
if stdout:
- LOG.info("check the envrioment success!")
+ LOG.info("check the environment success!")
return int(stdout.strip('\n'))
else:
LOG.error(
- "the host envrioment is error, stdout:%s, stderr:%s",
+ "the host environment is error, stdout:%s, stderr:%s",
stdout, stderr)
return False
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
index b8c34ad44..aa144ab50 100644
--- a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -16,6 +16,11 @@ kill-process:
inject_script: ha_tools/fault_process_kill.bash
recovery_script: ha_tools/start_service.bash
+kill-lxc-process:
+ check_script: ha_tools/check_lxc_process_python.bash
+ inject_script: ha_tools/fault_lxc_process_kill.bash
+ recovery_script: ha_tools/start_lxc_service.bash
+
bare-metal-down:
check_script: ha_tools/check_host_ping.bash
recovery_script: ha_tools/ipmi_power.bash
@@ -34,4 +39,4 @@ stress-cpu:
block-io:
inject_script: ha_tools/disk/block_io.bash
- recovery_script: ha_tools/disk/recovery_disk_io.bash \ No newline at end of file
+ recovery_script: ha_tools/disk/recovery_disk_io.bash
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index e0d05ebf5..c9187c34d 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -65,7 +65,9 @@ class Director(object):
self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
- def createActionPlayer(self, type, key):
+ def createActionPlayer(self, type, key, intermediate_variables=None):
+ if intermediate_variables is None:
+ intermediate_variables = {}
LOG.debug(
"the type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
@@ -76,7 +78,8 @@ class Director(object):
return actionplayers.ResultCheckerPlayer(
self.resultCheckerMgr[key])
if type == ActionType.OPERATION:
- return actionplayers.OperationPlayer(self.operationMgr[key])
+ return actionplayers.OperationPlayer(self.operationMgr[key],
+ intermediate_variables)
LOG.debug("something run when creatactionplayer")
def createActionRollbacker(self, type, key):
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash
new file mode 100755
index 000000000..6d2f4dd51
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of a service
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+ lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+ container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+ container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+ echo $(($(lxc-attach -n "${container_1}" -- ps aux | grep -e "${NOVA_API_PROCESS_1}" | grep -v grep | grep -cv /bin/sh) + $(lxc-attach -n "${container_2}" -- ps aux | grep -e "${NOVA_API_PROCESS_2}" | grep -v grep | grep -cv /bin/sh)))
+else
+ container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+ if [ "${process_name}" = "haproxy" ]; then
+ ps aux | grep -e "/usr/.*/${process_name}" | grep -v grep | grep -cv /bin/sh
+ else
+ lxc-attach -n "${container}" -- ps aux | grep -e "${process_name}" | grep -v grep | grep -cv /bin/sh
+ fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash
new file mode 100755
index 000000000..b0b86ab65
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop process by process name
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+ lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+ container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+ container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+ pids_1=$(lxc-attach -n "${container_1}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_1}")
+ for pid in ${pids_1};
+ do
+ lxc-attach -n "${container_1}" -- kill -9 "${pid}"
+ done
+
+ pids_2=$(lxc-attach -n "${container_2}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_2}")
+ for pid in ${pids_2};
+ do
+ lxc-attach -n "${container_2}" -- kill -9 "${pid}"
+ done
+else
+ container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+ if [ "${process_name}" = "haproxy" ]; then
+ for pid in $(pgrep -f "/usr/.*/${process_name}");
+ do
+ kill -9 "${pid}"
+ done
+ elif [ "${process_name}" = "keystone" ]; then
+ pids=$(lxc-attach -n "${container}" -- ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}')
+ for pid in ${pids};
+ do
+ lxc-attach -n "${container}" -- kill -9 "${pid}"
+ done
+ else
+ pids=$(lxc-attach -n "${container}" -- pgrep -f "/openstack/.*/${process_name}")
+ for pid in ${pids};
+ do
+ lxc-attach -n "${container}" -- kill -9 "${pid}"
+ done
+ fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
index aee516ea9..7408409a9 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
@@ -20,4 +20,4 @@ else
SECURE=""
fi
-openstack "${SECURE}" flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
+openstack ${SECURE} flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
index d39926fc5..7240476f7 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
@@ -20,4 +20,4 @@ else
SECURE=""
fi
-openstack "${SECURE}" flavor delete $1
+openstack ${SECURE} flavor delete $1
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
index bd61ba9bb..e679fdb9e 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
@@ -19,4 +19,4 @@ else
SECURE=""
fi
-openstack "${SECURE}" flavor list
+openstack ${SECURE} flavor list
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash
new file mode 100755
index 000000000..36a673977
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Start a service and check the service is started
+
+set -e
+
+NOVA_API_SERVICE_1="nova-api-os-compute"
+NOVA_API_SERVICE_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+service_name=$1
+
+if [ "${service_name}" = "haproxy" ]; then
+ if which systemctl 2>/dev/null; then
+ systemctl start $service_name
+ else
+ service $service_name start
+ fi
+else
+ lxc_filter=${service_name//-/_}
+
+ if [ "${lxc_filter}" = "glance_api" ]; then
+ lxc_filter="glance"
+ fi
+
+ if [ "${service_name}" = "nova-api" ]; then
+ container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+ container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+ if lxc-attach -n "${container_1}" -- which systemctl 2>/dev/null; then
+ lxc-attach -n "${container_1}" -- systemctl start "${NOVA_API_SERVICE_1}"
+ else
+ lxc-attach -n "${container_1}" -- service "${NOVA_API_SERVICE_1}" start
+ fi
+
+ if lxc-attach -n "${container_2}" -- which systemctl 2>/dev/null; then
+ lxc-attach -n "${container_2}" -- systemctl start "${NOVA_API_SERVICE_2}"
+ else
+ lxc-attach -n "${container_2}" -- service "${NOVA_API_SERVICE_2}" start
+ fi
+ else
+ container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+ Distributor=$(lxc-attach -n "${container}" -- lsb_release -a | grep "Distributor ID" | awk '{print $3}')
+
+ if [ "${Distributor}" != "Ubuntu" -a "${service_name}" != "keystone" -a "${service_name}" != "neutron-server" ]; then
+ service_name="openstack-"${service_name}
+ elif [ "${Distributor}" = "Ubuntu" -a "${service_name}" = "keystone" ]; then
+ service_name="apache2"
+ elif [ "${service_name}" = "keystone" ]; then
+ service_name="httpd"
+ fi
+
+ if lxc-attach -n "${container}" -- which systemctl 2>/dev/null; then
+ lxc-attach -n "${container}" -- systemctl start "${service_name}"
+ else
+ lxc-attach -n "${container}" -- service "${service_name}" start
+ fi
+ fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index a0777f94e..a9488cc30 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -11,7 +11,6 @@ from __future__ import absolute_import
import os
import logging
import subprocess
-import traceback
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.availability.monitor import basemonitor
@@ -27,9 +26,7 @@ def _execute_shell_command(command):
output = subprocess.check_output(command, shell=True)
except Exception:
exitcode = -1
- output = traceback.format_exc()
- LOG.error("exec command '%s' error:\n ", command)
- LOG.error(traceback.format_exc())
+ LOG.error("exec command '%s' error:\n ", command, exc_info=True)
return exitcode, output
diff --git a/yardstick/benchmark/scenarios/availability/monitor_conf.yaml b/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
index 511449221..a08347d2d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
@@ -13,6 +13,8 @@ schema: "yardstick:task:0.1"
process-status:
monitor_script: ha_tools/check_process_python.bash
+lxc_process-status:
+ monitor_script: ha_tools/check_lxc_process_python.bash
nova-image-list:
monitor_script: ha_tools/nova_image_list.bash
service-status:
diff --git a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
index be286b8fd..88ca9e2bb 100644
--- a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
+++ b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
@@ -58,6 +58,7 @@ class BaseOperation(object):
self.key = ''
self._config = config
self._context = context
+ self.intermediate_variables = {}
@staticmethod
def get_operation_cls(type):
diff --git a/yardstick/benchmark/scenarios/availability/operation/operation_general.py b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
index 8fd387e47..af1ae7469 100644
--- a/yardstick/benchmark/scenarios/availability/operation/operation_general.py
+++ b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
@@ -15,7 +15,8 @@ from yardstick.benchmark.scenarios.availability.operation.baseoperation \
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.availability.util \
- import buildshellparams, execute_shell_command
+ import buildshellparams, execute_shell_command, \
+ read_stdout_item, build_shell_command
LOG = logging.getLogger(__name__)
@@ -39,11 +40,7 @@ class GeneralOperaion(BaseOperation):
self.operation_key = self._config['operation_key']
if "action_parameter" in self._config:
- actionParameter = self._config['action_parameter']
- str = buildshellparams(
- actionParameter, True if self.connection else False)
- l = list(item for item in actionParameter.values())
- self.action_param = str.format(*l)
+ self.actionParameter_config = self._config['action_parameter']
if "rollback_parameter" in self._config:
rollbackParameter = self._config['rollback_parameter']
@@ -61,6 +58,11 @@ class GeneralOperaion(BaseOperation):
def run(self):
if "action_parameter" in self._config:
+ self.action_param = \
+ build_shell_command(
+ self.actionParameter_config,
+ True if self.connection else False,
+ self.intermediate_variables)
if self.connection:
with open(self.action_script, "r") as stdin_file:
exit_status, stdout, stderr = self.connection.execute(
@@ -83,6 +85,12 @@ class GeneralOperaion(BaseOperation):
if exit_status == 0:
LOG.debug("success,the operation's output is: %s", stdout)
+ if "return_parameter" in self._config:
+ returnParameter = self._config['return_parameter']
+ for key, item in returnParameter.items():
+ value = read_stdout_item(stdout, key)
+ LOG.debug("intermediate variables %s: %s", item, value)
+ self.intermediate_variables[item] = value
else:
LOG.error(
"the operation's error, stdout:%s, stderr:%s",
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 28bec8aff..17ad79f29 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -25,6 +25,7 @@ class ScenarioGeneral(base.Scenario):
"scenario_cfg:%s context_cfg:%s", scenario_cfg, context_cfg)
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
+ self.intermediate_variables = {}
def setup(self):
self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -38,7 +39,8 @@ class ScenarioGeneral(base.Scenario):
orderedSteps.index(step) + 1)
try:
actionPlayer = self.director.createActionPlayer(
- step['actionType'], step['actionKey'])
+ step['actionType'], step['actionKey'],
+ self.intermediate_variables)
actionPlayer.action()
actionRollbacker = self.director.createActionRollbacker(
step['actionType'], step['actionKey'])
diff --git a/yardstick/benchmark/scenarios/availability/util.py b/yardstick/benchmark/scenarios/availability/util.py
index eadbfa53b..6fef622bd 100644
--- a/yardstick/benchmark/scenarios/availability/util.py
+++ b/yardstick/benchmark/scenarios/availability/util.py
@@ -14,13 +14,8 @@ LOG = logging.getLogger(__name__)
def buildshellparams(param, remote=True):
- i = 0
- values = []
result = '/bin/bash -s' if remote else ''
- for key in param.keys():
- values.append(param[key])
- result += " {%d}" % i
- i = i + 1
+ result += "".join(" {%d}" % i for i in range(len(param)))
return result
@@ -36,5 +31,29 @@ def execute_shell_command(command):
output = traceback.format_exc()
LOG.error("exec command '%s' error:\n ", command)
LOG.error(traceback.format_exc())
-
return exitcode, output
+
+PREFIX = '$'
+
+
+def build_shell_command(param_config, remote=True, intermediate_variables=None):
+ param_template = '/bin/bash -s' if remote else ''
+ if intermediate_variables:
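+ # substitute "$"-prefixed placeholders with values recorded by earlier
+ # operations; a placeholder with no matching variable is left untouched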
+ for key, val in param_config.items():
+ if str(val).startswith(PREFIX):
+ try:
+ param_config[key] = intermediate_variables[val]
+ except KeyError:
+ pass
+ result = param_template + "".join(" {}".format(v) for v in param_config.values())
+ LOG.debug("THE RESULT OF build_shell_command IS: %s", result)
+ return result
+
+
+def read_stdout_item(stdout, key):
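+ # scan "|"-delimited table output (such as an OpenStack CLI listing) for the
+ # row whose second column starts with the key and return the third column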
+ for item in stdout.splitlines():
+ if key in item:
+ attributes = item.split("|")
+ if attributes[1].lstrip().startswith(key):
+ return attributes[2].strip()
+ return None
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 5d3c36c38..3cb138dd8 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -63,3 +63,15 @@ class Scenario(object):
return scenario.__module__ + "." + scenario.__name__
raise RuntimeError("No such scenario type %s" % scenario_type)
+
+ def _push_to_outputs(self, keys, values):
+ return dict(zip(keys, values))
+
+ def _change_obj_to_dict(self, obj):
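+ # keep only the plain attributes of obj: vars() raises TypeError for
+ # values without a __dict__ (ints, strings, lists, ...), which are kept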
+ dic = {}
+ for k, v in vars(obj).items():
+ try:
+ vars(v)
+ except TypeError:
+ dic[k] = v
+ return dic
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index c99fc988d..801f7fa80 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -15,6 +15,7 @@ import pkg_resources
from oslo_serialization import jsonutils
import yardstick.ssh as ssh
+from yardstick.common import utils
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
@@ -127,30 +128,32 @@ class Lmbench(base.Scenario):
if status:
raise RuntimeError(stderr)
+ lmbench_result = {}
if test_type == 'latency':
- result.update(
+ lmbench_result.update(
{"latencies": jsonutils.loads(stdout)})
else:
- result.update(jsonutils.loads(stdout))
+ lmbench_result.update(jsonutils.loads(stdout))
+ result.update(utils.flatten_dict_key(lmbench_result))
if "sla" in self.scenario_cfg:
sla_error = ""
if test_type == 'latency':
sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
- for t_latency in result["latencies"]:
+ for t_latency in lmbench_result["latencies"]:
latency = t_latency['latency']
if latency > sla_max_latency:
sla_error += "latency %f > sla:max_latency(%f); " \
% (latency, sla_max_latency)
elif test_type == 'bandwidth':
sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
- bw = result["bandwidth(MBps)"]
+ bw = lmbench_result["bandwidth(MBps)"]
if bw < sla_min_bw:
sla_error += "bandwidth %f < " \
"sla:min_bandwidth(%f)" % (bw, sla_min_bw)
elif test_type == 'latency_for_cache':
sla_latency = float(self.scenario_cfg['sla']['max_latency'])
- cache_latency = float(result['L1cache'])
+ cache_latency = float(lmbench_result['L1cache'])
if sla_latency < cache_latency:
sla_error += "latency %f > sla:max_latency(%f); " \
% (cache_latency, sla_latency)
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index 850ee5934..ca64935dd 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -14,6 +14,7 @@ import pkg_resources
from oslo_serialization import jsonutils
import yardstick.ssh as ssh
+from yardstick.common import utils
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
@@ -128,12 +129,13 @@ class Ramspeed(base.Scenario):
if status:
raise RuntimeError(stderr)
- result.update(jsonutils.loads(stdout))
+ ramspeed_result = jsonutils.loads(stdout)
+ result.update(utils.flatten_dict_key(ramspeed_result))
if "sla" in self.scenario_cfg:
sla_error = ""
sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
- for i in result["Result"]:
+ for i in ramspeed_result["Result"]:
bw = i["Bandwidth(MBps)"]
if bw < sla_min_bw:
sla_error += "Bandwidth %f < " \
diff --git a/yardstick/benchmark/scenarios/lib/__init__.py b/yardstick/benchmark/scenarios/lib/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/__init__.py
diff --git a/yardstick/benchmark/scenarios/lib/add_memory_load.py b/yardstick/benchmark/scenarios/lib/add_memory_load.py
new file mode 100644
index 000000000..26cf140d1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/add_memory_load.py
@@ -0,0 +1,57 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class AddMemoryLoad(base.Scenario):
+ """Add memory load in server
+ """
+
+ __scenario_type__ = "AddMemoryLoad"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ self.options = scenario_cfg.get('options', {})
+
+ self.client = ssh.SSH.from_node(self.context_cfg['host'])
+ self.client.wait(timeout=600)
+
+ def run(self, result):
+ self._add_load()
+
+ def _add_load(self):
+ try:
+ memory_load = self.options['memory_load']
+ except KeyError:
+ LOG.error('memory_load parameter must be provided')
+ else:
+ if float(memory_load) == 0:
+ return
+ cmd = 'free | awk "/Mem/ {print $2}"'
+ code, stdout, stderr = self.client.execute(cmd)
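+ # the quoted $2 is expanded to an empty string by the remote shell, so awk
+ # prints the whole "Mem:" line; fields 1 and 2 are total and used memory (KiB)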
+ total = int(stdout.split()[1])
+ used = int(stdout.split()[2])
+ remain_memory = total * float(memory_load) - used
+ if remain_memory > 0:
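+ # convert the remaining KiB to MiB and size the load as one stress -m
+ # worker per 128 MB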
+ count = remain_memory / 1024 / 128
+ LOG.info('Add %s vm load', count)
+ if count != 0:
+ cmd = 'stress -t 10 -m {} --vm-keep'.format(count)
+ self.client.execute(cmd)
diff --git a/yardstick/benchmark/scenarios/lib/check_numa_info.py b/yardstick/benchmark/scenarios/lib/check_numa_info.py
new file mode 100644
index 000000000..59a47547e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/check_numa_info.py
@@ -0,0 +1,61 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class CheckNumaInfo(base.Scenario):
+ """
+ Check the NUMA pinning of an instance before and after live migration
+
+ """
+
+ __scenario_type__ = "CheckNumaInfo"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ self.options = self.scenario_cfg.get('options', {})
+
+ self.cpu_set = self.options.get('cpu_set', '1,2,3,4,5,6')
+
+ def run(self, result):
+ info1 = self.options.get('info1')
+ info2 = self.options.get('info2')
+ LOG.debug('Origin numa info: %s', info1)
+ LOG.debug('Current numa info: %s', info2)
+ status = self._check_vm2_status(info1, info2)
+
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [status]
+ return self._push_to_outputs(keys, values)
+
+ def _check_vm2_status(self, info1, info2):
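+ # the instance must be bound to a single NUMA memory node and every pinned
+ # vCPU must lie inside the allowed cpu_set, both before (info1) and after
+ # (info2) the migration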
+ if len(info1['pinning']) != 1 or len(info2['pinning']) != 1:
+ return False
+
+ for i in info1['vcpupin']:
+ for j in i['cpuset'].split(','):
+ if j not in self.cpu_set.split(','):
+ return False
+
+ for i in info2['vcpupin']:
+ for j in i['cpuset'].split(','):
+ if j not in self.cpu_set.split(','):
+ return False
+
+ return True
diff --git a/yardstick/benchmark/scenarios/lib/check_value.py b/yardstick/benchmark/scenarios/lib/check_value.py
new file mode 100644
index 000000000..759076068
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/check_value.py
@@ -0,0 +1,58 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class CheckValue(base.Scenario):
+ """Check values between value1 and value2
+
+ options:
+ operator: equal(eq) and not equal(ne)
+ value1:
+ value2:
+ output: check_result
+ """
+
+ __scenario_type__ = "CheckValue"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ def run(self, result):
+ """execute the test"""
+
+ op = self.options.get("operator")
+ LOG.debug("options=%s", self.options)
+ value1 = str(self.options.get("value1"))
+ value2 = str(self.options.get("value2"))
+ check_result = "PASS"
+ if op == "eq" and value1 != value2:
+ LOG.info("value1=%s, value2=%s, error: should equal!!!", value1,
+ value2)
+ check_result = "FAIL"
+ assert value1 == value2, "Error %s!=%s" % (value1, value2)
+ elif op == "ne" and value1 == value2:
+ LOG.info("value1=%s, value2=%s, error: should not equal!!!",
+ value1, value2)
+ check_result = "FAIL"
+ assert value1 != value2, "Error %s==%s" % (value1, value2)
+ LOG.info("Check result is %s", check_result)
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [check_result]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/get_migrate_target_host.py b/yardstick/benchmark/scenarios/lib/get_migrate_target_host.py
new file mode 100644
index 000000000..c19d96d68
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/get_migrate_target_host.py
@@ -0,0 +1,56 @@
+
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.common import openstack_utils
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class GetMigrateTargetHost(base.Scenario):
+ """Get a migrate target host according server
+ """
+
+ __scenario_type__ = "GetMigrateTargetHost"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ self.options = self.scenario_cfg.get('options', {})
+ default_instance_id = self.options.get('server', {}).get('id', '')
+ self.instance_id = self.options.get('server_id', default_instance_id)
+
+ self.nova_client = openstack_utils.get_nova_client()
+
+ def run(self, result):
+ current_host = self._get_current_host_name(self.instance_id)
+ target_host = self._get_migrate_host(current_host)
+
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [target_host]
+ return self._push_to_outputs(keys, values)
+
+ def _get_current_host_name(self, server_id):
+
+ return change_obj_to_dict(self.nova_client.servers.get(server_id))['OS-EXT-SRV-ATTR:host']
+
+ def _get_migrate_host(self, current_host):
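+ # return the first compute host other than the one currently running
+ # the instance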
+ hosts = self.nova_client.hosts.list_all()
+ compute_hosts = [a.host for a in hosts if a.service == 'compute']
+ for host in compute_hosts:
+ if host.strip() != current_host.strip():
+ return host
diff --git a/yardstick/benchmark/scenarios/lib/get_numa_info.py b/yardstick/benchmark/scenarios/lib/get_numa_info.py
new file mode 100644
index 000000000..4e4a44d95
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/get_numa_info.py
@@ -0,0 +1,79 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+import os
+
+import yaml
+from xml.etree import ElementTree as ET
+
+from yardstick import ssh
+from yardstick.benchmark.scenarios import base
+from yardstick.common import constants as consts
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.common.openstack_utils import get_nova_client
+from yardstick.common.task_template import TaskTemplate
+
+LOG = logging.getLogger(__name__)
+
+
+class GetNumaInfo(base.Scenario):
+ """
+ Get the NUMA pinning info of an instance from its compute host
+
+ """
+
+ __scenario_type__ = "GetNumaInfo"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg.get('options', {})
+
+ server = self.options['server']
+ self.server_id = server['id']
+ self.host = self._get_current_host_name(self.server_id)
+
+ node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
+ self.options.get('file'))
+
+ with open(node_file) as f:
+ nodes = yaml.safe_load(TaskTemplate.render(f.read()))
+ self.nodes = {a['host_name']: a for a in nodes['nodes']}
+
+ def run(self, result):
+ numa_info = self._check_numa_node(self.server_id, self.host)
+
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [numa_info]
+ return self._push_to_outputs(keys, values)
+
+ def _get_current_host_name(self, server_id):
+
+ return change_obj_to_dict(get_nova_client().servers.get(server_id))['OS-EXT-SRV-ATTR:host']
+
+ def _get_host_client(self, node_name):
+ self.host_client = ssh.SSH.from_node(self.nodes.get(node_name))
+ self.host_client.wait(timeout=600)
+
+ def _check_numa_node(self, server_id, host):
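+ # dump the libvirt domain XML on the compute host and collect the vCPU
+ # pinning (<vcpupin>) and NUMA memory binding (<memnode>) attributes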
+ self._get_host_client(host)
+
+ cmd = "sudo virsh dumpxml %s" % server_id
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.host_client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ root = ET.fromstring(stdout)
+ vcpupin = [a.attrib for a in root.iter('vcpupin')]
+ pinning = [a.attrib for a in root.iter('memnode')]
+ return {"pinning": pinning, 'vcpupin': vcpupin}
diff --git a/yardstick/benchmark/scenarios/lib/get_server.py b/yardstick/benchmark/scenarios/lib/get_server.py
new file mode 100644
index 000000000..fcf47c80d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/get_server.py
@@ -0,0 +1,83 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class GetServer(base.Scenario):
+ """Get a server instance
+
+ Parameters
+ server_id - ID of the server
+ type: string
+ unit: N/A
+ default: null
+ server_name - name of the server
+ type: string
+ unit: N/A
+ default: null
+
+ Either server_id or server_name is required.
+
+ Outputs
+ rc - response code of getting server instance
+ 0 for success
+ 1 for failure
+ type: int
+ unit: N/A
+ server - instance of the server
+ type: dict
+ unit: N/A
+ """
+
+ __scenario_type__ = "GetServer"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg.get('options', {})
+
+ self.server_id = self.options.get("server_id")
+ if self.server_id:
+ LOG.debug('Server id is %s', self.server_id)
+
+ default_name = self.scenario_cfg.get('host',
+ self.scenario_cfg.get('target'))
+ self.server_name = self.options.get('server_name', default_name)
+ if self.server_name:
+ LOG.debug('Server name is %s', self.server_name)
+
+ self.nova_client = op_utils.get_nova_client()
+
+ def run(self, result):
+ """execute the test"""
+
+ if self.server_id:
+ server = self.nova_client.servers.get(self.server_id)
+ else:
+ server = op_utils.get_server_by_name(self.server_name)
+
+ keys = self.scenario_cfg.get('output', '').split()
+
+ if server:
+ LOG.info("Get server successful!")
+ values = [0, self._change_obj_to_dict(server)]
+ else:
+ LOG.info("Get server failed!")
+ values = [1]
+
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/get_server_ip.py b/yardstick/benchmark/scenarios/lib/get_server_ip.py
new file mode 100644
index 000000000..1eeeb7fca
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/get_server_ip.py
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class GetServerIp(base.Scenario):
+ """Get a server by name"""
+
+ __scenario_type__ = "GetServerIp"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg.get('options', {})
+ self.ip_type = self.options.get('ip_type', "floating")
+
+ def run(self, result):
+ server = self.options.get('server', {})
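+ # pick the first address whose OS-EXT-IPS:type matches the requested
+ # ip_type ("floating" by default) from the server's addresses mapping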
+ ip = next(n['addr'] for k, v in server['addresses'].items()
+ for n in v if n['OS-EXT-IPS:type'] == self.ip_type)
+
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [ip]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/migrate.py b/yardstick/benchmark/scenarios/lib/migrate.py
new file mode 100644
index 000000000..116bae69e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/migrate.py
@@ -0,0 +1,155 @@
+# ############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# ############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+import subprocess
+import threading
+import time
+
+from datetime import datetime
+import ping
+
+from yardstick.common import openstack_utils
+from yardstick.common.utils import change_obj_to_dict
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+TIMEOUT = 0.05
+PACKAGE_SIZE = 64
+
+
+class Migrate(base.Scenario): # pragma: no cover
+ """
+ Execute a live migration of an instance and measure migration time and downtime
+
+ """
+
+ __scenario_type__ = "Migrate"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg.get('options', {})
+
+ self.nova_client = openstack_utils.get_nova_client()
+
+ def run(self, result):
+ default_instance_id = self.options.get('server', {}).get('id', '')
+ instance_id = self.options.get('server_id', default_instance_id)
+ LOG.info('Instance id is %s', instance_id)
+
+ target_host = self.options.get('host')
+ LOG.info('Target host is %s', target_host)
+
+ instance_ip = self.options.get('server_ip')
+ if instance_ip:
+ LOG.info('Instance ip is %s', instance_ip)
+
+ self._ping_until_connected(instance_ip)
+ LOG.info('Instance is connected')
+
+ LOG.debug('Start to ping instance')
+ ping_thread = self._do_ping_task(instance_ip)
+
+ keys = self.scenario_cfg.get('output', '').split()
+ try:
+ LOG.info('Start to migrate')
+ self._do_migrate(instance_id, target_host)
+ except Exception as e:
+ return self._push_to_outputs(keys, [1, str(e).split('.')[0]])
+ else:
+ migrate_time = self._get_migrate_time(instance_id)
+ LOG.info('Migration time is %s s', migrate_time)
+
+ current_host = self._get_current_host_name(instance_id)
+ LOG.info('Current host is %s', current_host)
+ if current_host.strip() != target_host.strip():
+ LOG.error('current_host not equal to target_host')
+ values = [1, 'current_host not equal to target_host']
+ return self._push_to_outputs(keys, values)
+
+ if instance_ip:
+ ping_thread.flag = False
+ ping_thread.join()
+
+ downtime = ping_thread.get_delay()
+ LOG.info('Downtime is %s s', downtime)
+
+ values = [0, migrate_time, downtime]
+ return self._push_to_outputs(keys, values)
+ else:
+ values = [0, migrate_time]
+ return self._push_to_outputs(keys, values)
+
+ def _do_migrate(self, server_id, target_host):
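+ # trigger the live migration through the nova CLI; completion is tracked
+ # separately by polling the API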
+
+ cmd = ['nova', 'live-migration', server_id, target_host]
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ p.communicate()
+
+ def _ping_until_connected(self, instance_ip):
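+ # probe with short ICMP echoes (50 ms timeout) until the instance
+ # answers, giving up after 3000 attempts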
+ for i in range(3000):
+ res = ping.do_one(instance_ip, TIMEOUT, PACKAGE_SIZE)
+ if res:
+ break
+
+ def _do_ping_task(self, instance_ip):
+ ping_thread = PingThread(instance_ip)
+ ping_thread.start()
+ return ping_thread
+
+ def _get_current_host_name(self, server_id):
+
+ return change_obj_to_dict(self.nova_client.servers.get(server_id))['OS-EXT-SRV-ATTR:host']
+
+ def _get_migrate_time(self, server_id):
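+ # poll the server status: start the clock when it turns MIGRATING and
+ # stop when it is ACTIVE again; an ERROR status aborts the measurement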
+ while True:
+ status = self.nova_client.servers.get(server_id).status.lower()
+ if status == 'migrating':
+ start_time = datetime.now()
+ break
+ LOG.debug('Instance status change to MIGRATING')
+
+ while True:
+ status = self.nova_client.servers.get(server_id).status.lower()
+ if status == 'active':
+ end_time = datetime.now()
+ break
+ if status == 'error':
+ LOG.error('Instance status is ERROR')
+ raise RuntimeError('The instance status is error')
+ LOG.debug('Instance status change to ACTIVE')
+
+ duration = end_time - start_time
+ return duration.seconds + duration.microseconds * 1.0 / 1e6
+
+
+class PingThread(threading.Thread): # pragma: no cover
+
+ def __init__(self, target):
+ super(PingThread, self).__init__()
+ self.target = target
+ self.flag = True
+ self.delay = 0.0
+
+ def run(self):
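+ # count lost probes while the migration runs; downtime is approximated
+ # as lost probes * (probe timeout + inter-probe sleep)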
+ count = 0
+ while self.flag:
+ res = ping.do_one(self.target, TIMEOUT, PACKAGE_SIZE)
+ if not res:
+ count += 1
+ time.sleep(0.01)
+ self.delay = (TIMEOUT + 0.01) * count
+
+ def get_delay(self):
+ return self.delay
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 334f3a920..a3d273750 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -19,6 +19,7 @@ import pkg_resources
from oslo_serialization import jsonutils
import yardstick.ssh as ssh
+from yardstick.common import utils
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
@@ -49,6 +50,17 @@ For more info see http://software.es.net/iperf
type: int
unit: bytes
default: -
+ length - length of buffer to read or write,
+ (default 128 KB for TCP, 8 KB for UDP)
+ type: int
+ unit: k
+ default: -
+ window - window size / socket buffer size
+ sets the TCP window size; for a UDP test this sets the receive buffer
+ size, limiting the maximum size of an acceptable packet.
+ type: int
+ unit: k
+ default: -
"""
__scenario_type__ = "Iperf3"
@@ -121,6 +133,12 @@ For more info see http://software.es.net/iperf
elif "blockcount" in options:
cmd += " --blockcount %d" % options["blockcount"]
+ if "length" in options:
+ cmd += " --length %s" % options["length"]
+
+ if "window" in options:
+ cmd += " --window %s" % options["window"]
+
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.host.execute(cmd)
@@ -131,8 +149,8 @@ For more info see http://software.es.net/iperf
# Note: convert all ints to floats in order to avoid
# schema conflicts in influxdb. We probably should add
# a format func in the future.
- result.update(
- jsonutils.loads(stdout, parse_int=float))
+ iperf_result = jsonutils.loads(stdout, parse_int=float)
+ result.update(utils.flatten_dict_key(iperf_result))
if "sla" in self.scenario_cfg:
sla_iperf = self.scenario_cfg["sla"]
@@ -141,7 +159,7 @@ For more info see http://software.es.net/iperf
# convert bits per second to bytes per second
bit_per_second = \
- int(result["end"]["sum_received"]["bits_per_second"])
+ int(iperf_result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
assert bytes_per_second >= sla_bytes_per_second, \
"bytes_per_second %d < sla:bytes_per_second (%d); " % \
@@ -149,7 +167,7 @@ For more info see http://software.es.net/iperf
else:
sla_jitter = float(sla_iperf["jitter"])
- jitter_ms = float(result["end"]["sum"]["jitter_ms"])
+ jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
assert jitter_ms <= sla_jitter, \
"jitter_ms %f > sla:jitter %f; " % \
(jitter_ms, sla_jitter)
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index a929e5337..6a7927de4 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -15,6 +15,7 @@ import pkg_resources
import logging
import yardstick.ssh as ssh
+from yardstick.common import utils
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
@@ -57,8 +58,8 @@ class Ping(base.Scenario):
destination = self.context_cfg['target'].get('ipaddr', '127.0.0.1')
dest_list = [s.strip() for s in destination.split(',')]
- result["rtt"] = {}
- rtt_result = result["rtt"]
+ rtt_result = {}
+ ping_result = {"rtt": rtt_result}
for pos, dest in enumerate(dest_list):
if 'targets' in self.scenario_cfg:
@@ -88,6 +89,7 @@ class Ping(base.Scenario):
(rtt_result[target_vm_name], sla_max_rtt)
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
+ result.update(utils.flatten_dict_key(ping_result))
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index e6aa7e5fb..8ca1ca60e 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -9,6 +9,7 @@
from __future__ import absolute_import
from __future__ import print_function
+import os
import logging
import pkg_resources
@@ -19,6 +20,9 @@ from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
+VNIC_TYPE_LIST = ["ovs", "sriov"]
+SRIOV_DRIVER_LIST = ["ixgbevf", "i40evf"]
+
class Pktgen(base.Scenario):
"""Execute pktgen between two hosts
@@ -44,7 +48,11 @@ class Pktgen(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
+ self.vnic_name = "eth0"
+ self.vnic_type = "ovs"
+ self.queue_number = 1
self.setup_done = False
+ self.multiqueue_setup_done = False
def setup(self):
"""scenario setup"""
@@ -67,6 +75,212 @@ class Pktgen(base.Scenario):
self.setup_done = True
+ def multiqueue_setup(self):
+ # one time setup stuff
+ cmd = "sudo sysctl -w net.core.netdev_budget=3000"
+ self.server.send_command(cmd)
+ self.client.send_command(cmd)
+
+ cmd = "sudo sysctl -w net.core.netdev_max_backlog=100000"
+ self.server.send_command(cmd)
+ self.client.send_command(cmd)
+
+ """multiqueue setup"""
+ if not self._is_irqbalance_disabled():
+ self._disable_irqbalance()
+
+ vnic_driver_name = self._get_vnic_driver_name()
+ if vnic_driver_name in SRIOV_DRIVER_LIST:
+ self.vnic_type = "sriov"
+
+ # one time setup stuff
+ cmd = "sudo ethtool -G %s rx 4096 tx 4096" % self.vnic_name
+ self.server.send_command(cmd)
+ self.client.send_command(cmd)
+
+ self.queue_number = self._get_sriov_queue_number()
+ self._setup_irqmapping_sriov(self.queue_number)
+ else:
+ self.vnic_type = "ovs"
+ self.queue_number = self._enable_ovs_multiqueue()
+ self._setup_irqmapping_ovs(self.queue_number)
+
+ self.multiqueue_setup_done = True
+
+ def _get_vnic_driver_name(self):
+ cmd = "readlink /sys/class/net/%s/device/driver" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return os.path.basename(stdout.strip())
+
+ def _is_irqbalance_disabled(self):
+ """Did we disable irqbalance already in the guest?"""
+ is_disabled = False
+ cmd = "grep ENABLED /etc/default/irqbalance"
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ if "0" in stdout:
+ is_disabled = True
+
+ return is_disabled
+
+ def _disable_irqbalance(self):
+ cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
+ "/etc/default/irqbalance"
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "sudo service irqbalance stop"
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "sudo service irqbalance disable"
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ def _setup_irqmapping_ovs(self, queue_number):
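+ # pin the IRQ of each virtio input/output queue to its own vCPU
+ # (queue i -> CPU i)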
+ cmd = "grep 'virtio0-input.0' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'"
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "grep 'virtio0-output.0' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'"
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ if queue_number == 1:
+ return
+
+ for i in range(1, queue_number):
+ cmd = "grep 'virtio0-input.%s' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % (i)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
+ % (1 << i, int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "grep 'virtio0-output.%s' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % (i)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
+ % (1 << i, int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ def _setup_irqmapping_sriov(self, queue_number):
+ cmd = "grep '%s-TxRx-0' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % self.vnic_name
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ if queue_number == 1:
+ return
+
+ for i in range(1, queue_number):
+ cmd = "grep '%s-TxRx-%s' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % (self.vnic_name, i)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
+ % (1 << i, int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ def _get_sriov_queue_number(self):
+ """Get queue number from server as both VMs are the same"""
+ cmd = "grep %s-TxRx- /proc/interrupts | wc -l" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return int(stdout)
+
+ def _get_available_queue_number(self):
+ """Get queue number from client as both VMs are the same"""
+ cmd = "sudo ethtool -l %s | grep Combined | head -1 |" \
+ "awk '{printf $2}'" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return int(stdout)
+
+ def _get_usable_queue_number(self):
+ """Get queue number from client as both VMs are the same"""
+ cmd = "sudo ethtool -l %s | grep Combined | tail -1 |" \
+ "awk '{printf $2}'" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return int(stdout)
+
+ def _enable_ovs_multiqueue(self):
+ available_queue_number = self._get_available_queue_number()
+ usable_queue_number = self._get_usable_queue_number()
+ if available_queue_number > 1 and \
+ available_queue_number != usable_queue_number:
+ cmd = "sudo ethtool -L %s combined %s" % \
+ (self.vnic_name, available_queue_number)
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return available_queue_number
+
def _iptables_setup(self):
"""Setup iptables on server to monitor for received packets"""
cmd = "sudo iptables -F; " \
@@ -99,6 +313,14 @@ class Pktgen(base.Scenario):
options = self.scenario_cfg['options']
packetsize = options.get("packetsize", 60)
self.number_of_ports = options.get("number_of_ports", 10)
+ self.vnic_name = options.get("vnic_name", "eth0")
+ ovs_dpdk = options.get("ovs_dpdk", False)
+ pps = options.get("pps", 1000000)
+ multiqueue = options.get("multiqueue", False)
+
+ if multiqueue and not self.multiqueue_setup_done:
+ self.multiqueue_setup()
+
# if run by a duration runner
duration_time = self.scenario_cfg["runner"].get("duration", None) \
if "runner" in self.scenario_cfg else None
@@ -114,8 +336,18 @@ class Pktgen(base.Scenario):
self._iptables_setup()
- cmd = "sudo bash pktgen.sh %s %s %s %s" \
- % (ipaddr, self.number_of_ports, packetsize, duration)
+ queue_number = self.queue_number
+
+ # For native OVS, half of the vCPUs are used by vhost kernel threads,
+ # so set queue_number to half the number of vCPUs
+ # e.g. queue_number is 2 when there are 4 vCPUs
+ if self.vnic_type == "ovs" and not ovs_dpdk and self.queue_number > 1:
+ queue_number = self.queue_number // 2
+
+ cmd = "sudo bash pktgen.sh %s %s %s %s %s %s" \
+ % (ipaddr, self.number_of_ports, packetsize,
+ duration, queue_number, pps)
+
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -131,12 +363,15 @@ class Pktgen(base.Scenario):
sent = result['packets_sent']
received = result['packets_received']
ppm = 1000000 * (sent - received) / sent
+ # round up: with a 1 ppm SLA, losing 11 packets out of 10 million must not pass
+ ppm += (sent - received) % sent > 0
+ LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
% (ppm, sla_max_ppm)
-def _test():
+def _test(): # pragma: no cover
"""internal test function"""
key_filename = pkg_resources.resource_filename('yardstick.resources',
'files/yardstick_key')
@@ -165,6 +400,5 @@ def _test():
p.run(result)
print(result)
-
if __name__ == '__main__':
_test()
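The loss calculation added to run() above always rounds a non-zero remainder up before the SLA check. A minimal standalone sketch of that arithmetic (the function name and sample values are illustrative, not part of the scenario class):

    def lost_ppm(sent, received):
        """Packets lost per million sent, rounded up whenever any packet is lost."""
        lost = sent - received
        ppm = 1000000 * lost // sent    # integer part of the loss ratio in ppm
        if lost % sent > 0:             # same rounding the scenario applies
            ppm += 1
        return ppm

    # 11 packets lost out of 10 million sent gives 2 ppm, which fails a 1 ppm SLA
    assert lost_ppm(10000000, 10000000 - 11) == 2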
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash b/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash
index 4224c5abf..e338a1b09 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash
+++ b/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash
@@ -16,6 +16,8 @@ DST_IP=$1 # destination IP address
NUM_PORTS=$2 # number of source ports
PKT_SIZE=$3 # packet size
DURATION=$4 # test duration (seconds)
+TRXQUEUE=$5 # number of RX/TX queues to use
+PPS=$6 # packets per second to send
# Configuration
UDP_SRC_MIN=1000 # UDP source port min
@@ -37,62 +39,100 @@ pgset()
fi
}
+# remove all devices from thread
+pgclean()
+{
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ #
+ # Thread commands
+ #
+
+ PGDEV=/proc/net/pktgen/kpktgend_${COUNTER}
+
+ # Remove all devices from this thread
+ pgset "rem_device_all"
+ let COUNTER=COUNTER+1
+ done
+}
+
# configure pktgen (see pktgen doc for details)
pgconfig()
{
- #
- # Thread commands
- #
+ pps=$(( PPS / TRXQUEUE ))
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ #
+ # Thread commands
+ #
- PGDEV=/proc/net/pktgen/kpktgend_0
+ PGDEV=/proc/net/pktgen/kpktgend_${COUNTER}
- # Remove all devices from this thread
- pgset "rem_device_all"
+ # Add device to thread
+ pgset "add_device $DEV@${COUNTER}"
- # Add device to thread
- pgset "add_device $DEV"
+ #
+ # Device commands
+ #
- #
- # Device commands
- #
+ PGDEV=/proc/net/pktgen/$DEV@${COUNTER}
- PGDEV=/proc/net/pktgen/$DEV
+ # 0 means continuous sends until explicitly stopped
+ pgset "count 0"
- # 0 means continious sends untill explicitly stopped
- pgset "count 0"
+ # set an explicit packets-per-second rate; if 0, send at the maximum available bandwidth
+ if [ ${pps} -gt 0 ]
+ then
+ pgset "ratep ${pps}"
+ fi
- # use single SKB for all transmits
- pgset "clone_skb 0"
+ pgset "clone_skb 10"
- # packet size, NIC adds 4 bytes CRC
- pgset "pkt_size $PKT_SIZE"
+ # use different queue per thread
+ pgset "queue_map_min ${COUNTER}"
+ pgset "queue_map_max ${COUNTER}"
- # random address within the min-max range
- pgset "flag IPDST_RND UDPSRC_RND UDPDST_RND"
+ # packet size, NIC adds 4 bytes CRC
+ pgset "pkt_size $PKT_SIZE"
- # destination IP
- pgset "dst_min $DST_IP"
- pgset "dst_max $DST_IP"
+ # random address within the min-max range
+ pgset "flag UDPDST_RND"
+ pgset "flag UDPSRC_RND"
+ pgset "flag IPDST_RND"
- # destination MAC address
- pgset "dst_mac $MAC"
+ # destination IP
+ pgset "dst_min $DST_IP"
+ pgset "dst_max $DST_IP"
+
+ # destination MAC address
+ pgset "dst_mac $MAC"
+
+ # source UDP port range
+ pgset "udp_src_min $UDP_SRC_MIN"
+ pgset "udp_src_max $UDP_SRC_MAX"
- # source UDP port range
- pgset "udp_src_min $UDP_SRC_MIN"
- pgset "udp_src_max $UDP_SRC_MAX"
+ # destination UDP port range
+ pgset "udp_dst_min $UDP_DST_MIN"
+ pgset "udp_dst_max $UDP_DST_MAX"
- # destination UDP port range
- pgset "udp_dst_min $UDP_DST_MIN"
- pgset "udp_dst_max $UDP_DST_MAX"
+ let COUNTER=COUNTER+1
+
+ done
}
# run pktgen
pgrun()
{
- # Time to run, result can be vieved in /proc/net/pktgen/$DEV
+ # Time to run, result can be viewed in /proc/net/pktgen/$DEV
PGDEV=/proc/net/pktgen/pgctrl
# Will hang, Ctrl-C or SIGINT to stop
pgset "start" start
+
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ taskset -c ${COUNTER} kpktgend_${COUNTER}
+ let COUNTER=COUNTER+1
+ done
}
# run pktgen for ${DURATION} seconds
@@ -111,19 +151,28 @@ run_test()
# write the result to stdout in json format
output_json()
{
- sent=$(awk '/^Result:/{print $5}' <$PGDEV)
- pps=$(awk 'match($0,/'\([0-9]+\)pps'/, a) {print a[1]}' <$PGDEV)
- errors=$(awk '/errors:/{print $5}' <$PGDEV)
+ sent=0
+ result_pps=0
+ errors=0
+ PGDEV=/proc/net/pktgen/$DEV@
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ sent=$(($sent + $(awk '/^Result:/{print $5}' <$PGDEV${COUNTER})))
+ result_pps=$(($result_pps + $(awk 'match($0,/'\([0-9]+\)pps'/, a) {print a[1]}' <$PGDEV${COUNTER})))
+ errors=$(($errors + $(awk '/errors:/{print $5}' <$PGDEV${COUNTER})))
+ let COUNTER=COUNTER+1
+ done
flows=$(( NUM_PORTS * (NUM_PORTS + 1) ))
- echo { '"packets_sent"':$sent , '"packets_per_second"':$pps, '"flows"':$flows, '"errors"':$errors }
+ echo '{ "packets_sent"':${sent} , '"packets_per_second"':${result_pps}, '"flows"':${flows}, '"errors"':${errors} '}'
}
# main entry
main()
{
modprobe pktgen
+ pgclean
ping -c 3 $DST_IP >/dev/null
@@ -137,16 +186,20 @@ main()
pgconfig
# run the test
- run_test >/dev/null
+ run_test
- PGDEV=/proc/net/pktgen/$DEV
+ PGDEV=/proc/net/pktgen/$DEV@
# check result
- result=$(cat $PGDEV | fgrep "Result: OK:")
- if [ "$result" = "" ]; then
- cat $PGDEV | fgrep Result: >/dev/stderr
- exit 1
- fi
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ result=$(cat $PGDEV${COUNTER} | fgrep "Result: OK:")
+ if [ "$result" = "" ]; then
+ cat $PGDEV${COUNTER} | fgrep Result: >/dev/stderr
+ exit 1
+ fi
+ let COUNTER=COUNTER+1
+ done
# output result
output_json
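With the multi-queue changes the script divides the requested PPS budget evenly across the pktgen kernel threads and then sums the per-device counters back together in output_json(). A small Python sketch of the same bookkeeping (the sample numbers are illustrative):

    def split_pps(total_pps, queues):
        # per-thread value passed to 'pgset "ratep ..."'
        return total_pps // queues

    def aggregate(per_queue_results):
        # sum the counters read from /proc/net/pktgen/<dev>@<n>
        totals = {"packets_sent": 0, "packets_per_second": 0, "errors": 0}
        for result in per_queue_results:
            for key in totals:
                totals[key] += result[key]
        return totals

    assert split_pps(1000000, 2) == 500000
    print(aggregate([{"packets_sent": 100, "packets_per_second": 50, "errors": 0},
                     {"packets_sent": 100, "packets_per_second": 50, "errors": 1}]))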
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 594edeaa8..9607e3005 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -164,38 +164,60 @@ class NetworkServiceTestCase(base.Scenario):
for vnfd in topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
+ @staticmethod
+ def get_vld_networks(networks):
+ return {n['vld_id']: n for n in networks.values()}
+
def _resolve_topology(self, context_cfg, topology):
for vld in topology["vld"]:
- if len(vld["vnfd-connection-point-ref"]) > 2:
+ try:
+ node_0, node_1 = vld["vnfd-connection-point-ref"]
+ except (TypeError, ValueError):
raise IncorrectConfig("Topology file corrupted, "
- "too many endpoint for connection")
-
- node_0, node_1 = vld["vnfd-connection-point-ref"]
+ "wrong number of endpoints for connection")
- node0 = self._find_vnf_name_from_id(topology,
- node_0["member-vnf-index-ref"])
- node1 = self._find_vnf_name_from_id(topology,
- node_1["member-vnf-index-ref"])
+ node_0_name = self._find_vnf_name_from_id(topology,
+ node_0["member-vnf-index-ref"])
+ node_1_name = self._find_vnf_name_from_id(topology,
+ node_1["member-vnf-index-ref"])
- if0 = node_0["vnfd-connection-point-ref"]
- if1 = node_1["vnfd-connection-point-ref"]
+ node_0_ifname = node_0["vnfd-connection-point-ref"]
+ node_1_ifname = node_1["vnfd-connection-point-ref"]
+ node_0_if = context_cfg["nodes"][node_0_name]["interfaces"][node_0_ifname]
+ node_1_if = context_cfg["nodes"][node_1_name]["interfaces"][node_1_ifname]
try:
- nodes = context_cfg["nodes"]
- nodes[node0]["interfaces"][if0]["vld_id"] = vld["id"]
- nodes[node1]["interfaces"][if1]["vld_id"] = vld["id"]
-
- nodes[node0]["interfaces"][if0]["dst_mac"] = \
- nodes[node1]["interfaces"][if1]["local_mac"]
- nodes[node0]["interfaces"][if0]["dst_ip"] = \
- nodes[node1]["interfaces"][if1]["local_ip"]
-
- nodes[node1]["interfaces"][if1]["dst_mac"] = \
- nodes[node0]["interfaces"][if0]["local_mac"]
- nodes[node1]["interfaces"][if1]["dst_ip"] = \
- nodes[node0]["interfaces"][if0]["local_ip"]
+ vld_networks = self.get_vld_networks(context_cfg["networks"])
+
+ node_0_if["vld_id"] = vld["id"]
+ node_1_if["vld_id"] = vld["id"]
+
+ # set peer name
+ node_0_if["peer_name"] = node_1_name
+ node_1_if["peer_name"] = node_0_name
+
+ # set peer interface name
+ node_0_if["peer_ifname"] = node_1_ifname
+ node_1_if["peer_ifname"] = node_0_ifname
+
+ # just load the whole network dict
+ node_0_if["network"] = vld_networks.get(vld["id"], {})
+ node_1_if["network"] = vld_networks.get(vld["id"], {})
+
+ node_0_if["dst_mac"] = node_1_if["local_mac"]
+ node_0_if["dst_ip"] = node_1_if["local_ip"]
+
+ node_1_if["dst_mac"] = node_0_if["local_mac"]
+ node_1_if["dst_ip"] = node_0_if["local_ip"]
+
+ # add peer interface dict, but remove circular link
+ # TODO: don't waste memory
+ node_0_copy = node_0_if.copy()
+ node_1_copy = node_1_if.copy()
+ node_0_if["peer_intf"] = node_1_copy
+ node_1_if["peer_intf"] = node_0_copy
except KeyError:
- raise IncorrectConfig("Required interface not found,"
+ raise IncorrectConfig("Required interface not found, "
"topology file corrupted")
@classmethod
@@ -308,21 +330,36 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
return dict(network_devices)
@classmethod
- def get_vnf_impl(cls, vnf_model):
+ def get_vnf_impl(cls, vnf_model_id):
""" Find the implementing class from vnf_model["vnf"]["name"] field
- :param vnf_model: dictionary containing a parsed vnfd
+ :param vnf_model_id: parsed vnfd model ID field
:return: subclass of GenericVNF
"""
import_modules_from_package(
"yardstick.network_services.vnf_generic.vnf")
- expected_name = vnf_model['id']
- impl = (c for c in itersubclasses(GenericVNF)
- if c.__name__ == expected_name)
+ expected_name = vnf_model_id
+ classes_found = []
+
+ def impl():
+ for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
+ if name == expected_name:
+ yield class_
+ classes_found.append(name)
+
try:
- return next(impl)
+ return next(impl())
except StopIteration:
- raise IncorrectConfig("No implementation for %s", expected_name)
+ pass
+
+ raise IncorrectConfig("No implementation for %s found in %s" %
+ (expected_name, classes_found))
+
+ @staticmethod
+ def update_interfaces_from_node(vnfd, node):
+ for intf in vnfd["vdu"][0]["external-interface"]:
+ node_intf = node['interfaces'][intf['name']]
+ intf['virtual-interface'].update(node_intf)
def load_vnf_models(self, scenario_cfg, context_cfg):
""" Create VNF objects based on YAML descriptors
@@ -339,8 +376,11 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
scenario_cfg['task_path']) as stream:
vnf_model = stream.read()
vnfd = vnfdgen.generate_vnfd(vnf_model, node)
- vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
- vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
+ # TODO: here add extra context_cfg["nodes"] regardless of template
+ vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
+ self.update_interfaces_from_node(vnfd, node)
+ vnf_impl = self.get_vnf_impl(vnfd['id'])
+ vnf_instance = vnf_impl(vnfd)
vnf_instance.name = node_name
vnfs.append(vnf_instance)
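get_vld_networks() above simply re-keys the context networks by their vld_id so that _resolve_topology() can attach the whole network dict to both interfaces. A minimal sketch against an illustrative context fragment (the network and vld names are made up):

    networks = {
        "xe0_net": {"vld_id": "uplink_0", "cidr": "10.0.0.0/24"},
        "xe1_net": {"vld_id": "downlink_0", "cidr": "10.0.1.0/24"},
    }

    # same expression as get_vld_networks()
    vld_networks = {n["vld_id"]: n for n in networks.values()}
    assert vld_networks["uplink_0"]["cidr"] == "10.0.0.0/24"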
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index ad34817a7..b99e34270 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -40,10 +40,26 @@ class Fio(base.Scenario):
type: string
unit: na
default: write
+ rwmixwrite - percentage of a mixed workload that should be writes
+ type: int
+ unit: percentage
+ default: 50
ramp_time - run time before logging any performance
type: int
unit: seconds
default: 20
+ direct - whether to use non-buffered (direct) I/O
+ type: boolean
+ unit: na
+ default: 1
+ size - total size of I/O for this job
+ type: string
+ unit: na
+ default: 1g
+ numjobs - number of clones (processes/threads performing the same workload) of this job
+ type: int
+ unit: na
+ default: 1
Read link below for more fio args description:
http://www.bluestop.org/fio/HOWTO.txt
@@ -74,8 +90,8 @@ class Fio(base.Scenario):
def run(self, result):
"""execute the benchmark"""
- default_args = "-ioengine=libaio -direct=1 -group_reporting " \
- "-numjobs=1 -time_based --output-format=json"
+ default_args = "-ioengine=libaio -group_reporting -time_based -time_based " \
+ "--output-format=json"
if not self.setup_done:
self.setup()
@@ -86,6 +102,10 @@ class Fio(base.Scenario):
iodepth = options.get("iodepth", "1")
rw = options.get("rw", "write")
ramp_time = options.get("ramp_time", 20)
+ size = options.get("size", "1g")
+ direct = options.get("direct", "1")
+ numjobs = options.get("numjobs", "1")
+ rwmixwrite = options.get("rwmixwrite", 50)
name = "yardstick-fio"
# if run by a duration runner
duration_time = self.scenario_cfg["runner"].get("duration", None) \
@@ -99,10 +119,10 @@ class Fio(base.Scenario):
else:
runtime = 30
- cmd_args = "-filename=%s -bs=%s -iodepth=%s -rw=%s -ramp_time=%s " \
- "-runtime=%s -name=%s %s" \
- % (filename, bs, iodepth, rw, ramp_time, runtime, name,
- default_args)
+ cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
+ "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
+ % (filename, direct, bs, iodepth, rw, rwmixwrite, size, ramp_time, numjobs,
+ runtime, name, default_args)
cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)
LOG.debug("Executing command: %s", cmd)
# Set timeout, so that the cmd execution does not exit incorrectly
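For reference, the new options feed straight into the fio argument string. A standalone sketch of the command this produces (all values are illustrative; the scenario reads them from scenario_cfg['options']):

    filename, direct, bs, iodepth = "/dev/vdb", "1", "4k", "1"
    rw, rwmixwrite, size = "randrw", 30, "2g"
    ramp_time, numjobs, runtime, name = 20, 4, 30, "yardstick-fio"

    cmd_args = ("-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s "
                "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s"
                % (filename, direct, bs, iodepth, rw, rwmixwrite,
                   size, ramp_time, numjobs, runtime, name))
    print("sudo bash fio.sh %s %s" % (filename, cmd_args))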
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index c10118ad1..f0b2361d6 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -87,8 +87,9 @@ class StorPerf(base.Scenario):
def setup(self):
"""Set the configuration."""
env_args = {}
- env_args_payload_list = ["agent_count", "public_network",
- "agent_image", "volume_size"]
+ env_args_payload_list = ["agent_count", "agent_flavor",
+ "public_network", "agent_image",
+ "volume_size"]
for env_argument in env_args_payload_list:
try:
@@ -206,7 +207,7 @@ class StorPerf(base.Scenario):
# terminate_res = requests.delete('http://%s:5000/api/v1.0
# /jobs' % self.target)
# else:
- # time.sleep(int(est_time)/2)
+ # time.sleep(int(esti_time)/2)
result_res = requests.get('http://%s:5000/api/v1.0/jobs?id=%s' %
(self.target, job_id))
diff --git a/yardstick/cmd/NSBperf.py b/yardstick/cmd/NSBperf.py
index f158d57f4..4e7590ea5 100755
--- a/yardstick/cmd/NSBperf.py
+++ b/yardstick/cmd/NSBperf.py
@@ -39,13 +39,11 @@ if not PYTHONPATH or not VIRTUAL_ENV:
raise SystemExit(1)
-def handler():
+def sigint_handler(*args, **kwargs):
""" Capture ctrl+c and exit cli """
subprocess.call(["pkill", "-9", "yardstick"])
raise SystemExit(1)
-signal.signal(signal.SIGINT, handler)
-
class YardstickNSCli(object):
""" This class handles yardstick network serivce testing """
@@ -117,10 +115,10 @@ class YardstickNSCli(object):
and generates final report in rst format.
"""
+ tc_name = os.path.splitext(test_case)[0]
report_caption = '{}\n{} ({})\n{}\n\n'.format(
'================================================================',
- 'Performance report for',
- os.path.splitext(test_case)[0].upper(),
+ 'Performance report for', tc_name.upper(),
'================================================================')
print(report_caption)
if os.path.isfile("/tmp/yardstick.out"):
@@ -129,9 +127,10 @@ class YardstickNSCli(object):
lines = jsonutils.load(infile)
if lines:
- lines = lines['result']
+ lines = \
+ lines['result']["testcases"][tc_name]["tc_data"]
tc_res = lines.pop(len(lines) - 1)
- for key, value in tc_res["benchmark"]["data"].items():
+ for key, value in tc_res["data"].items():
self.generate_kpi_results(key, value)
self.generate_nfvi_results(value)
@@ -158,7 +157,7 @@ class YardstickNSCli(object):
testcases = os.listdir(test_path + vnf)
print(("VNF :(%s)" % vnf))
print("================")
- for testcase in [tc for tc in testcases if "tc" in tc]:
+ for testcase in [tc for tc in testcases if "tc_" in tc]:
print('%s' % testcase)
print(os.linesep)
raise SystemExit(0)
@@ -214,5 +213,6 @@ class YardstickNSCli(object):
self.run_test(args, test_path)
if __name__ == "__main__":
+ signal.signal(signal.SIGINT, sigint_handler)
NS_CLI = YardstickNSCli()
NS_CLI.main()
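The report code now indexes into the per-testcase structure of /tmp/yardstick.out instead of the old flat result list. A sketch of the layout it expects (the testcase name and metric keys are illustrative):

    tc_name = "tc_baremetal_rfc2544_ipv4_1flow_64B"
    sample = {
        "result": {
            "testcases": {
                tc_name: {
                    "tc_data": [
                        {"data": {"throughput_rx_mpps": 4.2, "latency_usec": 35.0}},
                    ]
                }
            }
        }
    }

    tc_data = sample["result"]["testcases"][tc_name]["tc_data"]
    last_record = tc_data.pop(len(tc_data) - 1)    # mirrors tc_res above
    for key, value in last_record["data"].items():
        print(key, value)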
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index 0f98cabdc..03f6b1b1e 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -11,6 +11,8 @@
from __future__ import print_function
from __future__ import absolute_import
+import logging
+
from yardstick.benchmark.core.task import Task
from yardstick.common.utils import cliargs
from yardstick.common.utils import write_json_to_file
@@ -19,6 +21,9 @@ from yardstick.cmd.commands import change_osloobj_to_paras
output_file_default = "/tmp/yardstick.out"
+LOG = logging.getLogger(__name__)
+
+
class TaskCommands(object): # pragma: no cover
"""Task commands.
@@ -49,7 +54,7 @@ class TaskCommands(object): # pragma: no cover
Task().start(param, **kwargs)
except Exception as e:
self._write_error_data(e)
- raise
+ LOG.exception("")
def _write_error_data(self, error):
data = {'status': 2, 'result': str(error)}
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index d251341fc..8e8114fbb 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -26,7 +26,15 @@ except KeyError:
SERVER_IP = '172.17.0.1'
else:
with IPDB() as ip:
- SERVER_IP = ip.routes['default'].gateway
+ try:
+ SERVER_IP = ip.routes['default'].gateway
+ except KeyError:
+ # during unittests ip.routes['default'] can be invalid
+ SERVER_IP = '127.0.0.1'
+
+if not SERVER_IP:
+ SERVER_IP = '127.0.0.1'
+
# dir
CONF_DIR = get_param('dir.conf', '/etc/yardstick')
@@ -40,12 +48,15 @@ SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples')
TESTCASE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_cases/')
TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/')
DOCS_DIR = join(REPOS_DIR, 'docs/testing/user/userguide/')
+OPENSTACK_CONF_DIR = '/etc/openstack'
# file
OPENRC = get_param('file.openrc', '/etc/yardstick/openstack.creds')
ETC_HOSTS = get_param('file.etc_hosts', '/etc/hosts')
CONF_FILE = join(CONF_DIR, 'yardstick.conf')
POD_FILE = join(CONF_DIR, 'pod.yaml')
+CLOUDS_CONF = join(OPENSTACK_CONF_DIR, 'clouds.yml')
+K8S_CONF_FILE = join(CONF_DIR, 'admin.conf')
CONF_SAMPLE_FILE = join(CONF_SAMPLE_DIR, 'yardstick.conf.sample')
FETCH_SCRIPT = get_param('file.fetch_script', 'utils/fetch_os_creds.sh')
FETCH_SCRIPT = join(RELENG_DIR, FETCH_SCRIPT)
@@ -66,6 +77,7 @@ INFLUXDB_PASS = get_param('influxdb.password', 'root')
INFLUXDB_DB_NAME = get_param('influxdb.db_name', 'yardstick')
INFLUXDB_IMAGE = get_param('influxdb.image', 'tutum/influxdb')
INFLUXDB_TAG = get_param('influxdb.tag', '0.13')
+INFLUXDB_DASHBOARD_PORT = 8083
# grafana
GRAFANA_IP = get_param('grafana.ip', SERVER_IP)
@@ -74,8 +86,10 @@ GRAFANA_USER = get_param('grafana.username', 'admin')
GRAFANA_PASS = get_param('grafana.password', 'admin')
GRAFANA_IMAGE = get_param('grafana.image', 'grafana/grafana')
GRAFANA_TAG = get_param('grafana.tag', '3.1.1')
+GRAFANA_MAPPING_PORT = 1948
# api
+API_PORT = 5000
DOCKER_URL = 'unix://var/run/docker.sock'
INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
SQLITE = 'sqlite:////tmp/yardstick.db'
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
new file mode 100644
index 000000000..e4c232830
--- /dev/null
+++ b/yardstick/common/kubernetes_utils.py
@@ -0,0 +1,137 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from kubernetes import client
+from kubernetes import config
+from kubernetes.client.rest import ApiException
+
+from yardstick.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+def get_core_api(): # pragma: no cover
+ try:
+ config.load_kube_config(config_file=consts.K8S_CONF_FILE)
+ except IOError:
+ LOG.exception('config file not found')
+ raise
+
+ return client.CoreV1Api()
+
+
+def create_replication_controller(template,
+ namespace='default',
+ wait=False,
+ **kwargs): # pragma: no cover
+
+ core_v1_api = get_core_api()
+ try:
+ core_v1_api.create_namespaced_replication_controller(namespace,
+ template,
+ **kwargs)
+ except ApiException:
+ LOG.exception('Create replication controller failed')
+ raise
+
+
+def delete_replication_controller(name,
+ namespace='default',
+ wait=False,
+ **kwargs): # pragma: no cover
+
+ core_v1_api = get_core_api()
+ body = kwargs.get('body', client.V1DeleteOptions())
+ kwargs.pop('body', None)
+ try:
+ core_v1_api.delete_namespaced_replication_controller(name,
+ namespace,
+ body,
+ **kwargs)
+ except ApiException:
+ LOG.exception('Delete replication controller failed')
+ raise
+
+
+def delete_pod(name,
+ namespace='default',
+ wait=False,
+ **kwargs): # pragma: no cover
+
+ core_v1_api = get_core_api()
+ body = kwargs.get('body', client.V1DeleteOptions())
+ kwargs.pop('body', None)
+ try:
+ core_v1_api.delete_namespaced_pod(name,
+ namespace,
+ body,
+ **kwargs)
+ except ApiException:
+ LOG.exception('Delete pod failed')
+ raise
+
+
+def read_pod(name,
+ namespace='default',
+ **kwargs): # pragma: no cover
+ core_v1_api = get_core_api()
+ try:
+ resp = core_v1_api.read_namespaced_pod(name, namespace, **kwargs)
+ except ApiException:
+ LOG.exception('Read pod failed')
+ raise
+ else:
+ return resp
+
+
+def read_pod_status(name, namespace='default', **kwargs): # pragma: no cover
+ return read_pod(name).status.phase
+
+
+def create_config_map(name,
+ data,
+ namespace='default',
+ wait=False,
+ **kwargs): # pragma: no cover
+ core_v1_api = get_core_api()
+ metadata = client.V1ObjectMeta(name=name)
+ body = client.V1ConfigMap(data=data, metadata=metadata)
+ try:
+ core_v1_api.create_namespaced_config_map(namespace, body, **kwargs)
+ except ApiException:
+ LOG.exception('Create config map failed')
+ raise
+
+
+def delete_config_map(name,
+ namespace='default',
+ wait=False,
+ **kwargs): # pragma: no cover
+ core_v1_api = get_core_api()
+ body = kwargs.get('body', client.V1DeleteOptions())
+ kwargs.pop('body', None)
+ try:
+ core_v1_api.delete_namespaced_config_map(name,
+ namespace,
+ body,
+ **kwargs)
+ except ApiException:
+ LOG.exception('Delete config map failed')
+ raise
+
+
+def get_pod_list(namespace='default'): # pragma: no cover
+ core_v1_api = get_core_api()
+ try:
+ return core_v1_api.list_namespaced_pod(namespace=namespace)
+ except ApiException:
+ LOG.exception('Get pod list failed')
+ raise
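A hypothetical usage of the helpers above; it needs the kubernetes client library, a reachable cluster and a valid admin.conf at consts.K8S_CONF_FILE, and the config map name and key content are made up:

    from yardstick.common import kubernetes_utils as k8s_utils

    k8s_utils.create_config_map('demo-key', data={'authorized_keys': 'ssh-rsa AAAA...'})
    for pod in k8s_utils.get_pod_list().items:
        print(pod.metadata.name, k8s_utils.read_pod_status(pod.metadata.name))
    k8s_utils.delete_config_map('demo-key')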
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 8787e605a..f027b7922 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -15,6 +15,7 @@ import logging
from keystoneauth1 import loading
from keystoneauth1 import session
+from cinderclient import client as cinderclient
from novaclient import client as novaclient
from glanceclient import client as glanceclient
from neutronclient.neutron import client as neutronclient
@@ -108,6 +109,21 @@ def get_heat_api_version(): # pragma: no cover
return api_version
+def get_cinder_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_VOLUME_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_VOLUME_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_cinder_client(): # pragma: no cover
+ sess = get_session()
+ return cinderclient.Client(get_cinder_client_version(), session=sess)
+
+
def get_nova_client_version(): # pragma: no cover
try:
api_version = os.environ['OS_COMPUTE_API_VERSION']
@@ -430,3 +446,11 @@ def get_port_id_by_ip(neutron_client, ip_address): # pragma: no cover
def get_image_id(glance_client, image_name): # pragma: no cover
images = glance_client.images.list()
return next((i.id for i in images if i.name == image_name), None)
+
+
+# *********************************************
+# CINDER
+# *********************************************
+def get_volume_id(volume_name): # pragma: no cover
+ volumes = get_cinder_client().volumes.list()
+ return next((v.id for v in volumes if v.name == volume_name), None)
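get_volume_id() is what heat.py uses further down to decide between attaching an existing volume and a get_resource reference. A hedged usage sketch (the volume name is illustrative and OS_* credentials must be set in the environment):

    from yardstick.common import openstack_utils as op_utils

    volume_id = op_utils.get_volume_id('yardstick-volume')
    if volume_id is None:
        print('no such volume, Heat will create one via get_resource')
    else:
        print('attaching existing volume %s' % volume_id)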
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 7aab46942..7a64b8ca2 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -23,9 +23,15 @@ import logging
import os
import subprocess
import sys
+import collections
+import socket
+import random
from functools import reduce
+from contextlib import closing
import yaml
+import six
+from flask import jsonify
from six.moves import configparser
from oslo_utils import importutils
from oslo_serialization import jsonutils
@@ -121,6 +127,14 @@ def makedirs(d):
raise
+def remove_file(path):
+ try:
+ os.remove(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
def execute_command(cmd):
exec_msg = "Executing command: '%s'" % cmd
logger.debug(exec_msg)
@@ -158,7 +172,15 @@ def write_file(path, data, mode='w'):
def parse_ini_file(path):
parser = configparser.ConfigParser()
- parser.read(path)
+
+ try:
+ files = parser.read(path)
+ except configparser.MissingSectionHeaderError:
+ logger.exception('invalid file type')
+ raise
+ else:
+ if not files:
+ raise RuntimeError('file does not exist')
try:
default = {k: v for k, v in parser.items('DEFAULT')}
@@ -189,3 +211,74 @@ def get_port_ip(sshclient, port):
if status:
raise RuntimeError(stderr)
return stdout.rstrip()
+
+
+def flatten_dict_key(data):
+ next_data = {}
+
+ # use list, because iterable is too generic
+ if not any(isinstance(v, (collections.Mapping, list))
+ for v in data.values()):
+ return data
+
+ for k, v in six.iteritems(data):
+ if isinstance(v, collections.Mapping):
+ for n_k, n_v in six.iteritems(v):
+ next_data["%s.%s" % (k, n_k)] = n_v
+ # use list because iterable is too generic
+ elif isinstance(v, list):
+ for index, item in enumerate(v):
+ next_data["%s%d" % (k, index)] = item
+ else:
+ next_data[k] = v
+
+ return flatten_dict_key(next_data)
+
+
+def translate_to_str(obj):
+ if isinstance(obj, collections.Mapping):
+ return {str(k): translate_to_str(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [translate_to_str(ele) for ele in obj]
+ elif isinstance(obj, six.text_type):
+ return str(obj)
+ return obj
+
+
+def result_handler(status, data):
+ result = {
+ 'status': status,
+ 'result': data
+ }
+ return jsonify(result)
+
+
+def change_obj_to_dict(obj):
+ dic = {}
+ for k, v in vars(obj).items():
+ try:
+ vars(v)
+ except TypeError:
+ dic.update({k: v})
+ return dic
+
+
+def set_dict_value(dic, keys, value):
+ return_dic = dic
+
+ for key in keys.split('.'):
+
+ return_dic.setdefault(key, {})
+ if key == keys.split('.')[-1]:
+ return_dic[key] = value
+ else:
+ return_dic = return_dic[key]
+ return dic
+
+
+def get_free_port(ip):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+ while True:
+ port = random.randint(5000, 10000)
+ if s.connect_ex((ip, port)) != 0:
+ return port
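flatten_dict_key() and set_dict_value() are pure dict helpers, so their behaviour can be checked directly (assuming the yardstick package is importable; the sample data is illustrative):

    from yardstick.common import utils

    nested = {"rtt": {"ares": 1.125}, "rates": [10, 20]}
    assert utils.flatten_dict_key(nested) == {"rtt.ares": 1.125,
                                              "rates0": 10, "rates1": 20}

    template = {}
    utils.set_dict_value(template, 'spec.template.metadata.labels.app', 'demo')
    assert template == {'spec': {'template': {'metadata': {'labels': {'app': 'demo'}}}}}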
diff --git a/yardstick/dispatcher/base.py b/yardstick/dispatcher/base.py
index e77249c54..1fc0a2f31 100644
--- a/yardstick/dispatcher/base.py
+++ b/yardstick/dispatcher/base.py
@@ -41,9 +41,11 @@ class Base(object):
def get(config):
"""Returns instance of a dispatcher for dispatcher type.
"""
- out_type = config['DEFAULT']['dispatcher']
+ list_dispatcher = \
+ [Base.get_cls(out_type.capitalize())(config)
+ for out_type in config['DEFAULT']['dispatcher']]
- return Base.get_cls(out_type.capitalize())(config)
+ return list_dispatcher
@abc.abstractmethod
def flush_result_data(self, data):
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index 373aae13a..f157e91f9 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -12,10 +12,9 @@ from __future__ import absolute_import
import logging
import time
-import collections
import requests
-import six
+from yardstick.common import utils
from third_party.influxdb.influxdb_line_protocol import make_lines
from yardstick.dispatcher.base import Base as DispatchBase
@@ -80,7 +79,7 @@ class InfluxdbDispatcher(DispatchBase):
msg = {}
point = {
"measurement": case,
- "fields": self._dict_key_flatten(data["data"]),
+ "fields": utils.flatten_dict_key(data["data"]),
"time": self._get_nano_timestamp(data),
"tags": self._get_extended_tags(criteria),
}
@@ -89,27 +88,6 @@ class InfluxdbDispatcher(DispatchBase):
return make_lines(msg).encode('utf-8')
- def _dict_key_flatten(self, data):
- next_data = {}
-
- # use list, because iterable is too generic
- if not [v for v in data.values() if
- isinstance(v, (collections.Mapping, list))]:
- return data
-
- for k, v in six.iteritems(data):
- if isinstance(v, collections.Mapping):
- for n_k, n_v in six.iteritems(v):
- next_data["%s.%s" % (k, n_k)] = n_v
- # use list because iterable is too generic
- elif isinstance(v, list):
- for index, item in enumerate(v):
- next_data["%s%d" % (k, index)] = item
- else:
- next_data[k] = v
-
- return self._dict_key_flatten(next_data)
-
def _get_nano_timestamp(self, results):
try:
timestamp = results["timestamp"]
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index 1d770f724..2df6037f3 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -96,7 +96,6 @@ class GenericVNF(object):
return address.version
def _ip_to_hex(self, ip_addr):
- ip_to_convert = ip_addr.split(".")
ip_x = ip_addr
if self.get_ip_version(ip_addr) == 4:
ip_to_convert = ip_addr.split(".")
diff --git a/yardstick/network_services/vnf_generic/vnfdgen.py b/yardstick/network_services/vnf_generic/vnfdgen.py
index 40cc14a49..b56a91915 100644
--- a/yardstick/network_services/vnf_generic/vnfdgen.py
+++ b/yardstick/network_services/vnf_generic/vnfdgen.py
@@ -48,7 +48,7 @@ def generate_vnfd(vnf_model, node):
rendered_vnfd = render(vnf_model, **node)
# This is done to get rid of issues with serializing node
del node["get"]
- filled_vnfd = yaml.load(rendered_vnfd)
+ filled_vnfd = yaml.safe_load(rendered_vnfd)
return filled_vnfd
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index fd6c4f6ff..beb63b421 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -230,14 +230,50 @@ name (i.e. %s).\
'value': {'get_resource': name}
}
+ def add_volume(self, name, size=10):
+ """add to the template a volume description"""
+ log.debug("adding Cinder::Volume '%s' size '%d' ", name, size)
+
+ self.resources[name] = {
+ 'type': 'OS::Cinder::Volume',
+ 'properties': {'name': name,
+ 'size': size}
+ }
+
+ self._template['outputs'][name] = {
+ 'description': 'Volume %s ID' % name,
+ 'value': {'get_resource': name}
+ }
+
+ def add_volume_attachment(self, server_name, volume_name, mountpoint=None):
+ """add to the template an association of volume to instance"""
+ log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ", server_name,
+ volume_name)
+
+ name = "%s-%s" % (server_name, volume_name)
+
+ volume_id = op_utils.get_volume_id(volume_name)
+ if not volume_id:
+ volume_id = {'get_resource': volume_name}
+ self.resources[name] = {
+ 'type': 'OS::Cinder::VolumeAttachment',
+ 'properties': {'instance_uuid': {'get_resource': server_name},
+ 'volume_id': volume_id}
+ }
+
+ if mountpoint:
+ self.resources[name]['properties']['mountpoint'] = mountpoint
+
def add_network(self, name, physical_network='physnet1', provider=None,
- segmentation_id=None):
+ segmentation_id=None, port_security_enabled=None):
"""add to the template a Neutron Net"""
log.debug("adding Neutron::Net '%s'", name)
if provider is None:
self.resources[name] = {
'type': 'OS::Neutron::Net',
- 'properties': {'name': name}
+ 'properties': {
+ 'name': name,
+ }
}
else:
self.resources[name] = {
@@ -245,12 +281,15 @@ name (i.e. %s).\
'properties': {
'name': name,
'network_type': 'vlan',
- 'physical_network': physical_network
- }
+ 'physical_network': physical_network,
+ },
}
if segmentation_id:
- seg_id_dit = {'segmentation_id': segmentation_id}
- self.resources[name]["properties"].update(seg_id_dit)
+ self.resources[name]['properties']['segmentation_id'] = segmentation_id
+ # if port security is not defined then don't add to template:
+ # some deployments don't have port security plugin installed
+ if port_security_enabled is not None:
+ self.resources[name]['properties']['port_security_enabled'] = port_security_enabled
def add_server_group(self, name, policies): # pragma: no cover
"""add to the template a ServerGroup"""
@@ -262,8 +301,9 @@ name (i.e. %s).\
'policies': policies}
}
- def add_subnet(self, name, network, cidr):
- """add to the template a Neutron Subnet"""
+ def add_subnet(self, name, network, cidr, enable_dhcp='true', gateway_ip=None):
+ """add to the template a Neutron Subnet
+ """
log.debug("adding Neutron::Subnet '%s' in network '%s', cidr '%s'",
name, network, cidr)
self.resources[name] = {
@@ -272,9 +312,12 @@ name (i.e. %s).\
'properties': {
'name': name,
'cidr': cidr,
- 'network_id': {'get_resource': network}
+ 'network_id': {'get_resource': network},
+ 'enable_dhcp': enable_dhcp,
}
}
+ if gateway_ip is not None:
+ self.resources[name]['properties']['gateway_ip'] = gateway_ip
self._template['outputs'][name] = {
'description': 'subnet %s ID' % name,
@@ -316,16 +359,18 @@ name (i.e. %s).\
}
}
- def add_port(self, name, network_name, subnet_name, sec_group_id=None,
- provider=None):
- """add to the template a named Neutron Port"""
- log.debug("adding Neutron::Port '%s', network:'%s', subnet:'%s', "
- "secgroup:%s", name, network_name, subnet_name, sec_group_id)
+ def add_port(self, name, network_name, subnet_name, vnic_type, sec_group_id=None,
+ provider=None, allowed_address_pairs=None):
+ """add to the template a named Neutron Port
+ """
+ log.debug("adding Neutron::Port '%s', network:'%s', subnet:'%s', vnic_type:'%s', "
+ "secgroup:%s", name, network_name, subnet_name, vnic_type, sec_group_id)
self.resources[name] = {
'type': 'OS::Neutron::Port',
'depends_on': [subnet_name],
'properties': {
'name': name,
+ 'binding:vnic_type': vnic_type,
'fixed_ips': [{'subnet': {'get_resource': subnet_name}}],
'network_id': {'get_resource': network_name},
'replacement_policy': 'AUTO',
@@ -341,6 +386,10 @@ name (i.e. %s).\
self.resources[name]['properties']['security_groups'] = \
[sec_group_id]
+ if allowed_address_pairs:
+ self.resources[name]['properties'][
+ 'allowed_address_pairs'] = allowed_address_pairs
+
self._template['outputs'][name] = {
'description': 'Address for interface %s' % name,
'value': {'get_attr': [name, 'fixed_ips', 0, 'ip_address']}
@@ -534,6 +583,7 @@ name (i.e. %s).\
}
HEAT_WAIT_LOOP_INTERVAL = 2
+ HEAT_CREATE_COMPLETE_STATUS = u'CREATE_COMPLETE'
def create(self, block=True, timeout=3600):
"""
@@ -558,14 +608,18 @@ name (i.e. %s).\
if not block:
self.outputs = stack.outputs = {}
+ end_time = time.time()
+ log.info("Created stack '%s' in %.3e secs",
+ self.name, end_time - start_time)
return stack
time_limit = start_time + timeout
- for status in iter(self.status, u'CREATE_COMPLETE'):
+ for status in iter(self.status, self.HEAT_CREATE_COMPLETE_STATUS):
log.debug("stack state %s", status)
if status == u'CREATE_FAILED':
- raise RuntimeError(
- heat_client.stacks.get(self.uuid).stack_status_reason)
+ stack_status_reason = heat_client.stacks.get(self.uuid).stack_status_reason
+ heat_client.stacks.delete(self.uuid)
+ raise RuntimeError(stack_status_reason)
if time.time() > time_limit:
raise RuntimeError("Heat stack create timeout")
@@ -573,7 +627,7 @@ name (i.e. %s).\
end_time = time.time()
outputs = heat_client.stacks.get(self.uuid).outputs
- log.info("Created stack '%s' in %d secs",
+ log.info("Created stack '%s' in %.3e secs",
self.name, end_time - start_time)
# keep outputs as unicode
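For reference, this is the shape of the resources that add_volume() and add_volume_attachment() put into the template when get_volume_id() finds no pre-existing volume; the server and volume names are illustrative:

    server_name, volume_name, size = "vnf-server", "vnf-volume", 10

    resources = {
        volume_name: {
            "type": "OS::Cinder::Volume",
            "properties": {"name": volume_name, "size": size},
        },
        "%s-%s" % (server_name, volume_name): {
            "type": "OS::Cinder::VolumeAttachment",
            "properties": {
                "instance_uuid": {"get_resource": server_name},
                # falls back to a get_resource reference when no volume ID is found
                "volume_id": {"get_resource": volume_name},
            },
        },
    }
    print(resources)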
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
new file mode 100644
index 000000000..6d7045f58
--- /dev/null
+++ b/yardstick/orchestrator/kubernetes.py
@@ -0,0 +1,130 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+from yardstick.common import utils
+from yardstick.common import kubernetes_utils as k8s_utils
+
+
+class KubernetesObject(object):
+
+ def __init__(self, name, **kwargs):
+ super(KubernetesObject, self).__init__()
+ self.name = name
+ self.image = kwargs.get('image', 'openretriever/yardstick')
+ self.command = [kwargs.get('command', '/bin/bash')]
+ self.args = kwargs.get('args', [])
+ self.ssh_key = kwargs.get('ssh_key', 'yardstick_key')
+
+ self.volumes = []
+
+ self.template = {
+ "apiVersion": "v1",
+ "kind": "ReplicationController",
+ "metadata": {
+ "name": ""
+ },
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "metadata": {
+ "labels": {
+ "app": ""
+ }
+ },
+ "spec": {
+ "containers": [],
+ "volumes": []
+ }
+ }
+ }
+ }
+
+ self._change_value_according_name(name)
+ self._add_containers()
+ self._add_ssh_key_volume()
+ self._add_volumes()
+
+ def get_template(self):
+ return self.template
+
+ def _change_value_according_name(self, name):
+ utils.set_dict_value(self.template, 'metadata.name', name)
+
+ utils.set_dict_value(self.template,
+ 'spec.template.metadata.labels.app',
+ name)
+
+ def _add_containers(self):
+ containers = [self._add_container()]
+ utils.set_dict_value(self.template,
+ 'spec.template.spec.containers',
+ containers)
+
+ def _add_container(self):
+ container_name = '{}-container'.format(self.name)
+ ssh_key_mount_path = "/root/.ssh/"
+
+ container = {
+ "args": self.args,
+ "command": self.command,
+ "image": self.image,
+ "name": container_name,
+ "volumeMounts": [
+ {
+ "mountPath": ssh_key_mount_path,
+ "name": self.ssh_key
+ }
+ ]
+ }
+
+ return container
+
+ def _add_volumes(self):
+ utils.set_dict_value(self.template,
+ 'spec.template.spec.volumes',
+ self.volumes)
+
+ def _add_volume(self, volume):
+ self.volumes.append(volume)
+
+ def _add_ssh_key_volume(self):
+ key_volume = {
+ "configMap": {
+ "name": self.ssh_key
+ },
+ "name": self.ssh_key
+ }
+ self._add_volume(key_volume)
+
+
+class KubernetesTemplate(object):
+
+ def __init__(self, name, template_cfg):
+ self.name = name
+ self.ssh_key = '{}-key'.format(name)
+
+ self.rcs = [self._get_rc_name(rc) for rc in template_cfg]
+ self.k8s_objs = [KubernetesObject(self._get_rc_name(rc),
+ ssh_key=self.ssh_key,
+ **cfg)
+ for rc, cfg in template_cfg.items()]
+ self.pods = []
+
+ def _get_rc_name(self, rc_name):
+ return '{}-{}'.format(rc_name, self.name)
+
+ def get_rc_pods(self):
+ resp = k8s_utils.get_pod_list()
+ self.pods = [p.metadata.name for p in resp.items for s in self.rcs
+ if p.metadata.name.startswith(s)]
+
+ return self.pods
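A hypothetical instantiation of KubernetesTemplate; the context name, the 'host' replication controller and its command are made up, and only template rendering is shown (get_rc_pods() would additionally need a live cluster):

    from yardstick.orchestrator.kubernetes import KubernetesTemplate

    servers_cfg = {
        'host': {
            'image': 'openretriever/yardstick',
            'command': '/bin/bash',
            'args': ['-c', 'service ssh restart; while true; do sleep 10000; done'],
        }
    }
    template = KubernetesTemplate('k8s-demo', servers_cfg)
    print(template.rcs)                           # ['host-k8s-demo']
    print(template.k8s_objs[0].get_template())    # ReplicationController body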
diff --git a/yardstick/resources/scripts/install/storperf.bash b/yardstick/resources/scripts/install/storperf.bash
index 9d20a5a8a..4974bacb1 100644
--- a/yardstick/resources/scripts/install/storperf.bash
+++ b/yardstick/resources/scripts/install/storperf.bash
@@ -12,20 +12,25 @@
# StorPerf plugin installation script
# After installation, it will run StorPerf container on Jump Host
# Requirements:
-# 1. docker has been installed on the Jump Host
-# 2. Openstack environment file for storperf, '~/storperf_admin-rc', is ready.
+# 1. docker and docker-compose have been installed on the Jump Host
+# 2. Openstack environment file for storperf, '~/storperf_admin-rc', is ready
+# 3. Jump Host must have internet connectivity for downloading docker image
+# 4. Jump Host has access to the OpenStack Controller API
+# 5. Enough OpenStack floating IPs must be available to match your agent count
+# 6. The following ports are exposed if you use the supplied docker-compose.yaml file:
+# * 5000 for StorPerf ReST API and Swagger UI
+# * 8000 for StorPerf's Graphite Web Server
set -e
-mkdir -p /tmp/storperf-yardstick
+WWW_DATA_UID=33
+WWW_DATA_GID=33
-docker pull opnfv/storperf
+export TAG=${DOCKER_TAG:-latest}
+export ENV_FILE=~/storperf_admin-rc
+export CARBON_DIR=~/carbon
-STORPERF_DIR=/tmp/storperf-yardstick/carbon
-docker run -t \
---env-file ~/storperf_admin-rc \
--p 5000:5000 -p 8000:8000 \
--v $STORPERF_DIR:/opt/graphite/storage/whisper \
---name storperf-yardstick opnfv/storperf &
+sudo install --owner=${WWW_DATA_UID} --group=${WWW_DATA_GID} -d "${CARBON_DIR}"
-chown www-data:www-data $STORPERF_DIR
+docker-compose -f ~/docker-compose.yaml pull
+docker-compose -f ~/docker-compose.yaml up -d
diff --git a/yardstick/resources/scripts/remove/storperf.bash b/yardstick/resources/scripts/remove/storperf.bash
index a8eb51c89..b241d1893 100644
--- a/yardstick/resources/scripts/remove/storperf.bash
+++ b/yardstick/resources/scripts/remove/storperf.bash
@@ -13,8 +13,20 @@
set -e
-docker stop storperf-yardstick
-docker rm -f storperf-yardstick
-docker rmi opnfv/storperf
+export TAG=${DOCKER_TAG:-latest}
+export ENV_FILE=~/storperf_admin-rc
+export CARBON_DIR=~/carbon
-rm -rf /tmp/storperf-yardstick
+rm -rf "${CARBON_DIR}"
+
+docker-compose down
+
+for container_name in storperf swagger-ui http-front-end
+do
+ container=$(docker ps -a -q -f name=$container_name)
+ if [[ ! -z $container ]]
+ then
+ echo "Stopping any existing $container_name container"
+ docker rm -fv $container
+ fi
+done
diff --git a/yardstick/vTC/apexlake/tests/deployment_unit_test.py b/yardstick/vTC/apexlake/tests/deployment_unit_test.py
index 5a9178f53..1ff4225d6 100644
--- a/yardstick/vTC/apexlake/tests/deployment_unit_test.py
+++ b/yardstick/vTC/apexlake/tests/deployment_unit_test.py
@@ -130,6 +130,7 @@ class DummyDeploymentUnit(mut.DeploymentUnit):
raise Exception
+@mock.patch("experimental_framework.deployment_unit.time")
class TestDeploymentUnit(unittest.TestCase):
def setUp(self):
@@ -140,7 +141,7 @@ class TestDeploymentUnit(unittest.TestCase):
@mock.patch('experimental_framework.heat_manager.HeatManager',
side_effect=DummyHeatManager)
- def test_constructor_for_sanity(self, mock_heat_manager):
+ def test_constructor_for_sanity(self, mock_heat_manager, mock_time):
du = mut.DeploymentUnit(dict())
self.assertTrue(isinstance(du.heat_manager, DummyHeatManager))
mock_heat_manager.assert_called_once_with(dict())
@@ -150,7 +151,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManager)
@mock.patch('os.path.isfile')
def test_deploy_heat_template_for_failure(self, mock_os_is_file,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
mock_os_is_file.return_value = False
du = mut.DeploymentUnit(dict())
template_file = ''
@@ -163,7 +164,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManager)
@mock.patch('os.path.isfile')
def test_deploy_heat_template_for_success(self, mock_os_is_file,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
mock_os_is_file.return_value = True
du = mut.DeploymentUnit(dict())
template_file = ''
@@ -178,7 +179,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManagerComplete)
@mock.patch('os.path.isfile')
def test_deploy_heat_template_2_for_success(self, mock_os_is_file,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
mock_os_is_file.return_value = True
du = mut.DeploymentUnit(dict())
template_file = ''
@@ -196,7 +197,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyDeploymentUnit)
def test_deploy_heat_template_3_for_success(self, mock_dep_unit,
mock_os_is_file,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
mock_os_is_file.return_value = True
du = mut.DeploymentUnit(dict())
template_file = ''
@@ -212,7 +213,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManagerFailed)
@mock.patch('os.path.isfile')
def test_deploy_heat_template_for_success_2(self, mock_os_is_file,
- mock_heat_manager, mock_log):
+ mock_heat_manager, mock_log, mock_time):
mock_os_is_file.return_value = True
du = DummyDeploymentUnit(dict())
template_file = ''
@@ -226,7 +227,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManagerDestroy)
@mock.patch('experimental_framework.common.LOG')
def test_destroy_heat_template_for_success(self, mock_log,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
openstack_credentials = dict()
du = mut.DeploymentUnit(openstack_credentials)
du.deployed_stacks = ['stack']
@@ -238,14 +239,14 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManagerDestroyException)
@mock.patch('experimental_framework.common.LOG')
def test_destroy_heat_template_for_success_2(self, mock_log,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
openstack_credentials = dict()
du = mut.DeploymentUnit(openstack_credentials)
du.deployed_stacks = ['stack']
stack_name = 'stack'
self.assertFalse(du.destroy_heat_template(stack_name))
- def test_destroy_all_deployed_stacks_for_success(self):
+ def test_destroy_all_deployed_stacks_for_success(self, mock_time):
du = DeploymentUnitDestroy()
du.destroy_all_deployed_stacks()
self.assertTrue(du.destroy_heat_template())
@@ -254,7 +255,7 @@ class TestDeploymentUnit(unittest.TestCase):
side_effect=DummyHeatManagerReiteration)
@mock.patch('os.path.isfile')
def test_deploy_heat_template_for_success_3(self, mock_os_is_file,
- mock_heat_manager):
+ mock_heat_manager, mock_time):
mock_os_is_file.return_value = True
du = mut.DeploymentUnit(dict())
template = 'template_reiteration'
diff --git a/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py b/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
index 96ead5ef7..9fa860ab4 100644
--- a/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
+++ b/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
@@ -359,6 +359,7 @@ class MockRunCommand:
return MockRunCommand.ret_val_finalization
+@mock.patch('experimental_framework.packet_generators.dpdk_packet_generator.time')
class TestDpdkPacketGenOthers(unittest.TestCase):
def setUp(self):
@@ -370,7 +371,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
@mock.patch('experimental_framework.packet_generators.'
'dpdk_packet_generator.DpdkPacketGenerator.'
'_cores_configuration')
- def test__get_core_nics_for_failure(self, mock_cores_configuration):
+ def test__get_core_nics_for_failure(self, mock_cores_configuration, mock_time):
mock_cores_configuration.return_value = None
self.assertRaises(ValueError, mut.DpdkPacketGenerator._get_core_nics,
'', '')
@@ -379,7 +380,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
'dpdk_packet_generator.DpdkPacketGenerator.'
'_cores_configuration')
def test__get_core_nics_one_nic_for_success(self,
- mock_cores_configuration):
+ mock_cores_configuration, mock_time):
mock_cores_configuration.return_value = 'ret_val'
expected = 'ret_val'
output = mut.DpdkPacketGenerator._get_core_nics(1, 'coremask')
@@ -390,7 +391,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
'dpdk_packet_generator.DpdkPacketGenerator.'
'_cores_configuration')
def test__get_core_nics_two_nics_for_success(self,
- mock_cores_configuration):
+ mock_cores_configuration, mock_time):
mock_cores_configuration.return_value = 'ret_val'
expected = 'ret_val'
output = mut.DpdkPacketGenerator._get_core_nics(2, 'coremask')
@@ -398,7 +399,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
mock_cores_configuration.assert_called_once_with('coremask', 1, 2, 2)
@mock.patch('os.path.isfile')
- def test__init_input_validation_for_success(self, mock_is_file):
+ def test__init_input_validation_for_success(self, mock_is_file, mock_time):
mock_is_file.return_value = True
pcap_file_0 = 'pcap_file_0'
@@ -419,7 +420,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
variables), None)
@mock.patch('os.path.isfile')
- def test__init_input_validation_for_failure(self, mock_is_file):
+ def test__init_input_validation_for_failure(self, mock_is_file, mock_time):
mock_is_file.return_value = True
pcap_file_0 = 'pcap_file_0'
@@ -440,7 +441,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile')
- def test__init_input_validation_for_failure_2(self, mock_is_file):
+ def test__init_input_validation_for_failure_2(self, mock_is_file, mock_time):
mock_is_file.return_value = True
pcap_directory = None
@@ -461,7 +462,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile')
- def test__init_input_validation_for_failure_3(self, mock_is_file):
+ def test__init_input_validation_for_failure_3(self, mock_is_file, mock_time):
mock_is_file.return_value = True
pcap_directory = 'directory'
@@ -482,7 +483,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile')
- def test__init_input_validation_for_failure_4(self, mock_is_file):
+ def test__init_input_validation_for_failure_4(self, mock_is_file, mock_time):
mock_is_file.return_value = True
pcap_directory = 'directory'
@@ -503,7 +504,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile')
- def test__init_input_validation_for_failure_5(self, mock_is_file):
+ def test__init_input_validation_for_failure_5(self, mock_is_file, mock_time):
mock_is_file.return_value = True
pcap_directory = 'directory'
@@ -524,7 +525,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile', side_effect=[False])
- def test__init_input_validation_for_failure_6(self, mock_is_file):
+ def test__init_input_validation_for_failure_6(self, mock_is_file, mock_time):
# mock_is_file.return_value = False
pcap_directory = 'directory'
@@ -545,7 +546,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile', side_effect=[True, False])
- def test__init_input_validation_for_failure_7(self, mock_is_file):
+ def test__init_input_validation_for_failure_7(self, mock_is_file, mock_time):
pcap_directory = 'directory'
pcap_file_0 = 'pcap_file_0'
pcap_file_1 = 'pcap_file_1'
@@ -564,7 +565,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.path.isfile', side_effect=[True, True, False])
- def test__init_input_validation_for_failure_8(self, mock_is_file):
+ def test__init_input_validation_for_failure_8(self, mock_is_file, mock_time):
pcap_directory = 'directory'
pcap_file_0 = 'pcap_file_0'
pcap_file_1 = 'pcap_file_1'
@@ -583,13 +584,13 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
lua_script, pcap_directory, lua_directory, variables)
@mock.patch('os.chdir')
- def test__chdir_for_success(self, mock_os_chdir):
+ def test__chdir_for_success(self, mock_os_chdir, mock_time):
mut.DpdkPacketGenerator._chdir('directory')
mock_os_chdir.assert_called_once_with('directory')
@mock.patch('experimental_framework.common.run_command',
side_effect=MockRunCommand.mock_run_command)
- def test__init_physical_nics_for_success(self, mock_run_command):
+ def test__init_physical_nics_for_success(self, mock_run_command, mock_time):
dpdk_interfaces = 1
dpdk_vars = dict()
@@ -608,7 +609,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
@mock.patch('experimental_framework.common.run_command',
side_effect=MockRunCommand.mock_run_command)
- def test__init_physical_nics_for_success_2(self, mock_run_command):
+ def test__init_physical_nics_for_success_2(self, mock_run_command, mock_time):
dpdk_interfaces = 2
dpdk_vars = dict()
@@ -626,7 +627,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
[True, True, True, True, True, True])
@mock.patch('experimental_framework.common.run_command')
- def test__init_physical_nics_for_failure(self, mock_run_command):
+ def test__init_physical_nics_for_failure(self, mock_run_command, mock_time):
dpdk_interfaces = 3
dpdk_vars = dict()
self.assertRaises(ValueError, self.mut._init_physical_nics,
@@ -634,7 +635,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
@mock.patch('experimental_framework.common.run_command',
side_effect=MockRunCommand.mock_run_command_finalization)
- def test__finalize_physical_nics_for_success(self, mock_run_command):
+ def test__finalize_physical_nics_for_success(self, mock_run_command, mock_time):
dpdk_interfaces = 1
dpdk_vars = dict()
dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] = 'dpdk_directory/'
@@ -652,7 +653,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
@mock.patch('experimental_framework.common.run_command',
side_effect=MockRunCommand.mock_run_command_finalization)
- def test__finalize_physical_nics_for_success_2(self, mock_run_command):
+ def test__finalize_physical_nics_for_success_2(self, mock_run_command, mock_time):
dpdk_interfaces = 2
dpdk_vars = dict()
dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] = 'dpdk_directory/'
@@ -668,34 +669,34 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
self.assertEqual(MockRunCommand.mock_run_command_finalization(),
[True, True, True, True, True, True])
- def test__finalize_physical_nics_for_failure(self):
+ def test__finalize_physical_nics_for_failure(self, mock_time):
dpdk_interfaces = 0
dpdk_vars = dict()
self.assertRaises(ValueError, self.mut._finalize_physical_nics,
dpdk_interfaces, dpdk_vars)
- def test__cores_configuration_for_success(self):
+ def test__cores_configuration_for_success(self, mock_time):
coremask = '1f'
expected = '[2:1].0,[4:3].1'
output = mut.DpdkPacketGenerator._cores_configuration(coremask,
1, 2, 2)
self.assertEqual(expected, output)
- def test__cores_configuration_for_success_2(self):
+ def test__cores_configuration_for_success_2(self, mock_time):
coremask = '1f'
expected = '2.0,[4:3].1'
output = mut.DpdkPacketGenerator._cores_configuration(coremask,
1, 1, 2)
self.assertEqual(expected, output)
- def test__cores_configuration_for_success_3(self):
+ def test__cores_configuration_for_success_3(self, mock_time):
coremask = '1f'
expected = '[3:2].0,4.1'
output = mut.DpdkPacketGenerator._cores_configuration(coremask,
1, 2, 1)
self.assertEqual(expected, output)
- def test__cores_configuration_for_failure(self):
+ def test__cores_configuration_for_failure(self, mock_time):
coremask = '1'
self.assertRaises(ValueError,
mut.DpdkPacketGenerator._cores_configuration,
@@ -703,7 +704,7 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
@mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.common.run_command')
- def test__change_vlan_for_success(self, mock_run_command, mock_log):
+ def test__change_vlan_for_success(self, mock_run_command, mock_log, mock_time):
mut.DpdkPacketGenerator._change_vlan('/directory/', 'pcap_file', '10')
expected_param = '/directory/vlan_tag.sh /directory/pcap_file 10'
mock_run_command.assert_called_with(expected_param)
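Editor's note (not part of the patch): throughout TestDpdkPacketGenOthers each test method gains a trailing mock_time parameter even though no per-method decorator is added in these hunks, which suggests the time patch is applied at class level earlier in this file's diff. As a minimal, self-contained sketch of that mechanism (using the stdlib unittest.mock here, whereas this repository imports the external mock package; the class and test names below are illustrative only):

    import time
    import unittest
    from unittest import mock


    # Patching at class level decorates every test_* method, so each one
    # receives the patched object as one extra positional argument -- the
    # same reason the signatures above grow a trailing mock_time parameter.
    @mock.patch('time.sleep')
    class ClassLevelPatchExample(unittest.TestCase):

        def test_sleep_is_stubbed(self, mock_sleep):
            time.sleep(10)                        # intercepted, returns immediately
            mock_sleep.assert_called_once_with(10)


    if __name__ == '__main__':
        unittest.main()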
diff --git a/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py b/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py
index 2bd8b7b38..69c5d745e 100644
--- a/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py
+++ b/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py
@@ -257,6 +257,7 @@ class InstantiationValidationInitTest(unittest.TestCase):
self.assertEqual(dummy_os_kill('', '', True), [1, 1])
self.assertEqual(dummy_run_command('', True), [1, 1, 0, 0, 0])
+ @mock.patch('experimental_framework.benchmarks.instantiation_validation_benchmark.time')
@mock.patch('os.chdir')
@mock.patch('experimental_framework.common.run_command',
side_effect=dummy_run_command_2)
@@ -265,7 +266,7 @@ class InstantiationValidationInitTest(unittest.TestCase):
'InstantiationValidationBenchmark._get_pids')
@mock.patch('os.kill', side_effect=dummy_os_kill)
def test__init_packet_checker_for_success(self, mock_kill, mock_pids,
- mock_run_command, mock_chdir):
+ mock_run_command, mock_chdir, mock_time):
global command_counter
command_counter = [0, 0, 0, 0, 0]
mock_pids.return_value = [1234, 4321]
@@ -314,13 +315,14 @@ class InstantiationValidationInitTest(unittest.TestCase):
self.assertEqual(dummy_replace_in_file('', '', '', True),
[0, 0, 0, 1, 1, 1])
+ @mock.patch('experimental_framework.benchmarks.instantiation_validation_benchmark.time')
@mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.packet_generators.'
'dpdk_packet_generator.DpdkPacketGenerator',
side_effect=DummyDpdkPacketGenerator)
@mock.patch('experimental_framework.common.get_dpdk_pktgen_vars')
def test_run_for_success(self, mock_common_get_vars, mock_pktgen,
- mock_log):
+ mock_log, mock_time):
rval = dict()
rval[cfs.CFSP_DPDK_BUS_SLOT_NIC_2] = 'bus_2'
rval[cfs.CFSP_DPDK_NAME_IF_2] = 'if_2'
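Editor's note (not part of the patch): in this file the time patch is added above the existing decorators, and the corresponding mock_time argument lands at the end of each signature. That follows from decorator application order: stacked @mock.patch decorators apply bottom-up, so the patch closest to the function maps to the first mock argument and the topmost patch to the last. A minimal sketch of that ordering, using stdlib modules and illustrative test names only:

    import os
    import time
    import unittest
    from unittest import mock


    class StackedPatchOrderExample(unittest.TestCase):

        @mock.patch('time.time')          # topmost patch  -> last mock argument
        @mock.patch('os.path.isfile')     # closest patch  -> first mock argument
        def test_argument_order(self, mock_isfile, mock_time):
            mock_isfile.return_value = True
            mock_time.return_value = 0.0
            self.assertTrue(os.path.isfile('no-such-file'))
            self.assertEqual(time.time(), 0.0)


    if __name__ == '__main__':
        unittest.main()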