Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/standalone/model.py | 15
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py | 74
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py | 4
-rw-r--r--  yardstick/benchmark/core/report.py | 165
-rw-r--r--  yardstick/benchmark/core/task.py | 3
-rw-r--r--  yardstick/benchmark/runners/iteration.py | 1
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 34
-rw-r--r--  yardstick/benchmark/scenarios/parser/parser.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 41
-rw-r--r--  yardstick/cmd/commands/report.py | 23
-rw-r--r--  yardstick/common/html_template.py | 124
-rw-r--r--  yardstick/common/nsb_report.css | 29
-rw-r--r--  yardstick/common/nsb_report.html.j2 | 82
-rw-r--r--  yardstick/common/nsb_report.js | 146
-rw-r--r--  yardstick/common/report.html.j2 | 184
-rw-r--r--  yardstick/common/utils.py | 18
-rw-r--r--  yardstick/network_services/constants.py | 3
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py | 228
-rw-r--r--  yardstick/network_services/pipeline.py | 2
-rw-r--r--  yardstick/network_services/traffic_profile/__init__.py | 1
-rw-r--r--  yardstick/network_services/traffic_profile/base.py | 4
-rw-r--r--  yardstick/network_services/traffic_profile/http_ixload.py | 65
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 53
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 4
-rw-r--r--  yardstick/network_services/traffic_profile/prox_irq.py | 48
-rw-r--r--  yardstick/network_services/traffic_profile/prox_profile.py | 4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py | 311
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_irq.py | 200
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py | 34
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 43
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 368
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py | 199
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_model.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py | 33
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py | 14
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_report.py | 194
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_iteration.py | 45
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py | 73
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 40
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 17
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py | 172
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_base.py | 16
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py | 57
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 51
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py | 39
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py | 55
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py | 457
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py | 828
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py | 57
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 49
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py | 20
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py | 485
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py | 73
53 files changed, 4500 insertions(+), 796 deletions(-)
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index 1004c62d1..aa5fdd391 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -45,7 +45,7 @@ VM_TEMPLATE = """
<vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
{cputune}
<os>
- <type arch="x86_64" machine="pc-i440fx-xenial">hvm</type>
+ <type arch="x86_64" machine="{machine}">hvm</type>
<boot dev="hd" />
</os>
<features>
@@ -107,7 +107,7 @@ version: 2
ethernets:
ens3:
match:
- mac_address: {mac_address}
+ macaddress: {mac_address}
addresses:
- {ip_address}
EOF
@@ -161,7 +161,8 @@ class Libvirt(object):
return vm_pci
@classmethod
- def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str):
+ def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str,
+ queues):
"""Add a DPDK OVS 'interface' XML node in 'devices' node
<devices>
@@ -203,7 +204,7 @@ class Libvirt(object):
model.set('type', 'virtio')
driver = ET.SubElement(interface, 'driver')
- driver.set('queues', '4')
+ driver.set('queues', str(queues))
host = ET.SubElement(driver, 'host')
host.set('mrg_rxbuf', 'off')
@@ -305,6 +306,7 @@ class Libvirt(object):
cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)
cputune = extra_spec.get('cputune', '')
+ machine = extra_spec.get('machine_type', 'pc-i440fx-xenial')
mac = StandaloneContextHelper.get_mac_address(0x00)
image = cls.create_snapshot_qemu(connection, index,
flavor.get("images", None))
@@ -315,7 +317,8 @@ class Libvirt(object):
memory=memory, vcpu=vcpu, cpu=cpu,
numa_cpus=numa_cpus,
socket=socket, threads=threads,
- vm_image=image, cpuset=cpuset, cputune=cputune)
+ vm_image=image, cpuset=cpuset,
+ machine=machine, cputune=cputune)
# Add CD-ROM device
vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml)
@@ -570,6 +573,8 @@ class StandaloneContextHelper(object):
# Update image with public key
key_filename = node.get('key_filename')
ip_netmask = "{0}/{1}".format(node.get('ip'), node.get('netmask'))
+ ip_netmask = "{0}/{1}".format(node.get('ip'),
+ IPNetwork(ip_netmask).prefixlen)
Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename, mac,
ip_netmask)
return node
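
The model.py hunks above make the libvirt machine type and the vhost queue count configurable, switch the netplan key to the expected spelling 'macaddress', and convert a dotted-quad netmask into a prefix length before it is written into the cloud-init network data. A minimal sketch of that conversion, assuming IPNetwork is netaddr's class (the hunk implies it is already imported in model.py); illustrative only, not part of the patch:

    from netaddr import IPNetwork

    node = {'ip': '10.100.20.4', 'netmask': '255.255.255.0'}
    ip_netmask = "{0}/{1}".format(node.get('ip'), node.get('netmask'))
    # '10.100.20.4/255.255.255.0' -> prefix length 24
    ip_netmask = "{0}/{1}".format(node.get('ip'),
                                  IPNetwork(ip_netmask).prefixlen)
    print(ip_netmask)  # 10.100.20.4/24
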
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index 3ad1097b0..c6e19f614 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -24,6 +24,7 @@ from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
+from yardstick.common import utils as common_utils
from yardstick.network_services import utils
from yardstick.network_services.utils import get_nsb_option
@@ -73,6 +74,11 @@ class OvsDpdkContext(base.Context):
self.wait_for_vswitchd = 10
super(OvsDpdkContext, self).__init__()
+ def get_dpdk_socket_mem_size(self, socket_id):
+ """Get the size of OvS DPDK socket memory (Mb)"""
+ ram = self.ovs_properties.get("ram", {})
+ return ram.get('socket_%d' % (socket_id), 2048)
+
def init(self, attrs):
"""initializes itself from the supplied arguments"""
super(OvsDpdkContext, self).init(attrs)
@@ -133,9 +139,6 @@ class OvsDpdkContext(base.Context):
if pmd_cpu_mask:
pmd_mask = pmd_cpu_mask
- socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048")
- socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048")
-
ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"
@@ -143,16 +146,23 @@ class OvsDpdkContext(base.Context):
if lcore_mask:
lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)
+ max_idle = self.ovs_properties.get("max_idle", '')
+ if max_idle:
+ max_idle = ovs_other_config.format("", "max-idle=%s" % max_idle)
+
cmd_list = [
"mkdir -p /usr/local/var/run/openvswitch",
"mkdir -p {}".format(os.path.dirname(log_path)),
- "ovsdb-server --remote=punix:/{0}/{1} --pidfile --detach".format(vpath,
- ovs_sock_path),
+ ("ovsdb-server --remote=punix:/{0}/{1} --remote=ptcp:6640"
+ " --pidfile --detach").format(vpath, ovs_sock_path),
ovs_other_config.format("--no-wait ", "dpdk-init=true"),
- ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)),
+ ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%d,%d'" % (
+ self.get_dpdk_socket_mem_size(0),
+ self.get_dpdk_socket_mem_size(1))),
lcore_mask,
detach_cmd.format(vpath, ovs_sock_path, log_path),
ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
+ max_idle,
]
for cmd in cmd_list:
@@ -162,12 +172,12 @@ class OvsDpdkContext(base.Context):
def setup_ovs_bridge_add_flows(self):
dpdk_args = ""
- dpdk_list = []
vpath = self.ovs_properties.get("vpath", "/usr/local")
version = self.ovs_properties.get('version', {})
ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
- 'set Interface {port} type={type_}{dpdk_args}{dpdk_rxq}')
+ 'set Interface {port} type={type_}{dpdk_args}'
+ '{dpdk_rxq}{pmd_rx_aff}')
chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'
cmd_list = [
@@ -176,26 +186,43 @@ class OvsDpdkContext(base.Context):
'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
format(MAIN_BRIDGE)
]
- dpdk_rxq = " options:n_rxq={queue}".format(
- queue=self.ovs_properties.get("queues", 1))
+ dpdk_rxq = ""
+ queues = self.ovs_properties.get("queues")
+ if queues:
+ dpdk_rxq = " options:n_rxq={queue}".format(queue=queues)
- ordered_network = collections.OrderedDict(self.networks)
+ # Sorting the array to make sure we execute dpdk0... in the order
+ ordered_network = collections.OrderedDict(
+ sorted(self.networks.items(), key=lambda t: t[1].get('port_num', 0)))
+ pmd_rx_aff_ports = self.ovs_properties.get("dpdk_pmd-rxq-affinity", {})
for index, vnf in enumerate(ordered_network.values()):
if ovs_ver >= [2, 7, 0]:
dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
- dpdk_list.append(ovs_add_port.format(
+ affinity = pmd_rx_aff_ports.get(vnf.get("port_num", -1), "")
+ if affinity:
+ pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
+ '"{affinity}"'.format(affinity=affinity)
+ else:
+ pmd_rx_aff = ""
+ cmd_list.append(ovs_add_port.format(
br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
- type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq))
-
- # Sorting the array to make sure we execute dpdk0... in the order
- list.sort(dpdk_list)
- cmd_list.extend(dpdk_list)
+ type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq,
+ pmd_rx_aff=pmd_rx_aff))
# Need to do two for loop to maintain the dpdk/vhost ports.
+ pmd_rx_aff_ports = self.ovs_properties.get("vhost_pmd-rxq-affinity",
+ {})
for index, _ in enumerate(ordered_network):
+ affinity = pmd_rx_aff_ports.get(index)
+ if affinity:
+ pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
+ '"{affinity}"'.format(affinity=affinity)
+ else:
+ pmd_rx_aff = ""
cmd_list.append(ovs_add_port.format(
br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
- type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=""))
+ type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=dpdk_rxq,
+ pmd_rx_aff=pmd_rx_aff))
ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
format(MAIN_BRIDGE))
@@ -235,7 +262,6 @@ class OvsDpdkContext(base.Context):
def check_ovs_dpdk_env(self):
self.cleanup_ovs_dpdk_env()
- self._check_hugepages()
version = self.ovs_properties.get("version", {})
ovs_ver = version.get("ovs", self.DEFAULT_OVS)
@@ -375,6 +401,7 @@ class OvsDpdkContext(base.Context):
def _enable_interfaces(self, index, vfs, xml_str):
vpath = self.ovs_properties.get("vpath", "/usr/local")
+ queue = self.ovs_properties.get("queues", 1)
vf = self.networks[vfs[0]]
port_num = vf.get('port_num', 0)
vpci = utils.PciAddress(vf['vpci'].strip())
@@ -383,13 +410,20 @@ class OvsDpdkContext(base.Context):
vf['vpci'] = \
"{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
return model.Libvirt.add_ovs_interface(
- vpath, port_num, vf['vpci'], vf['mac'], xml_str)
+ vpath, port_num, vf['vpci'], vf['mac'], xml_str, queue)
def setup_ovs_dpdk_context(self):
nodes = []
self.configure_nics_for_ovs_dpdk()
+ hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
+ common_utils.setup_hugepages(self.connection, (hp_total_mb + \
+ self.get_dpdk_socket_mem_size(0) + \
+ self.get_dpdk_socket_mem_size(1)) * 1024)
+
+ self._check_hugepages()
+
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_ovs_%d.xml' % index
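
With these hunks the OVS-DPDK context provisions hugepages itself rather than assuming the host is pre-configured: it reserves enough memory for every guest plus the DPDK socket memory of both NUMA nodes, then calls _check_hugepages() to verify the allocation. A back-of-the-envelope sketch of the sizing, mirroring setup_ovs_dpdk_context() above with the visible defaults (illustrative values, not taken from a real pod file):

    vm_ram_mb = 4096      # vm_flavor 'ram' default
    num_servers = 2       # len(self.servers)
    socket0_mb = 2048     # get_dpdk_socket_mem_size(0) default
    socket1_mb = 2048     # get_dpdk_socket_mem_size(1) default

    total_kb = (vm_ram_mb * num_servers + socket0_mb + socket1_mb) * 1024
    print(total_kb)       # 12582912 kB, i.e. 6144 hugepages of 2048 kB
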
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index f1b67a2da..e037dd85a 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -21,6 +21,7 @@ from yardstick import ssh
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
+from yardstick.common import utils
from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.utils import PciAddress
@@ -222,6 +223,9 @@ class SriovContext(base.Context):
# 1 : modprobe host_driver with num_vfs
self.configure_nics_for_sriov()
+ hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
+ utils.setup_hugepages(self.connection, hp_total_mb * 1024)
+
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_sriov_%s.xml' % str(index)
diff --git a/yardstick/benchmark/core/report.py b/yardstick/benchmark/core/report.py
index 199602444..0bc392fe5 100644
--- a/yardstick/benchmark/core/report.py
+++ b/yardstick/benchmark/core/report.py
@@ -1,7 +1,7 @@
-#############################################################################
-# Copyright (c) 2017 Rajesh Kudaka
+##############################################################################
+# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+# Copyright (c) 2018 Intel Corporation.
#
-# Author: Rajesh Kudaka 4k.rajesh@gmail.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -10,33 +10,82 @@
""" Handler for yardstick command 'report' """
-from __future__ import print_function
-
-from __future__ import absolute_import
-
import ast
import re
import uuid
+import jinja2
from api.utils import influx
-
-from django.conf import settings
-from django.template import Context
-from django.template import Template
-
from oslo_utils import encodeutils
from oslo_utils import uuidutils
from yardstick.common import constants as consts
-from yardstick.common.html_template import template
from yardstick.common.utils import cliargs
-settings.configure()
+
+class JSTree(object):
+ """Data structure to parse data for use with the JS library jsTree"""
+ def __init__(self):
+ self._created_nodes = ['#']
+ self.jstree_data = []
+
+ def _create_node(self, _id):
+ """Helper method for format_for_jstree to create each node.
+
+ Creates the node (and any required parents) and keeps track
+ of the created nodes.
+
+ :param _id: (string) id of the node to be created
+ :return: None
+ """
+ components = _id.split(".")
+
+ if len(components) == 1:
+ text = components[0]
+ parent_id = "#"
+ else:
+ text = components[-1]
+ parent_id = ".".join(components[:-1])
+ # make sure the parent has been created
+ if not parent_id in self._created_nodes:
+ self._create_node(parent_id)
+
+ self.jstree_data.append({"id": _id, "text": text, "parent": parent_id})
+ self._created_nodes.append(_id)
+
+ def format_for_jstree(self, data):
+ """Format the data into the required format for jsTree.
+
+ The data format expected is a list of key-value pairs which represent
+ the data and label for each metric e.g.:
+
+ [{'data': [0, ], 'label': 'tg__0.DropPackets'},
+ {'data': [548, ], 'label': 'tg__0.LatencyAvg.5'},]
+
+ This data is converted into the format required for jsTree to group and
+    display the metrics in a hierarchical fashion, including creating a
+ number of parent nodes e.g.::
+
+ [{"id": "tg__0", "text": "tg__0", "parent": "#"},
+ {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"},
+ {"id": "tg__0.LatencyAvg", "text": "LatencyAvg", "parent": "tg__0"},
+ {"id": "tg__0.LatencyAvg.5", "text": "5", "parent": "tg__0.LatencyAvg"},]
+
+ :param data: (list) data to be converted
+ :return: list
+ """
+ self._created_nodes = ['#']
+ self.jstree_data = []
+
+ for item in data:
+ self._create_node(item["label"])
+
+ return self.jstree_data
class Report(object):
"""Report commands.
- Set of commands to manage benchmark tasks.
+ Set of commands to manage reports.
"""
def __init__(self):
@@ -64,7 +113,7 @@ class Report(object):
if query_exec:
return query_exec
else:
- raise KeyError("Task ID or Test case not found..")
+ raise KeyError("Test case not found.")
def _get_tasks(self):
task_cmd = "select * from \"%s\" where task_id= '%s'"
@@ -73,12 +122,14 @@ class Report(object):
if query_exec:
return query_exec
else:
- raise KeyError("Task ID or Test case not found..")
+ raise KeyError("Task ID or Test case not found.")
- @cliargs("task_id", type=str, help=" task id", nargs=1)
- @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
- def generate(self, args):
- """Start report generation."""
+ def _generate_common(self, args):
+ """Actions that are common to both report formats.
+
+ Create the necessary data structure for rendering
+ the report templates.
+ """
self._validate(args.yaml_name[0], args.task_id[0])
self.db_fieldkeys = self._get_fieldkeys()
@@ -86,7 +137,7 @@ class Report(object):
self.db_task = self._get_tasks()
field_keys = []
- temp_series = []
+ datasets = []
table_vals = {}
field_keys = [encodeutils.to_utf8(field['fieldKey'])
@@ -94,35 +145,77 @@ class Report(object):
for key in field_keys:
self.Timestamp = []
- series = {}
values = []
for task in self.db_task:
task_time = encodeutils.to_utf8(task['time'])
if not isinstance(task_time, str):
task_time = str(task_time, 'utf8')
+ if not isinstance(key, str):
key = str(key, 'utf8')
task_time = task_time[11:]
head, _, tail = task_time.partition('.')
task_time = head + "." + tail[:6]
self.Timestamp.append(task_time)
if task[key] is None:
- values.append('')
- elif isinstance(task[key], (int, float)) is True:
+ values.append(None)
+ elif isinstance(task[key], (int, float)):
values.append(task[key])
else:
values.append(ast.literal_eval(task[key]))
+ datasets.append({'label': key, 'data': values})
table_vals['Timestamp'] = self.Timestamp
table_vals[key] = values
- series['name'] = key
- series['data'] = values
- temp_series.append(series)
-
- Template_html = Template(template)
- Context_html = Context({"series": temp_series,
- "Timestamp": self.Timestamp,
- "task_id": self.task_id,
- "table": table_vals})
+
+ return datasets, table_vals
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def generate(self, args):
+ """Start report generation."""
+ datasets, table_vals = self._generate_common(args)
+
+ template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
+ template_environment = jinja2.Environment(
+ autoescape=False,
+ loader=jinja2.FileSystemLoader(template_dir))
+
+ context = {
+ "datasets": datasets,
+ "Timestamps": self.Timestamp,
+ "task_id": self.task_id,
+ "table": table_vals,
+ }
+
+ template_html = template_environment.get_template("report.html.j2")
+
+ with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
+ file_open.write(template_html.render(context))
+
+ print("Report generated. View %s" % consts.DEFAULT_HTML_FILE)
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def generate_nsb(self, args):
+ """Start NSB report generation."""
+ datasets, table_vals = self._generate_common(args)
+ jstree_data = JSTree().format_for_jstree(datasets)
+
+ template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
+ template_environment = jinja2.Environment(
+ autoescape=False,
+ loader=jinja2.FileSystemLoader(template_dir),
+ lstrip_blocks=True)
+
+ context = {
+ "Timestamps": self.Timestamp,
+ "task_id": self.task_id,
+ "table": table_vals,
+ "jstree_nodes": jstree_data,
+ }
+
+ template_html = template_environment.get_template("nsb_report.html.j2")
+
with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
- file_open.write(Template_html.render(Context_html))
+ file_open.write(template_html.render(context))
- print("Report generated. View /tmp/yardstick.htm")
+ print("Report generated. View %s" % consts.DEFAULT_HTML_FILE)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 1dfd6c31e..477dbcc57 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -11,6 +11,7 @@ import sys
import os
from collections import OrderedDict
+import six
import yaml
import atexit
import ipaddress
@@ -313,7 +314,7 @@ class Task(object): # pragma: no cover
return {k: self._parse_options(v) for k, v in op.items()}
elif isinstance(op, list):
return [self._parse_options(v) for v in op]
- elif isinstance(op, str):
+ elif isinstance(op, six.string_types):
return self.outputs.get(op[1:]) if op.startswith('$') else op
else:
return op
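
The six.string_types check matters because scenario options loaded from YAML can be unicode objects under Python 2, which a plain isinstance(op, str) test would miss. A tiny illustration:

    import six

    op = u'$output_key'
    isinstance(op, str)               # False on Python 2, True on Python 3
    isinstance(op, six.string_types)  # True on both
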
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 4c88f3671..58ab06a32 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -96,6 +96,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
except Exception: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception("")
+ raise
else:
if result:
# add timeout for put so we don't block test
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 20fff61ed..5ac51cdfc 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -151,6 +151,26 @@ class NetworkServiceTestCase(scenario_base.Scenario):
return options.get('duration',
tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
+ def _key_list_to_dict(self, key, value_list):
+ value_dict = {}
+ try:
+ for index, count in enumerate(value_list[key]):
+ value_dict["{}_{}".format(key, index)] = count
+ except KeyError:
+ value_dict = {}
+
+ return value_dict
+
+ def _get_simulated_users(self):
+ users = self.scenario_cfg.get("options", {}).get("simulated_users", {})
+ simulated_users = self._key_list_to_dict("uplink", users)
+ return {"simulated_users": simulated_users}
+
+ def _get_page_object(self):
+ objects = self.scenario_cfg.get("options", {}).get("page_object", {})
+ page_object = self._key_list_to_dict("uplink", objects)
+ return {"page_object": page_object}
+
def _fill_traffic_profile(self):
tprofile = self._get_traffic_profile()
extra_args = self.scenario_cfg.get('extra_args', {})
@@ -160,9 +180,19 @@ class NetworkServiceTestCase(scenario_base.Scenario):
tprofile_base.TrafficProfile.UPLINK: {},
tprofile_base.TrafficProfile.DOWNLINK: {},
'extra_args': extra_args,
- 'duration': self._get_duration()}
+ 'duration': self._get_duration(),
+ 'page_object': self._get_page_object(),
+ 'simulated_users': self._get_simulated_users()}
traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
- self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
+
+ traffic_config = \
+ self.scenario_cfg.get("options", {}).get("traffic_config", {})
+
+ traffic_vnfd.setdefault("traffic_profile", {})
+ traffic_vnfd["traffic_profile"].update(traffic_config)
+
+ self.traffic_profile = \
+ tprofile_base.TrafficProfile.get(traffic_vnfd)
def _get_topology(self):
topology = self.scenario_cfg["topology"]
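
The new helpers expand list-valued scenario options such as simulated_users and page_object into indexed dictionaries that the traffic-profile templating can substitute, and traffic_config from the scenario now overrides the profile's traffic_profile section. A standalone sketch of the list-to-dict conversion (hypothetical values):

    def key_list_to_dict(key, value_list):
        # mirrors NetworkServiceTestCase._key_list_to_dict() above
        try:
            return {"{}_{}".format(key, i): v
                    for i, v in enumerate(value_list[key])}
        except KeyError:
            return {}

    print(key_list_to_dict("uplink", {"uplink": [100, 200]}))
    # {'uplink_0': 100, 'uplink_1': 200}
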
diff --git a/yardstick/benchmark/scenarios/parser/parser.py b/yardstick/benchmark/scenarios/parser/parser.py
index 5b2b49c2c..a0f8e9e72 100644
--- a/yardstick/benchmark/scenarios/parser/parser.py
+++ b/yardstick/benchmark/scenarios/parser/parser.py
@@ -20,7 +20,7 @@ class Parser(base.Scenario):
"""running Parser Yang-to-Tosca module as a tool
validating output against expected outcome
- more info https://wiki.opnfv.org/parser
+ more info https://wiki.opnfv.org/display/parser
"""
__scenario_type__ = "Parser"
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index e4c72dc8f..5b8b00075 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -102,13 +102,14 @@ class StorPerf(base.Scenario):
setup_res = requests.post('http://%s:5000/api/v1.0/configurations'
% self.target, json=env_args)
- setup_res_content = jsonutils.loads(
- setup_res.content)
if setup_res.status_code != 200:
- raise RuntimeError("Failed to create a stack, error message:",
- setup_res_content["message"])
+ LOG.error("Failed to create stack. %s: %s",
+ setup_res.status_code, setup_res.content)
+ raise RuntimeError("Failed to create stack. %s: %s" %
+ (setup_res.status_code, setup_res.content))
elif setup_res.status_code == 200:
+ setup_res_content = jsonutils.loads(setup_res.content)
LOG.info("stack_id: %s", setup_res_content["stack_id"])
while not self._query_setup_state():
@@ -122,14 +123,15 @@ class StorPerf(base.Scenario):
def _query_job_state(self, job_id):
"""Query the status of the supplied job_id and report on metrics"""
LOG.info("Fetching report for %s...", job_id)
- report_res = requests.get('http://{}:5000/api/v1.0/jobs'.format
- (self.target),
+ report_res = requests.get('http://%s:5000/api/v1.0/jobs' % self.target,
params={'id': job_id, 'type': 'status'})
report_res_content = jsonutils.loads(
report_res.content)
if report_res.status_code != 200:
+ LOG.error("Failed to fetch report, error message: %s",
+ report_res_content["message"])
raise RuntimeError("Failed to fetch report, error message:",
report_res_content["message"])
else:
@@ -186,15 +188,15 @@ class StorPerf(base.Scenario):
LOG.info("Starting a job with parameters %s", job_args)
job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
- api_version),
- json=job_args)
-
- job_res_content = jsonutils.loads(job_res.content)
+ api_version), json=job_args)
if job_res.status_code != 200:
- raise RuntimeError("Failed to start a job, error message:",
- job_res_content["message"])
+ LOG.error("Failed to start job. %s: %s",
+ job_res.status_code, job_res.content)
+ raise RuntimeError("Failed to start job. %s: %s" %
+ (job_res.status_code, job_res.content))
elif job_res.status_code == 200:
+ job_res_content = jsonutils.loads(job_res.content)
job_id = job_res_content["job_id"]
LOG.info("Started job id: %s...", job_id)
@@ -225,8 +227,8 @@ class StorPerf(base.Scenario):
LOG.info("Job %s completed with steady state %s",
job_id, steady_state)
- result_res = requests.get('http://%s:5000/api/v1.0/jobs?'
- 'type=status&id=%s' % (self.target, job_id))
+ result_res = requests.get('http://%s:5000/api/v1.0/jobs?id=%s' %
+ (self.target, job_id))
result_res_content = jsonutils.loads(
result_res.content)
result.update(result_res_content)
@@ -247,13 +249,14 @@ class StorPerf(base.Scenario):
job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
self.target, json=job_args)
- job_res_content = jsonutils.loads(job_res.content)
if job_res.status_code != 200:
- raise RuntimeError(
- "Failed to start initialization job, error message:",
- job_res_content["message"])
+ LOG.error("Failed to start initialization job, error message: %s: %s",
+ job_res.status_code, job_res.content)
+ raise RuntimeError("Failed to start initialization job, error message: %s: %s" %
+ (job_res.status_code, job_res.content))
elif job_res.status_code == 200:
+ job_res_content = jsonutils.loads(job_res.content)
job_id = job_res_content["job_id"]
LOG.info("Started initialization as job id: %s...", job_id)
@@ -271,6 +274,8 @@ class StorPerf(base.Scenario):
if teardown_res.status_code == 400:
teardown_res_content = jsonutils.loads(
teardown_res.json_data)
+ LOG.error("Failed to reset environment, error message: %s",
+ teardown_res_content['message'])
raise RuntimeError("Failed to reset environment, error message:",
teardown_res_content['message'])
diff --git a/yardstick/cmd/commands/report.py b/yardstick/cmd/commands/report.py
index 47bf22a1f..4f057a05d 100644
--- a/yardstick/cmd/commands/report.py
+++ b/yardstick/cmd/commands/report.py
@@ -1,7 +1,7 @@
##############################################################################
-# Copyright (c) 2017 Rajesh Kudaka.
+# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+# Copyright (c) 2018 Intel Corporation.
#
-# Author: Rajesh Kudaka (4k.rajesh@gmail.com)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -10,11 +10,7 @@
""" Handler for yardstick command 'report' """
-from __future__ import print_function
-
-from __future__ import absolute_import
-
-from yardstick.benchmark.core.report import Report
+from yardstick.benchmark.core import report
from yardstick.cmd.commands import change_osloobj_to_paras
from yardstick.common.utils import cliargs
@@ -22,12 +18,19 @@ from yardstick.common.utils import cliargs
class ReportCommands(object): # pragma: no cover
"""Report commands.
- Set of commands to manage benchmark tasks.
+ Set of commands to manage reports.
"""
@cliargs("task_id", type=str, help=" task id", nargs=1)
@cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
def do_generate(self, args):
- """Start a benchmark scenario."""
+ """Generate a report."""
+ param = change_osloobj_to_paras(args)
+ report.Report().generate(param)
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def do_generate_nsb(self, args):
+ """Generate a report using the NSB template."""
param = change_osloobj_to_paras(args)
- Report().generate(param)
+ report.Report().generate_nsb(param)
diff --git a/yardstick/common/html_template.py b/yardstick/common/html_template.py
index e17c76637..c15dd8238 100644
--- a/yardstick/common/html_template.py
+++ b/yardstick/common/html_template.py
@@ -8,130 +8,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
#############################################################################
-template = """
-<html>
-<body>
-<head>
-<meta charset="utf-8">
-<meta name="viewport" content="width=device-width, initial-scale=1">
-<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
-/css/bootstrap.min.css">
-<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1\
-/jquery.min.js"></script>
-<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
-/js/bootstrap.min.js"></script>
-<script src="https://code.highcharts.com/highcharts.js"></script>
-<script src="jquery.min.js"></script>
-<script src="highcharts.js"></script>
-</head>
-<style>
-
-table{
- overflow-y: scroll;
- height: 360px;
- display: block;
- }
-
- header,h3{
- font-family:Frutiger;
- clear: left;
- text-align: center;
-}
-</style>
-<header class="jumbotron text-center">
- <h1>Yardstick User Interface</h1>
- <h4>Report of {{task_id}} Generated</h4>
-</header>
-
-<div class="container">
- <div class="row">
- <div class="col-md-4">
- <div class="table-responsive" >
- <table class="table table-hover" > </table>
- </div>
- </div>
- <div class="col-md-8" >
- <div id="container" ></div>
- </div>
- </div>
-</div>
-<script>
- var arr, tab, th, tr, td, tn, row, col, thead, tbody;
- arr={{table|safe}}
- tab = document.getElementsByTagName('table')[0];
- thead=document.createElement('thead');
- tr = document.createElement('tr');
- for(row=0;row<Object.keys(arr).length;row++)
- {
- th = document.createElement('th');
- tn = document.createTextNode(Object.keys(arr).sort()[row]);
- th.appendChild(tn);
- tr.appendChild(th);
- thead.appendChild(tr);
- }
- tab.appendChild(thead);
- tbody=document.createElement('tbody');
-
- for (col = 0; col < arr[Object.keys(arr)[0]].length; col++){
- tr = document.createElement('tr');
- for(row=0;row<Object.keys(arr).length;row++)
- {
- td = document.createElement('td');
- tn = document.createTextNode(arr[Object.keys(arr).sort()[row]][col]);
- td.appendChild(tn);
- tr.appendChild(td);
- }
- tbody.appendChild(tr);
- }
-tab.appendChild(tbody);
-
-</script>
-
-<script language="JavaScript">
-
-$(function() {
- $('#container').highcharts({
- title: {
- text: 'Yardstick test results',
- x: -20 //center
- },
- subtitle: {
- text: 'Report of {{task_id}} Task Generated',
- x: -20
- },
- xAxis: {
- title: {
- text: 'Timestamp'
- },
- categories:{{Timestamp|safe}}
- },
- yAxis: {
-
- plotLines: [{
- value: 0,
- width: 1,
- color: '#808080'
- }]
- },
- tooltip: {
- valueSuffix: ''
- },
- legend: {
- layout: 'vertical',
- align: 'right',
- verticalAlign: 'middle',
- borderWidth: 0
- },
- series: {{series|safe}}
- });
-});
-
-</script>
-
-
-</body>
-</html>"""
-
report_template = """
<html>
<head>
diff --git a/yardstick/common/nsb_report.css b/yardstick/common/nsb_report.css
new file mode 100644
index 000000000..2beb91c53
--- /dev/null
+++ b/yardstick/common/nsb_report.css
@@ -0,0 +1,29 @@
+/*******************************************************************************
+ * Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+ * Copyright (c) 2018 Intel Corporation.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Apache License, Version 2.0
+ * which accompanies this distribution, and is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ ******************************************************************************/
+
+body {
+ font-size: 16pt;
+}
+
+table {
+ overflow-y: scroll;
+ height: 360px;
+ display: block;
+}
+
+header {
+ font-family: Frutiger, "Helvetica Neue", Helvetica, Arial, sans-serif;
+ clear: left;
+ text-align: center;
+}
+
+.control-pane {
+ font-size: 10pt;
+}
diff --git a/yardstick/common/nsb_report.html.j2 b/yardstick/common/nsb_report.html.j2
new file mode 100644
index 000000000..a3087d746
--- /dev/null
+++ b/yardstick/common/nsb_report.html.j2
@@ -0,0 +1,82 @@
+<!DOCTYPE html>
+<html>
+
+<!--
+ Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+ Copyright (c) 2018 Intel Corporation.
+
+ All rights reserved. This program and the accompanying materials
+ are made available under the terms of the Apache License, Version 2.0
+ which accompanies this distribution, and is available at
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+ <head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jstree/3.3.7/themes/default/style.min.css">
+ <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
+ <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/jstree/3.3.7/jstree.min.js"></script>
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.3/Chart.bundle.min.js"></script>
+ <style>
+ {% include 'nsb_report.css' %}
+ </style>
+ <script>
+ {% include 'nsb_report.js' %}
+ </script>
+ </head>
+
+ <body>
+ <div class="container" style="width:80%">
+ <div class="row">
+ <header class="jumbotron">
+ <h1>Yardstick User Interface</h1>
+ <h4>Report of {{task_id}} Generated</h4>
+ </header>
+ </div>
+ <div class="row">
+ <div class="col-md-2 control-pane">
+ <div id="data_selector"></div>
+ </div>
+ <div class="col-md-10 data-pane">
+ <canvas id="cnvGraph" style="width: 100%; height: 500px"></canvas>
+ </div>
+ </div>
+ <div class="row">
+ <div class="col-md-12 table-responsive">
+ <table class="table table-hover"></table>
+ </div>
+ </div>
+ </div>
+
+ <script>
+ var arr, jstree_data, timestamps;
+ arr = {{table|safe}};
+ timestamps = {{Timestamps|safe}};
+ jstree_data = {{jstree_nodes|safe}};
+
+ $(function() {
+ create_table(arr);
+ create_tree(jstree_data);
+ var objGraph = create_graph($('#cnvGraph'), timestamps);
+
+ $('#data_selector').on('check_node.jstree uncheck_node.jstree', function(e, data) {
+ var selected_datasets = [];
+ for (var i = 0; i < data.selected.length; i++) {
+ var node = data.instance.get_node(data.selected[i]);
+ if (node.children.length == 0) {
+ var dataset = {
+ label: node.id,
+ data: arr[node.id],
+ };
+ selected_datasets.push(dataset);
+ }
+ }
+ update_graph(objGraph, selected_datasets);
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/yardstick/common/nsb_report.js b/yardstick/common/nsb_report.js
new file mode 100644
index 000000000..cc5e14ee7
--- /dev/null
+++ b/yardstick/common/nsb_report.js
@@ -0,0 +1,146 @@
+/*******************************************************************************
+ * Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+ * Copyright (c) 2018 Intel Corporation.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Apache License, Version 2.0
+ * which accompanies this distribution, and is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ ******************************************************************************/
+
+var None = null;
+
+function create_tree(jstree_data)
+{
+ $('#data_selector').jstree({
+ plugins: ['checkbox'],
+ checkbox: {
+ three_state: false,
+ whole_node: true,
+ tie_selection: false,
+ },
+ core: {
+ themes: {
+ icons: false,
+ stripes: true,
+ },
+ data: jstree_data,
+ },
+ });
+}
+
+// may need to pass timestamps too...
+function create_table(table_data)
+{
+ var tab, tr, td, tn, tbody, keys, key, curr_data, val;
+ // create table
+ tab = document.getElementsByTagName('table')[0];
+ tbody = document.createElement('tbody');
+ // for each metric
+ keys = Object.keys(table_data);
+ for (var i = 0; i < keys.length; i++) {
+ key = keys[i];
+ tr = document.createElement('tr');
+ td = document.createElement('td');
+ tn = document.createTextNode(key);
+ td.appendChild(tn);
+ tr.appendChild(td);
+ // add each piece of data as its own column
+ curr_data = table_data[key];
+ for (var j = 0; j < curr_data.length; j++) {
+ val = curr_data[j];
+ td = document.createElement('td');
+ tn = document.createTextNode(val === None ? '' : val);
+ td.appendChild(tn);
+ tr.appendChild(td);
+ }
+ tbody.appendChild(tr);
+ }
+ tab.appendChild(tbody);
+}
+
+function create_graph(cnvGraph, timestamps)
+{
+ return new Chart(cnvGraph, {
+ type: 'line',
+ data: {
+ labels: timestamps,
+ datasets: [],
+ },
+ options: {
+ elements: {
+ line: {
+ borderWidth: 2,
+ fill: false,
+ tension: 0,
+ },
+ },
+ scales: {
+ xAxes: [{
+ type: 'category',
+ }],
+ yAxes: [{
+ type: 'linear',
+ }],
+ },
+ tooltips: {
+ mode: 'point',
+ intersect: true,
+ },
+ hover: {
+ mode: 'index',
+ intersect: false,
+ animationDuration: 0,
+ },
+ legend: {
+ position: 'bottom',
+ labels: {
+ usePointStyle: true,
+ },
+ },
+ animation: {
+ duration: 0,
+ },
+ responsive: true,
+ responsiveAnimationDuration: 0,
+ maintainAspectRatio: false,
+ },
+ });
+}
+
+function update_graph(objGraph, datasets)
+{
+ var colors = [
+ '#FF0000', // Red
+ '#228B22', // ForestGreen
+ '#FF8C00', // DarkOrange
+ '#00008B', // DarkBlue
+ '#FF00FF', // Fuchsia
+ '#9ACD32', // YellowGreen
+ '#FFD700', // Gold
+ '#4169E1', // RoyalBlue
+ '#A0522D', // Sienna
+ '#20B2AA', // LightSeaGreen
+ '#8A2BE2', // BlueViolet
+ ];
+
+ var points = [
+ {s: 'circle', r: 3},
+ {s: 'rect', r: 4},
+ {s: 'triangle', r: 4},
+ {s: 'star', r: 4},
+ {s: 'rectRot', r: 5},
+ ];
+
+ datasets.forEach(function(d, i) {
+ var color = colors[i % colors.length];
+ var point = points[i % points.length];
+ d.borderColor = color;
+ d.backgroundColor = color;
+ d.pointStyle = point.s;
+ d.pointRadius = point.r;
+ d.pointHoverRadius = point.r + 1;
+ });
+ objGraph.data.datasets = datasets;
+ objGraph.update();
+}
diff --git a/yardstick/common/report.html.j2 b/yardstick/common/report.html.j2
new file mode 100644
index 000000000..1dc7b1db1
--- /dev/null
+++ b/yardstick/common/report.html.j2
@@ -0,0 +1,184 @@
+<!DOCTYPE html>
+<html>
+
+<!--
+ Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+ Copyright (c) 2018 Intel Corporation.
+
+ All rights reserved. This program and the accompanying materials
+ are made available under the terms of the Apache License, Version 2.0
+ which accompanies this distribution, and is available at
+ http://www.apache.org/licenses/LICENSE-2.0
+-->
+
+ <head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
+ <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
+ <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.3/Chart.bundle.min.js"></script>
+
+ <style>
+ table {
+ overflow-y: scroll;
+ height: 360px;
+ display: block;
+ }
+ header {
+ font-family: Frutiger, "Helvetica Neue", Helvetica, Arial, sans-serif;
+ clear: left;
+ text-align: center;
+ }
+ </style>
+ </head>
+
+ <body>
+ <header class="jumbotron text-center">
+ <h1>Yardstick User Interface</h1>
+ <h4>Report of {{task_id}} Generated</h4>
+ </header>
+
+ <div class="container">
+ <div class="row">
+ <div class="col-md-4">
+ <div class="table-responsive">
+ <table class="table table-hover"></table>
+ </div>
+ </div>
+ <div class="col-md-8">
+ <canvas id="cnvGraph" style="width: 100%; height: 500px"></canvas>
+ </div>
+ </div>
+ </div>
+
+ <script>
+ var None = null;
+ var arr, tab, th, tr, td, tn, row, col, thead, tbody, val;
+ arr = {{table|safe}};
+ tab = document.getElementsByTagName('table')[0];
+
+ thead = document.createElement('thead');
+ tr = document.createElement('tr');
+ for (col = 0; col < Object.keys(arr).length; col++) {
+ th = document.createElement('th');
+ tn = document.createTextNode(Object.keys(arr).sort()[col]);
+ th.appendChild(tn);
+ tr.appendChild(th);
+ }
+ thead.appendChild(tr);
+ tab.appendChild(thead);
+
+ tbody = document.createElement('tbody');
+ for (row = 0; row < arr[Object.keys(arr)[0]].length; row++) {
+ tr = document.createElement('tr');
+ for (col = 0; col < Object.keys(arr).length; col++) {
+ val = arr[Object.keys(arr).sort()[col]][row];
+ td = document.createElement('td');
+ tn = document.createTextNode(val === None ? '' : val);
+ td.appendChild(tn);
+ tr.appendChild(td);
+ }
+ tbody.appendChild(tr);
+ }
+ tab.appendChild(tbody);
+
+ $(function() {
+ var datasets = {{datasets|safe}};
+
+ var colors = [
+ '#FF0000', // Red
+ '#228B22', // ForestGreen
+ '#FF8C00', // DarkOrange
+ '#00008B', // DarkBlue
+ '#FF00FF', // Fuchsia
+ '#9ACD32', // YellowGreen
+ '#FFD700', // Gold
+ '#4169E1', // RoyalBlue
+ '#A0522D', // Sienna
+ '#20B2AA', // LightSeaGreen
+ '#8A2BE2', // BlueViolet
+ ];
+
+ var points = [
+ {s: 'circle', r: 3},
+ {s: 'rect', r: 4},
+ {s: 'triangle', r: 4},
+ {s: 'star', r: 4},
+ {s: 'rectRot', r: 5},
+ ];
+
+ datasets.forEach(function(d, i) {
+ var color = colors[i % colors.length];
+ var point = points[i % points.length];
+ d.borderColor = color;
+ d.backgroundColor = color;
+ d.pointStyle = point.s;
+ d.pointRadius = point.r;
+ d.pointHoverRadius = point.r + 1;
+ });
+
+ new Chart($('#cnvGraph'), {
+ type: 'line',
+ data: {
+ labels: {{Timestamps|safe}},
+ datasets: datasets,
+ },
+ options: {
+ elements: {
+ line: {
+ borderWidth: 2,
+ fill: false,
+ tension: 0,
+ },
+ },
+ title: {
+ text: [
+ 'Yardstick test results',
+ 'Report of {{task_id}} Task Generated',
+ ],
+ display: true,
+ },
+ scales: {
+ xAxes: [{
+ type: 'category',
+ scaleLabel: {
+ display: true,
+ labelString: 'Timestamp',
+ },
+ }],
+ yAxes: [{
+ type: 'linear',
+ scaleLabel: {
+ display: true,
+ labelString: 'Values',
+ },
+ }],
+ },
+ tooltips: {
+ mode: 'point',
+ intersect: true,
+ },
+ hover: {
+ mode: 'index',
+ intersect: false,
+ animationDuration: 0,
+ },
+ legend: {
+ position: 'right',
+ labels: {
+ usePointStyle: true,
+ },
+ },
+ animation: {
+ duration: 0,
+ },
+ responsive: true,
+ responsiveAnimationDuration: 0,
+ maintainAspectRatio: false,
+ },
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 31885c073..51313ef47 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -30,6 +30,7 @@ import subprocess
import sys
import time
import threading
+import math
import six
from flask import jsonify
@@ -499,6 +500,23 @@ def read_meminfo(ssh_client):
return output
+def setup_hugepages(ssh_client, size_kb):
+ """Setup needed number of hugepages for the size specified"""
+
+ NR_HUGEPAGES_PATH = '/proc/sys/vm/nr_hugepages'
+ meminfo = read_meminfo(ssh_client)
+ hp_size_kb = int(meminfo['Hugepagesize'])
+ hp_number = int(math.ceil(size_kb / float(hp_size_kb)))
+ ssh_client.execute(
+ 'echo %s | sudo tee %s' % (hp_number, NR_HUGEPAGES_PATH))
+ hp = six.BytesIO()
+ ssh_client.get_file_obj(NR_HUGEPAGES_PATH, hp)
+ hp_number_set = int(hp.getvalue().decode('utf-8').splitlines()[0])
+ logger.info('Hugepages size (kB): %s, number claimed: %s, number set: %s',
+ hp_size_kb, hp_number, hp_number_set)
+ return hp_size_kb, hp_number, hp_number_set
+
+
def find_relative_file(path, task_path):
"""
Find file in one of places: in abs of path or relative to a directory path,
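
setup_hugepages() rounds the requested amount up to a whole number of hugepages, writes it to /proc/sys/vm/nr_hugepages, and reads the file back so callers can log how many pages the kernel actually granted. A worked example with assumed values matching the OVS-DPDK context above:

    import math

    size_kb = 12288 * 1024   # requested: 12288 MB expressed in kB
    hp_size_kb = 2048        # 'Hugepagesize' reported by /proc/meminfo
    hp_number = int(math.ceil(size_kb / float(hp_size_kb)))
    print(hp_number)         # 6144 pages requested via nr_hugepages
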
diff --git a/yardstick/network_services/constants.py b/yardstick/network_services/constants.py
index 0064b4fc5..5a186be42 100644
--- a/yardstick/network_services/constants.py
+++ b/yardstick/network_services/constants.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,3 +17,4 @@ DEFAULT_VNF_TIMEOUT = 3600
PROCESS_JOIN_TIMEOUT = 3
ONE_GIGABIT_IN_BITS = 1000000000
NIC_GBPS_DEFAULT = 10
+RETRY_TIMEOUT = 5
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index 87e9dbf85..6645d45fe 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -44,8 +44,24 @@ ETHER_TYPE_802_1ad = '0x88a8'
TRAFFIC_STATUS_STARTED = 'started'
TRAFFIC_STATUS_STOPPED = 'stopped'
+PROTOCOL_STATUS_UP = 'up'
+PROTOCOL_STATUS_DOWN = ['down', 'notStarted']
+
SUPPORTED_PROTO = [PROTO_UDP]
+SUPPORTED_DSCP_CLASSES = [
+ 'defaultPHB',
+ 'classSelectorPHB',
+ 'assuredForwardingPHB',
+ 'expeditedForwardingPHB']
+
+SUPPORTED_TOS_FIELDS = [
+ 'precedence',
+ 'delay',
+ 'throughput',
+ 'reliability'
+]
+
class Vlan(object):
def __init__(self,
@@ -66,6 +82,7 @@ class IxNextgen(object): # pragma: no cover
PORT_STATS_NAME_MAP = {
"stat_name": 'Stat Name',
+ "port_name": 'Port Name',
"Frames_Tx": 'Frames Tx.',
"Valid_Frames_Rx": 'Valid Frames Rx.',
"Frames_Tx_Rate": 'Frames Tx. Rate',
@@ -82,6 +99,18 @@ class IxNextgen(object): # pragma: no cover
"Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
}
+ PPPOX_CLIENT_PER_PORT_NAME_MAP = {
+ 'subs_port': 'Port',
+ 'Sessions_Up': 'Sessions Up',
+ 'Sessions_Down': 'Sessions Down',
+ 'Sessions_Not_Started': 'Sessions Not Started',
+ 'Sessions_Total': 'Sessions Total'
+ }
+
+ PORT_STATISTICS = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
+ FLOW_STATISTICS = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
+ PPPOX_CLIENT_PER_PORT = '::ixNet::OBJ-/statistics/view:"PPPoX Client Per Port"'
+
@staticmethod
def get_config(tg_cfg):
card = []
@@ -177,6 +206,24 @@ class IxNextgen(object): # pragma: no cover
return self.ixnet.getAttribute(self.ixnet.getRoot() + 'traffic',
'-state')
+ def _get_protocol_status(self, proto):
+ """Get protocol status
+
+ :param proto: IxNet protocol str representation, e.g.:
+ '::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14'
+ :return: (list) protocol status: list of sessions protocol
+ statuses which include states 'up', 'down' and 'notStarted'
+ """
+ return self.ixnet.getAttribute(proto, '-sessionStatus')
+
+ def get_topology_device_groups(self, topology):
+ """Get list of device groups in topology
+
+ :param topology: (str) topology descriptor
+ :return: (list) list of device groups descriptors
+ """
+ return self.ixnet.getList(topology, 'deviceGroup')
+
def is_traffic_running(self):
"""Returns true if traffic state == TRAFFIC_STATUS_STARTED"""
return self._get_traffic_state() == TRAFFIC_STATUS_STARTED
@@ -185,6 +232,28 @@ class IxNextgen(object): # pragma: no cover
"""Returns true if traffic state == TRAFFIC_STATUS_STOPPED"""
return self._get_traffic_state() == TRAFFIC_STATUS_STOPPED
+ def is_protocols_running(self, protocols):
+ """Returns true if all protocols statuses are PROTOCOL_STATUS_UP
+
+ :param protocols: list of protocols str representations, e.g.:
+ ['::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14', ...]
+ :return: (bool) True if all protocols status is 'up', False if any
+ protocol status is 'down' or 'notStarted'
+ """
+ return all(session_status is PROTOCOL_STATUS_UP for proto in protocols
+ for session_status in self._get_protocol_status(proto))
+
+ def is_protocols_stopped(self, protocols):
+ """Returns true if all protocols statuses are in PROTOCOL_STATUS_DOWN
+
+ :param protocols: list of protocols str representations, e.g.:
+ ['::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14', ...]
+ :return: (bool) True if all protocols status is 'down' or 'notStarted',
+ False if any protocol status is 'up'
+ """
+ return all(session_status in PROTOCOL_STATUS_DOWN for proto in protocols
+ for session_status in self._get_protocol_status(proto))
+
@staticmethod
def _parse_framesize(framesize):
"""Parse "framesize" config param. to return a list of weighted pairs
@@ -345,7 +414,7 @@ class IxNextgen(object): # pragma: no cover
self._create_flow_groups(uplink_endpoints, downlink_endpoints)
self._setup_config_elements()
- def create_ipv4_traffic_model(self, uplink_topologies, downlink_topologies):
+ def create_ipv4_traffic_model(self, uplink_endpoints, downlink_endpoints):
"""Create a traffic item and the needed flow groups
Each flow group inside the traffic item (only one is present)
@@ -357,7 +426,7 @@ class IxNextgen(object): # pragma: no cover
FlowGroup4: uplink2 <- downlink2
"""
self._create_traffic_item('ipv4')
- self._create_flow_groups(uplink_topologies, downlink_topologies)
+ self._create_flow_groups(uplink_endpoints, downlink_endpoints)
self._setup_config_elements(False)
def _update_frame_mac(self, ethernet_descriptor, field, mac_address):
@@ -526,6 +595,8 @@ class IxNextgen(object): # pragma: no cover
or ipaddress.IPV4LENGTH
dstmask = traffic_param['outer_l3']['dstmask'] \
or ipaddress.IPV4LENGTH
+ priority = traffic_param['outer_l3']['priority']
+
if srcip:
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
@@ -534,6 +605,58 @@ class IxNextgen(object): # pragma: no cover
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
'dstIp', str(dstip), dstseed, dstmask, count)
+ if priority:
+ self._update_ipv4_priority(
+ self._get_stack_item(fg_id, PROTO_IPV4)[0], priority)
+
+ def _update_ipv4_priority(self, ip_descriptor, priority):
+ """Set the IPv4 priority in a config element stack IP field
+
+ :param ip_descriptor: (str) IP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"
+ :param priority: (dict) priority configuration from traffic profile, e.g.:
+ {'tos':
+ 'precedence': [1, 4, 7]
+ }
+ """
+ if priority.get('raw'):
+ priority_field = self._get_field_in_stack_item(ip_descriptor,
+ 'priority.raw')
+ self._set_priority_field(priority_field, priority['raw'])
+
+ elif priority.get('dscp'):
+ for field, value in priority['dscp'].items():
+ if field in SUPPORTED_DSCP_CLASSES:
+ priority_field = self._get_field_in_stack_item(
+ ip_descriptor,
+ 'priority.ds.phb.{field}.{field}'.format(field=field))
+ self._set_priority_field(priority_field, value)
+
+ elif priority.get('tos'):
+ for field, value in priority['tos'].items():
+ if field in SUPPORTED_TOS_FIELDS:
+ priority_field = self._get_field_in_stack_item(
+ ip_descriptor, 'priority.tos.' + field)
+ self._set_priority_field(priority_field, value)
+
+ def _set_priority_field(self, field_descriptor, value):
+ """Set the priority field described by field_descriptor
+
+ :param field_descriptor: (str) field descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/ \
+ field:"ipv4.header.priority.raw-3
+ :param value: (list, int) list of integers or single integer value
+ """
+ if isinstance(value, list):
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-valueList', value,
+ '-activeFieldChoice', 'true',
+ '-valueType', 'valueList')
+ else:
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-activeFieldChoice', 'true',
+ '-singleValue', str(value))
+ self.ixnet.commit()
def update_l4(self, traffic):
"""Update the L4 headers
@@ -605,6 +728,39 @@ class IxNextgen(object): # pragma: no cover
'getColumnValues', view_obj, data_ixia)
for data_yardstick, data_ixia in name_map.items()}
+ def _set_egress_flow_tracking(self, encapsulation, offset):
+ """Set egress flow tracking options
+
+ :param encapsulation: encapsulation type
+ :type encapsulation: str, e.g. 'Ethernet'
+ :param offset: offset type
+ :type offset: str, e.g. 'IPv4 TOS Precedence (3 bits)'
+ """
+ traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')[0]
+ # Enable Egress Tracking
+ self.ixnet.setAttribute(traffic_item, '-egressEnabled', True)
+ self.ixnet.commit()
+
+ # Set encapsulation type
+ enc_obj = self.ixnet.getList(traffic_item, 'egressTracking')[0]
+ self.ixnet.setAttribute(enc_obj, '-encapsulation', encapsulation)
+
+ # Set offset
+ self.ixnet.setAttribute(enc_obj, '-offset', offset)
+ self.ixnet.commit()
+
+ def _set_flow_tracking(self, track_by):
+ """Set flow tracking options
+
+ :param track_by: list of tracking fields
+ :type track_by: list, e.g. ['vlanVlanId0','ipv4Precedence0']
+ """
+ traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')[0]
+ self.ixnet.setAttribute(traffic_item + '/tracking', '-trackBy', track_by)
+ self.ixnet.commit()
+
def get_statistics(self):
"""Retrieve port and flow statistics
@@ -614,12 +770,30 @@ class IxNextgen(object): # pragma: no cover
:return: dictionary with the statistics; the keys of this dictionary
are PORT_STATS_NAME_MAP and LATENCY_NAME_MAP keys.
"""
- port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
- flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
- stats = self._build_stats_map(port_statistics,
+ stats = self._build_stats_map(self.PORT_STATISTICS,
+ self.PORT_STATS_NAME_MAP)
+ stats.update(self._build_stats_map(self.FLOW_STATISTICS,
+ self.LATENCY_NAME_MAP))
+ return stats
+
+ def get_pppoe_scenario_statistics(self):
+ """Retrieve port, flow and PPPoE subscribers statistics
+
+ "Port Statistics" parameters are stored in self.PORT_STATS_NAME_MAP.
+ "Flow Statistics" parameters are stored in self.LATENCY_NAME_MAP.
+ "PPPoX Client Per Port" parameters are stored in
+    self.PPPOX_CLIENT_PER_PORT_NAME_MAP
+
+ :return: dictionary with the statistics; the keys of this dictionary
+ are PORT_STATS_NAME_MAP, LATENCY_NAME_MAP and
+             PPPOX_CLIENT_PER_PORT_NAME_MAP keys.
+ """
+ stats = self._build_stats_map(self.PORT_STATISTICS,
self.PORT_STATS_NAME_MAP)
- stats.update(self._build_stats_map(flow_statistics,
- self.LATENCY_NAME_MAP))
+ stats.update(self._build_stats_map(self.FLOW_STATISTICS,
+ self.LATENCY_NAME_MAP))
+ stats.update(self._build_stats_map(self.PPPOX_CLIENT_PER_PORT,
+ self.PPPOX_CLIENT_PER_PORT_NAME_MAP))
return stats
def start_protocols(self):
@@ -788,7 +962,7 @@ class IxNextgen(object): # pragma: no cover
self.ixnet.commit()
return obj
- def add_pppox_client(self, xproto, auth, user, pwd):
+ def add_pppox_client(self, xproto, auth, user, pwd, enable_redial=True):
log.debug(
"add_pppox_client: xproto='%s', auth='%s', user='%s', pwd='%s'",
xproto, auth, user, pwd)
@@ -808,6 +982,10 @@ class IxNextgen(object): # pragma: no cover
else:
raise NotImplementedError()
+ if enable_redial:
+ redial = self.ixnet.getAttribute(obj, '-enableRedial')
+ self.ixnet.setAttribute(redial + '/singleValue', '-value', 'true')
+
self.ixnet.commit()
return obj
@@ -836,3 +1014,37 @@ class IxNextgen(object): # pragma: no cover
'-value', bgp_type)
self.ixnet.commit()
return obj
+
+ def add_interface(self, vport, ip, mac=None, gateway=None):
+ """Add protocol interface to the vport"""
+ log.debug("add_interface: mac='%s', ip='%s', gateway='%s'", mac, ip,
+ gateway)
+ obj = self.ixnet.add(vport, 'interface')
+ self.ixnet.commit()
+
+ if mac is not None:
+ self.ixnet.setMultiAttribute(obj + '/ethernet', '-macAddress', mac)
+
+ ipv4 = self.ixnet.add(obj, 'ipv4')
+ self.ixnet.setMultiAttribute(ipv4, '-ip', ip)
+
+ if gateway is not None:
+ self.ixnet.setMultiAttribute(ipv4, '-gateway', gateway)
+
+ self.ixnet.commit()
+
+ self.ixnet.setMultiAttribute(obj, '-enabled', 'true')
+ self.ixnet.commit()
+
+ return obj
+
+ def add_static_ipv4(self, iface, vport, start_ip, count):
+ """Add static IP range to the interface"""
+ log.debug("add_static_ipv4: start_ip:'%s', count:'%s'",
+ start_ip, count)
+ obj = self.ixnet.add(vport + '/protocols/static', 'ip')
+
+ self.ixnet.setMultiAttribute(obj, '-protocolInterface', iface,
+ '-ipStart', start_ip, '-count', count,
+ '-enabled', 'true')
+ self.ixnet.commit()
diff --git a/yardstick/network_services/pipeline.py b/yardstick/network_services/pipeline.py
index 7155480d4..4fbe7967f 100644
--- a/yardstick/network_services/pipeline.py
+++ b/yardstick/network_services/pipeline.py
@@ -22,7 +22,7 @@ from yardstick.common import utils
FIREWALL_ADD_DEFAULT = "p {0} firewall add default 1"
FIREWALL_ADD_PRIO = """\
-p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 6 0xFF port 0"""
+p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 17 0xFF port 0"""
FLOW_ADD_QINQ_RULES = """\
p {0} flow add qinq 128 512 port 0 id 1
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py
index 91d8a665f..72a61b6b4 100644
--- a/yardstick/network_services/traffic_profile/__init__.py
+++ b/yardstick/network_services/traffic_profile/__init__.py
@@ -23,6 +23,7 @@ def register_modules():
'yardstick.network_services.traffic_profile.http_ixload',
'yardstick.network_services.traffic_profile.ixia_rfc2544',
'yardstick.network_services.traffic_profile.prox_ACL',
+ 'yardstick.network_services.traffic_profile.prox_irq',
'yardstick.network_services.traffic_profile.prox_binsearch',
'yardstick.network_services.traffic_profile.prox_profile',
'yardstick.network_services.traffic_profile.prox_ramp',
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index ea3f17874..2fdf6ce4a 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -36,7 +36,7 @@ class TrafficProfileConfig(object):
self.description = tp_config.get('description')
tprofile = tp_config['traffic_profile']
self.traffic_type = tprofile.get('traffic_type')
- self.frame_rate, self.rate_unit = self._parse_rate(
+ self.frame_rate, self.rate_unit = self.parse_rate(
tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE))
self.test_precision = tprofile.get('test_precision')
self.packet_sizes = tprofile.get('packet_sizes')
@@ -46,7 +46,7 @@ class TrafficProfileConfig(object):
self.step_interval = tprofile.get('step_interval')
self.enable_latency = tprofile.get('enable_latency', False)
- def _parse_rate(self, rate):
+ def parse_rate(self, rate):
"""Parse traffic profile rate
The line rate can be defined in fps or percentage over the maximum line
diff --git a/yardstick/network_services/traffic_profile/http_ixload.py b/yardstick/network_services/traffic_profile/http_ixload.py
index 3ccec637d..b88aadff7 100644
--- a/yardstick/network_services/traffic_profile/http_ixload.py
+++ b/yardstick/network_services/traffic_profile/http_ixload.py
@@ -16,6 +16,14 @@ import sys
import os
import logging
import collections
+import subprocess
+try:
+ libs = subprocess.check_output(
+ 'python -c "import site; print(site.getsitepackages())"', shell=True)
+
+ sys.path.extend(libs[1:-1].replace("'", "").split(','))
+except subprocess.CalledProcessError:
+ pass
# ixload uses its own py2. So importing jsonutils fails. So adding below
# workaround to support call from yardstick
@@ -24,7 +32,7 @@ try:
except ImportError:
import json as jsonutils
-from yardstick.common import exceptions
+from yardstick.common import exceptions  # pylint: disable=wrong-import-position
try:
from IxLoad import IxLoad, StatCollectorUtils
@@ -256,6 +264,61 @@ class IXLOADHttpTest(object):
continue
self.update_network_param(net_traffic, param["ip"])
+ if "uplink" in name:
+ self.update_http_client_param(net_traffic, param["http_client"])
+
+ def update_http_client_param(self, net_traffic, param):
+ """Update http client object in net_traffic
+
+        Update the HTTP client object in net_traffic with the parameters
+        specified in param.
+        Does not return anything.
+
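+        Example of the expected 'http_client' section (illustrative values)::
+
+            {"page_object": "/4k.html", "simulated_users": 64}
+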
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param param: (dict) http_client section from traffic profile
+ :return:
+ """
+ page = param.get("page_object")
+ if page:
+ self.update_page_size(net_traffic, page)
+ users = param.get("simulated_users")
+ if users:
+ self.update_user_count(net_traffic, users)
+
+ def update_page_size(self, net_traffic, page_object):
+ """Update page_object field in http client object in net_traffic
+
+        This function updates the field that configures the page object
+        to be loaded from the server.
+        Does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param page_object: (str) path to object on server e.g. "/4k.html"
+ :return:
+ """
+ try:
+ activity = net_traffic.activityList[0]
+ ix_http_command = activity.agent.actionList[0]
+ ix_http_command.config(pageObject=page_object)
+ except Exception:
+ raise exceptions.InvalidRxfFile
+
+ def update_user_count(self, net_traffic, user_count):
+ """Update userObjectiveValue field in activity object in net_traffic
+
+        This function updates the field that configures the number of users
+        to be simulated by the client.
+        Does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param user_count: (int) number of simulated users
+ :return:
+ """
+ try:
+ activity = net_traffic.activityList[0]
+ activity.config(userObjectiveValue=user_count)
+ except Exception:
+ raise exceptions.InvalidRxfFile
def start_http_test(self):
self.ix_load = IxLoad()
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index b8aa78d80..35038891b 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -13,6 +13,7 @@
# limitations under the License.
import logging
+import collections
from yardstick.common import utils
from yardstick.network_services.traffic_profile import base as tp_base
@@ -28,11 +29,14 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
DOWNLINK = 'downlink'
DROP_PERCENT_ROUND = 6
RATE_ROUND = 5
+ STATUS_SUCCESS = "Success"
+ STATUS_FAIL = "Failure"
def __init__(self, yaml_data):
super(IXIARFC2544Profile, self).__init__(yaml_data)
self.rate = self.config.frame_rate
self.rate_unit = self.config.rate_unit
+ self.full_profile = {}
def _get_ip_and_mask(self, ip_range):
_ip_range = ip_range.split('-')
@@ -76,6 +80,12 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'outer_l4': {},
}
+ frame_rate = value.get('frame_rate')
+ if frame_rate:
+ flow_rate, flow_rate_unit = self.config.parse_rate(frame_rate)
+ result[traffickey]['rate'] = flow_rate
+ result[traffickey]['rate_unit'] = flow_rate_unit
+
outer_l2 = value.get('outer_l2')
if outer_l2:
result[traffickey]['outer_l2'].update({
@@ -111,6 +121,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'dstmask': dstmask,
'type': key,
'proto': outer_l3.get('proto'),
+ 'priority': outer_l3.get('priority')
})
outer_l4 = value.get('outer_l4')
@@ -161,9 +172,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
first_run = self.first_run
if self.first_run:
self.first_run = False
- self.full_profile = {}
self.pg_id = 0
- self.update_traffic_profile(traffic_generator)
self.max_rate = self.rate
self.min_rate = 0.0
else:
@@ -174,7 +183,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
self._ixia_traffic_generate(traffic, ixia_obj)
return first_run
- def get_drop_percentage(self, samples, tol_min, tolerance,
+ def get_drop_percentage(self, samples, tol_min, tolerance, precision,
first_run=False):
completed = False
drop_percent = 100
@@ -208,6 +217,10 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
else:
completed = True
+ LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
+ "completed=%s", tolerance, precision, drop_percent,
+ completed)
+
latency_ns_avg = float(
sum([samples[iface]['Store-Forward_Avg_latency_ns']
for iface in samples])) / num_ifaces
@@ -218,6 +231,10 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
sum([samples[iface]['Store-Forward_Max_latency_ns']
for iface in samples])) / num_ifaces
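+        # The drop percentage is rounded to the precision derived from the
+        # configured tolerance (the number of decimal digits of its upper
+        # bound) before being compared with the tolerance.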
+ samples['Status'] = self.STATUS_FAIL
+ if round(drop_percent, precision) <= tolerance:
+ samples['Status'] = self.STATUS_SUCCESS
+
samples['TxThroughput'] = tx_throughput
samples['RxThroughput'] = rx_throughput
samples['DropPercentage'] = drop_percent
@@ -226,3 +243,33 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
samples['latency_ns_max'] = latency_ns_max
return completed, samples
+
+
+class IXIARFC2544PppoeScenarioProfile(IXIARFC2544Profile):
+ """Class handles BNG PPPoE scenario tests traffic profile"""
+
+ def __init__(self, yaml_data):
+ super(IXIARFC2544PppoeScenarioProfile, self).__init__(yaml_data)
+ self.full_profile = collections.OrderedDict()
+
+ def _get_flow_groups_params(self):
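+        """Collect the uplink_N/downlink_N flows into the ordered profile
+
+        The uplink_0, downlink_0, uplink_1, downlink_1, ... ordering is
+        preserved; the PPPoE scenario later relies on it when pairing
+        source/destination endpoints.
+        """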
+ flows_data = [key for key in self.params.keys()
+ if key.split('_')[0] in [self.UPLINK, self.DOWNLINK]]
+ for i in range(len(flows_data)):
+ uplink = '_'.join([self.UPLINK, str(i)])
+ downlink = '_'.join([self.DOWNLINK, str(i)])
+ if uplink in flows_data:
+ self.full_profile.update({uplink: self.params[uplink]})
+ if downlink in flows_data:
+ self.full_profile.update({downlink: self.params[downlink]})
+
+ def update_traffic_profile(self, traffic_generator):
+ def port_generator():
+ for vld_id, intfs in sorted(traffic_generator.networks.items()):
+ if not vld_id.startswith((self.UPLINK, self.DOWNLINK)):
+ continue
+ for intf in intfs:
+ yield traffic_generator.vnfd_helper.port_num(intf)
+
+ self._get_flow_groups_params()
+ self.ports = [port for port in port_generator()]
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index f924cf419..402bf741c 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -168,8 +168,8 @@ class ProxBinSearchProfile(ProxProfile):
samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- if theor_max_thruput < samples["TxThroughput"]:
- theor_max_thruput = samples['TxThroughput']
+ if theor_max_thruput < samples["RequestedTxThroughput"]:
+ theor_max_thruput = samples['RequestedTxThroughput']
samples['theor_max_throughput'] = theor_max_thruput
samples["rx_total"] = int(result.rx_total)
diff --git a/yardstick/network_services/traffic_profile/prox_irq.py b/yardstick/network_services/traffic_profile/prox_irq.py
new file mode 100644
index 000000000..0ea294914
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/prox_irq.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2016-2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Fixed traffic profile definitions """
+
+import logging
+import time
+
+from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+
+LOG = logging.getLogger(__name__)
+
+
+class ProxIrqProfile(ProxProfile):
+ """
+    Traffic profile for PROX IRQ tests: no traffic streams are generated;
+    execute_traffic() only waits while IRQ statistics are collected.
+ """
+
+ def __init__(self, tp_config):
+ super(ProxIrqProfile, self).__init__(tp_config)
+
+ def init(self, queue):
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def execute_traffic(self, traffic_generator):
+ LOG.debug("Prox_IRQ Execute Traffic....")
+ time.sleep(5)
+
+ def is_ended(self):
+ return False
+
+ def run_test(self):
+ """Run the test
+ """
+
+ LOG.info("Prox_IRQ ....")
diff --git a/yardstick/network_services/traffic_profile/prox_profile.py b/yardstick/network_services/traffic_profile/prox_profile.py
index de4b3f9a0..be450c9f7 100644
--- a/yardstick/network_services/traffic_profile/prox_profile.py
+++ b/yardstick/network_services/traffic_profile/prox_profile.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@ from __future__ import absolute_import
import logging
import multiprocessing
+import time
from yardstick.network_services.traffic_profile.base import TrafficProfile
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper
@@ -117,6 +118,7 @@ class ProxProfile(TrafficProfile):
try:
pkt_size = next(self.pkt_size_iterator)
except StopIteration:
+ time.sleep(5)
self.done.set()
return
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index 321c05779..5d980037a 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ import re
import select
import socket
import time
+
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from itertools import repeat, chain
@@ -325,6 +326,27 @@ class ProxSocketHelper(object):
return ret_str, False
+ def get_string(self, pkt_dump_only=False, timeout=0.01):
+
+ def is_ready_string():
+ # recv() is blocking, so avoid calling it when no data is waiting.
+ ready = select.select([self._sock], [], [], timeout)
+ return bool(ready[0])
+
+ status = False
+ ret_str = ""
+        while not status:
+ for status in iter(is_ready_string, False):
+ decoded_data = self._sock.recv(256).decode('utf-8')
+ ret_str, done = self._parse_socket_data(decoded_data,
+ pkt_dump_only)
+                if done:
+ status = True
+ break
+
+ LOG.debug("Received data from socket: [%s]", ret_str)
+ return status, ret_str
+
def get_data(self, pkt_dump_only=False, timeout=0.01):
""" read data from the socket """
@@ -394,7 +416,6 @@ class ProxSocketHelper(object):
""" stop all cores on the remote instance """
LOG.debug("Stop all")
self.put_command("stop all\n")
- time.sleep(3)
def stop(self, cores, task=''):
""" stop specific cores on the remote instance """
@@ -406,7 +427,6 @@ class ProxSocketHelper(object):
LOG.debug("Stopping cores %s", tmpcores)
self.put_command("stop {} {}\n".format(join_non_strings(',', tmpcores), task))
- time.sleep(3)
def start_all(self):
""" start all cores on the remote instance """
@@ -423,13 +443,11 @@ class ProxSocketHelper(object):
LOG.debug("Starting cores %s", tmpcores)
self.put_command("start {}\n".format(join_non_strings(',', tmpcores)))
- time.sleep(3)
def reset_stats(self):
""" reset the statistics on the remote instance """
LOG.debug("Reset stats")
self.put_command("reset stats\n")
- time.sleep(1)
def _run_template_over_cores(self, template, cores, *args):
for core in cores:
@@ -440,7 +458,6 @@ class ProxSocketHelper(object):
LOG.debug("Set packet size for core(s) %s to %d", cores, pkt_size)
pkt_size -= 4
self._run_template_over_cores("pkt_size {} 0 {}\n", cores, pkt_size)
- time.sleep(1)
def set_value(self, cores, offset, value, length):
""" set value on the remote instance """
@@ -544,50 +561,173 @@ class ProxSocketHelper(object):
tsc = int(ret[3])
return rx, tx, drop, tsc
- def multi_port_stats(self, ports):
- """get counter values from all ports port"""
+ def irq_core_stats(self, cores_tasks):
+ """ get IRQ stats per core"""
+
+ stat = {}
+ core = 0
+ task = 0
+ for core, task in cores_tasks:
+ self.put_command("stats task.core({}).task({}).max_irq,task.core({}).task({}).irq(0),"
+ "task.core({}).task({}).irq(1),task.core({}).task({}).irq(2),"
+ "task.core({}).task({}).irq(3),task.core({}).task({}).irq(4),"
+ "task.core({}).task({}).irq(5),task.core({}).task({}).irq(6),"
+ "task.core({}).task({}).irq(7),task.core({}).task({}).irq(8),"
+ "task.core({}).task({}).irq(9),task.core({}).task({}).irq(10),"
+ "task.core({}).task({}).irq(11),task.core({}).task({}).irq(12)"
+ "\n".format(core, task, core, task, core, task, core, task,
+ core, task, core, task, core, task, core, task,
+ core, task, core, task, core, task, core, task,
+ core, task, core, task))
+ in_data_str = self.get_data().split(",")
+ ret = [try_int(s, 0) for s in in_data_str]
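+            # ret[0] is max_irq and ret[1:14] are the 13 IRQ latency buckets;
+            # 'overflow' below sums the four highest buckets (9-12).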
+ key = "core_" + str(core)
+ try:
+ stat[key] = {"cpu": core, "max_irq": ret[0], "bucket_0" : ret[1],
+ "bucket_1" : ret[2], "bucket_2" : ret[3],
+ "bucket_3" : ret[4], "bucket_4" : ret[5],
+ "bucket_5" : ret[6], "bucket_6" : ret[7],
+ "bucket_7" : ret[8], "bucket_8" : ret[9],
+ "bucket_9" : ret[10], "bucket_10" : ret[11],
+ "bucket_11" : ret[12], "bucket_12" : ret[13],
+ "overflow": ret[10] + ret[11] + ret[12] + ret[13]}
+ except (KeyError, IndexError):
+ LOG.error("Corrupted PACKET %s", in_data_str)
+
+ return stat
- ports_str = ""
- for port in ports:
- ports_str = ports_str + str(port) + ","
- ports_str = ports_str[:-1]
+ def multi_port_stats(self, ports):
+ """get counter values from all ports at once"""
+ ports_str = ",".join(map(str, ports))
ports_all_data = []
tot_result = [0] * len(ports)
- retry_counter = 0
port_index = 0
- while (len(ports) is not len(ports_all_data)) and (retry_counter < 10):
+        while len(ports) != len(ports_all_data):
self.put_command("multi port stats {}\n".format(ports_str))
- ports_all_data = self.get_data().split(";")
+ status, ports_all_data_str = self.get_string()
+
+ if not status:
+ return False, []
+
+ ports_all_data = ports_all_data_str.split(";")
if len(ports) is len(ports_all_data):
for port_data_str in ports_all_data:
+ tmpdata = []
try:
- tot_result[port_index] = [try_int(s, 0) for s in port_data_str.split(",")]
+ tmpdata = [try_int(s, 0) for s in port_data_str.split(",")]
except (IndexError, TypeError):
- LOG.error("Port Index error %d %s - retrying ", port_index, port_data_str)
-
- if (len(tot_result[port_index]) is not 6) or \
- tot_result[port_index][0] is not ports[port_index]:
- ports_all_data = []
- tot_result = [0] * len(ports)
- port_index = 0
- time.sleep(0.1)
+ LOG.error("Unpacking data error %s", port_data_str)
+ return False, []
+
+ if (len(tmpdata) < 6) or tmpdata[0] not in ports:
LOG.error("Corrupted PACKET %s - retrying", port_data_str)
- break
+ return False, []
else:
+ tot_result[port_index] = tmpdata
port_index = port_index + 1
else:
LOG.error("Empty / too much data - retry -%s-", ports_all_data)
- ports_all_data = []
- tot_result = [0] * len(ports)
- port_index = 0
- time.sleep(0.1)
+ return False, []
- retry_counter = retry_counter + 1
- return tot_result
+ LOG.debug("Multi port packet ..OK.. %s", tot_result)
+ return True, tot_result
+
+ @staticmethod
+ def multi_port_stats_tuple(stats, ports):
+ """
+ Create a statistics tuple from port stats.
+
+        Returns a dict that contains the port stats indexed by port name.
+
+        :param stats: (list) list of per-port stats lists, in pps
+        :param ports: (iterator) of (port_name, port_num) pairs
+
+ :return: (Dict) of port stats indexed by port_name
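+
+        Example (illustrative values)::
+
+            stats = [[0, 100, 110, 0, 0, 200]]
+            ports = iter([('xe0', 0)])
+            -> {'xe0': {'in_packets': 100, 'out_packets': 110}}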
+ """
+
+ samples = {}
+ port_names = {}
+ try:
+ port_names = {port_num: port_name for port_name, port_num in ports}
+ except (TypeError, IndexError, KeyError):
+ LOG.critical("Ports are not initialized or number of port is ZERO ... CRITICAL ERROR")
+ return {}
+
+ try:
+ for stat in stats:
+ port_num = stat[0]
+ samples[port_names[port_num]] = {
+ "in_packets": stat[1],
+ "out_packets": stat[2]}
+ except (TypeError, IndexError, KeyError):
+ LOG.error("Ports data and samples data is incompatable ....")
+ return {}
+
+ return samples
+
+ @staticmethod
+ def multi_port_stats_diff(prev_stats, new_stats, hz):
+ """
+        Create a statistics list from the difference between the previous
+        and the current port stats, expressed in pps.
+
+        :param prev_stats: (list) previous list of port statistics
+        :param new_stats: (list) current list of port statistics
+        :param hz: (float) speed of the system in Hz
+
+        :return: (list) difference between prev_stats and new_stats,
+            converted to pps
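+
+        Example (illustrative numbers), with hz=3000::
+
+            prev_stats = [[0, 100, 100, 0, 0, 1000]]
+            new_stats  = [[0, 400, 400, 0, 0, 2000]]
+            -> [[0, 900.0, 900.0, 0, 0, 1000]]
+
+        i.e. 300 packets over 1000 TSC ticks at 3000 Hz is 900 pps in each
+        direction.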
+ """
+
+ RX_TOTAL_INDEX = 1
+ TX_TOTAL_INDEX = 2
+ TSC_INDEX = 5
+
+ stats = []
+
+        if len(prev_stats) != len(new_stats):
+ for port_index, stat in enumerate(new_stats):
+ stats.append([port_index, float(0), float(0), 0, 0, 0])
+ return stats
+
+ try:
+ for port_index, stat in enumerate(new_stats):
+ if stat[RX_TOTAL_INDEX] > prev_stats[port_index][RX_TOTAL_INDEX]:
+ rx_total = stat[RX_TOTAL_INDEX] - \
+ prev_stats[port_index][RX_TOTAL_INDEX]
+ else:
+ rx_total = stat[RX_TOTAL_INDEX]
+
+ if stat[TX_TOTAL_INDEX] > prev_stats[port_index][TX_TOTAL_INDEX]:
+ tx_total = stat[TX_TOTAL_INDEX] - prev_stats[port_index][TX_TOTAL_INDEX]
+ else:
+ tx_total = stat[TX_TOTAL_INDEX]
+
+ if stat[TSC_INDEX] > prev_stats[port_index][TSC_INDEX]:
+ tsc = stat[TSC_INDEX] - prev_stats[port_index][TSC_INDEX]
+ else:
+ tsc = stat[TSC_INDEX]
+
+                if tsc == 0:
+                    rx_total = tx_total = float(0)
+                else:
+                    if hz == 0:
+                        LOG.error("HZ is zero")
+ rx_total = tx_total = float(0)
+ else:
+ rx_total = float(rx_total * hz / tsc)
+ tx_total = float(tx_total * hz / tsc)
+
+ stats.append([port_index, rx_total, tx_total, 0, 0, tsc])
+ except (TypeError, IndexError, KeyError):
+ stats = []
+ LOG.info("Current Port Stats incompatable to previous Port stats .. Discarded")
+
+ return stats
def port_stats(self, ports):
"""get counter values from a specific port"""
@@ -649,7 +789,6 @@ class ProxSocketHelper(object):
self.put_command("quit_force\n")
time.sleep(3)
-
_LOCAL_OBJECT = object()
@@ -956,6 +1095,8 @@ class ProxResourceHelper(ClientResourceHelper):
self.step_delta = 1
self.step_time = 0.5
self._test_type = None
+ self.prev_multi_port = []
+ self.prev_hz = 0
@property
def sut(self):
@@ -994,11 +1135,40 @@ class ProxResourceHelper(ClientResourceHelper):
def collect_collectd_kpi(self):
return self._collect_resource_kpi()
+ def collect_live_stats(self):
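+        """Collect per-port rates (in pps) since the previous call
+
+        Rates are computed with multi_port_stats_diff() against the stats
+        cached from the previous call, so the first call reports zero rates
+        while priming the cache.
+        """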
+ ports = []
+ for _, port_num in self.vnfd_helper.ports_iter():
+ ports.append(port_num)
+
+ ok, curr_port_stats = self.sut.multi_port_stats(ports)
+ if not ok:
+ return False, {}
+
+ hz = self.sut.hz()
+        if hz == 0:
+ hz = self.prev_hz
+ else:
+ self.prev_hz = hz
+
+ new_all_port_stats = \
+ self.sut.multi_port_stats_diff(self.prev_multi_port, curr_port_stats, hz)
+
+ self.prev_multi_port = curr_port_stats
+
+ live_stats = self.sut.multi_port_stats_tuple(new_all_port_stats,
+ self.vnfd_helper.ports_iter())
+ return True, live_stats
+
def collect_kpi(self):
result = super(ProxResourceHelper, self).collect_kpi()
# add in collectd kpis manually
if result:
result['collect_stats'] = self._collect_resource_kpi()
+
+ ok, live_stats = self.collect_live_stats()
+ if ok:
+ result.update({'live_stats': live_stats})
+
return result
def terminate(self):
@@ -1070,41 +1240,70 @@ class ProxDataHelper(object):
def totals_and_pps(self):
if self._totals_and_pps is None:
rx_total = tx_total = 0
- all_ports = self.sut.multi_port_stats(range(self.port_count))
- for port in all_ports:
- rx_total = rx_total + port[1]
- tx_total = tx_total + port[2]
- requested_pps = self.value / 100.0 * self.line_rate_to_pps()
- self._totals_and_pps = rx_total, tx_total, requested_pps
+ ok = False
+ timeout = time.time() + constants.RETRY_TIMEOUT
+ while not ok:
+ ok, all_ports = self.sut.multi_port_stats([
+ self.vnfd_helper.port_num(port_name)
+ for port_name in self.vnfd_helper.port_pairs.all_ports])
+ if time.time() > timeout:
+ break
+ if ok:
+ for port in all_ports:
+ rx_total = rx_total + port[1]
+ tx_total = tx_total + port[2]
+ requested_pps = self.value / 100.0 * self.line_rate_to_pps()
+ self._totals_and_pps = rx_total, tx_total, requested_pps
return self._totals_and_pps
@property
def rx_total(self):
- return self.totals_and_pps[0]
+ try:
+ ret_val = self.totals_and_pps[0]
+ except (AttributeError, ValueError, TypeError, LookupError):
+ ret_val = 0
+ return ret_val
@property
def tx_total(self):
- return self.totals_and_pps[1]
+ try:
+ ret_val = self.totals_and_pps[1]
+ except (AttributeError, ValueError, TypeError, LookupError):
+ ret_val = 0
+ return ret_val
@property
def requested_pps(self):
- return self.totals_and_pps[2]
+ try:
+ ret_val = self.totals_and_pps[2]
+ except (AttributeError, ValueError, TypeError, LookupError):
+ ret_val = 0
+ return ret_val
@property
def samples(self):
samples = {}
ports = []
- port_names = []
+ port_names = {}
for port_name, port_num in self.vnfd_helper.ports_iter():
ports.append(port_num)
- port_names.append(port_name)
-
- results = self.sut.multi_port_stats(ports)
- for result in results:
- port_num = result[0]
- samples[port_names[port_num]] = {
- "in_packets": result[1],
- "out_packets": result[2]}
+ port_names[port_num] = port_name
+
+ ok = False
+ timeout = time.time() + constants.RETRY_TIMEOUT
+ while not ok:
+ ok, results = self.sut.multi_port_stats(ports)
+ if time.time() > timeout:
+ break
+ if ok:
+ for result in results:
+ port_num = result[0]
+ try:
+ samples[port_names[port_num]] = {
+ "in_packets": result[1],
+ "out_packets": result[2]}
+ except (IndexError, KeyError):
+ pass
return samples
def __enter__(self):
@@ -1902,3 +2101,15 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper):
data_helper.latency = self.get_latency()
return data_helper.result_tuple, data_helper.samples
+
+
+class ProxIrqProfileHelper(ProxProfileHelper):
+
+ __prox_profile_type__ = "IRQ Query"
+
+ def __init__(self, resource_helper):
+ super(ProxIrqProfileHelper, self).__init__(resource_helper)
+ self._cores_tuple = None
+ self._ports_tuple = None
+ self.step_delta = 5
+ self.step_time = 0.5
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_irq.py b/yardstick/network_services/vnf_generic/vnf/prox_irq.py
new file mode 100644
index 000000000..dda26b0fe
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/prox_irq.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import copy
+import time
+
+from yardstick.common.process import check_if_process_failed
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
+from yardstick.benchmark.contexts.base import Context
+from yardstick.network_services.vnf_generic.vnf.prox_helpers import CoreSocketTuple
+LOG = logging.getLogger(__name__)
+
+
+class ProxIrq(SampleVNFTrafficGen):
+
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ vnfd_cpy = copy.deepcopy(vnfd)
+ super(ProxIrq, self).__init__(name, vnfd_cpy, task_id)
+
+ self._vnf_wrapper = ProxApproxVnf(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.name = self._vnf_wrapper.name
+ self.ssh_helper = self._vnf_wrapper.ssh_helper
+ self.setup_helper = self._vnf_wrapper.setup_helper
+ self.resource_helper = self._vnf_wrapper.resource_helper
+ self.scenario_helper = self._vnf_wrapper.scenario_helper
+ self.irq_cores = None
+
+ def terminate(self):
+ self._vnf_wrapper.terminate()
+ super(ProxIrq, self).terminate()
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._vnf_wrapper.instantiate(scenario_cfg, context_cfg)
+ self._tg_process = self._vnf_wrapper._vnf_process
+
+ def wait_for_instantiate(self):
+ self._vnf_wrapper.wait_for_instantiate()
+
+ def get_irq_cores(self):
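+        """Return (core, task) tuples for the PROX cores running in IRQ mode
+
+        Every [core X] section of the generated PROX config whose 'mode'
+        is 'irq' contributes one (core_id, task_id) pair; the task id
+        defaults to 0 when no 'task' key is present.
+        """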
+ cores = []
+ mode = "irq"
+
+ for section_name, section in self.setup_helper.prox_config_data:
+ if not section_name.startswith("core"):
+ continue
+ irq_mode = task_present = False
+ task_present_task = 0
+ for key, value in section:
+ if key == "mode" and value == mode:
+ irq_mode = True
+ if key == "task":
+ task_present = True
+ task_present_task = int(value)
+
+ if irq_mode:
+ if not task_present:
+ task_present_task = 0
+ core_tuple = CoreSocketTuple(section_name)
+ core = core_tuple.core_id
+ cores.append((core, task_present_task))
+
+ return cores
+
+
+class ProxIrqVNF(ProxIrq, SampleVNFTrafficGen):
+
+ APP_NAME = 'ProxIrqVNF'
+
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ ProxIrq.__init__(self, name, vnfd, task_id, setup_env_helper_type,
+ resource_helper_type)
+
+ self.start_test_time = None
+ self.end_test_time = None
+
+ def vnf_execute(self, cmd, *args, **kwargs):
+ ignore_errors = kwargs.pop("_ignore_errors", False)
+ try:
+ return self.resource_helper.execute(cmd, *args, **kwargs)
+ except OSError as e:
+ if e.errno in {errno.EPIPE, errno.ESHUTDOWN, errno.ECONNRESET}:
+ if ignore_errors:
+ LOG.debug("ignoring vnf_execute exception %s for command %s", e, cmd)
+ else:
+ raise
+ else:
+ raise
+
+ def collect_kpi(self):
+ # check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
+ for proc in (self._tg_process, self._traffic_process):
+ check_if_process_failed(proc)
+
+ if self.resource_helper is None:
+ return result
+
+ if self.irq_cores is None:
+ self.setup_helper.build_config_file()
+ self.irq_cores = self.get_irq_cores()
+
+ data = self.vnf_execute('irq_core_stats', self.irq_cores)
+ new_data = copy.deepcopy(data)
+
+ self.end_test_time = time.time()
+ self.vnf_execute('reset_stats')
+
+ if self.start_test_time is None:
+ new_data = {}
+ else:
+ test_time = self.end_test_time - self.start_test_time
+ for index, item in data.items():
+ for counter, value in item.items():
+ if counter.startswith("bucket_")or \
+ counter.startswith("overflow"):
+ if value is 0:
+ del new_data[index][counter]
+ else:
+ new_data[index][counter] = float(value) / test_time
+
+ self.start_test_time = time.time()
+
+ result["collect_stats"] = new_data
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+
+ return result
+
+
+class ProxIrqGen(ProxIrq, SampleVNFTrafficGen):
+
+ APP_NAME = 'ProxIrqGen'
+
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ ProxIrq.__init__(self, name, vnfd, task_id, setup_env_helper_type,
+ resource_helper_type)
+ self.start_test_time = None
+ self.end_test_time = None
+
+ def collect_kpi(self):
+ # check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
+ for proc in (self._tg_process, self._traffic_process):
+ check_if_process_failed(proc)
+
+ if self.resource_helper is None:
+ return result
+
+ if self.irq_cores is None:
+ self.setup_helper.build_config_file()
+ self.irq_cores = self.get_irq_cores()
+
+ data = self.resource_helper.sut.irq_core_stats(self.irq_cores)
+ new_data = copy.deepcopy(data)
+
+ self.end_test_time = time.time()
+ self.resource_helper.sut.reset_stats()
+
+ if self.start_test_time is None:
+ new_data = {}
+ else:
+ test_time = self.end_test_time - self.start_test_time
+ for index, item in data.items():
+ for counter, value in item.items():
+ if counter.startswith("bucket_") or \
+ counter.startswith("overflow"):
+                        if value == 0:
+ del new_data[index][counter]
+ else:
+ new_data[index][counter] = float(value) / test_time
+
+ self.start_test_time = time.time()
+
+ result["collect_stats"] = new_data
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+
+ return result
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index 839f30967..c3b50369b 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +15,7 @@
import errno
import logging
import datetime
+import time
from yardstick.common.process import check_if_process_failed
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
@@ -81,6 +82,8 @@ class ProxApproxVnf(SampleVNF):
"packets_in": 0,
"packets_dropped": 0,
"packets_fwd": 0,
+ "curr_packets_in": 0,
+ "curr_packets_fwd": 0,
"collect_stats": {"core": {}},
})
return result
@@ -97,15 +100,26 @@ class ProxApproxVnf(SampleVNF):
raise RuntimeError("Failed ..Invalid no of ports .. "
"1, 2 or 4 ports only supported at this time")
- all_port_stats = self.vnf_execute('multi_port_stats', range(port_count))
- rx_total = tx_total = tsc = 0
- try:
- for single_port_stats in all_port_stats:
- rx_total = rx_total + single_port_stats[1]
- tx_total = tx_total + single_port_stats[2]
- tsc = tsc + single_port_stats[5]
- except (TypeError, IndexError):
- LOG.error("Invalid data ...")
+        tmp_ports = [self.vnfd_helper.port_num(port_name)
+                     for port_name in self.vnfd_helper.port_pairs.all_ports]
+ ok = False
+ timeout = time.time() + constants.RETRY_TIMEOUT
+ while not ok:
+            ok, all_port_stats = self.vnf_execute('multi_port_stats', tmp_ports)
+ if time.time() > timeout:
+ break
+
+ if ok:
+ rx_total = tx_total = tsc = 0
+ try:
+ for single_port_stats in all_port_stats:
+ rx_total = rx_total + single_port_stats[1]
+ tx_total = tx_total + single_port_stats[2]
+ tsc = tsc + single_port_stats[5]
+ except (TypeError, IndexError):
+ LOG.error("Invalid data ...")
+ return {}
+ else:
return {}
tsc = tsc / port_count
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index a09f2a7a9..8833b88f2 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -13,6 +13,7 @@
# limitations under the License.
import logging
+import decimal
from multiprocessing import Queue, Value, Process
import os
import posixpath
@@ -21,7 +22,6 @@ import uuid
import subprocess
import time
-import six
from trex_stl_lib.trex_stl_client import LoggerApi
from trex_stl_lib.trex_stl_client import STLClient
@@ -113,19 +113,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.used_drivers = None
self.dpdk_bind_helper = DpdkBindHelper(ssh_helper)
- def _setup_hugepages(self):
- meminfo = utils.read_meminfo(self.ssh_helper)
- hp_size_kb = int(meminfo['Hugepagesize'])
- hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16)
- nr_hugepages = int(abs(hugepages_gb * 1024 * 1024 / hp_size_kb))
- self.ssh_helper.execute('echo %s | sudo tee %s' %
- (nr_hugepages, self.NR_HUGEPAGES_PATH))
- hp = six.BytesIO()
- self.ssh_helper.get_file_obj(self.NR_HUGEPAGES_PATH, hp)
- nr_hugepages_set = int(hp.getvalue().decode('utf-8').splitlines()[0])
- LOG.info('Hugepages size (kB): %s, number claimed: %s, number set: %s',
- hp_size_kb, nr_hugepages, nr_hugepages_set)
-
def build_config(self):
vnf_cfg = self.scenario_helper.vnf_cfg
task_path = self.scenario_helper.task_path
@@ -193,7 +180,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
"""No actions/rules (flows) by default"""
return None
- def _build_pipeline_kwargs(self):
+ def _build_pipeline_kwargs(self, cfg_file=None, script=None):
tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
# count the number of actual ports in the list of pairs
# remove duplicate ports
@@ -213,8 +200,8 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
hwlb = ' --hwlb %s' % worker_threads
self.pipeline_kwargs = {
- 'cfg_file': self.CFG_CONFIG,
- 'script': self.CFG_SCRIPT,
+ 'cfg_file': cfg_file if cfg_file else self.CFG_CONFIG,
+ 'script': script if script else self.CFG_SCRIPT,
'port_mask_hex': ports_mask_hex,
'tool_path': tool_path,
'hwlb': hwlb,
@@ -238,7 +225,8 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def _setup_dpdk(self):
"""Setup DPDK environment needed for VNF to run"""
- self._setup_hugepages()
+ hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16)
+ utils.setup_hugepages(self.ssh_helper, hugepages_gb * 1024 * 1024)
self.dpdk_bind_helper.load_dpdk_driver()
exit_status = self.dpdk_bind_helper.check_dpdk_driver()
@@ -499,6 +487,7 @@ class Rfc2544ResourceHelper(object):
self._rfc2544 = None
self._tolerance_low = None
self._tolerance_high = None
+ self._tolerance_precision = None
@property
def rfc2544(self):
@@ -519,6 +508,12 @@ class Rfc2544ResourceHelper(object):
return self._tolerance_high
@property
+ def tolerance_precision(self):
+ if self._tolerance_precision is None:
+ self.get_rfc_tolerance()
+ return self._tolerance_precision
+
+ @property
def correlated_traffic(self):
if self._correlated_traffic is None:
self._correlated_traffic = \
@@ -537,9 +532,13 @@ class Rfc2544ResourceHelper(object):
def get_rfc_tolerance(self):
tolerance_str = self.get_rfc2544('allowed_drop_rate', self.DEFAULT_TOLERANCE)
- tolerance_iter = iter(sorted(float(t.strip()) for t in tolerance_str.split('-')))
- self._tolerance_low = next(tolerance_iter)
- self._tolerance_high = next(tolerance_iter, self.tolerance_low)
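+        # Decimal keeps the number of decimal digits of the configured
+        # tolerance; e.g. (illustrative) allowed_drop_rate '0.0001 - 0.01'
+        # yields tolerance_low=0.0001, tolerance_high=0.01 and precision=2.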
+ tolerance_iter = iter(sorted(
+ decimal.Decimal(t.strip()) for t in tolerance_str.split('-')))
+ tolerance_low = next(tolerance_iter)
+ tolerance_high = next(tolerance_iter, tolerance_low)
+ self._tolerance_precision = abs(tolerance_high.as_tuple().exponent)
+ self._tolerance_high = float(tolerance_high)
+ self._tolerance_low = float(tolerance_low)
class SampleVNFDeployHelper(object):
@@ -899,6 +898,8 @@ class SampleVNFTrafficGen(GenericTrafficGen):
self.scenario_helper.nodes[self.name]
)
+ self.resource_helper.context_cfg = context_cfg
+
self.resource_helper.setup()
# must generate_cfg after DPDK bind because we need port number
self.resource_helper.generate_cfg()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 558a62935..1d37f8f6f 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import ipaddress
import logging
+import six
from yardstick.common import utils
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -25,6 +27,331 @@ LOG = logging.getLogger(__name__)
WAIT_AFTER_CFG_LOAD = 10
WAIT_FOR_TRAFFIC = 30
+WAIT_PROTOCOLS_STARTED = 360
+
+
+class IxiaBasicScenario(object):
+ def __init__(self, client, context_cfg, ixia_cfg):
+
+ self.client = client
+ self.context_cfg = context_cfg
+ self.ixia_cfg = ixia_cfg
+
+ self._uplink_vports = None
+ self._downlink_vports = None
+
+ def apply_config(self):
+ pass
+
+ def create_traffic_model(self, traffic_profile=None):
+ # pylint: disable=unused-argument
+ vports = self.client.get_vports()
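+        # vports are paired by position: even indices are used as uplink
+        # ports, odd indices as downlink ports.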
+ self._uplink_vports = vports[::2]
+ self._downlink_vports = vports[1::2]
+ self.client.create_traffic_model(self._uplink_vports,
+ self._downlink_vports)
+
+ def run_protocols(self):
+ pass
+
+ def stop_protocols(self):
+ pass
+
+
+class IxiaPppoeClientScenario(object):
+ def __init__(self, client, context_cfg, ixia_cfg):
+
+ self.client = client
+
+ self._uplink_vports = None
+ self._downlink_vports = None
+
+ self._access_topologies = []
+ self._core_topologies = []
+
+ self._context_cfg = context_cfg
+ self._ixia_cfg = ixia_cfg
+ self.protocols = []
+ self.device_groups = []
+
+ def apply_config(self):
+ vports = self.client.get_vports()
+ self._uplink_vports = vports[::2]
+ self._downlink_vports = vports[1::2]
+ self._fill_ixia_config()
+ self._apply_access_network_config()
+ self._apply_core_network_config()
+
+ def create_traffic_model(self, traffic_profile):
+ endpoints_id_pairs = self._get_endpoints_src_dst_id_pairs(
+ traffic_profile.full_profile)
+ endpoints_obj_pairs = \
+ self._get_endpoints_src_dst_obj_pairs(endpoints_id_pairs)
+ uplink_endpoints = endpoints_obj_pairs[::2]
+ downlink_endpoints = endpoints_obj_pairs[1::2]
+ self.client.create_ipv4_traffic_model(uplink_endpoints,
+ downlink_endpoints)
+
+ def run_protocols(self):
+ LOG.info('PPPoE Scenario - Start Protocols')
+ self.client.start_protocols()
+ utils.wait_until_true(
+ lambda: self.client.is_protocols_running(self.protocols),
+ timeout=WAIT_PROTOCOLS_STARTED, sleep=2)
+
+ def stop_protocols(self):
+ LOG.info('PPPoE Scenario - Stop Protocols')
+ self.client.stop_protocols()
+
+ def _get_intf_addr(self, intf):
+ """Retrieve interface IP address and mask
+
+        :param intf: either a string representing an IP address with a mask
+        (e.g. 192.168.10.2/24) or a dictionary with the node name and the
+        port (e.g. {'tg__0': 'xe1'})
+        :return: (tuple) pair of IP address and prefix length
+ """
+ if isinstance(intf, six.string_types):
+ ip, mask = tuple(intf.split('/'))
+ return ip, int(mask)
+
+ node_name, intf_name = next(iter(intf.items()))
+ node = self._context_cfg["nodes"].get(node_name, {})
+ interface = node.get("interfaces", {})[intf_name]
+ ip = interface["local_ip"]
+ mask = interface["netmask"]
+ ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)),
+ strict=False)
+ return ip, ipaddr.prefixlen
+
+ @staticmethod
+ def _get_endpoints_src_dst_id_pairs(flows_params):
+ """Get list of flows src/dst port pairs
+
+ Create list of flows src/dst port pairs based on traffic profile
+ flows data. Each uplink/downlink pair in traffic profile represents
+ specific flows between the pair of ports.
+
+ Example ('port' key represents port on which flow will be created):
+
+ Input flows data:
+ uplink_0:
+ ipv4:
+ id: 1
+ port: xe0
+ downlink_0:
+ ipv4:
+ id: 2
+ port: xe1
+ uplink_1:
+ ipv4:
+ id: 3
+ port: xe2
+ downlink_1:
+ ipv4:
+ id: 4
+ port: xe3
+
+ Result list: ['xe0', 'xe1', 'xe2', 'xe3']
+
+ Result list means that the following flows pairs will be created:
+ - uplink 0: port xe0 <-> port xe1
+ - downlink 0: port xe1 <-> port xe0
+ - uplink 1: port xe2 <-> port xe3
+ - downlink 1: port xe3 <-> port xe2
+
+ :param flows_params: ordered dict of traffic profile flows params
+ :return: (list) list of flows src/dst ports
+ """
+ if len(flows_params) % 2:
+            raise RuntimeError('Number of uplink and downlink flows'
+                               ' in the traffic profile is not equal')
+ endpoint_pairs = []
+ for flow in flows_params:
+ port = flows_params[flow]['ipv4'].get('port')
+ if port is None:
+ continue
+ endpoint_pairs.append(port)
+ return endpoint_pairs
+
+ def _get_endpoints_src_dst_obj_pairs(self, endpoints_id_pairs):
+ """Create list of uplink/downlink device groups pairs
+
+ Based on traffic profile options, create list of uplink/downlink
+ device groups pairs between which flow groups will be created:
+
+ 1. In case uplink/downlink flows in traffic profile doesn't have
+ specified 'port' key, flows will be created between each device
+ group on access port and device group on corresponding core port.
+ E.g.:
+ Device groups created on access port xe0: dg1, dg2, dg3
+ Device groups created on core port xe1: dg4
+ Flows will be created between:
+ dg1 -> dg4
+ dg4 -> dg1
+ dg2 -> dg4
+ dg4 -> dg2
+ dg3 -> dg4
+ dg4 -> dg3
+
+ 2. In case uplink/downlink flows in traffic profile have specified
+ 'port' key, flows will be created between device groups on this
+ port.
+ E.g., for the following traffic profile
+ uplink_0:
+ port: xe0
+ downlink_0:
+ port: xe1
+ uplink_1:
+ port: xe0
+          downlink_1:
+ port: xe3
+ Flows will be created between:
+ Port xe0 (dg1) -> Port xe1 (dg1)
+ Port xe1 (dg1) -> Port xe0 (dg1)
+ Port xe0 (dg2) -> Port xe3 (dg1)
+            Port xe3 (dg1) -> Port xe0 (dg2)
+
+ :param endpoints_id_pairs: (list) List of uplink/downlink flows ports
+ pairs
+ :return: (list) list of uplink/downlink device groups descriptors pairs
+ """
+ pppoe = self._ixia_cfg['pppoe_client']
+ sessions_per_port = pppoe['sessions_per_port']
+ sessions_per_svlan = pppoe['sessions_per_svlan']
+ svlan_count = int(sessions_per_port / sessions_per_svlan)
+
+ uplink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['src_ip']]
+ downlink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['dst_ip']]
+ uplink_port_topology_map = zip(uplink_ports, self._access_topologies)
+ downlink_port_topology_map = zip(downlink_ports, self._core_topologies)
+
+ port_to_dev_group_mapping = {}
+ for port, topology in uplink_port_topology_map:
+ topology_dgs = self.client.get_topology_device_groups(topology)
+ port_to_dev_group_mapping[port] = topology_dgs
+ for port, topology in downlink_port_topology_map:
+ topology_dgs = self.client.get_topology_device_groups(topology)
+ port_to_dev_group_mapping[port] = topology_dgs
+
+ uplink_endpoints = endpoints_id_pairs[::2]
+ downlink_endpoints = endpoints_id_pairs[1::2]
+
+ uplink_dev_groups = []
+ group_up = [uplink_endpoints[i:i + svlan_count]
+ for i in range(0, len(uplink_endpoints), svlan_count)]
+
+ for group in group_up:
+ for i, port in enumerate(group):
+ uplink_dev_groups.append(port_to_dev_group_mapping[port][i])
+
+ downlink_dev_groups = []
+ for port in downlink_endpoints:
+ downlink_dev_groups.append(port_to_dev_group_mapping[port][0])
+
+ endpoint_obj_pairs = []
+        for up, down in zip(uplink_dev_groups, downlink_dev_groups):
+            endpoint_obj_pairs.extend([up, down])
+
+ if not endpoint_obj_pairs:
+ for up, down in zip(uplink_ports, downlink_ports):
+ uplink_dev_groups = port_to_dev_group_mapping[up]
+ downlink_dev_groups = \
+ port_to_dev_group_mapping[down] * len(uplink_dev_groups)
+                for up_dg, down_dg in zip(uplink_dev_groups,
+                                          downlink_dev_groups):
+                    endpoint_obj_pairs.extend([up_dg, down_dg])
+ return endpoint_obj_pairs
+
+ def _fill_ixia_config(self):
+ pppoe = self._ixia_cfg["pppoe_client"]
+ ipv4 = self._ixia_cfg["ipv4_client"]
+
+ _ip = [self._get_intf_addr(intf)[0] for intf in pppoe["ip"]]
+ self._ixia_cfg["pppoe_client"]["ip"] = _ip
+
+ _ip = [self._get_intf_addr(intf)[0] for intf in ipv4["gateway_ip"]]
+ self._ixia_cfg["ipv4_client"]["gateway_ip"] = _ip
+
+ addrs = [self._get_intf_addr(intf) for intf in ipv4["ip"]]
+ _ip = [addr[0] for addr in addrs]
+ _prefix = [addr[1] for addr in addrs]
+
+ self._ixia_cfg["ipv4_client"]["ip"] = _ip
+ self._ixia_cfg["ipv4_client"]["prefix"] = _prefix
+
+ def _apply_access_network_config(self):
+ pppoe = self._ixia_cfg["pppoe_client"]
+ sessions_per_port = pppoe['sessions_per_port']
+ sessions_per_svlan = pppoe['sessions_per_svlan']
+ svlan_count = int(sessions_per_port / sessions_per_svlan)
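+        # e.g. (illustrative) 64 sessions_per_port with 32 sessions_per_svlan
+        # gives 2 SVLAN device groups per access port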
+
+ # add topology per uplink port (access network)
+ for access_tp_id, vport in enumerate(self._uplink_vports):
+ name = 'Topology access {}'.format(access_tp_id)
+ tp = self.client.add_topology(name, vport)
+ self._access_topologies.append(tp)
+ # add device group per svlan
+ for dg_id in range(svlan_count):
+ s_vlan_id = int(pppoe['s_vlan']) + dg_id + access_tp_id * svlan_count
+ s_vlan = ixnet_api.Vlan(vlan_id=s_vlan_id)
+ c_vlan = ixnet_api.Vlan(vlan_id=pppoe['c_vlan'], vlan_id_step=1)
+ name = 'SVLAN {}'.format(s_vlan_id)
+ dg = self.client.add_device_group(tp, name, sessions_per_svlan)
+ self.device_groups.append(dg)
+ # add ethernet layer to device group
+ ethernet = self.client.add_ethernet(dg, 'Ethernet')
+ self.protocols.append(ethernet)
+ self.client.add_vlans(ethernet, [s_vlan, c_vlan])
+ # add ppp over ethernet
+ if 'pap_user' in pppoe:
+ ppp = self.client.add_pppox_client(ethernet, 'pap',
+ pppoe['pap_user'],
+ pppoe['pap_password'])
+ else:
+ ppp = self.client.add_pppox_client(ethernet, 'chap',
+ pppoe['chap_user'],
+ pppoe['chap_password'])
+ self.protocols.append(ppp)
+
+ def _apply_core_network_config(self):
+ ipv4 = self._ixia_cfg["ipv4_client"]
+ sessions_per_port = ipv4['sessions_per_port']
+ sessions_per_vlan = ipv4['sessions_per_vlan']
+ vlan_count = int(sessions_per_port / sessions_per_vlan)
+
+ # add topology per downlink port (core network)
+ for core_tp_id, vport in enumerate(self._downlink_vports):
+ name = 'Topology core {}'.format(core_tp_id)
+ tp = self.client.add_topology(name, vport)
+ self._core_topologies.append(tp)
+ # add device group per vlan
+ for dg_id in range(vlan_count):
+ name = 'Core port {}'.format(core_tp_id)
+ dg = self.client.add_device_group(tp, name, sessions_per_vlan)
+ self.device_groups.append(dg)
+ # add ethernet layer to device group
+ ethernet = self.client.add_ethernet(dg, 'Ethernet')
+ self.protocols.append(ethernet)
+ if 'vlan' in ipv4:
+ vlan_id = int(ipv4['vlan']) + dg_id + core_tp_id * vlan_count
+ vlan = ixnet_api.Vlan(vlan_id=vlan_id)
+ self.client.add_vlans(ethernet, [vlan])
+ # add ipv4 layer
+ gw_ip = ipv4['gateway_ip'][core_tp_id]
+ # use gw addr to generate ip addr from the same network
+ ip_addr = ipaddress.IPv4Address(gw_ip) + 1
+ ipv4_obj = self.client.add_ipv4(ethernet, name='ipv4',
+ addr=ip_addr,
+ addr_step='0.0.0.1',
+ prefix=ipv4['prefix'][core_tp_id],
+ gateway=gw_ip)
+ self.protocols.append(ipv4_obj)
+ if ipv4.get("bgp"):
+ bgp_peer_obj = self.client.add_bgp(ipv4_obj,
+ dut_ip=ipv4["bgp"]["dut_ip"],
+ local_as=ipv4["bgp"]["as_number"],
+ bgp_type=ipv4["bgp"].get("bgp_type"))
+ self.protocols.append(bgp_peer_obj)
class IxiaRfc2544Helper(Rfc2544ResourceHelper):
@@ -41,6 +368,11 @@ class IxiaResourceHelper(ClientResourceHelper):
super(IxiaResourceHelper, self).__init__(setup_helper)
self.scenario_helper = setup_helper.scenario_helper
+ self._ixia_scenarios = {
+ "IxiaBasic": IxiaBasicScenario,
+ "IxiaPppoeClient": IxiaPppoeClientScenario,
+ }
+
self.client = ixnet_api.IxNextgen()
if rfc_helper_type is None:
@@ -49,6 +381,8 @@ class IxiaResourceHelper(ClientResourceHelper):
self.rfc_helper = rfc_helper_type(self.scenario_helper)
self.uplink_ports = None
self.downlink_ports = None
+ self.context_cfg = None
+ self._ix_scenario = None
self._connect()
def _connect(self, client=None):
@@ -57,7 +391,12 @@ class IxiaResourceHelper(ClientResourceHelper):
def get_stats(self, *args, **kwargs):
return self.client.get_statistics()
+ def setup(self):
+ super(IxiaResourceHelper, self).setup()
+ self._init_ix_scenario()
+
def stop_collect(self):
+ self._ix_scenario.stop_protocols()
self._terminated.value = 1
def generate_samples(self, ports, duration):
@@ -92,14 +431,24 @@ class IxiaResourceHelper(ClientResourceHelper):
return samples
- def _initialize_client(self):
+ def _init_ix_scenario(self):
+ ixia_config = self.scenario_helper.scenario_cfg.get('ixia_config', 'IxiaBasic')
+
+ if ixia_config in self._ixia_scenarios:
+ scenario_type = self._ixia_scenarios[ixia_config]
+
+ self._ix_scenario = scenario_type(self.client, self.context_cfg,
+ self.scenario_helper.scenario_cfg['options'])
+ else:
+ raise RuntimeError(
+ "IXIA config type '{}' not supported".format(ixia_config))
+
+ def _initialize_client(self, traffic_profile):
"""Initialize the IXIA IxNetwork client and configure the server"""
self.client.clear_config()
self.client.assign_ports()
- vports = self.client.get_vports()
- uplink_vports = vports[::2]
- downlink_vports = vports[1::2]
- self.client.create_traffic_model(uplink_vports, downlink_vports)
+ self._ix_scenario.apply_config()
+ self._ix_scenario.create_traffic_model(traffic_profile)
def run_traffic(self, traffic_profile, *args):
if self._terminated.value:
@@ -107,10 +456,12 @@ class IxiaResourceHelper(ClientResourceHelper):
min_tol = self.rfc_helper.tolerance_low
max_tol = self.rfc_helper.tolerance_high
+ precision = self.rfc_helper.tolerance_precision
default = "00:00:00:00:00:00"
self._build_ports()
- self._initialize_client()
+ traffic_profile.update_traffic_profile(self)
+ self._initialize_client(traffic_profile)
mac = {}
for port_name in self.vnfd_helper.port_pairs.all_ports:
@@ -122,6 +473,8 @@ class IxiaResourceHelper(ClientResourceHelper):
mac["src_mac_{}".format(port_num)] = virt_intf.get("local_mac", default)
mac["dst_mac_{}".format(port_num)] = virt_intf.get("dst_mac", default)
+ self._ix_scenario.run_protocols()
+
try:
while not self._terminated.value:
first_run = traffic_profile.execute_traffic(
@@ -134,7 +487,7 @@ class IxiaResourceHelper(ClientResourceHelper):
traffic_profile.config.duration)
completed, samples = traffic_profile.get_drop_percentage(
- samples, min_tol, max_tol, first_run=first_run)
+ samples, min_tol, max_tol, precision, first_run=first_run)
self._queue.put(samples)
if completed:
@@ -143,6 +496,7 @@ class IxiaResourceHelper(ClientResourceHelper):
except Exception: # pylint: disable=broad-except
LOG.exception('Run Traffic terminated')
+ self._ix_scenario.stop_protocols()
self._terminated.value = 1
def collect_kpi(self):
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index b7cf8b35e..dd3221386 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -17,13 +17,11 @@ from __future__ import absolute_import
from __future__ import print_function
-import os
import logging
import re
import posixpath
-from six.moves import configparser, zip
-
+from yardstick.common import utils
from yardstick.common.process import check_if_process_failed
from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.pipeline import PipelineRules
@@ -43,15 +41,6 @@ Pkts in:\\s(\\d+)\r\n\
class ConfigCreate(object):
- @staticmethod
- def vpe_tmq(config, index):
- tm_q = 'TM{0}'.format(index)
- config.add_section(tm_q)
- config.set(tm_q, 'burst_read', '24')
- config.set(tm_q, 'burst_write', '32')
- config.set(tm_q, 'cfg', '/tmp/full_tm_profile_10G.cfg')
- return config
-
def __init__(self, vnfd_helper, socket):
super(ConfigCreate, self).__init__()
self.sw_q = -1
@@ -64,141 +53,6 @@ class ConfigCreate(object):
self.socket = socket
self._dpdk_port_to_link_id_map = None
- @property
- def dpdk_port_to_link_id_map(self):
- # we need interface name -> DPDK port num (PMD ID) -> LINK ID
- # LINK ID -> PMD ID is governed by the port mask
- # LINK instances are created implicitly based on the PORT_MASK application startup
- # argument. LINK0 is the first port enabled in the PORT_MASK, port 1 is the next one,
- # etc. The LINK ID is different than the DPDK PMD-level NIC port ID, which is the actual
- # position in the bitmask mentioned above. For example, if bit 5 is the first bit set
- # in the bitmask, then LINK0 is having the PMD ID of 5. This mechanism creates a
- # contiguous LINK ID space and isolates the configuration file against changes in the
- # board PCIe slots where NICs are plugged in.
- if self._dpdk_port_to_link_id_map is None:
- self._dpdk_port_to_link_id_map = {}
- for link_id, port_name in enumerate(sorted(self.vnfd_helper.port_pairs.all_ports,
- key=self.vnfd_helper.port_num)):
- self._dpdk_port_to_link_id_map[port_name] = link_id
- return self._dpdk_port_to_link_id_map
-
- def vpe_initialize(self, config):
- config.add_section('EAL')
- config.set('EAL', 'log_level', '0')
-
- config.add_section('PIPELINE0')
- config.set('PIPELINE0', 'type', 'MASTER')
- config.set('PIPELINE0', 'core', 's%sC0' % self.socket)
-
- config.add_section('MEMPOOL0')
- config.set('MEMPOOL0', 'pool_size', '256K')
-
- config.add_section('MEMPOOL1')
- config.set('MEMPOOL1', 'pool_size', '2M')
- return config
-
- def vpe_rxq(self, config):
- for port in self.downlink_ports:
- new_section = 'RXQ{0}.0'.format(self.dpdk_port_to_link_id_map[port])
- config.add_section(new_section)
- config.set(new_section, 'mempool', 'MEMPOOL1')
-
- return config
-
- def get_sink_swq(self, parser, pipeline, k, index):
- sink = ""
- pktq = parser.get(pipeline, k)
- if "SINK" in pktq:
- self.sink_q += 1
- sink = " SINK{0}".format(self.sink_q)
- if "TM" in pktq:
- sink = " TM{0}".format(index)
- pktq = "SWQ{0}{1}".format(self.sw_q, sink)
- return pktq
-
- def vpe_upstream(self, vnf_cfg, index=0): # pragma: no cover
- # NOTE(ralonsoh): this function must be covered in UTs.
- parser = configparser.ConfigParser()
- parser.read(os.path.join(vnf_cfg, 'vpe_upstream'))
-
- for pipeline in parser.sections():
- for k, v in parser.items(pipeline):
- if k == "pktq_in":
- if "RXQ" in v:
- port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
- value = "RXQ{0}.0".format(port)
- else:
- value = self.get_sink_swq(parser, pipeline, k, index)
-
- parser.set(pipeline, k, value)
-
- elif k == "pktq_out":
- if "TXQ" in v:
- port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
- value = "TXQ{0}.0".format(port)
- else:
- self.sw_q += 1
- value = self.get_sink_swq(parser, pipeline, k, index)
-
- parser.set(pipeline, k, value)
-
- new_pipeline = 'PIPELINE{0}'.format(self.n_pipeline)
- if new_pipeline != pipeline:
- parser._sections[new_pipeline] = parser._sections[pipeline]
- parser._sections.pop(pipeline)
- self.n_pipeline += 1
- return parser
-
- def vpe_downstream(self, vnf_cfg, index): # pragma: no cover
- # NOTE(ralonsoh): this function must be covered in UTs.
- parser = configparser.ConfigParser()
- parser.read(os.path.join(vnf_cfg, 'vpe_downstream'))
- for pipeline in parser.sections():
- for k, v in parser.items(pipeline):
-
- if k == "pktq_in":
- port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
- if "RXQ" not in v:
- value = self.get_sink_swq(parser, pipeline, k, index)
- elif "TM" in v:
- value = "RXQ{0}.0 TM{1}".format(port, index)
- else:
- value = "RXQ{0}.0".format(port)
-
- parser.set(pipeline, k, value)
-
- if k == "pktq_out":
- port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
- if "TXQ" not in v:
- self.sw_q += 1
- value = self.get_sink_swq(parser, pipeline, k, index)
- elif "TM" in v:
- value = "TXQ{0}.0 TM{1}".format(port, index)
- else:
- value = "TXQ{0}.0".format(port)
-
- parser.set(pipeline, k, value)
-
- new_pipeline = 'PIPELINE{0}'.format(self.n_pipeline)
- if new_pipeline != pipeline:
- parser._sections[new_pipeline] = parser._sections[pipeline]
- parser._sections.pop(pipeline)
- self.n_pipeline += 1
- return parser
-
- def create_vpe_config(self, vnf_cfg):
- config = configparser.ConfigParser()
- vpe_cfg = os.path.join("/tmp/vpe_config")
- with open(vpe_cfg, 'w') as cfg_file:
- config = self.vpe_initialize(config)
- config = self.vpe_rxq(config)
- config.write(cfg_file)
- for index, _ in enumerate(self.uplink_ports):
- config = self.vpe_upstream(vnf_cfg, index)
- config.write(cfg_file)
- config = self.vpe_downstream(vnf_cfg, index)
- config = self.vpe_tmq(config, index)
- config.write(cfg_file)
def generate_vpe_script(self, interfaces):
rules = PipelineRules(pipeline_id=1)
@@ -231,16 +85,10 @@ class ConfigCreate(object):
return rules.get_string()
- def generate_tm_cfg(self, vnf_cfg):
- vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg")
- if os.path.exists(vnf_cfg):
- return open(vnf_cfg).read()
-
class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
APP_NAME = 'vPE_vnf'
- CFG_CONFIG = "/tmp/vpe_config"
CFG_SCRIPT = "/tmp/vpe_script"
TM_CONFIG = "/tmp/full_tm_profile_10G.cfg"
CORES = ['0', '1', '2', '3', '4', '5']
@@ -253,33 +101,52 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
self.all_ports = self._port_pairs.all_ports
def build_config(self):
+ vnf_cfg = self.scenario_helper.vnf_cfg
+ task_path = self.scenario_helper.task_path
+ action_bulk_file = vnf_cfg.get('action_bulk_file', '/tmp/action_bulk_512.txt')
+ full_tm_profile_file = vnf_cfg.get('full_tm_profile_file', '/tmp/full_tm_profile_10G.cfg')
+ config_file = vnf_cfg.get('file', '/tmp/vpe_config')
+ script_file = vnf_cfg.get('script_file', None)
vpe_vars = {
"bin_path": self.ssh_helper.bin_path,
"socket": self.socket,
}
-
self._build_vnf_ports()
vpe_conf = ConfigCreate(self.vnfd_helper, self.socket)
- vpe_conf.create_vpe_config(self.scenario_helper.vnf_cfg)
- config_basename = posixpath.basename(self.CFG_CONFIG)
- script_basename = posixpath.basename(self.CFG_SCRIPT)
- tm_basename = posixpath.basename(self.TM_CONFIG)
- with open(self.CFG_CONFIG) as handle:
+ if script_file is None:
+ # autogenerate vpe_script if not given
+ vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
+ script_file = self.CFG_SCRIPT
+ else:
+ with utils.open_relative_file(script_file, task_path) as handle:
+ vpe_script = handle.read()
+
+ config_basename = posixpath.basename(config_file)
+ script_basename = posixpath.basename(script_file)
+
+ with utils.open_relative_file(action_bulk_file, task_path) as handle:
+ action_bulk = handle.read()
+
+ with utils.open_relative_file(full_tm_profile_file, task_path) as handle:
+ full_tm_profile = handle.read()
+
+ with utils.open_relative_file(config_file, task_path) as handle:
vpe_config = handle.read()
+ # upload the 4 config files to the target server
self.ssh_helper.upload_config_file(config_basename, vpe_config.format(**vpe_vars))
-
- vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
self.ssh_helper.upload_config_file(script_basename, vpe_script.format(**vpe_vars))
-
- tm_config = vpe_conf.generate_tm_cfg(self.scenario_helper.vnf_cfg)
- self.ssh_helper.upload_config_file(tm_basename, tm_config)
+ self.ssh_helper.upload_config_file(posixpath.basename(action_bulk_file),
+ action_bulk.format(**vpe_vars))
+ self.ssh_helper.upload_config_file(posixpath.basename(full_tm_profile_file),
+ full_tm_profile.format(**vpe_vars))
LOG.info("Provision and start the %s", self.APP_NAME)
- LOG.info(self.CFG_CONFIG)
+ LOG.info(config_file)
LOG.info(self.CFG_SCRIPT)
- self._build_pipeline_kwargs()
+ self._build_pipeline_kwargs(cfg_file='/tmp/' + config_basename,
+ script='/tmp/' + script_basename)
return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
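
Note: the reworked build_config above stops generating /tmp/vpe_config on the fly and instead reads the action bulk file, the TM profile, the vPE config and (optionally) the vPE script relative to the task path, then uploads each of them. A minimal sketch of that upload pattern, assuming helper objects shaped like the ones in the diff; the wrapper name upload_vpe_artefacts is illustrative only and not part of the change:

    import posixpath

    from yardstick.common import utils

    def upload_vpe_artefacts(scenario_helper, ssh_helper, vpe_vars):
        vnf_cfg = scenario_helper.vnf_cfg
        task_path = scenario_helper.task_path
        files = [
            vnf_cfg.get('action_bulk_file', '/tmp/action_bulk_512.txt'),
            vnf_cfg.get('full_tm_profile_file', '/tmp/full_tm_profile_10G.cfg'),
            vnf_cfg.get('file', '/tmp/vpe_config'),
        ]
        for path in files:
            # resolve the file against the task directory, then read it
            with utils.open_relative_file(path, task_path) as handle:
                content = handle.read()
            # upload under the basename, substituting bin_path/socket placeholders
            ssh_helper.upload_config_file(posixpath.basename(path),
                                          content.format(**vpe_vars))
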
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
index 98d2b1836..e76a3ca27 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
@@ -17,6 +17,7 @@ import os
import uuid
import mock
+import netaddr
import unittest
from xml.etree import ElementTree
@@ -54,7 +55,7 @@ class ModelLibvirtTestCase(unittest.TestCase):
numa_cpus=0 - 10,
socket=1, threads=1,
vm_image="/var/lib/libvirt/images/yardstick-nsb-image.img",
- cpuset=2 - 10, cputune='')
+ cpuset=2 - 10, cputune='', machine='pc')
def setUp(self):
self.pci_address_str = '0001:04:03.2'
@@ -123,7 +124,7 @@ class ModelLibvirtTestCase(unittest.TestCase):
def test_add_ovs_interfaces(self):
xml_input = copy.deepcopy(XML_SAMPLE)
xml_output = model.Libvirt.add_ovs_interface(
- '/usr/local', 0, self.pci_address_str, self.mac, xml_input)
+ '/usr/local', 0, self.pci_address_str, self.mac, xml_input, 4)
root = ElementTree.fromstring(xml_output)
et_out = ElementTree.ElementTree(element=root)
@@ -292,6 +293,7 @@ class ModelLibvirtTestCase(unittest.TestCase):
hostname = root.find('name').text
mac = "00:11:22:33:44:55"
ip = "{0}/{1}".format(node.get('ip'), node.get('netmask'))
+ ip = "{0}/{1}".format(node.get('ip'), netaddr.IPNetwork(ip).prefixlen)
model.StandaloneContextHelper.check_update_key(self.mock_ssh, node, hostname, id_name,
cdrom_img, mac)
mock_gen_cdrom_image.assert_called_once_with(self.mock_ssh, cdrom_img, hostname,
@@ -350,7 +352,8 @@ class ModelLibvirtTestCase(unittest.TestCase):
xml_ref = model.VM_TEMPLATE.format(vm_name='vm_name',
random_uuid=_uuid, mac_addr=mac, memory='1024', vcpu='8', cpu='4',
numa_cpus='0-7', socket='3', threads='2',
- vm_image='qemu_image', cpuset='4,5', cputune='cool')
+ vm_image='qemu_image', cpuset='4,5', cputune='cool',
+ machine='pc-i440fx-xenial')
xml_ref = model.Libvirt.add_cdrom(cdrom_img, xml_ref)
self.assertEqual(xml_out, xml_ref)
mock_get_mac_address.assert_called_once_with(0x00)
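
Note: check_update_key is now fed an ip/prefix string instead of ip/netmask, with netaddr doing the conversion. A quick illustration of that conversion (addresses are made up):

    import netaddr

    ip = '10.100.20.4'
    netmask = '255.255.255.0'
    cidr = '{}/{}'.format(ip, netmask)              # '10.100.20.4/255.255.255.0'
    prefixlen = netaddr.IPNetwork(cidr).prefixlen   # 24
    print('{}/{}'.format(ip, prefixlen))            # -> 10.100.20.4/24
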
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index 6cc8b11f3..413bb68b7 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -24,6 +24,7 @@ from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.benchmark.contexts.standalone import ovs_dpdk
from yardstick.common import exceptions
+from yardstick.common import utils as common_utils
from yardstick.network_services import utils
@@ -159,6 +160,13 @@ class OvsDpdkContextTestCase(unittest.TestCase):
}
self.ovs_dpdk.wait_for_vswitchd = 0
self.assertIsNone(self.ovs_dpdk.setup_ovs_bridge_add_flows())
+ self.ovs_dpdk.ovs_properties.update(
+ {'dpdk_pmd-rxq-affinity': {'0': "0:1"}})
+ self.ovs_dpdk.ovs_properties.update(
+ {'vhost_pmd-rxq-affinity': {'0': "0:1"}})
+ self.NETWORKS['private_0'].update({'port_num': '0'})
+ self.NETWORKS['public_0'].update({'port_num': '1'})
+ self.ovs_dpdk.setup_ovs_bridge_add_flows()
@mock.patch("yardstick.ssh.SSH")
def test_cleanup_ovs_dpdk_env(self, mock_ssh):
@@ -171,11 +179,9 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.wait_for_vswitchd = 0
self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env())
- @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages')
@mock.patch.object(utils, 'get_nsb_option')
@mock.patch.object(model.OvsDeploy, 'ovs_deploy')
- def test_check_ovs_dpdk_env(self, mock_ovs_deploy, mock_get_nsb_option,
- mock_check_hugepages):
+ def test_check_ovs_dpdk_env(self, mock_ovs_deploy, mock_get_nsb_option):
self.ovs_dpdk.connection = mock.Mock()
self.ovs_dpdk.connection.execute = mock.Mock(
return_value=(1, 0, 0))
@@ -189,11 +195,9 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.check_ovs_dpdk_env()
mock_ovs_deploy.assert_called_once()
- mock_check_hugepages.assert_called_once()
mock_get_nsb_option.assert_called_once_with('bin_path')
- @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages')
- def test_check_ovs_dpdk_env_wrong_version(self, mock_check_hugepages):
+ def test_check_ovs_dpdk_env_wrong_version(self):
self.ovs_dpdk.connection = mock.Mock()
self.ovs_dpdk.connection.execute = mock.Mock(
return_value=(1, 0, 0))
@@ -206,7 +210,6 @@ class OvsDpdkContextTestCase(unittest.TestCase):
with self.assertRaises(exceptions.OVSUnsupportedVersion):
self.ovs_dpdk.check_ovs_dpdk_env()
- mock_check_hugepages.assert_called_once()
@mock.patch('yardstick.ssh.SSH')
def test_deploy(self, *args):
@@ -389,15 +392,20 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk._enable_interfaces(0, ["private_0"], 'test')
mock_add_ovs_interface.assert_called_once_with(
'fake_path', 0, self.NETWORKS['private_0']['vpci'],
- self.NETWORKS['private_0']['mac'], 'test')
+ self.NETWORKS['private_0']['mac'], 'test', 1)
+ @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages')
+ @mock.patch.object(common_utils, 'setup_hugepages')
@mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
@mock.patch.object(model.Libvirt, 'write_file')
@mock.patch.object(model.Libvirt, 'build_vm_xml')
@mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
@mock.patch.object(model.Libvirt, 'virsh_create_vm')
- def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists, mock_build_xml,
- mock_write_file, mock_check_update_key):
+ def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists,
+ mock_build_xml, mock_write_file,
+ mock_check_update_key,
+ mock_setup_hugepages,
+ mock__check_hugepages):
self.ovs_dpdk.vm_deploy = True
self.ovs_dpdk.connection = mock.Mock()
self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
@@ -413,7 +421,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
}
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.host_mgmt = {}
- self.ovs_dpdk.flavor = {}
+ self.ovs_dpdk.vm_flavor = {'ram': '1024'}
self.ovs_dpdk.file_path = '/var/lib/libvirt/images/cdrom-0.img'
self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
self.ovs_dpdk._name_task_id = 'fake_name'
@@ -429,6 +437,9 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.assertEqual([vnf_instance_2],
self.ovs_dpdk.setup_ovs_dpdk_context())
+ mock_setup_hugepages.assert_called_once_with(self.ovs_dpdk.connection,
+ (1024 + 4096) * 1024) # ram + dpdk_socket0_mem + dpdk_socket1_mem
+ mock__check_hugepages.assert_called_once()
mock_create_vm.assert_called_once_with(
self.ovs_dpdk.connection, '/tmp/vm_ovs_0.xml')
mock_check_if_exists.assert_called_once_with(
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index 316aca72a..0809a983a 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -22,6 +22,7 @@ from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.benchmark.contexts.standalone import sriov
+from yardstick.common import utils
class SriovContextTestCase(unittest.TestCase):
@@ -276,13 +277,15 @@ class SriovContextTestCase(unittest.TestCase):
mock_add_sriov.assert_called_once_with(
'0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test')
+ @mock.patch.object(utils, 'setup_hugepages')
@mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
@mock.patch.object(model.Libvirt, 'build_vm_xml')
@mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
@mock.patch.object(model.Libvirt, 'write_file')
@mock.patch.object(model.Libvirt, 'virsh_create_vm')
- def test_setup_sriov_context(self, mock_create_vm, mock_write_file, mock_check,
- mock_build_vm_xml, mock_check_update_key):
+ def test_setup_sriov_context(self, mock_create_vm, mock_write_file,
+ mock_check, mock_build_vm_xml,
+ mock_check_update_key, mock_setup_hugepages):
self.sriov.servers = {
'vnf_0': {
'network_ports': {
@@ -295,7 +298,7 @@ class SriovContextTestCase(unittest.TestCase):
connection = mock.Mock()
self.sriov.connection = connection
self.sriov.host_mgmt = {'ip': '1.2.3.4'}
- self.sriov.vm_flavor = 'flavor'
+ self.sriov.vm_flavor = {'ram': '1024'}
self.sriov.networks = 'networks'
self.sriov.configure_nics_for_sriov = mock.Mock()
self.sriov._name_task_id = 'fake_name'
@@ -314,15 +317,16 @@ class SriovContextTestCase(unittest.TestCase):
mock_vnf_node.generate_vnf_instance = mock.Mock(
return_value='node_1')
nodes_out = self.sriov.setup_sriov_context()
+ mock_setup_hugepages.assert_called_once_with(connection, 1024*1024)
mock_check_update_key.assert_called_once_with(connection, 'node_1', vm_name,
self.sriov._name_task_id, cdrom_img,
mac)
self.assertEqual(['node_2'], nodes_out)
mock_vnf_node.generate_vnf_instance.assert_called_once_with(
- 'flavor', 'networks', '1.2.3.4', 'vnf_0',
+ self.sriov.vm_flavor, 'networks', '1.2.3.4', 'vnf_0',
self.sriov.servers['vnf_0'], '00:00:00:00:00:01')
mock_build_vm_xml.assert_called_once_with(
- connection, 'flavor', vm_name, 0, cdrom_img)
+ connection, self.sriov.vm_flavor, vm_name, 0, cdrom_img)
mock_create_vm.assert_called_once_with(connection, cfg)
mock_check.assert_called_once_with(vm_name, connection)
mock_write_file.assert_called_once_with(cfg, 'out_xml')
diff --git a/yardstick/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py
index 524302f92..11d017ff0 100644
--- a/yardstick/tests/unit/benchmark/core/test_report.py
+++ b/yardstick/tests/unit/benchmark/core/test_report.py
@@ -1,5 +1,6 @@
##############################################################################
# Copyright (c) 2017 Rajesh Kudaka.
+# Copyright (c) 2018 Intel Corporation.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -7,30 +8,93 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.core.report
-
-from __future__ import print_function
-
-from __future__ import absolute_import
-
+import mock
import unittest
import uuid
-try:
- from unittest import mock
-except ImportError:
- import mock
-
+from api.utils import influx
from yardstick.benchmark.core import report
from yardstick.cmd.commands import change_osloobj_to_paras
-FAKE_YAML_NAME = 'fake_name'
-FAKE_TASK_ID = str(uuid.uuid4())
-FAKE_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}]
-FAKE_TIME = '0000-00-00T00:00:00.000000Z'
-FAKE_DB_TASK = [{'fake_key': 0.000, 'time': FAKE_TIME}]
-FAKE_TIMESTAMP = ['fake_time']
-DUMMY_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa'
+GOOD_YAML_NAME = 'fake_name'
+GOOD_TASK_ID = str(uuid.uuid4())
+GOOD_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}]
+GOOD_DB_TASK = [{
+ 'fake_key': 1.234,
+ 'time': '0000-00-00T12:34:56.789012Z',
+ }]
+GOOD_TIMESTAMP = ['12:34:56.789012']
+BAD_YAML_NAME = 'F@KE_NAME'
+BAD_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa'
+
+
+class JSTreeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.jstree = report.JSTree()
+
+ def test__create_node(self):
+ _id = "tg__0.DropPackets"
+
+ expected_data = [
+ {"id": "tg__0", "text": "tg__0", "parent": "#"},
+ {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"}
+ ]
+ self.jstree._create_node(_id)
+
+ self.assertEqual(self.jstree._created_nodes, ['#', 'tg__0', 'tg__0.DropPackets'])
+ self.assertEqual(self.jstree.jstree_data, expected_data)
+
+ def test_format_for_jstree(self):
+ data = [
+ {'data': [0, ], 'label': 'tg__0.DropPackets'},
+ {'data': [548, ], 'label': 'tg__0.LatencyAvg.5'},
+ {'data': [1172, ], 'label': 'tg__0.LatencyAvg.6'},
+ {'data': [1001, ], 'label': 'tg__0.LatencyMax.5'},
+ {'data': [1468, ], 'label': 'tg__0.LatencyMax.6'},
+ {'data': [18.11, ], 'label': 'tg__0.RxThroughput'},
+ {'data': [18.11, ], 'label': 'tg__0.TxThroughput'},
+ {'data': [0, ], 'label': 'tg__1.DropPackets'},
+ {'data': [548, ], 'label': 'tg__1.LatencyAvg.5'},
+ {'data': [1172, ], 'label': 'tg__1.LatencyAvg.6'},
+ {'data': [1001, ], 'label': 'tg__1.LatencyMax.5'},
+ {'data': [1468, ], 'label': 'tg__1.LatencyMax.6'},
+ {'data': [18.1132084505, ], 'label': 'tg__1.RxThroughput'},
+ {'data': [18.1157260383, ], 'label': 'tg__1.TxThroughput'},
+ {'data': [9057888, ], 'label': 'vnf__0.curr_packets_in'},
+ {'data': [0, ], 'label': 'vnf__0.packets_dropped'},
+ {'data': [617825443, ], 'label': 'vnf__0.packets_fwd'},
+ ]
+
+ expected_output = [
+ {"id": "tg__0", "text": "tg__0", "parent": "#"},
+ {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"},
+ {"id": "tg__0.LatencyAvg", "text": "LatencyAvg", "parent": "tg__0"},
+ {"id": "tg__0.LatencyAvg.5", "text": "5", "parent": "tg__0.LatencyAvg"},
+ {"id": "tg__0.LatencyAvg.6", "text": "6", "parent": "tg__0.LatencyAvg"},
+ {"id": "tg__0.LatencyMax", "text": "LatencyMax", "parent": "tg__0"},
+ {"id": "tg__0.LatencyMax.5", "text": "5", "parent": "tg__0.LatencyMax"},
+ {"id": "tg__0.LatencyMax.6", "text": "6", "parent": "tg__0.LatencyMax"},
+ {"id": "tg__0.RxThroughput", "text": "RxThroughput", "parent": "tg__0"},
+ {"id": "tg__0.TxThroughput", "text": "TxThroughput", "parent": "tg__0"},
+ {"id": "tg__1", "text": "tg__1", "parent": "#"},
+ {"id": "tg__1.DropPackets", "text": "DropPackets", "parent": "tg__1"},
+ {"id": "tg__1.LatencyAvg", "text": "LatencyAvg", "parent": "tg__1"},
+ {"id": "tg__1.LatencyAvg.5", "text": "5", "parent": "tg__1.LatencyAvg"},
+ {"id": "tg__1.LatencyAvg.6", "text": "6", "parent": "tg__1.LatencyAvg"},
+ {"id": "tg__1.LatencyMax", "text": "LatencyMax", "parent": "tg__1"},
+ {"id": "tg__1.LatencyMax.5", "text": "5", "parent": "tg__1.LatencyMax"},
+ {"id": "tg__1.LatencyMax.6", "text": "6", "parent": "tg__1.LatencyMax"},
+ {"id": "tg__1.RxThroughput", "text": "RxThroughput", "parent": "tg__1"},
+ {"id": "tg__1.TxThroughput", "text": "TxThroughput", "parent": "tg__1"},
+ {"id": "vnf__0", "text": "vnf__0", "parent": "#"},
+ {"id": "vnf__0.curr_packets_in", "text": "curr_packets_in", "parent": "vnf__0"},
+ {"id": "vnf__0.packets_dropped", "text": "packets_dropped", "parent": "vnf__0"},
+ {"id": "vnf__0.packets_fwd", "text": "packets_fwd", "parent": "vnf__0"},
+ ]
+
+ result = self.jstree.format_for_jstree(data)
+ self.assertEqual(expected_output, result)
class ReportTestCase(unittest.TestCase):
@@ -38,37 +102,81 @@ class ReportTestCase(unittest.TestCase):
def setUp(self):
super(ReportTestCase, self).setUp()
self.param = change_osloobj_to_paras({})
- self.param.yaml_name = [FAKE_YAML_NAME]
- self.param.task_id = [FAKE_TASK_ID]
+ self.param.yaml_name = [GOOD_YAML_NAME]
+ self.param.task_id = [GOOD_TASK_ID]
self.rep = report.Report()
+ def test___init__(self):
+ self.assertEqual([], self.rep.Timestamp)
+ self.assertEqual("", self.rep.yaml_name)
+ self.assertEqual("", self.rep.task_id)
+
+ def test__validate(self):
+ self.rep._validate(GOOD_YAML_NAME, GOOD_TASK_ID)
+ self.assertEqual(GOOD_YAML_NAME, self.rep.yaml_name)
+ self.assertEqual(GOOD_TASK_ID, str(self.rep.task_id))
+
+ def test__validate_invalid_yaml_name(self):
+ with self.assertRaisesRegexp(ValueError, "yaml*"):
+ self.rep._validate(BAD_YAML_NAME, GOOD_TASK_ID)
+
+ def test__validate_invalid_task_id(self):
+ with self.assertRaisesRegexp(ValueError, "task*"):
+ self.rep._validate(GOOD_YAML_NAME, BAD_TASK_ID)
+
+ @mock.patch.object(influx, 'query')
+ def test__get_fieldkeys(self, mock_query):
+ mock_query.return_value = GOOD_DB_FIELDKEYS
+ self.rep.yaml_name = GOOD_YAML_NAME
+ self.rep.task_id = GOOD_TASK_ID
+ self.assertEqual(GOOD_DB_FIELDKEYS, self.rep._get_fieldkeys())
+
+ @mock.patch.object(influx, 'query')
+ def test__get_fieldkeys_nodbclient(self, mock_query):
+ mock_query.side_effect = RuntimeError
+ self.assertRaises(RuntimeError, self.rep._get_fieldkeys)
+
+ @mock.patch.object(influx, 'query')
+ def test__get_fieldkeys_testcase_not_found(self, mock_query):
+ mock_query.return_value = []
+ self.rep.yaml_name = GOOD_YAML_NAME
+ self.rep.task_id = GOOD_TASK_ID
+ self.assertRaisesRegexp(KeyError, "Test case", self.rep._get_fieldkeys)
+
+ @mock.patch.object(influx, 'query')
+ def test__get_tasks(self, mock_query):
+ mock_query.return_value = GOOD_DB_TASK
+ self.rep.yaml_name = GOOD_YAML_NAME
+ self.rep.task_id = GOOD_TASK_ID
+ self.assertEqual(GOOD_DB_TASK, self.rep._get_tasks())
+
+ @mock.patch.object(influx, 'query')
+ def test__get_tasks_task_not_found(self, mock_query):
+ mock_query.return_value = []
+ self.rep.yaml_name = GOOD_YAML_NAME
+ self.rep.task_id = GOOD_TASK_ID
+ self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks)
+
@mock.patch.object(report.Report, '_get_tasks')
@mock.patch.object(report.Report, '_get_fieldkeys')
@mock.patch.object(report.Report, '_validate')
- def test_generate_success(self, mock_valid, mock_keys, mock_tasks):
- mock_tasks.return_value = FAKE_DB_TASK
- mock_keys.return_value = FAKE_DB_FIELDKEYS
+ def test_generate(self, mock_valid, mock_keys, mock_tasks):
+ mock_tasks.return_value = GOOD_DB_TASK
+ mock_keys.return_value = GOOD_DB_FIELDKEYS
self.rep.generate(self.param)
- mock_valid.assert_called_once_with(FAKE_YAML_NAME, FAKE_TASK_ID)
+ mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
mock_tasks.assert_called_once_with()
mock_keys.assert_called_once_with()
+ self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)
- # pylint: disable=deprecated-method
- def test_invalid_yaml_name(self):
- self.assertRaisesRegexp(ValueError, "yaml*", self.rep._validate,
- 'F@KE_NAME', FAKE_TASK_ID)
-
- # pylint: disable=deprecated-method
- def test_invalid_task_id(self):
- self.assertRaisesRegexp(ValueError, "task*", self.rep._validate,
- FAKE_YAML_NAME, DUMMY_TASK_ID)
-
- @mock.patch('api.utils.influx.query')
- def test_task_not_found(self, mock_query):
- mock_query.return_value = []
- self.rep.yaml_name = FAKE_YAML_NAME
- self.rep.task_id = FAKE_TASK_ID
- # pylint: disable=deprecated-method
- self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_fieldkeys)
- self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks)
- # pylint: enable=deprecated-method
+ @mock.patch.object(report.Report, '_get_tasks')
+ @mock.patch.object(report.Report, '_get_fieldkeys')
+ @mock.patch.object(report.Report, '_validate')
+ def test_generate_nsb(self, mock_valid, mock_keys, mock_tasks):
+ mock_tasks.return_value = GOOD_DB_TASK
+ mock_keys.return_value = GOOD_DB_FIELDKEYS
+ self.rep.generate_nsb(self.param)
+ mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
+ mock_tasks.assert_called_once_with()
+ mock_keys.assert_called_once_with()
+ self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)
diff --git a/yardstick/tests/unit/benchmark/runner/test_iteration.py b/yardstick/tests/unit/benchmark/runner/test_iteration.py
new file mode 100644
index 000000000..783b236f5
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_iteration.py
@@ -0,0 +1,45 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+from yardstick.benchmark.runners import iteration
+from yardstick.common import exceptions as y_exc
+
+
+class IterationRunnerTest(unittest.TestCase):
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {'interval': 0, "duration": 0},
+ 'type': 'some_type'
+ }
+
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+
+ def _assert_defaults__worker_run_one_iteration(self):
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_broad_exception(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ with self.assertRaises(Exception):
+ iteration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_one_iteration()
+ self._assert_defaults__worker_run_setup_and_teardown()
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index 0f68753fd..35455a49c 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -7,10 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
-
-from __future__ import absolute_import
import mock
import unittest
@@ -18,33 +14,44 @@ from yardstick.benchmark.scenarios.availability.attacker import \
attacker_baremetal
-# pylint: disable=unused-argument
-# disable this for now because I keep forgetting mock patch arg ordering
+class ExecuteShellTestCase(unittest.TestCase):
+ def setUp(self):
+ self._mock_subprocess = mock.patch.object(attacker_baremetal,
+ 'subprocess')
+ self.mock_subprocess = self._mock_subprocess.start()
-@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
-class ExecuteShellTestCase(unittest.TestCase):
+ self.addCleanup(self._stop_mocks)
- def test__fun_execute_shell_command_successful(self, mock_subprocess):
- cmd = "env"
- mock_subprocess.check_output.return_value = (0, 'unittest')
- exitcode, _ = attacker_baremetal._execute_shell_command(cmd)
+ def _stop_mocks(self):
+ self._mock_subprocess.stop()
+
+ def test__execute_shell_command_successful(self):
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, _ = attacker_baremetal._execute_shell_command("env")
self.assertEqual(exitcode, 0)
- @mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.LOG')
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log, mock_subprocess):
- cmd = "env"
- mock_subprocess.check_output.side_effect = RuntimeError
- exitcode, _ = attacker_baremetal._execute_shell_command(cmd)
+ @mock.patch.object(attacker_baremetal, 'LOG')
+ def test__execute_shell_command_fail_cmd_exception(self, mock_log):
+ self.mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, _ = attacker_baremetal._execute_shell_command("env")
self.assertEqual(exitcode, -1)
mock_log.error.assert_called_once()
-@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
-@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
class AttackerBaremetalTestCase(unittest.TestCase):
def setUp(self):
+ self._mock_ssh = mock.patch.object(attacker_baremetal, 'ssh')
+ self.mock_ssh = self._mock_ssh.start()
+ self._mock_subprocess = mock.patch.object(attacker_baremetal,
+ 'subprocess')
+ self.mock_subprocess = self._mock_subprocess.start()
+ self.addCleanup(self._stop_mocks)
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, "running", '')
+
host = {
"ipmi_ip": "10.20.0.5",
"ipmi_user": "root",
@@ -59,26 +66,26 @@ class AttackerBaremetalTestCase(unittest.TestCase):
'host': 'node1',
}
- def test__attacker_baremetal_all_successful(self, mock_ssh, mock_subprocess):
- mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
- ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
- self.context)
+ self.ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
+ self.context)
- ins.setup()
- ins.inject_fault()
- ins.recover()
+ def _stop_mocks(self):
+ self._mock_ssh.stop()
+ self._mock_subprocess.stop()
- def test__attacker_baremetal_check_failuer(self, mock_ssh, mock_subprocess):
- mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
- ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
- self.context)
- ins.setup()
+ def test__attacker_baremetal_all_successful(self):
+ self.ins.setup()
+ self.ins.inject_fault()
+ self.ins.recover()
- def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess):
+ def test__attacker_baremetal_check_failure(self):
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, "error check", '')
+ self.ins.setup()
+ def test__attacker_baremetal_recover_successful(self):
self.attacker_cfg["jump_host"] = 'node1'
self.context["node1"]["password"] = "123456"
- mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 6bf2f2c2f..8214782b2 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -325,6 +325,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
},
},
'options': {
+ 'simulated_users': {'uplink': [1, 2]},
+ 'page_object': {'uplink': [1, 2]},
'framesize': {'64B': 100}
},
'runner': {
@@ -620,6 +622,20 @@ class TestNetworkServiceTestCase(unittest.TestCase):
with self.assertRaises(IOError):
self.s._get_traffic_profile()
+ def test__key_list_to_dict(self):
+ result = self.s._key_list_to_dict("uplink", {"uplink": [1, 2]})
+ self.assertEqual({"uplink_0": 1, "uplink_1": 2}, result)
+
+ def test__get_simulated_users(self):
+ result = self.s._get_simulated_users()
+ self.assertEqual({'simulated_users': {'uplink_0': 1, 'uplink_1': 2}},
+ result)
+
+ def test__get_page_object(self):
+ result = self.s._get_page_object()
+ self.assertEqual({'page_object': {'uplink_0': 1, 'uplink_1': 2}},
+ result)
+
def test___get_traffic_imix_exception(self):
with mock.patch.dict(self.scenario_cfg["traffic_options"], {'imix': ''}):
self.assertEqual({'imix': {'64B': 100}},
@@ -629,7 +645,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
@mock.patch.object(vnfdgen, 'generate_vnfd')
def test__fill_traffic_profile(self, mock_generate, mock_tprofile_get):
fake_tprofile = mock.Mock()
- fake_vnfd = mock.Mock()
+ fake_vnfd = mock.MagicMock()
with mock.patch.object(self.s, '_get_traffic_profile',
return_value=fake_tprofile) as mock_get_tp:
mock_generate.return_value = fake_vnfd
@@ -642,10 +658,30 @@ class TestNetworkServiceTestCase(unittest.TestCase):
'flow': {'flow': {}},
'imix': {'imix': {'64B': 100}},
'uplink': {},
- 'duration': 30}
+ 'duration': 30,
+ 'simulated_users': {
+ 'simulated_users': {'uplink_0': 1, 'uplink_1': 2}},
+ 'page_object': {
+ 'page_object': {'uplink_0': 1, 'uplink_1': 2}},}
)
mock_tprofile_get.assert_called_once_with(fake_vnfd)
+ @mock.patch.object(base.TrafficProfile, 'get')
+ @mock.patch.object(vnfdgen, 'generate_vnfd')
+ def test__fill_traffic_profile2(self, mock_generate, mock_tprofile_get):
+ fake_tprofile = mock.Mock()
+ fake_vnfd = {}
+ with mock.patch.object(self.s, '_get_traffic_profile',
+ return_value=fake_tprofile) as mock_get_tp:
+ mock_generate.return_value = fake_vnfd
+
+ self.s.scenario_cfg["options"] = {"traffic_config": {"duration": 99899}}
+ self.s._fill_traffic_profile()
+ mock_get_tp.assert_called_once()
+ self.assertIn("traffic_profile", fake_vnfd)
+ self.assertIn("duration", fake_vnfd["traffic_profile"])
+ self.assertEqual(99899, fake_vnfd["traffic_profile"]["duration"])
+
@mock.patch.object(utils, 'open_relative_file')
def test__get_topology(self, mock_open_path):
self.s.scenario_cfg['topology'] = 'fake_topology'
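
Note: the new _key_list_to_dict test expects per-port keys to be derived by suffixing the index. A hedged one-liner that reproduces that expectation; the real helper in vnf_generic.py may be written differently:

    def key_list_to_dict_sketch(key, options):
        # {"uplink": [1, 2]} -> {"uplink_0": 1, "uplink_1": 2}
        return {'{}_{}'.format(key, i): value
                for i, value in enumerate(options[key])}
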
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 3cf6c4d05..c0c928916 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -1407,3 +1407,20 @@ class SafeCaseTestCase(unittest.TestCase):
def test_default_value(self):
self.assertEqual(0, utils.safe_cast('', 'int', 0))
+
+
+class SetupHugepagesTestCase(unittest.TestCase):
+
+ @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'5\n'))
+ @mock.patch.object(utils, 'read_meminfo',
+ return_value={'Hugepagesize': '1024'})
+ def test_setup_hugepages(self, mock_meminfo, *args):
+ ssh = mock.Mock()
+ ssh.execute = mock.Mock()
+ hp_size_kb, hp_number, hp_number_set = utils.setup_hugepages(ssh, 10 * 1024)
+ mock_meminfo.assert_called_once_with(ssh)
+ ssh.execute.assert_called_once_with(
+ 'echo 10 | sudo tee /proc/sys/vm/nr_hugepages')
+ self.assertEqual(hp_size_kb, 1024)
+ self.assertEqual(hp_number, 10)
+ self.assertEqual(hp_number_set, 5)
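
Note: the new test pins down the contract of utils.setup_hugepages: derive the page count from Hugepagesize in /proc/meminfo, write it to /proc/sys/vm/nr_hugepages, and report the value the kernel actually accepted. A rough, self-contained sketch of that contract; the real helper reads the applied value back differently (the test allows only one execute call), so the second command here is purely illustrative:

    import math

    from yardstick.common import utils

    def setup_hugepages_sketch(ssh, memory_kb):
        hp_size_kb = int(utils.read_meminfo(ssh)['Hugepagesize'])
        hp_number = int(math.ceil(memory_kb / float(hp_size_kb)))
        ssh.execute('echo %s | sudo tee /proc/sys/vm/nr_hugepages' % hp_number)
        # read back how many pages the kernel actually reserved
        _, stdout, _ = ssh.execute('cat /proc/sys/vm/nr_hugepages')
        hp_number_set = int(stdout.strip() or 0)
        return hp_size_kb, hp_number, hp_number_set
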
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index 66fed81f1..c1d902061 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -46,7 +46,8 @@ TRAFFIC_PARAMETERS = {
'dstip': '152.16.40.20',
'srcip': '152.16.100.20',
'dstmask': 24,
- 'srcmask': 24
+ 'srcmask': 24,
+ 'priority': {'raw': '0x01'}
},
'outer_l4': {
'seed': 1,
@@ -78,7 +79,8 @@ TRAFFIC_PARAMETERS = {
'dstip': '2001::10',
'srcip': '2021::10',
'dstmask': 64,
- 'srcmask': 64
+ 'srcmask': 64,
+ 'priority': {'raw': '0x01'}
},
'outer_l4': {
'seed': 1,
@@ -349,6 +351,37 @@ class TestIxNextgen(unittest.TestCase):
self.ixnet_gen.ixnet.setAttribute.assert_any_call(
'attr/singleValue', '-value', 'external')
+ def test_add_interface(self):
+ self.ixnet_gen.ixnet.add.return_value = 'obj'
+ self.ixnet_gen.add_interface(vport='vport',
+ ip='10.0.0.2',
+ mac='00:00:00:00:00:00',
+ gateway='10.0.0.1')
+ self.ixnet_gen.ixnet.add.assert_any_call('vport', 'interface')
+ self.ixnet_gen.ixnet.add.assert_any_call('obj', 'ipv4')
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call(
+ 'obj/ethernet', '-macAddress', '00:00:00:00:00:00')
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call(
+ 'obj', '-ip', '10.0.0.2')
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call(
+ 'obj', '-gateway', '10.0.0.1')
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call(
+ 'obj', '-enabled', 'true')
+
+ def test_add_static_ipv4(self):
+ self.ixnet_gen.ixnet.add.return_value = 'obj'
+ self.ixnet_gen.add_static_ipv4(iface='iface',
+ vport='vport',
+ start_ip='10.0.0.0',
+ count='100')
+ self.ixnet_gen.ixnet.add.assert_called_once_with(
+ 'vport/protocols/static', 'ip')
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call(
+ 'obj', '-protocolInterface', 'iface',
+ '-ipStart', '10.0.0.0',
+ '-count', '100',
+ '-enabled', 'true')
+
@mock.patch.object(IxNetwork, 'IxNet')
def test_connect(self, mock_ixnet):
mock_ixnet.return_value = self.ixnet
@@ -582,15 +615,47 @@ class TestIxNextgen(unittest.TestCase):
self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 40)
def test_get_statistics(self):
- port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
- flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
with mock.patch.object(self.ixnet_gen, '_build_stats_map') as \
mock_build_stats:
self.ixnet_gen.get_statistics()
mock_build_stats.assert_has_calls([
- mock.call(port_statistics, self.ixnet_gen.PORT_STATS_NAME_MAP),
- mock.call(flow_statistics, self.ixnet_gen.LATENCY_NAME_MAP)])
+ mock.call(self.ixnet_gen.PORT_STATISTICS,
+ self.ixnet_gen.PORT_STATS_NAME_MAP),
+ mock.call(self.ixnet_gen.FLOW_STATISTICS,
+ self.ixnet_gen.LATENCY_NAME_MAP)])
+
+ def test__set_flow_tracking(self):
+ self.ixnet_gen._ixnet.getList.return_value = ['traffic_item']
+ self.ixnet_gen._set_flow_tracking(track_by=['vlanVlanId0'])
+ self.ixnet_gen.ixnet.setAttribute.assert_called_once_with(
+ 'traffic_item/tracking', '-trackBy', ['vlanVlanId0'])
+ self.assertEqual(self.ixnet.commit.call_count, 1)
+
+ def test__set_egress_flow_tracking(self):
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['encapsulation']]
+ self.ixnet_gen._set_egress_flow_tracking(encapsulation='Ethernet',
+ offset='IPv4 TOS Precedence')
+ self.ixnet_gen.ixnet.setAttribute.assert_any_call(
+ 'traffic_item', '-egressEnabled', True)
+ self.ixnet_gen.ixnet.setAttribute.assert_any_call(
+ 'encapsulation', '-encapsulation', 'Ethernet')
+ self.ixnet_gen.ixnet.setAttribute.assert_any_call(
+ 'encapsulation', '-offset', 'IPv4 TOS Precedence')
+ self.assertEqual(self.ixnet.commit.call_count, 2)
+
+ def test_get_pppoe_scenario_statistics(self):
+ with mock.patch.object(self.ixnet_gen, '_build_stats_map') as \
+ mock_build_stats:
+ self.ixnet_gen.get_pppoe_scenario_statistics()
+
+ mock_build_stats.assert_any_call(self.ixnet_gen.PORT_STATISTICS,
+ self.ixnet_gen.PORT_STATS_NAME_MAP)
+ mock_build_stats.assert_any_call(self.ixnet_gen.FLOW_STATISTICS,
+ self.ixnet_gen.LATENCY_NAME_MAP)
+ mock_build_stats.assert_any_call(self.ixnet_gen.PPPOX_CLIENT_PER_PORT,
+ self.ixnet_gen.PPPOX_CLIENT_PER_PORT_NAME_MAP)
def test__update_ipv4_address(self):
with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
@@ -602,6 +667,78 @@ class TestIxNextgen(unittest.TestCase):
'-randomMask', '0.0.0.63', '-valueType', 'random',
'-countValue', 25)
+ def test__update_ipv4_priority_raw(self):
+ priority = {'raw': '0x01'}
+ self.ixnet_gen._set_priority_field = mock.Mock()
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_ipv4_priority('field_desc', priority)
+
+ self.ixnet_gen._set_priority_field.assert_called_once_with(
+ 'field_desc', priority['raw'])
+
+ def test__update_ipv4_priority_dscp(self):
+ priority = {'dscp': {'defaultPHB': [0, 1, 2, 3]}}
+ self.ixnet_gen._set_priority_field = mock.Mock()
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_ipv4_priority('field_desc', priority)
+
+ self.ixnet_gen._set_priority_field.assert_called_once_with(
+ 'field_desc', priority['dscp']['defaultPHB'])
+
+ def test__update_ipv4_priority_tos(self):
+ priority = {'tos': {'precedence': [0, 4, 7]}}
+ self.ixnet_gen._set_priority_field = mock.Mock()
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_ipv4_priority('field_desc', priority)
+
+ self.ixnet_gen._set_priority_field.assert_called_once_with(
+ 'field_desc', priority['tos']['precedence'])
+
+ def test__update_ipv4_priority_wrong_priority_type(self):
+ priority = {'test': [0, 4, 7]}
+ self.ixnet_gen._set_priority_field = mock.Mock()
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_ipv4_priority('field_desc', priority)
+
+ self.ixnet_gen._set_priority_field.assert_not_called()
+
+ def test__update_ipv4_priority_not_supported_dscp_class(self):
+ priority = {'dscp': {'testPHB': [0, 4, 7]}}
+ self.ixnet_gen._set_priority_field = mock.Mock()
+ self.ixnet_gen._get_field_in_stack_item = mock.Mock()
+ self.ixnet_gen._update_ipv4_priority('field_desc', priority)
+ self.ixnet_gen._set_priority_field.assert_not_called()
+ self.ixnet_gen._get_field_in_stack_item.assert_not_called()
+
+ def test__update_ipv4_priority_not_supported_tos_field(self):
+ priority = {'tos': {'test': [0, 4, 7]}}
+ self.ixnet_gen._set_priority_field = mock.Mock()
+ self.ixnet_gen._get_field_in_stack_item = mock.Mock()
+ self.ixnet_gen._update_ipv4_priority('field_desc', priority)
+ self.ixnet_gen._set_priority_field.assert_not_called()
+ self.ixnet_gen._get_field_in_stack_item.assert_not_called()
+
+ def test__set_priority_field_list_value(self):
+ value = [1, 4, 7]
+ self.ixnet_gen._set_priority_field('field_desc', value)
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc',
+ '-valueList', [1, 4, 7],
+ '-activeFieldChoice', 'true',
+ '-valueType', 'valueList')
+
+ def test__set_priority_field_single_value(self):
+ value = 7
+ self.ixnet_gen._set_priority_field('field_desc', value)
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc',
+ '-activeFieldChoice', 'true',
+ '-singleValue', '7')
+
def test__update_udp_port(self):
with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
return_value='field_desc'):
@@ -622,10 +759,13 @@ class TestIxNextgen(unittest.TestCase):
mock_update_add, \
mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
mock.patch.object(self.ixnet_gen,
- '_get_config_element_by_flow_group_name', return_value='celm'):
+ '_get_config_element_by_flow_group_name', return_value='celm'), \
+ mock.patch.object(self.ixnet_gen, '_update_ipv4_priority') as \
+ mock_update_priority:
self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
self.assertEqual(4, len(mock_update_add.mock_calls))
+ self.assertEqual(2, len(mock_update_priority.mock_calls))
def test_update_ip_packet_exception_no_config_element(self):
with mock.patch.object(self.ixnet_gen,
@@ -717,6 +857,24 @@ class TestIxNextgen(unittest.TestCase):
self.ixnet.getList.assert_called_once()
self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
+ def test__get_protocol_status(self):
+ self.ixnet.getAttribute.return_value = ['up']
+ self.ixnet_gen._get_protocol_status('ipv4')
+ self.ixnet.getAttribute.assert_called_once_with('ipv4',
+ '-sessionStatus')
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_get_protocol_status')
+ def test_is_protocols_running(self, mock_ixnextgen_get_protocol_status):
+ mock_ixnextgen_get_protocol_status.return_value = ['up', 'up']
+ result = self.ixnet_gen.is_protocols_running(['ethernet', 'ipv4'])
+ self.assertTrue(result)
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_get_protocol_status')
+ def test_is_protocols_stopped(self, mock_ixnextgen_get_protocol_status):
+ mock_ixnextgen_get_protocol_status.return_value = ['down', 'down']
+ result = self.ixnet_gen.is_protocols_running(['ethernet', 'ipv4'])
+ self.assertFalse(result)
+
def test_start_protocols(self):
self.ixnet_gen.start_protocols()
self.ixnet.execute.assert_called_once_with('startAllProtocols')
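
Note: the new IPv4 priority tests define the dispatch rules for _update_ipv4_priority: a 'raw' value is applied as-is, only the defaultPHB DSCP class and the TOS precedence field are honoured, and anything else is ignored without touching the stack item. A hedged sketch of that dispatch; the field lookup names are simplified and may not match the actual IxNetwork attribute paths:

    SUPPORTED_DSCP_CLASSES = ('defaultPHB',)
    SUPPORTED_TOS_FIELDS = ('precedence',)

    def update_ipv4_priority_sketch(gen, ip_descriptor, priority):
        if 'raw' in priority:
            field = gen._get_field_in_stack_item(ip_descriptor, 'priority.raw')
            gen._set_priority_field(field, priority['raw'])
        elif 'dscp' in priority:
            for dscp_class, value in priority['dscp'].items():
                if dscp_class not in SUPPORTED_DSCP_CLASSES:
                    continue
                field = gen._get_field_in_stack_item(ip_descriptor, dscp_class)
                gen._set_priority_field(field, value)
        elif 'tos' in priority:
            for tos_field, value in priority['tos'].items():
                if tos_field not in SUPPORTED_TOS_FIELDS:
                    continue
                field = gen._get_field_in_stack_item(ip_descriptor, tos_field)
                gen._set_priority_field(field, value)
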
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
index 0dc3e0579..d9244e31b 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_base.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
@@ -95,18 +95,18 @@ class TrafficProfileConfigTestCase(unittest.TestCase):
def test__parse_rate(self):
tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
tp_config_obj = base.TrafficProfileConfig(tp_config)
- self.assertEqual((100.0, 'fps'), tp_config_obj._parse_rate('100 '))
- self.assertEqual((200.5, 'fps'), tp_config_obj._parse_rate('200.5'))
- self.assertEqual((300.8, 'fps'), tp_config_obj._parse_rate('300.8fps'))
+ self.assertEqual((100.0, 'fps'), tp_config_obj.parse_rate('100 '))
+ self.assertEqual((200.5, 'fps'), tp_config_obj.parse_rate('200.5'))
+ self.assertEqual((300.8, 'fps'), tp_config_obj.parse_rate('300.8fps'))
self.assertEqual((400.2, 'fps'),
- tp_config_obj._parse_rate('400.2 fps'))
- self.assertEqual((500.3, '%'), tp_config_obj._parse_rate('500.3%'))
- self.assertEqual((600.1, '%'), tp_config_obj._parse_rate('600.1 %'))
+ tp_config_obj.parse_rate('400.2 fps'))
+ self.assertEqual((500.3, '%'), tp_config_obj.parse_rate('500.3%'))
+ self.assertEqual((600.1, '%'), tp_config_obj.parse_rate('600.1 %'))
def test__parse_rate_exception(self):
tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
tp_config_obj = base.TrafficProfileConfig(tp_config)
with self.assertRaises(exceptions.TrafficProfileRate):
- tp_config_obj._parse_rate('100Fps')
+ tp_config_obj.parse_rate('100Fps')
with self.assertRaises(exceptions.TrafficProfileRate):
- tp_config_obj._parse_rate('100 kbps')
+ tp_config_obj.parse_rate('100 kbps')
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
index 1adab48bc..c9be200b2 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
@@ -249,16 +249,20 @@ class TestIxLoadTrafficGen(unittest.TestCase):
ixload = http_ixload.IXLOADHttpTest(
jsonutils.dump_as_bytes(self.test_input))
- ixload.links_param = {"uplink_0": {"ip": {}}}
+ ixload.links_param = {"uplink_0": {"ip": {},
+ "http_client": {}}}
ixload.test = mock.Mock()
ixload.test.communityList = community_list
ixload.update_network_param = mock.Mock()
+ ixload.update_http_client_param = mock.Mock()
ixload.update_config()
ixload.update_network_param.assert_called_once_with(net_taraffic_0, {})
+ ixload.update_http_client_param.assert_called_once_with(net_taraffic_0,
+ {})
def test_update_network_mac_address(self):
ethernet = mock.MagicMock()
@@ -338,6 +342,57 @@ class TestIxLoadTrafficGen(unittest.TestCase):
net_traffic,
"mac")
+ def test_update_http_client_param(self):
+ net_traffic = mock.Mock()
+
+ ixload = http_ixload.IXLOADHttpTest(
+ jsonutils.dump_as_bytes(self.test_input))
+
+ ixload.update_page_size = mock.Mock()
+ ixload.update_user_count = mock.Mock()
+
+ param = {"page_object": "page_object",
+ "simulated_users": "simulated_users"}
+
+ ixload.update_http_client_param(net_traffic, param)
+
+ ixload.update_page_size.assert_called_once_with(net_traffic,
+ "page_object")
+ ixload.update_user_count.assert_called_once_with(net_traffic,
+ "simulated_users")
+
+ def test_update_page_size(self):
+ activity = mock.MagicMock()
+ net_traffic = mock.Mock()
+
+ ixload = http_ixload.IXLOADHttpTest(
+ jsonutils.dump_as_bytes(self.test_input))
+
+ net_traffic.activityList = [activity]
+ ix_http_command = activity.agent.actionList[0]
+ ixload.update_page_size(net_traffic, "page_object")
+ ix_http_command.config.assert_called_once_with(
+ pageObject="page_object")
+
+ net_traffic.activityList = []
+ with self.assertRaises(exceptions.InvalidRxfFile):
+ ixload.update_page_size(net_traffic, "page_object")
+
+ def test_update_user_count(self):
+ activity = mock.MagicMock()
+ net_traffic = mock.Mock()
+
+ ixload = http_ixload.IXLOADHttpTest(
+ jsonutils.dump_as_bytes(self.test_input))
+
+ net_traffic.activityList = [activity]
+ ixload.update_user_count(net_traffic, 123)
+ activity.config.assert_called_once_with(userObjectiveValue=123)
+
+ net_traffic.activityList = []
+ with self.assertRaises(exceptions.InvalidRxfFile):
+ ixload.update_user_count(net_traffic, 123)
+
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
def test_start_http_test(self, *args):
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 0759ecebd..ef16676c7 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -16,6 +16,7 @@ import copy
import mock
import unittest
+import collections
from yardstick.network_services.traffic_profile import ixia_rfc2544
from yardstick.network_services.traffic_profile import trex_traffic_profile
@@ -511,9 +512,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
with mock.patch.object(rfc2544_profile, '_get_ixia_traffic_profile') \
as mock_get_tp, \
mock.patch.object(rfc2544_profile, '_ixia_traffic_generate') \
- as mock_tgenerate, \
- mock.patch.object(rfc2544_profile, 'update_traffic_profile') \
- as mock_update_tp:
+ as mock_tgenerate:
mock_get_tp.return_value = 'fake_tprofile'
output = rfc2544_profile.execute_traffic(mock.ANY,
ixia_obj=mock.ANY)
@@ -524,7 +523,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertEqual(0, rfc2544_profile.min_rate)
mock_get_tp.assert_called_once()
mock_tgenerate.assert_called_once()
- mock_update_tp.assert_called_once()
def test_execute_traffic_not_first_run(self):
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
@@ -586,7 +584,8 @@ class TestIXIARFC2544Profile(unittest.TestCase):
'Store-Forward_Max_latency_ns': 28}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
- completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
+ completed, samples = rfc2544_profile.get_drop_percentage(
+ samples, 0, 1, 4)
self.assertTrue(completed)
self.assertEqual(66.9, samples['TxThroughput'])
self.assertEqual(66.833, samples['RxThroughput'])
@@ -610,7 +609,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
- samples, 0, 0.05)
+ samples, 0, 0.05, 4)
self.assertFalse(completed)
self.assertEqual(66.9, samples['TxThroughput'])
self.assertEqual(66.833, samples['RxThroughput'])
@@ -632,7 +631,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
- samples, 0.2, 1)
+ samples, 0.2, 1, 4)
self.assertFalse(completed)
self.assertEqual(66.9, samples['TxThroughput'])
self.assertEqual(66.833, samples['RxThroughput'])
@@ -655,7 +654,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
- samples, 0.2, 1)
+ samples, 0.2, 1, 4)
self.assertFalse(completed)
self.assertEqual(0.0, samples['TxThroughput'])
self.assertEqual(66.833, samples['RxThroughput'])
@@ -676,9 +675,43 @@ class TestIXIARFC2544Profile(unittest.TestCase):
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(
- samples, 0, 1, first_run=True)
+ samples, 0, 1, 4, first_run=True)
self.assertTrue(completed)
self.assertEqual(66.9, samples['TxThroughput'])
self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(33.45, rfc2544_profile.rate)
+
+
+class TestIXIARFC2544PppoeScenarioProfile(unittest.TestCase):
+
+ TRAFFIC_PROFILE = {
+ "schema": "nsb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100},
+ 'uplink_0': {'ipv4': {'port': 'xe0', 'id': 1}},
+ 'downlink_0': {'ipv4': {'port': 'xe2', 'id': 2}},
+ 'uplink_1': {'ipv4': {'port': 'xe1', 'id': 3}},
+ 'downlink_1': {'ipv4': {'port': 'xe2', 'id': 4}}
+ }
+
+ def setUp(self):
+ self.ixia_tp = ixia_rfc2544.IXIARFC2544PppoeScenarioProfile(
+ self.TRAFFIC_PROFILE)
+
+ def test___init__(self):
+ self.assertIsInstance(self.ixia_tp.full_profile,
+ collections.OrderedDict)
+
+ def test__get_flow_groups_params(self):
+ expected_tp = collections.OrderedDict([
+ ('uplink_0', {'ipv4': {'id': 1, 'port': 'xe0'}}),
+ ('downlink_0', {'ipv4': {'id': 2, 'port': 'xe2'}}),
+ ('uplink_1', {'ipv4': {'id': 3, 'port': 'xe1'}}),
+ ('downlink_1', {'ipv4': {'id': 4, 'port': 'xe2'}})])
+
+ self.ixia_tp._get_flow_groups_params()
+ self.assertDictEqual(self.ixia_tp.full_profile, expected_tp)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index c09903377..f17656328 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -21,6 +21,8 @@ from yardstick.network_services.traffic_profile import prox_binsearch
class TestProxBinSearchProfile(unittest.TestCase):
+ THEOR_MAX_THROUGHPUT = 0.00012340000000000002
+
def setUp(self):
self._mock_log_info = mock.patch.object(prox_binsearch.LOG, 'info')
self.mock_log_info = self._mock_log_info.start()
@@ -38,6 +40,12 @@ class TestProxBinSearchProfile(unittest.TestCase):
return fail_tuple, {}
return success_tuple, {}
+ def side_effect_func(arg1, arg2):
+ if arg1 == "confirmation":
+ return arg2
+ else:
+ return {}
+
tp_config = {
'traffic_profile': {
'packet_sizes': [200],
@@ -51,11 +59,13 @@ class TestProxBinSearchProfile(unittest.TestCase):
fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
traffic_generator = mock.MagicMock()
- attrs1 = {'get.return_value' : 10}
+ attrs1 = {'get.return_value': 10}
traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
- attrs2 = {'__getitem__.return_value' : 10, 'get.return_value': 10}
+ attrs2 = {'__getitem__.return_value': 10, 'get.return_value': 10}
+ attrs3 = {'get.side_effect': side_effect_func}
traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
+ traffic_generator.scenario_helper.scenario_cfg["options"].configure_mock(**attrs3)
profile_helper = mock.MagicMock()
profile_helper.run_test = target
@@ -68,11 +78,11 @@ class TestProxBinSearchProfile(unittest.TestCase):
self.assertEqual(round(profile.current_lower, 2), 74.69)
self.assertEqual(round(profile.current_upper, 2), 76.09)
- self.assertEqual(len(runs), 77)
+ self.assertEqual(len(runs), 7)
# Result Samples inc theor_max
result_tuple = {'Actual_throughput': 5e-07,
- 'theor_max_throughput': 7.5e-07,
+ 'theor_max_throughput': self.THEOR_MAX_THROUGHPUT,
'PktSize': 200,
'Status': 'Result'}
@@ -88,7 +98,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
"PktSize": 200,
"RxThroughput": 7.5e-07,
"Throughput": 7.5e-07,
- "TxThroughput": 0.00012340000000000002,
+ "TxThroughput": self.THEOR_MAX_THROUGHPUT,
"Status": 'Success'}
calls = profile.queue.put(success_result_tuple)
@@ -121,6 +131,12 @@ class TestProxBinSearchProfile(unittest.TestCase):
return fail_tuple, {}
return success_tuple, {}
+ def side_effect_func(arg1, _):
+ if arg1 == "confirmation":
+ return 2
+ else:
+ return {}
+
tp_config = {
'traffic_profile': {
'packet_sizes': [200],
@@ -138,7 +154,10 @@ class TestProxBinSearchProfile(unittest.TestCase):
traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
attrs2 = {'__getitem__.return_value': 0, 'get.return_value': 0}
+ attrs3 = {'get.side_effect': side_effect_func}
+
traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
+ traffic_generator.scenario_helper.scenario_cfg["options"].configure_mock(**attrs3)
profile_helper = mock.MagicMock()
profile_helper.run_test = target
@@ -150,7 +169,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
profile.execute_traffic(traffic_generator)
self.assertEqual(round(profile.current_lower, 2), 24.06)
self.assertEqual(round(profile.current_upper, 2), 25.47)
- self.assertEqual(len(runs), 7)
+ self.assertEqual(len(runs), 21)
def test_execute_3(self):
def target(*args, **_):
@@ -186,8 +205,6 @@ class TestProxBinSearchProfile(unittest.TestCase):
profile.lower_bound = 99.0
profile.execute_traffic(traffic_generator)
-
- # Result Samples
result_tuple = {'Actual_throughput': 0, 'theor_max_throughput': 0,
"Status": 'Result', "Next_Step": ''}
profile.queue.put.assert_called_with(result_tuple)
@@ -207,6 +224,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
raise RuntimeError(' '.join([str(args), str(runs)]))
if args[2] > 75.0:
return fail_tuple, {}
+
return success_tuple, {}
tp_config = {
@@ -226,6 +244,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
attrs2 = {'__getitem__.return_value': 0, 'get.return_value': 0}
+
traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
profile_helper = mock.MagicMock()
@@ -242,7 +261,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
# Result Samples inc theor_max
result_tuple = {'Actual_throughput': 5e-07,
- 'theor_max_throughput': 7.5e-07,
+ 'theor_max_throughput': self.THEOR_MAX_THROUGHPUT,
'PktSize': 200,
"Status": 'Result'}
@@ -258,7 +277,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
"PktSize": 200,
"RxThroughput": 7.5e-07,
"Throughput": 7.5e-07,
- "TxThroughput": 0.00012340000000000002,
+ "TxThroughput": self.THEOR_MAX_THROUGHPUT,
"Status": 'Success'}
calls = profile.queue.put(success_result_tuple)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py
new file mode 100644
index 000000000..59f37befa
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import mock
+
+from yardstick.network_services.traffic_profile import prox_irq
+
+
+class TestProxIrqProfile(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_log_info = mock.patch.object(prox_irq.LOG, 'info')
+ self.mock_log_info = self._mock_log_info.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_log_info.stop()
+
+ def test_execute_1(self):
+ tp_config = {
+ 'traffic_profile': {
+ },
+ }
+
+ traffic_generator = mock.MagicMock()
+ attrs1 = {'get.return_value': 10}
+ traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
+
+ attrs2 = {'__getitem__.return_value': 10, 'get.return_value': 10}
+ traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
+
+ profile_helper = mock.MagicMock()
+
+ profile = prox_irq.ProxIrqProfile(tp_config)
+ profile.init(mock.MagicMock())
+ profile._profile_helper = profile_helper
+
+ profile.execute_traffic(traffic_generator)
+ profile.run_test()
+ is_ended_flag = profile.is_ended()
+
+ self.assertFalse(is_ended_flag)
+ self.assertEqual(profile.lower_bound, 10.0)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
index 3d6ebb25b..31f08da3e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -320,7 +320,8 @@ class TestProxSocketHelper(unittest.TestCase):
self.assertEqual(len(prox._pkt_dumps), 0)
mock_select.select.reset_mock()
- mock_select.select.side_effect = chain([['a'], ['']], repeat([1], 3))
+ mock_select.select.side_effect = chain([['a'], ['']],
+ repeat([1], 3))
mock_recv.decode.return_value = PACKET_DUMP_1
ret = prox.get_data()
self.assertEqual(mock_select.select.call_count, 2)
@@ -328,13 +329,54 @@ class TestProxSocketHelper(unittest.TestCase):
self.assertEqual(len(prox._pkt_dumps), 1)
mock_select.select.reset_mock()
- mock_select.select.side_effect = chain([[object()], [None]], repeat([1], 3))
+ mock_select.select.side_effect = chain([[object()], [None]],
+ repeat([1], 3))
mock_recv.decode.return_value = PACKET_DUMP_2
ret = prox.get_data()
self.assertEqual(mock_select.select.call_count, 1)
self.assertEqual(ret, 'jumped over')
self.assertEqual(len(prox._pkt_dumps), 3)
+ @mock.patch.object(prox_helpers, 'select')
+ def test_get_string(self, mock_select):
+ mock_select.select.side_effect = [[1], [0]]
+ mock_socket = mock.MagicMock()
+ mock_recv = mock_socket.recv()
+ mock_recv.decode.return_value = ""
+ prox = prox_helpers.ProxSocketHelper(mock_socket)
+ status, ret = prox.get_string()
+ self.assertEqual(ret, "")
+ self.assertTrue(status)
+ self.assertEqual(len(prox._pkt_dumps), 0)
+
+ @mock.patch.object(prox_helpers, 'select')
+ def test_get_string2(self, mock_select):
+ mock_select.select.side_effect = chain([['a'], ['']],
+ repeat([1], 3))
+ mock_socket = mock.MagicMock()
+ mock_recv = mock_socket.recv()
+ mock_recv.decode.return_value = PACKET_DUMP_1
+ prox = prox_helpers.ProxSocketHelper(mock_socket)
+ status, ret = prox.get_string()
+ self.assertEqual(mock_select.select.call_count, 2)
+ self.assertEqual(ret, 'pktdump,3,11')
+ self.assertTrue(status)
+ self.assertEqual(len(prox._pkt_dumps), 1)
+
+ @mock.patch.object(prox_helpers, 'select')
+ def test_get_string3(self, mock_select):
+ mock_select.select.side_effect = chain([[object()], [None]],
+ repeat([1], 3))
+ mock_socket = mock.MagicMock()
+ mock_recv = mock_socket.recv()
+ mock_recv.decode.return_value = PACKET_DUMP_2
+ prox = prox_helpers.ProxSocketHelper(mock_socket)
+ status, ret = prox.get_string()
+ self.assertTrue(status)
+ self.assertEqual(mock_select.select.call_count, 1)
+ self.assertEqual(ret, 'jumped over')
+ self.assertEqual(len(prox._pkt_dumps), 2)
+
def test__parse_socket_data_mixed_data(self):
prox = prox_helpers.ProxSocketHelper(mock.MagicMock())
ret, _ = prox._parse_socket_data(PACKET_DUMP_NON_1, False)
@@ -548,29 +590,160 @@ class TestProxSocketHelper(unittest.TestCase):
self.assertEqual(result, expected)
@mock.patch.object(prox_helpers.LOG, 'error')
+ def test_irq_core_stats(self, *args):
+ mock_socket = mock.MagicMock()
+ prox = prox_helpers.ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value=('0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3'))
+
+ data_0 = {"cpu": 0, 'bucket_0': 1, 'bucket_1': 2, 'bucket_2': 3, 'bucket_3': 4,
+ 'bucket_4': 5, 'bucket_5': 0, 'bucket_6': 1, 'bucket_7': 2, 'bucket_8': 3,
+ 'bucket_9': 4, 'bucket_10': 5, 'bucket_11': 0, 'bucket_12': 1,
+ "max_irq": 0, "overflow": 10}
+
+ data_1 = {"cpu": 1, 'bucket_0': 1, 'bucket_1': 2, 'bucket_2': 3, 'bucket_3': 4,
+ 'bucket_4': 5, 'bucket_5': 0, 'bucket_6': 1, 'bucket_7': 2, 'bucket_8': 3,
+ 'bucket_9': 4, 'bucket_10': 5, 'bucket_11': 0, 'bucket_12': 1,
+ "max_irq": 0, "overflow": 10}
+
+ expected = {"core_0": data_0, "core_1": data_1}
+
+ result = prox.irq_core_stats([[0, 1], [1, 0]])
+ self.assertDictEqual(result, expected)
+
+ @mock.patch.object(prox_helpers.LOG, 'error')
def test_multi_port_stats(self, *args):
mock_socket = mock.MagicMock()
prox = prox_helpers.ProxSocketHelper(mock_socket)
- prox.get_data = mock.MagicMock(return_value='0,1,2,3,4,5;1,1,2,3,4,5')
+ prox.get_string = mock.MagicMock(return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5'))
expected = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]
- result = prox.multi_port_stats([0, 1])
+ status, result = prox.multi_port_stats([0, 1])
self.assertEqual(result, expected)
-
- prox.get_data = mock.MagicMock(return_value='0,1,2,3,4,5;1,1,2,3,4,5')
- result = prox.multi_port_stats([0])
- expected = [0]
+ self.assertEqual(status, True)
+
+ prox.get_string = mock.MagicMock(
+ return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5'))
+ status, result = prox.multi_port_stats([0])
+ self.assertEqual(status, False)
+
+ prox.get_string = mock.MagicMock(
+ return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5'))
+ status, result = prox.multi_port_stats([0, 1, 2])
+ self.assertEqual(status, False)
+
+ prox.get_string = mock.MagicMock(
+ return_value=(True, '0,1,2,3;1,1,2,3,4,5'))
+ status, result = prox.multi_port_stats([0, 1])
+ self.assertEqual(status, False)
+
+ prox.get_string = mock.MagicMock(
+ return_value=(True, '99,1,2,3,4,5;1,1,2,3,4,5'))
+ status, result = prox.multi_port_stats([0, 1])
+ self.assertEqual(status, False)
+
+ prox.get_string = mock.MagicMock(
+ return_value=(True, '99,1,2,3,4,5;1,1,2,3,4,5'))
+ status, result = prox.multi_port_stats([99, 1])
+ expected = [[99, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]
+ self.assertEqual(status, True)
self.assertEqual(result, expected)
- prox.get_data = mock.MagicMock(return_value='0,1,2,3;1,1,2,3,4,5')
- result = prox.multi_port_stats([0, 1])
- expected = [0] * 2
- self.assertEqual(result, expected)
+ prox.get_string = mock.MagicMock(
+ return_value=(True,
+ '2,21,22,23,24,25;1,11,12,13,14,15;0,1,2,3,4,5'))
+
+ sample1 = [0, 1, 2, 3, 4, 5]
+ sample2 = [1, 11, 12, 13, 14, 15]
+ sample3 = [2, 21, 22, 23, 24, 25]
+ expected = [sample3, sample2, sample1]
+ status, result = prox.multi_port_stats([1, 2, 0])
+ self.assertTrue(status)
+ self.assertListEqual(result, expected)
+
+ prox.get_string = mock.MagicMock(
+ return_value=(True, '6,21,22,23,24,25;1,11,12,13,14,15;0,1,2,3,4,5'))
+ ok, result = prox.multi_port_stats([1, 6, 0])
+ sample1 = [6, 21, 22, 23, 24, 25]
+ sample2 = [1, 11, 12, 13, 14, 15]
+ sample3 = [0, 1, 2, 3, 4, 5]
+ expected = [sample1, sample2, sample3]
+ self.assertListEqual(result, expected)
+ self.assertTrue(ok)
- prox.get_data = mock.MagicMock(return_value='99,1,2,3,4,5;1,1,2,3,4,5')
- expected = [0] * 2
- result = prox.multi_port_stats([0, 1])
- self.assertEqual(result, expected)
+ @mock.patch.object(prox_helpers.LOG, 'error')
+ def test_multi_port_stats_diff(self, *args):
+ mock_socket = mock.MagicMock()
+ prox = prox_helpers.ProxSocketHelper(mock_socket)
+ prox.get_string = mock.MagicMock(return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5'))
+ _, t1 = prox.multi_port_stats([0, 1])
+
+ prox.get_string = mock.MagicMock(return_value=(True, '0,2,4,6,8,6;1,4,8,16,32,6'))
+ _, t2 = prox.multi_port_stats([0, 1])
+
+ prox.get_string = mock.MagicMock(return_value=(True, '0,1,1,1,1,1;1,1,1,1,1,1'))
+ _, t3 = prox.multi_port_stats([0, 1])
+
+ prox.get_string = mock.MagicMock(return_value=(True, '0,2,2,2,2,2;1,2,2,2,2,2'))
+ _, t4 = prox.multi_port_stats([0, 1])
+
+ expected = [[0, 1.0, 2.0, 0, 0, 1], [1, 3.0, 6.0, 0, 0, 1]]
+ result = prox.multi_port_stats_diff(t1, t2, 1)
+
+ self.assertListEqual(result, expected)
+
+ result = prox.multi_port_stats_diff(t4, t3, 1)
+ expected = [[0, 1.0, 1.0, 0, 0, 1], [1, 1.0, 1.0, 0, 0, 1]]
+
+ self.assertListEqual(result, expected)
+ prox.get_string = mock.MagicMock(return_value=(True, '0,2,4,6,8,10'))
+ ok, t5 = prox.multi_port_stats([0, 1])
+ self.assertFalse(ok)
+ self.assertListEqual(t5, [])
+
+ result = prox.multi_port_stats_diff(t5, t4, 1)
+ expected = [[0, 0.0, 0.0, 0, 0, 0], [1, 0.0, 0.0, 0, 0, 0]]
+ self.assertListEqual(result, expected)
+
+ prox.get_string = mock.MagicMock(return_value=(True, '0,10,10,20,30,0;1,30,40,50,60,0'))
+ _, t6 = prox.multi_port_stats([0, 1])
+
+ prox.get_string = \
+ mock.MagicMock(return_value=(True, '0,100,100,100,100,0;1,100,100,100,100,0'))
+ _, t7 = prox.multi_port_stats([0, 1])
+
+ result = prox.multi_port_stats_diff(t6, t7, 1)
+ expected = [[0, 0.0, 0.0, 0, 0, 0], [1, 0.0, 0.0, 0, 0, 0]]
+ self.assertListEqual(result, expected)
+
+ result = prox.multi_port_stats_diff(t1, t2, 0)
+ expected = [[0, 0.0, 0.0, 0, 0, 1], [1, 0.0, 0.0, 0, 0, 1]]
+ self.assertListEqual(result, expected)
+
+ @mock.patch.object(prox_helpers.LOG, 'error')
+ def test_multi_port_stats_tuple(self, *args):
+ mock_socket = mock.MagicMock()
+ prox = prox_helpers.ProxSocketHelper(mock_socket)
+ prox.get_string = mock.MagicMock(return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5'))
+ _, result1 = prox.multi_port_stats([0, 1])
+ prox.get_string = mock.MagicMock(return_value=(True, '0,2,4,6,8,6;1,4,8,16,32,6'))
+ _, result2 = prox.multi_port_stats([0, 1])
+
+ result = prox.multi_port_stats_diff(result1, result2, 1)
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
+
+ expected = {'xe0': {'in_packets': 1.0, 'out_packets': 2.0},
+ 'xe1': {'in_packets': 3.0, 'out_packets': 6.0}}
+ live_stats = prox.multi_port_stats_tuple(result, vnfd_helper.ports_iter())
+ self.assertDictEqual(live_stats, expected)
+
+ live_stats = prox.multi_port_stats_tuple(result, None)
+ expected = {}
+ self.assertDictEqual(live_stats, expected)
+
+ live_stats = prox.multi_port_stats_tuple(None, vnfd_helper.ports_iter())
+ self.assertDictEqual(live_stats, expected)
def test_port_stats(self):
port_stats = [
@@ -1492,8 +1665,52 @@ class TestProxResourceHelper(unittest.TestCase):
helper = prox_helpers.ProxResourceHelper(mock.MagicMock())
helper._queue = queue = mock.MagicMock()
helper._result = {'z': 123}
+
+ helper.client = mock.MagicMock()
+ helper.client.hz.return_value = 1
+ helper.client.multi_port_stats.return_value = \
+ (True, [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]])
+ helper.client.multi_port_stats_diff.return_value = \
+ ([0, 1, 2, 3, 4, 5, 6, 7])
+ helper.client.multi_port_stats_tuple.return_value = \
+ {"xe0": {"in_packets": 1, "out_packets": 2}}
+ helper.resource = resource = mock.MagicMock()
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
+ helper.vnfd_helper = vnfd_helper
+
+ resource.check_if_system_agent_running.return_value = 0, '1234'
+ resource.amqp_collect_nfvi_kpi.return_value = 543
+ resource.check_if_system_agent_running.return_value = (0, None)
+
+ queue.empty.return_value = False
+ queue.get.return_value = {'a': 789}
+
+ expected = {'z': 123, 'a': 789,
+ 'collect_stats': {'core': 543},
+ 'live_stats': {'xe0': {'in_packets': 1, 'out_packets': 2}}}
+ result = helper.collect_kpi()
+ self.assertDictEqual(result, expected)
+
+ def test_collect_kpi_no_hz(self):
+ helper = prox_helpers.ProxResourceHelper(mock.MagicMock())
+ helper._queue = queue = mock.MagicMock()
+ helper._result = {'z': 123}
+
+ helper.client = mock.MagicMock()
+ helper.client.multi_port_stats.return_value = \
+ (True, [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]])
+ helper.client.multi_port_stats_diff.return_value = \
+ ([0, 1, 2, 3, 4, 5, 6, 7])
+ helper.client.multi_port_stats_tuple.return_value = \
+ {"xe0": {"in_packets": 1, "out_packets": 2}}
helper.resource = resource = mock.MagicMock()
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
+ helper.vnfd_helper = vnfd_helper
+
resource.check_if_system_agent_running.return_value = 0, '1234'
resource.amqp_collect_nfvi_kpi.return_value = 543
resource.check_if_system_agent_running.return_value = (0, None)
@@ -1501,7 +1718,39 @@ class TestProxResourceHelper(unittest.TestCase):
queue.empty.return_value = False
queue.get.return_value = {'a': 789}
- expected = {'z': 123, 'a': 789, 'collect_stats': {'core': 543}}
+ expected = {'z': 123, 'a': 789,
+ 'collect_stats': {'core': 543},
+ 'live_stats': {'xe0': {'in_packets': 1, 'out_packets': 2}}}
+ result = helper.collect_kpi()
+ self.assertDictEqual(result, expected)
+
+ def test_collect_kpi_bad_data(self):
+ helper = prox_helpers.ProxResourceHelper(mock.MagicMock())
+ helper._queue = queue = mock.MagicMock()
+ helper._result = {'z': 123}
+
+ helper.client = mock.MagicMock()
+ helper.client.multi_port_stats.return_value = \
+ (False, [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]])
+ helper.client.multi_port_stats_diff.return_value = \
+ ([0, 1, 2, 3, 4, 5, 6, 7])
+ helper.client.multi_port_stats_tuple.return_value = \
+ {"xe0": {"in_packets": 1, "out_packets": 2}}
+ helper.resource = resource = mock.MagicMock()
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
+ helper.vnfd_helper = vnfd_helper
+
+ resource.check_if_system_agent_running.return_value = 0, '1234'
+ resource.amqp_collect_nfvi_kpi.return_value = 543
+ resource.check_if_system_agent_running.return_value = (0, None)
+
+ queue.empty.return_value = False
+ queue.get.return_value = {'a': 789}
+
+ expected = {'z': 123, 'a': 789,
+ 'collect_stats': {'core': 543}}
result = helper.collect_kpi()
self.assertDictEqual(result, expected)
@@ -1584,8 +1833,9 @@ class TestProxDataHelper(unittest.TestCase):
vnfd_helper.port_pairs.all_ports = list(range(4))
sut = mock.MagicMock()
- sut.multi_port_stats.return_value = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5],
- [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]]
+ sut.multi_port_stats.return_value = (True,
+ [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5],
+ [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]])
data_helper = prox_helpers.ProxDataHelper(
vnfd_helper, sut, pkt_size, 25, None,
@@ -1593,14 +1843,77 @@ class TestProxDataHelper(unittest.TestCase):
self.assertEqual(data_helper.rx_total, 4)
self.assertEqual(data_helper.tx_total, 8)
- self.assertEqual(data_helper.requested_pps, 6.25e6)
+ self.assertEqual(data_helper.requested_pps, 6250000.0)
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = [3, 4]
+
+ sut = mock.MagicMock()
+ sut.multi_port_stats.return_value = (True,
+ [[3, 1, 2, 3, 4, 5], [4, 1, 2, 3, 4, 5]])
+
+ data_helper = prox_helpers.ProxDataHelper(
+ vnfd_helper, sut, pkt_size, 25, None,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ self.assertEqual(data_helper.rx_total, 2)
+ self.assertEqual(data_helper.tx_total, 4)
+ self.assertEqual(data_helper.requested_pps, 3125000.0)
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = [0, 1, 2, 3, 4, 6, 7]
+
+ sut = mock.MagicMock()
+ sut.multi_port_stats.return_value = (True,
+ [[8, 1, 2, 3, 4, 5], [9, 1, 2, 3, 4, 5]])
+
+ data_helper = prox_helpers.ProxDataHelper(
+ vnfd_helper, sut, pkt_size, 25, None,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ self.assertEqual(data_helper.rx_total, 2)
+ self.assertEqual(data_helper.tx_total, 4)
+ self.assertEqual(data_helper.requested_pps, 10937500.0)
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = []
+
+ sut = mock.MagicMock()
+ sut.multi_port_stats.return_value = (True,
+ [[8, 1, 2, 3, 4, 5], [9, 1, 2, 3, 4, 5]])
+
+ data_helper = prox_helpers.ProxDataHelper(
+ vnfd_helper, sut, pkt_size, 25, None,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ self.assertEqual(data_helper.rx_total, 2)
+ self.assertEqual(data_helper.tx_total, 4)
+ self.assertEqual(data_helper.requested_pps, 0.0)
+
+ def test_totals_and_pps2(self):
+ pkt_size = 180
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = list(range(4))
+
+ sut = mock.MagicMock()
+ sut.multi_port_stats.return_value = (True,
+ [[0, 'A', 2, 3, 4, 5], [1, 'B', 'C', 3, 4, 5],
+ ['D', 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 'F']])
+
+ data_helper = prox_helpers.ProxDataHelper(
+ vnfd_helper, sut, pkt_size, 25, None,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ self.assertEqual(data_helper.rx_total, 0)
+ self.assertEqual(data_helper.tx_total, 0)
+ self.assertEqual(data_helper.requested_pps, 0)
def test_samples(self):
vnfd_helper = mock.MagicMock()
vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
sut = mock.MagicMock()
- sut.multi_port_stats.return_value = [[0, 1, 2, 3, 4, 5], [1, 11, 12, 3, 4, 5]]
+ sut.multi_port_stats.return_value = (True, [[0, 1, 2, 3, 4, 5], [1, 11, 12, 3, 4, 5]])
data_helper = prox_helpers.ProxDataHelper(
vnfd_helper, sut, None, None, None, None)
@@ -1618,13 +1931,35 @@ class TestProxDataHelper(unittest.TestCase):
result = data_helper.samples
self.assertDictEqual(result, expected)
+ def test_samples2(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)]
+
+ sut = mock.MagicMock()
+ sut.multi_port_stats.return_value = (True, [[3, 1, 2, 3, 4, 5], [7, 11, 12, 3, 4, 5]])
+
+ data_helper = prox_helpers.ProxDataHelper(
+ vnfd_helper, sut, None, None, None, None)
+
+ expected = {
+ 'xe1': {
+ 'in_packets': 1,
+ 'out_packets': 2,
+ },
+ 'xe2': {
+ 'in_packets': 11,
+ 'out_packets': 12,
+ },
+ }
+ result = data_helper.samples
+ self.assertDictEqual(result, expected)
+
def test___enter__(self):
vnfd_helper = mock.MagicMock()
vnfd_helper.port_pairs.all_ports = list(range(4))
vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)]
sut = mock.MagicMock()
- sut.port_stats.return_value = list(range(10))
data_helper = prox_helpers.ProxDataHelper(vnfd_helper, sut, None, None,
5.4, constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
@@ -1978,7 +2313,6 @@ class TestProxProfileHelper(unittest.TestCase):
client = mock.MagicMock()
client.hz.return_value = 2
- client.port_stats.return_value = tuple(range(12))
helper.client = client
helper.get_latency = mock.MagicMock(return_value=[3.3, 3.6, 3.8])
@@ -1988,18 +2322,20 @@ class TestProxProfileHelper(unittest.TestCase):
with helper.traffic_context(64, 1):
pass
- @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
- def test_run_test(self, _):
+ def test_run_test(self, *args):
resource_helper = mock.MagicMock()
resource_helper.step_delta = 0.4
resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
- resource_helper.sut.port_stats.return_value = list(range(10))
+ resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 1, 2, 4, 5],
+ [1, 1, 2, 3, 4, 5]])
helper = prox_helpers.ProxProfileHelper(resource_helper)
- helper.run_test(120, 5, 6.5,
- constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
-
+ helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ self.assertTrue(resource_helper.sut.multi_port_stats.called)
+ self.assertTrue(resource_helper.sut.stop_all.called)
+ self.assertTrue(resource_helper.sut.reset_stats.called)
class TestProxMplsProfileHelper(unittest.TestCase):
@@ -2135,22 +2471,30 @@ class TestProxBngProfileHelper(unittest.TestCase):
self.assertEqual(helper.arp_task_cores, expected_arp_task)
self.assertEqual(helper._cores_tuple, expected_combined)
- @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
- def test_run_test(self, _):
+ def test_run_test(self, *args):
resource_helper = mock.MagicMock()
resource_helper.step_delta = 0.4
resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
- resource_helper.sut.port_stats.return_value = list(range(10))
+ resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 1, 2, 4, 5],
+ [1, 1, 2, 3, 4, 5]])
helper = prox_helpers.ProxBngProfileHelper(resource_helper)
- helper.run_test(120, 5, 6.5,
- constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ self.assertTrue(resource_helper.sut.multi_port_stats.called)
+ self.assertTrue(resource_helper.sut.stop_all.called)
+ self.assertTrue(resource_helper.sut.reset_stats.called)
+
+ resource_helper.reset_mock()
# negative pkt_size is the only way to make ratio > 1
- helper.run_test(-1000, 5, 6.5,
- constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ helper.run_test(pkt_size=-1000, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ self.assertTrue(resource_helper.sut.multi_port_stats.called)
+ self.assertTrue(resource_helper.sut.stop_all.called)
+ self.assertTrue(resource_helper.sut.reset_stats.called)
class TestProxVpeProfileHelper(unittest.TestCase):
@@ -2253,18 +2597,21 @@ class TestProxVpeProfileHelper(unittest.TestCase):
self.assertEqual(helper.inet_ports, expected_inet)
self.assertEqual(helper._ports_tuple, expected_combined)
- @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
- def test_run_test(self, _):
+ def test_run_test(self, *args):
resource_helper = mock.MagicMock()
resource_helper.step_delta = 0.4
resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
- resource_helper.sut.port_stats.return_value = list(range(10))
+ resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 1, 2, 4, 5],
+ [1, 1, 2, 3, 4, 5]])
helper = prox_helpers.ProxVpeProfileHelper(resource_helper)
- helper.run_test(120, 5, 6.5)
- helper.run_test(-1000, 5, 6.5) # negative pkt_size is the only way to make ratio > 1
+ helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ # negative pkt_size is the only way to make ratio > 1
+ helper.run_test(pkt_size=-1000, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
class TestProxlwAFTRProfileHelper(unittest.TestCase):
@@ -2367,14 +2714,30 @@ class TestProxlwAFTRProfileHelper(unittest.TestCase):
self.assertEqual(helper.inet_ports, expected_inet)
self.assertEqual(helper._ports_tuple, expected_combined)
- @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
- def test_run_test(self, _):
+ def test_run_test(self, *args):
resource_helper = mock.MagicMock()
resource_helper.step_delta = 0.4
resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
- resource_helper.sut.port_stats.return_value = list(range(10))
+ resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 2, 4, 6, 5],
+ [1, 1, 2, 3, 4, 5]])
helper = prox_helpers.ProxlwAFTRProfileHelper(resource_helper)
- helper.run_test(120, 5, 6.5)
- helper.run_test(-1000, 5, 6.5) # negative pkt_size is the only way to make ratio > 1
+ helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ # negative pkt_size is the only way to make ratio > 1
+ helper.run_test(pkt_size=-1000, duration=5, value=6.5, tolerated_loss=0.0,
+ line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+
+class TestProxIrqProfileHelper(unittest.TestCase):
+
+ def test_run_test(self, *args):
+ resource_helper = mock.MagicMock()
+ helper = prox_helpers.ProxIrqProfileHelper(resource_helper)
+ self.assertIsNone(helper._cores_tuple)
+ self.assertIsNone(helper._ports_tuple)
+ self.assertIsNone(helper._latency_cores)
+ self.assertIsNone(helper._test_cores)
+ self.assertIsNone(helper._cpu_topology)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py
new file mode 100644
index 000000000..4eaa38c27
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py
@@ -0,0 +1,828 @@
+# Copyright (c) 2017-2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import mock
+import errno
+
+from yardstick.tests import STL_MOCKS
+from yardstick.common import exceptions as y_exceptions
+from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqGen
+from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqVNF
+from yardstick.benchmark.contexts import base as ctx_base
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf import prox_vnf
+ from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+
+VNF_NAME = "vnf__1"
+
+class TestProxIrqVNF(unittest.TestCase):
+
+ SCENARIO_CFG = {
+ 'task_path': "",
+ 'nodes': {
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'runner': {
+ 'duration': 600, 'type': 'Duration'},
+ 'topology': 'prox-tg-topology-2.yaml',
+ 'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
+ 'type': 'NSPerf',
+ 'options': {
+ 'tg__1': {'prox_args': {'-e': '',
+ '-t': ''},
+ 'prox_config': 'configs/l3-gen-2.cfg',
+ 'prox_path':
+ '/root/dppd-PROX-v035/build/prox'},
+ 'vnf__1': {
+ 'prox_args': {'-t': ''},
+ 'prox_config': 'configs/l3-swap-2.cfg',
+ 'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ]
+ }
+ }
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ CONTEXT_CFG = {
+ 'nodes': {
+ 'tg__2': {
+ 'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens513f0',
+ 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root',
+ },
+ 'tg__1': {
+ 'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens785f0',
+ 'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root',
+ },
+ 'vnf__1': {
+ 'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens786f0',
+ 'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens786f1',
+ 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'password': 'r00t',
+ 'VNF model': 'prox_vnf.yaml',
+ },
+ },
+ }
+
+ def test___init__(self):
+ prox_irq_vnf = ProxIrqVNF('vnf1', self.VNFD_0, 'task_id')
+
+ self.assertEqual(prox_irq_vnf.name, 'vnf1')
+ self.assertDictEqual(prox_irq_vnf.vnfd_helper, self.VNFD_0)
+
+ @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ resource_helper = mock.MagicMock()
+
+ core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
+ 'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10,
+ 'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12,
+ 'overflow': 10}
+ core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
+ 'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0,
+ 'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12,
+ 'overflow': 10}
+
+ irq_data = {'core_1': core_1, 'core_2': core_2}
+ resource_helper.execute.return_value = (irq_data)
+
+ build_config_file = mock.MagicMock()
+ build_config_file.return_value = None
+
+ prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd, 'task_id')
+
+ startup = ["global", [["eal", "-4"]]]
+ master_0 = ["core 0", [["mode", "master"]]]
+ core_1 = ["core 1", [["mode", "irq"]]]
+ core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]]
+
+ prox_irq_vnf.setup_helper._prox_config_data = \
+ [startup, master_0, core_1, core_2]
+
+ prox_irq_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
+ prox_irq_vnf.resource_helper = resource_helper
+ prox_irq_vnf.setup_helper.build_config_file = build_config_file
+
+ result = prox_irq_vnf.collect_kpi()
+ self.assertDictEqual(result["collect_stats"], {})
+
+ result = prox_irq_vnf.collect_kpi()
+ self.assertFalse('bucket_10' in result["collect_stats"]['core_2'])
+ self.assertFalse('bucket_11' in result["collect_stats"]['core_2'])
+ self.assertFalse('bucket_12' in result["collect_stats"]['core_2'])
+ self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12)
+
+
+ @mock.patch(SSH_HELPER)
+ def test_vnf_execute_oserror(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd, 'task_id')
+ prox_irq_vnf.resource_helper = resource_helper = mock.Mock()
+
+ resource_helper.execute.side_effect = OSError(errno.EPIPE, "")
+ prox_irq_vnf.vnf_execute("", _ignore_errors=True)
+
+ resource_helper.execute.side_effect = OSError(errno.ESHUTDOWN, "")
+ prox_irq_vnf.vnf_execute("", _ignore_errors=True)
+
+ resource_helper.execute.side_effect = OSError(errno.EADDRINUSE, "")
+ with self.assertRaises(OSError):
+ prox_irq_vnf.vnf_execute("", _ignore_errors=True)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, *args):
+ mock_ssh(ssh)
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+
+ mock_ssh(ssh, exec_result=(1, "", ""))
+ prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd, 'task_id')
+
+ prox_irq_vnf._terminated = mock.MagicMock()
+ prox_irq_vnf._traffic_process = mock.MagicMock()
+ prox_irq_vnf._traffic_process.terminate = mock.Mock()
+ prox_irq_vnf.ssh_helper = mock.MagicMock()
+ prox_irq_vnf.setup_helper = mock.MagicMock()
+ prox_irq_vnf.resource_helper = mock.MagicMock()
+ prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock()
+ prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False})
+ prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock()
+
+ prox_irq_vnf._run_prox = mock.Mock(return_value=0)
+ prox_irq_vnf.q_in = mock.Mock()
+ prox_irq_vnf.q_out = mock.Mock()
+
+ self.assertIsNone(prox_irq_vnf.terminate())
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate_panic(self, ssh, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+
+ mock_ssh(ssh, exec_result=(1, "", ""))
+ prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd, 'task_id')
+
+ prox_irq_vnf._terminated = mock.MagicMock()
+ prox_irq_vnf._traffic_process = mock.MagicMock()
+ prox_irq_vnf._traffic_process.terminate = mock.Mock()
+ prox_irq_vnf.ssh_helper = mock.MagicMock()
+ prox_irq_vnf.setup_helper = mock.MagicMock()
+ prox_irq_vnf.resource_helper = mock.MagicMock()
+ prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock()
+ prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False})
+ prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock()
+
+ prox_irq_vnf._run_prox = mock.Mock(return_value=0)
+ prox_irq_vnf.q_in = mock.Mock()
+ prox_irq_vnf.q_out = mock.Mock()
+ prox_irq_vnf.WAIT_TIME = 0
+ with self.assertRaises(RuntimeError):
+ prox_irq_vnf.wait_for_instantiate()
+
+class TestProxIrqGen(unittest.TestCase):
+
+ SCENARIO_CFG = {
+ 'task_path': "",
+ 'nodes': {
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'runner': {
+ 'duration': 600, 'type': 'Duration'},
+ 'topology': 'prox-tg-topology-2.yaml',
+ 'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
+ 'type': 'NSPerf',
+ 'options': {
+ 'tg__1': {'prox_args': {'-e': '',
+ '-t': ''},
+ 'prox_config': 'configs/l3-gen-2.cfg',
+ 'prox_path':
+ '/root/dppd-PROX-v035/build/prox'},
+ 'vnf__1': {
+ 'prox_args': {'-t': ''},
+ 'prox_config': 'configs/l3-swap-2.cfg',
+ 'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'driver': 'i40e',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'driver': 'ixgbe',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ],
+ },
+ }
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ CONTEXT_CFG = {
+ 'nodes': {
+ 'tg__2': {
+ 'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens513f0',
+ 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root',
+ },
+ 'tg__1': {
+ 'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens785f0',
+ 'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root',
+ },
+ 'vnf__1': {
+ 'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens786f0',
+ 'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens786f1',
+ 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'password': 'r00t',
+ 'VNF model': 'prox_vnf.yaml',
+ },
+ },
+ }
+
+
+ def test__check_status(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+
+ with self.assertRaises(NotImplementedError):
+ prox_irq_gen._check_status()
+
+ def test_listen_traffic(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+
+ prox_irq_gen.listen_traffic(mock.Mock())
+
+ def test_verify_traffic(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+
+ prox_irq_gen.verify_traffic(mock.Mock())
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, *args):
+ mock_ssh(ssh)
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ prox_traffic_gen = ProxIrqGen(VNF_NAME, vnfd, 'task_id')
+ prox_traffic_gen._terminated = mock.MagicMock()
+ prox_traffic_gen._traffic_process = mock.MagicMock()
+ prox_traffic_gen._traffic_process.terminate = mock.Mock()
+ prox_traffic_gen.ssh_helper = mock.MagicMock()
+ prox_traffic_gen.setup_helper = mock.MagicMock()
+ prox_traffic_gen.resource_helper = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper.setup_helper = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper._vnf_process = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper.resource_helper = mock.MagicMock()
+ self.assertIsNone(prox_traffic_gen.terminate())
+
+ def test__wait_for_process(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+ with mock.patch.object(prox_irq_gen, '_check_status',
+ return_value=0) as mock_status, \
+ mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc:
+ mock_proc.is_alive.return_value = True
+ mock_proc.exitcode = 234
+ self.assertEqual(prox_irq_gen._wait_for_process(), 234)
+ mock_proc.is_alive.assert_called_once()
+ mock_status.assert_called_once()
+
+ def test__wait_for_process_not_alive(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+ with mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc:
+ mock_proc.is_alive.return_value = False
+ self.assertRaises(RuntimeError, prox_irq_gen._wait_for_process)
+ mock_proc.is_alive.assert_called_once()
+
+ def test__wait_for_process_delayed(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+ with mock.patch.object(prox_irq_gen, '_check_status',
+ side_effect=[1, 0]) as mock_status, \
+ mock.patch.object(prox_irq_gen,
+ '_tg_process') as mock_proc:
+ mock_proc.is_alive.return_value = True
+ mock_proc.exitcode = 234
+ self.assertEqual(prox_irq_gen._wait_for_process(), 234)
+ mock_proc.is_alive.assert_has_calls([mock.call(), mock.call()])
+ mock_status.assert_has_calls([mock.call(), mock.call()])
+
+ def test_scale(self):
+ prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0, 'task_id')
+ self.assertRaises(y_exceptions.FunctionNotImplemented,
+ prox_irq_gen.scale)
+
+ @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ resource_helper = mock.MagicMock()
+
+ core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
+ 'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10,
+ 'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12,
+ 'overflow': 10}
+ core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
+ 'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0,
+ 'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12,
+ 'overflow': 10}
+
+ irq_data = {'core_1': core_1, 'core_2': core_2}
+ resource_helper.sut.irq_core_stats.return_value = (irq_data)
+
+ build_config_file = mock.MagicMock()
+ build_config_file.return_value = None
+
+ prox_irq_gen = ProxIrqGen(VNF_NAME, vnfd, 'task_id')
+
+ startup = ["global", [["eal", "-4"]]]
+ master_0 = ["core 0", [["mode", "master"]]]
+ core_1 = ["core 1", [["mode", "irq"]]]
+ core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]]
+
+ prox_irq_gen.setup_helper._prox_config_data = \
+ [startup, master_0, core_1, core_2]
+
+ prox_irq_gen.scenario_helper.scenario_cfg = self.SCENARIO_CFG
+ prox_irq_gen.resource_helper = resource_helper
+ prox_irq_gen.setup_helper.build_config_file = build_config_file
+
+ result = prox_irq_gen.collect_kpi()
+ self.assertDictEqual(result["collect_stats"], {})
+
+ result = prox_irq_gen.collect_kpi()
+ self.assertFalse('bucket_10' in result["collect_stats"]['core_2'])
+ self.assertFalse('bucket_11' in result["collect_stats"]['core_2'])
+ self.assertFalse('bucket_12' in result["collect_stats"]['core_2'])
+ self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
index f144e8c42..62cbea0bb 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -335,6 +335,8 @@ class TestProxApproxVnf(unittest.TestCase):
'packets_in': 0,
'packets_dropped': 0,
'packets_fwd': 0,
+ 'curr_packets_in': 0,
+ 'curr_packets_fwd': 0,
'collect_stats': {'core': {}}
}
result = prox_approx_vnf.collect_kpi()
@@ -346,8 +348,8 @@ class TestProxApproxVnf(unittest.TestCase):
mock_ssh(ssh)
resource_helper = mock.MagicMock()
- resource_helper.execute.return_value = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5],
- [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]]
+ resource_helper.execute.return_value = (True,
+ [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]])
resource_helper.collect_collectd_kpi.return_value = {'core': {'result': 234}}
prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
@@ -355,20 +357,61 @@ class TestProxApproxVnf(unittest.TestCase):
'nodes': {prox_approx_vnf.name: "mock"}
}
prox_approx_vnf.resource_helper = resource_helper
+ prox_approx_vnf.tsc_hz = 1000
expected = {
+ 'curr_packets_in': 200,
+ 'curr_packets_fwd': 400,
'physical_node': 'mock_node',
- 'packets_in': 4,
- 'packets_dropped': 4,
- 'packets_fwd': 8,
+ 'packets_in': 2,
+ 'packets_dropped': 2,
+ 'packets_fwd': 4,
'collect_stats': {'core': {'result': 234}},
}
result = prox_approx_vnf.collect_kpi()
self.assertEqual(result['packets_in'], expected['packets_in'])
self.assertEqual(result['packets_dropped'], expected['packets_dropped'])
self.assertEqual(result['packets_fwd'], expected['packets_fwd'])
- self.assertNotEqual(result['packets_fwd'], 0)
- self.assertNotEqual(result['packets_fwd'], 0)
+ self.assertEqual(result['curr_packets_in'], expected['curr_packets_in'])
+ self.assertEqual(result['curr_packets_fwd'], expected['curr_packets_fwd'])
+
+ @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi_bad_input(self, ssh, *args):
+ mock_ssh(ssh)
+
+ resource_helper = mock.MagicMock()
+ resource_helper.execute.return_value = (True,
+ [[0, 'A', 'B', 'C', 'D', 'E'],
+ ['F', 1, 2, 3, 4, 5]])
+
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
+ prox_approx_vnf.scenario_helper.scenario_cfg = {
+ 'nodes': {prox_approx_vnf.name: "mock"}
+ }
+ prox_approx_vnf.resource_helper = resource_helper
+
+ result = prox_approx_vnf.collect_kpi()
+ self.assertDictEqual(result, {})
+
+ @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi_bad_input2(self, ssh, *args):
+ mock_ssh(ssh)
+
+ resource_helper = mock.MagicMock()
+ resource_helper.execute.return_value = (False,
+ [[0, 'A', 'B', 'C', 'D', 'E'],
+ ['F', 1, 2, 3, 4, 5]])
+
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
+ prox_approx_vnf.scenario_helper.scenario_cfg = {
+ 'nodes': {prox_approx_vnf.name: "mock"}
+ }
+ prox_approx_vnf.resource_helper = resource_helper
+
+ result = prox_approx_vnf.collect_kpi()
+ self.assertDictEqual(result, {})
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
@mock.patch(SSH_HELPER)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index 4a1d8c30e..43682dd07 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -16,7 +16,6 @@ from copy import deepcopy
import unittest
import mock
-import six
from yardstick.common import exceptions as y_exceptions
from yardstick.common import utils
@@ -521,38 +520,6 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options)
self.assertEqual(result, expected)
- @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
- @mock.patch.object(utils, 'read_meminfo',
- return_value={'Hugepagesize': '2048'})
- def test__setup_hugepages_no_hugepages_defined(self, mock_meminfo, *args):
- ssh_helper = mock.Mock()
- scenario_helper = mock.Mock()
- scenario_helper.all_options = {}
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(
- mock.ANY, ssh_helper, scenario_helper)
- with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
- dpdk_setup_helper._setup_hugepages()
- mock_info.assert_called_once_with(
- 'Hugepages size (kB): %s, number claimed: %s, number set: '
- '%s', 2048, 8192, 100)
- mock_meminfo.assert_called_once_with(ssh_helper)
-
- @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
- @mock.patch.object(utils, 'read_meminfo',
- return_value={'Hugepagesize': '1048576'})
- def test__setup_hugepages_8gb_hugepages_defined(self, mock_meminfo, *args):
- ssh_helper = mock.Mock()
- scenario_helper = mock.Mock()
- scenario_helper.all_options = {'hugepages_gb': 8}
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(
- mock.ANY, ssh_helper, scenario_helper)
- with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
- dpdk_setup_helper._setup_hugepages()
- mock_info.assert_called_once_with(
- 'Hugepages size (kB): %s, number claimed: %s, number set: '
- '%s', 1048576, 8, 100)
- mock_meminfo.assert_called_once_with(ssh_helper)
-
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
@mock.patch.object(utils, 'find_relative_file')
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
@@ -638,15 +605,17 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
dpdk_vnf_setup_env_helper.setup_vnf_environment(),
ResourceProfile)
- def test__setup_dpdk(self):
+ @mock.patch.object(utils, 'setup_hugepages')
+ def test__setup_dpdk(self, mock_setup_hugepages):
ssh_helper = mock.Mock()
ssh_helper.execute = mock.Mock()
ssh_helper.execute.return_value = (0, 0, 0)
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY)
- with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \
- mock_setup_hp:
- dpdk_setup_helper._setup_dpdk()
- mock_setup_hp.assert_called_once()
+ scenario_helper = mock.Mock()
+ scenario_helper.all_options = {'hugepages_gb': 8}
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper,
+ scenario_helper)
+ dpdk_setup_helper._setup_dpdk()
+ mock_setup_hugepages.assert_called_once_with(ssh_helper, 8*1024*1024)
ssh_helper.execute.assert_has_calls([
mock.call('sudo modprobe uio && sudo modprobe igb_uio'),
mock.call('lsmod | grep -i igb_uio')
@@ -1206,6 +1175,7 @@ class TestRfc2544ResourceHelper(unittest.TestCase):
self.assertIsNone(rfc2544_resource_helper._tolerance_high)
self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.15)
self.assertEqual(rfc2544_resource_helper._tolerance_high, 0.15)
+ self.assertEqual(rfc2544_resource_helper._tolerance_precision, 2)
scenario_helper.scenario_cfg = {} # ensure that resource_helper caches
self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.15)
@@ -1240,6 +1210,7 @@ class TestRfc2544ResourceHelper(unittest.TestCase):
rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.2)
+ self.assertEqual(rfc2544_resource_helper._tolerance_precision, 1)

def test_property_tolerance_low_not_range(self):
scenario_helper = ScenarioHelper('name1')
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
index a7e61da0f..935d3fa30 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
@@ -329,13 +329,27 @@ class TestProxTrafficGen(unittest.TestCase):
}
prox_traffic_gen._vnf_wrapper.resource_helper.resource = mock.MagicMock(
**{"self.check_if_system_agent_running.return_value": [False]})
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
+ prox_traffic_gen.resource_helper.vnfd_helper = vnfd_helper
+
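+ # stub the PROX client stats calls so collect_kpi() can report live_stats for xe0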
+ prox_traffic_gen._vnf_wrapper.resource_helper.client = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper.resource_helper.client.multi_port_stats.return_value = \
+ [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]
+ prox_traffic_gen._vnf_wrapper.resource_helper.client.multi_port_stats_diff.return_value = \
+ [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]
+ prox_traffic_gen._vnf_wrapper.resource_helper.client.\
+ multi_port_stats_tuple.return_value = \
+ {"xe0": {"in_packets": 1, "out_packets": 2}}
+
prox_traffic_gen._vnf_wrapper.vnf_execute = mock.Mock(return_value="")
expected = {
- 'collect_stats': {},
+ 'collect_stats': {'live_stats': {'xe0': {'in_packets': 1, 'out_packets': 2}}},
'physical_node': 'mock_node'
}
- self.assertEqual(prox_traffic_gen.collect_kpi(), expected)
-
+ result = prox_traffic_gen.collect_kpi()
+ self.assertDictEqual(result, expected)

@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.find_relative_file')
@mock.patch(
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index ec0e6aa6d..65bf56f1e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -17,8 +17,11 @@ import os
import mock
import six
import unittest
+import ipaddress
+from collections import OrderedDict
from yardstick.common import utils
+from yardstick.common import exceptions
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -49,12 +52,43 @@ class TestIxiaResourceHelper(unittest.TestCase):
mock.Mock(), MyRfcHelper)
self.assertIsInstance(ixia_resource_helper.rfc_helper, MyRfcHelper)
+ def test__init_ix_scenario(self):
+ mock_scenario = mock.Mock()
+ mock_scenario_helper = mock.Mock()
+ mock_scenario_helper.scenario_cfg = {'ixia_config': 'TestScenario',
+ 'options': 'scenario_options'}
+ mock_setup_helper = mock.Mock(scenario_helper=mock_scenario_helper)
+ ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock_setup_helper)
+ ixia_resource_helper._ixia_scenarios = {'TestScenario': mock_scenario}
+ ixia_resource_helper.client = 'client'
+ ixia_resource_helper.context_cfg = 'context'
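+ # the class registered under the 'ixia_config' name is instantiated with client, context and scenario options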
+ ixia_resource_helper._init_ix_scenario()
+ mock_scenario.assert_called_once_with('client', 'context', 'scenario_options')
+
+ def test__init_ix_scenario_not_supported_cfg_type(self):
+ mock_scenario_helper = mock.Mock()
+ mock_scenario_helper.scenario_cfg = {'ixia_config': 'FakeScenario',
+ 'options': 'scenario_options'}
+ mock_setup_helper = mock.Mock(scenario_helper=mock_scenario_helper)
+ ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock_setup_helper)
+ ixia_resource_helper._ixia_scenarios = {'TestScenario': mock.Mock()}
+ with self.assertRaises(RuntimeError):
+ ixia_resource_helper._init_ix_scenario()
+
+ @mock.patch.object(tg_rfc2544_ixia.IxiaResourceHelper, '_init_ix_scenario')
+ def test_setup(self, mock__init_ix_scenario):
+ ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
+ ixia_resource_helper.setup()
+ mock__init_ix_scenario.assert_called_once()
+
def test_stop_collect_with_client(self):
mock_client = mock.Mock()
ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
ixia_resource_helper.client = mock_client
+ ixia_resource_helper._ix_scenario = mock.Mock()
ixia_resource_helper.stop_collect()
self.assertEqual(1, ixia_resource_helper._terminated.value)
+ ixia_resource_helper._ix_scenario.stop_protocols.assert_called_once()

def test_run_traffic(self):
mock_tprofile = mock.Mock()
@@ -63,6 +97,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
ixia_rhelper.rfc_helper = mock.Mock()
ixia_rhelper.vnfd_helper = mock.Mock()
+ ixia_rhelper._ix_scenario = mock.Mock()
ixia_rhelper.vnfd_helper.port_pairs.all_ports = []
with mock.patch.object(ixia_rhelper, 'generate_samples'), \
mock.patch.object(ixia_rhelper, '_build_ports'), \
@@ -71,6 +106,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
ixia_rhelper.run_traffic(mock_tprofile)
self.assertEqual('fake_samples', ixia_rhelper._queue.get())
+ mock_tprofile.update_traffic_profile.assert_called_once()

@mock.patch.object(tg_rfc2544_ixia, 'ixnet_api')
@@ -205,8 +241,8 @@ class TestIXIATrafficGen(unittest.TestCase):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd,
'task_id')
- scenario_cfg = {'tc': "nsb_test_case", "topology": "",
- 'ixia_profile': "ixload.cfg"}
+ scenario_cfg = {'tc': "nsb_test_case",
+ "topology": ""}
scenario_cfg.update(
{
'options': {
@@ -261,8 +297,8 @@ class TestIXIATrafficGen(unittest.TestCase):
ssh_mock.execute = \
mock.Mock(return_value=(0, "", ""))
ssh.from_node.return_value = ssh_mock
- ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd,
- 'task_id')
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(
+ NAME, vnfd, 'task_id', resource_helper_type=mock.Mock())
ixnet_traffic_gen._terminated = mock.MagicMock()
ixnet_traffic_gen._terminated.value = 0
ixnet_traffic_gen._ixia_traffic_gen = mock.MagicMock()
@@ -360,6 +396,7 @@ class TestIXIATrafficGen(unittest.TestCase):
sut.resource_helper.client_started = mock.MagicMock()
sut.resource_helper.client_started.value = 1
sut.resource_helper.rfc_helper.iteration.value = 11
+ sut.resource_helper._ix_scenario = mock.Mock()
sut.scenario_helper.scenario_cfg = {
'options': {
@@ -379,7 +416,6 @@ class TestIXIATrafficGen(unittest.TestCase):
},
},
},
- 'ixia_profile': '/path/to/profile',
'task_path': '/path/to/task'
}
@@ -393,3 +429,442 @@ class TestIXIATrafficGen(unittest.TestCase):
self.assertIsNone(result)
_traffic_runner()
+
+
+class TestIxiaBasicScenario(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_IxNextgen = mock.patch.object(ixnet_api, 'IxNextgen')
+ self.mock_IxNextgen = self._mock_IxNextgen.start()
+ self.context_cfg = mock.Mock()
+ self.ixia_cfg = mock.Mock()
+ self.scenario = tg_rfc2544_ixia.IxiaBasicScenario(self.mock_IxNextgen,
+ self.context_cfg,
+ self.ixia_cfg)
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_IxNextgen.stop()
+
+ def test___init___(self):
+ self.assertIsInstance(self.scenario, tg_rfc2544_ixia.IxiaBasicScenario)
+ self.assertEqual(self.scenario.client, self.mock_IxNextgen)
+
+ def test_apply_config(self):
+ self.assertIsNone(self.scenario.apply_config())
+
+ def test_create_traffic_model(self):
+ self.mock_IxNextgen.get_vports.return_value = [1, 2, 3, 4]
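+ # vports alternate uplink/downlink, so the traffic model is built from uplink [1, 3] and downlink [2, 4]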
+ self.scenario.create_traffic_model()
+ self.scenario.client.get_vports.assert_called_once()
+ self.scenario.client.create_traffic_model.assert_called_once_with([1, 3], [2, 4])
+
+ def test_run_protocols(self):
+ self.assertIsNone(self.scenario.run_protocols())
+
+ def test_stop_protocols(self):
+ self.assertIsNone(self.scenario.stop_protocols())
+
+
+class TestIxiaPppoeClientScenario(unittest.TestCase):
+
+ IXIA_CFG = {
+ 'pppoe_client': {
+ 'sessions_per_port': 4,
+ 'sessions_per_svlan': 1,
+ 's_vlan': 10,
+ 'c_vlan': 20,
+ 'ip': ['10.3.3.1', '10.4.4.1']
+ },
+ 'ipv4_client': {
+ 'sessions_per_port': 1,
+ 'sessions_per_vlan': 1,
+ 'vlan': 101,
+ 'gateway_ip': ['10.1.1.1', '10.2.2.1'],
+ 'ip': ['10.1.1.1', '10.2.2.1'],
+ 'prefix': ['24', '24']
+ }
+ }
+
+ CONTEXT_CFG = {
+ 'nodes': {'tg__0': {
+ 'interfaces': {'xe0': {
+ 'local_ip': '10.1.1.1',
+ 'netmask': '255.255.255.0'
+ }}}}}
+
+ def setUp(self):
+ self._mock_IxNextgen = mock.patch.object(ixnet_api, 'IxNextgen')
+ self.mock_IxNextgen = self._mock_IxNextgen.start()
+ self.scenario = tg_rfc2544_ixia.IxiaPppoeClientScenario(
+ self.mock_IxNextgen, self.CONTEXT_CFG, self.IXIA_CFG)
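+ # shorten the module-level protocols-started wait so the timeout test below finishes quickly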
+ tg_rfc2544_ixia.WAIT_PROTOCOLS_STARTED = 2
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_IxNextgen.stop()
+
+ def test___init___(self):
+ self.assertIsInstance(self.scenario, tg_rfc2544_ixia.IxiaPppoeClientScenario)
+ self.assertEqual(self.scenario.client, self.mock_IxNextgen)
+
+ @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario,
+ '_fill_ixia_config')
+ @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario,
+ '_apply_access_network_config')
+ @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario,
+ '_apply_core_network_config')
+ def test_apply_config(self, mock_apply_core_net_cfg,
+ mock_apply_access_net_cfg,
+ mock_fill_ixia_config):
+ self.mock_IxNextgen.get_vports.return_value = [1, 2, 3, 4]
+ self.scenario.apply_config()
+ self.scenario.client.get_vports.assert_called_once()
+ self.assertEqual(self.scenario._uplink_vports, [1, 3])
+ self.assertEqual(self.scenario._downlink_vports, [2, 4])
+ mock_fill_ixia_config.assert_called_once()
+ mock_apply_core_net_cfg.assert_called_once()
+ mock_apply_access_net_cfg.assert_called_once()
+
+ @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario,
+ '_get_endpoints_src_dst_id_pairs')
+ @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario,
+ '_get_endpoints_src_dst_obj_pairs')
+ def test_create_traffic_model(self, mock_obj_pairs, mock_id_pairs):
+ uplink_endpoints = ['group1', 'group2']
+ downlink_endpoints = ['group3', 'group3']
+ mock_id_pairs.return_value = ['xe0', 'xe1', 'xe0', 'xe1']
+ mock_obj_pairs.return_value = ['group1', 'group3', 'group2', 'group3']
+ mock_tp = mock.Mock()
+ mock_tp.full_profile = {'uplink_0': 'data',
+ 'downlink_0': 'data',
+ 'uplink_1': 'data',
+ 'downlink_1': 'data'
+ }
+ self.scenario.create_traffic_model(mock_tp)
+ mock_id_pairs.assert_called_once_with(mock_tp.full_profile)
+ mock_obj_pairs.assert_called_once_with(['xe0', 'xe1', 'xe0', 'xe1'])
+ self.scenario.client.create_ipv4_traffic_model.assert_called_once_with(
+ uplink_endpoints, downlink_endpoints)
+
+ def test__get_endpoints_src_dst_id_pairs(self):
+ full_tp = OrderedDict([
+ ('uplink_0', {'ipv4': {'port': 'xe0'}}),
+ ('downlink_0', {'ipv4': {'port': 'xe1'}}),
+ ('uplink_1', {'ipv4': {'port': 'xe0'}}),
+ ('downlink_1', {'ipv4': {'port': 'xe3'}})])
+ endpoints_src_dst_pairs = ['xe0', 'xe1', 'xe0', 'xe3']
+ res = self.scenario._get_endpoints_src_dst_id_pairs(full_tp)
+ self.assertEqual(res, endpoints_src_dst_pairs)
+
+ def test__get_endpoints_src_dst_id_pairs_wrong_flows_number(self):
+ full_tp = OrderedDict([
+ ('uplink_0', {'ipv4': {'port': 'xe0'}}),
+ ('downlink_0', {'ipv4': {'port': 'xe1'}}),
+ ('uplink_1', {'ipv4': {'port': 'xe0'}})])
+ with self.assertRaises(RuntimeError):
+ self.scenario._get_endpoints_src_dst_id_pairs(full_tp)
+
+ def test__get_endpoints_src_dst_id_pairs_no_port_key(self):
+ full_tp = OrderedDict([
+ ('uplink_0', {'ipv4': {'id': 1}}),
+ ('downlink_0', {'ipv4': {'id': 2}})])
+ self.assertEqual(
+ self.scenario._get_endpoints_src_dst_id_pairs(full_tp), [])
+
+ def test__get_endpoints_src_dst_obj_pairs_tp_with_port_key(self):
+ endpoints_id_pairs = ['xe0', 'xe1',
+ 'xe0', 'xe1',
+ 'xe0', 'xe3',
+ 'xe0', 'xe3']
+ ixia_cfg = {
+ 'pppoe_client': {
+ 'sessions_per_port': 4,
+ 'sessions_per_svlan': 1
+ },
+ 'flow': {
+ 'src_ip': [{'tg__0': 'xe0'}, {'tg__0': 'xe2'}],
+ 'dst_ip': [{'tg__0': 'xe1'}, {'tg__0': 'xe3'}]
+ }
+ }
+
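+ # flows sent towards xe1 pair access topology 1 device groups with core topology 3,
+ # while flows towards xe3 pair them with core topology 4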
+ expected_result = ['tp1_dg1', 'tp3_dg1', 'tp1_dg2', 'tp3_dg1',
+ 'tp1_dg3', 'tp4_dg1', 'tp1_dg4', 'tp4_dg1']
+
+ self.scenario._ixia_cfg = ixia_cfg
+ self.scenario._access_topologies = ['topology1', 'topology2']
+ self.scenario._core_topologies = ['topology3', 'topology4']
+ self.mock_IxNextgen.get_topology_device_groups.side_effect = \
+ [['tp1_dg1', 'tp1_dg2', 'tp1_dg3', 'tp1_dg4'],
+ ['tp2_dg1', 'tp2_dg2', 'tp2_dg3', 'tp2_dg4'],
+ ['tp3_dg1'],
+ ['tp4_dg1']]
+ res = self.scenario._get_endpoints_src_dst_obj_pairs(
+ endpoints_id_pairs)
+ self.assertEqual(res, expected_result)
+
+ def test__get_endpoints_src_dst_obj_pairs_default_flows_mapping(self):
+ endpoints_id_pairs = []
+ ixia_cfg = {
+ 'pppoe_client': {
+ 'sessions_per_port': 4,
+ 'sessions_per_svlan': 1
+ },
+ 'flow': {
+ 'src_ip': [{'tg__0': 'xe0'}, {'tg__0': 'xe2'}],
+ 'dst_ip': [{'tg__0': 'xe1'}, {'tg__0': 'xe3'}]
+ }
+ }
+
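+ # without explicit port pairs every access device group is mapped to the core device group of the same port index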
+ expected_result = ['tp1_dg1', 'tp3_dg1', 'tp1_dg2', 'tp3_dg1',
+ 'tp1_dg3', 'tp3_dg1', 'tp1_dg4', 'tp3_dg1',
+ 'tp2_dg1', 'tp4_dg1', 'tp2_dg2', 'tp4_dg1',
+ 'tp2_dg3', 'tp4_dg1', 'tp2_dg4', 'tp4_dg1']
+
+ self.scenario._ixia_cfg = ixia_cfg
+ self.scenario._access_topologies = ['topology1', 'topology2']
+ self.scenario._core_topologies = ['topology3', 'topology4']
+ self.mock_IxNextgen.get_topology_device_groups.side_effect = \
+ [['tp1_dg1', 'tp1_dg2', 'tp1_dg3', 'tp1_dg4'],
+ ['tp2_dg1', 'tp2_dg2', 'tp2_dg3', 'tp2_dg4'],
+ ['tp3_dg1'],
+ ['tp4_dg1']]
+ res = self.scenario._get_endpoints_src_dst_obj_pairs(
+ endpoints_id_pairs)
+ self.assertEqual(res, expected_result)
+
+ def test_run_protocols(self):
+ self.scenario.client.is_protocols_running.return_value = True
+ self.scenario.run_protocols()
+ self.scenario.client.start_protocols.assert_called_once()
+
+ def test_run_protocols_timeout_exception(self):
+ self.scenario.client.is_protocols_running.return_value = False
+ with self.assertRaises(exceptions.WaitTimeout):
+ self.scenario.run_protocols()
+ self.scenario.client.start_protocols.assert_called_once()
+
+ def test_stop_protocols(self):
+ self.scenario.stop_protocols()
+ self.scenario.client.stop_protocols.assert_called_once()
+
+ def test__get_intf_addr_str_type_input(self):
+ intf = '192.168.10.2/24'
+ ip, mask = self.scenario._get_intf_addr(intf)
+ self.assertEqual(ip, '192.168.10.2')
+ self.assertEqual(mask, 24)
+
+ def test__get_intf_addr_dict_type_input(self):
+ intf = {'tg__0': 'xe0'}
+ ip, mask = self.scenario._get_intf_addr(intf)
+ self.assertEqual(ip, '10.1.1.1')
+ self.assertEqual(mask, 24)
+
+ @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, '_get_intf_addr')
+ def test__fill_ixia_config(self, mock_get_intf_addr):
+
+ ixia_cfg = {
+ 'pppoe_client': {
+ 'sessions_per_port': 4,
+ 'sessions_per_svlan': 1,
+ 's_vlan': 10,
+ 'c_vlan': 20,
+ 'ip': ['10.3.3.1/24', '10.4.4.1/24']
+ },
+ 'ipv4_client': {
+ 'sessions_per_port': 1,
+ 'sessions_per_vlan': 1,
+ 'vlan': 101,
+ 'gateway_ip': ['10.1.1.1/24', '10.2.2.1/24'],
+ 'ip': ['10.1.1.1/24', '10.2.2.1/24']
+ }
+ }
+
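+ # six CIDR strings are parsed: two PPPoE client IPs, two IPv4 gateway IPs and two IPv4 client IPs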
+ mock_get_intf_addr.side_effect = [
+ ('10.3.3.1', '24'),
+ ('10.4.4.1', '24'),
+ ('10.1.1.1', '24'),
+ ('10.2.2.1', '24'),
+ ('10.1.1.1', '24'),
+ ('10.2.2.1', '24')
+ ]
+ self.scenario._ixia_cfg = ixia_cfg
+ self.scenario._fill_ixia_config()
+ self.assertEqual(mock_get_intf_addr.call_count, 6)
+ self.assertEqual(self.scenario._ixia_cfg['pppoe_client']['ip'],
+ ['10.3.3.1', '10.4.4.1'])
+ self.assertEqual(self.scenario._ixia_cfg['ipv4_client']['ip'],
+ ['10.1.1.1', '10.2.2.1'])
+ self.assertEqual(self.scenario._ixia_cfg['ipv4_client']['prefix'],
+ ['24', '24'])
+
+ @mock.patch('yardstick.network_services.libs.ixia_libs.ixnet.ixnet_api.Vlan')
+ def test__apply_access_network_config_pap_auth(self, mock_vlan):
+ _ixia_cfg = {
+ 'pppoe_client': {
+ 'sessions_per_port': 4,
+ 'sessions_per_svlan': 1,
+ 's_vlan': 10,
+ 'c_vlan': 20,
+ 'pap_user': 'test_pap',
+ 'pap_password': 'pap'
+ }}
+ pap_user = _ixia_cfg['pppoe_client']['pap_user']
+ pap_passwd = _ixia_cfg['pppoe_client']['pap_password']
+ self.scenario._ixia_cfg = _ixia_cfg
+ self.scenario._uplink_vports = [0, 2]
+ self.scenario.client.add_topology.side_effect = ['Topology 1', 'Topology 2']
+ self.scenario.client.add_device_group.side_effect = ['Dg1', 'Dg2', 'Dg3',
+ 'Dg4', 'Dg5', 'Dg6',
+ 'Dg7', 'Dg8']
+ self.scenario.client.add_ethernet.side_effect = ['Eth1', 'Eth2', 'Eth3',
+ 'Eth4', 'Eth5', 'Eth6',
+ 'Eth7', 'Eth8']
+ self.scenario._apply_access_network_config()
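+ # 2 uplink ports x 4 SVLANs = 8 device groups, each adding an S-VLAN plus a C-VLAN (16 VLAN objects in total)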
+ self.assertEqual(self.scenario.client.add_topology.call_count, 2)
+ self.assertEqual(self.scenario.client.add_device_group.call_count, 8)
+ self.assertEqual(self.scenario.client.add_ethernet.call_count, 8)
+ self.assertEqual(mock_vlan.call_count, 16)
+ self.assertEqual(self.scenario.client.add_vlans.call_count, 8)
+ self.assertEqual(self.scenario.client.add_pppox_client.call_count, 8)
+ self.scenario.client.add_topology.assert_has_calls([
+ mock.call('Topology access 0', 0),
+ mock.call('Topology access 1', 2)
+ ])
+ self.scenario.client.add_device_group.assert_has_calls([
+ mock.call('Topology 1', 'SVLAN 10', 1),
+ mock.call('Topology 1', 'SVLAN 11', 1),
+ mock.call('Topology 1', 'SVLAN 12', 1),
+ mock.call('Topology 1', 'SVLAN 13', 1),
+ mock.call('Topology 2', 'SVLAN 14', 1),
+ mock.call('Topology 2', 'SVLAN 15', 1),
+ mock.call('Topology 2', 'SVLAN 16', 1),
+ mock.call('Topology 2', 'SVLAN 17', 1)
+ ])
+ self.scenario.client.add_ethernet.assert_has_calls([
+ mock.call('Dg1', 'Ethernet'),
+ mock.call('Dg2', 'Ethernet'),
+ mock.call('Dg3', 'Ethernet'),
+ mock.call('Dg4', 'Ethernet'),
+ mock.call('Dg5', 'Ethernet'),
+ mock.call('Dg6', 'Ethernet'),
+ mock.call('Dg7', 'Ethernet'),
+ mock.call('Dg8', 'Ethernet')
+ ])
+ mock_vlan.assert_has_calls([
+ mock.call(vlan_id=10),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=11),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=12),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=13),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=14),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=15),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=16),
+ mock.call(vlan_id=20, vlan_id_step=1),
+ mock.call(vlan_id=17),
+ mock.call(vlan_id=20, vlan_id_step=1)
+ ])
+ self.scenario.client.add_pppox_client.assert_has_calls([
+ mock.call('Eth1', 'pap', pap_user, pap_passwd),
+ mock.call('Eth2', 'pap', pap_user, pap_passwd),
+ mock.call('Eth3', 'pap', pap_user, pap_passwd),
+ mock.call('Eth4', 'pap', pap_user, pap_passwd),
+ mock.call('Eth5', 'pap', pap_user, pap_passwd),
+ mock.call('Eth6', 'pap', pap_user, pap_passwd),
+ mock.call('Eth7', 'pap', pap_user, pap_passwd),
+ mock.call('Eth8', 'pap', pap_user, pap_passwd)
+ ])
+
+ def test__apply_access_network_config_chap_auth(self):
+ _ixia_cfg = {
+ 'pppoe_client': {
+ 'sessions_per_port': 4,
+ 'sessions_per_svlan': 1,
+ 's_vlan': 10,
+ 'c_vlan': 20,
+ 'chap_user': 'test_chap',
+ 'chap_password': 'chap'
+ }}
+ chap_user = _ixia_cfg['pppoe_client']['chap_user']
+ chap_passwd = _ixia_cfg['pppoe_client']['chap_password']
+ self.scenario._ixia_cfg = _ixia_cfg
+ self.scenario._uplink_vports = [0, 2]
+ self.scenario.client.add_ethernet.side_effect = ['Eth1', 'Eth2', 'Eth3',
+ 'Eth4', 'Eth5', 'Eth6',
+ 'Eth7', 'Eth8']
+ self.scenario._apply_access_network_config()
+ self.assertEqual(self.scenario.client.add_pppox_client.call_count, 8)
+ self.scenario.client.add_pppox_client.assert_has_calls([
+ mock.call('Eth1', 'chap', chap_user, chap_passwd),
+ mock.call('Eth2', 'chap', chap_user, chap_passwd),
+ mock.call('Eth3', 'chap', chap_user, chap_passwd),
+ mock.call('Eth4', 'chap', chap_user, chap_passwd),
+ mock.call('Eth5', 'chap', chap_user, chap_passwd),
+ mock.call('Eth6', 'chap', chap_user, chap_passwd),
+ mock.call('Eth7', 'chap', chap_user, chap_passwd),
+ mock.call('Eth8', 'chap', chap_user, chap_passwd)
+ ])
+
+ @mock.patch('yardstick.network_services.libs.ixia_libs.ixnet.ixnet_api.Vlan')
+ def test__apply_core_network_config_no_bgp_proto(self, mock_vlan):
+ self.scenario._downlink_vports = [1, 3]
+ self.scenario.client.add_topology.side_effect = ['Topology 1', 'Topology 2']
+ self.scenario.client.add_device_group.side_effect = ['Dg1', 'Dg2']
+ self.scenario.client.add_ethernet.side_effect = ['Eth1', 'Eth2']
+ self.scenario._apply_core_network_config()
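+ # one core topology per downlink port, each with a single device group, VLAN and IPv4 stack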
+ self.assertEqual(self.scenario.client.add_topology.call_count, 2)
+ self.assertEqual(self.scenario.client.add_device_group.call_count, 2)
+ self.assertEqual(self.scenario.client.add_ethernet.call_count, 2)
+ self.assertEqual(mock_vlan.call_count, 2)
+ self.assertEqual(self.scenario.client.add_vlans.call_count, 2)
+ self.assertEqual(self.scenario.client.add_ipv4.call_count, 2)
+ self.scenario.client.add_topology.assert_has_calls([
+ mock.call('Topology core 0', 1),
+ mock.call('Topology core 1', 3)
+ ])
+ self.scenario.client.add_device_group.assert_has_calls([
+ mock.call('Topology 1', 'Core port 0', 1),
+ mock.call('Topology 2', 'Core port 1', 1)
+ ])
+ self.scenario.client.add_ethernet.assert_has_calls([
+ mock.call('Dg1', 'Ethernet'),
+ mock.call('Dg2', 'Ethernet')
+ ])
+ mock_vlan.assert_has_calls([
+ mock.call(vlan_id=101),
+ mock.call(vlan_id=102)
+ ])
+ self.scenario.client.add_ipv4.assert_has_calls([
+ mock.call('Eth1', name='ipv4', addr=ipaddress.IPv4Address('10.1.1.2'),
+ addr_step='0.0.0.1', prefix='24', gateway='10.1.1.1'),
+ mock.call('Eth2', name='ipv4', addr=ipaddress.IPv4Address('10.2.2.2'),
+ addr_step='0.0.0.1', prefix='24', gateway='10.2.2.1')
+ ])
+ self.scenario.client.add_bgp.assert_not_called()
+
+ def test__apply_core_network_config_with_bgp_proto(self):
+ bgp_params = {
+ 'bgp': {
+ 'bgp_type': 'external',
+ 'dut_ip': '10.0.0.1',
+ 'as_number': 65000
+ }
+ }
+ self.scenario._ixia_cfg['ipv4_client'].update(bgp_params)
+ self.scenario._downlink_vports = [1, 3]
+ self.scenario.client.add_ipv4.side_effect = ['ipv4_1', 'ipv4_2']
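+ # a BGP peer is attached on top of each of the two core IPv4 stacks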
+ self.scenario._apply_core_network_config()
+ self.assertEqual(self.scenario.client.add_bgp.call_count, 2)
+ self.scenario.client.add_bgp.assert_has_calls([
+ mock.call('ipv4_1', dut_ip=bgp_params["bgp"]["dut_ip"],
+ local_as=bgp_params["bgp"]["as_number"],
+ bgp_type=bgp_params["bgp"]["bgp_type"]),
+ mock.call('ipv4_2', dut_ip=bgp_params["bgp"]["dut_ip"],
+ local_as=bgp_params["bgp"]["as_number"],
+ bgp_type=bgp_params["bgp"]["bgp_type"])
+ ])
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index 7b937dfb5..8d49cb3f4 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -16,7 +16,6 @@ from multiprocessing import Process, Queue
import time
import mock
-from six.moves import configparser
import unittest
from yardstick.benchmark.contexts import base as ctx_base
@@ -147,48 +146,6 @@ class TestConfigCreate(unittest.TestCase):
self.assertEqual(config_create.downlink_ports, ['xe1'])
self.assertEqual(config_create.socket, 2)
- def test_dpdk_port_to_link_id(self):
- vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
- config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
- self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1})
-
- def test_vpe_initialize(self):
- vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
- config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
- config = configparser.ConfigParser()
- config_create.vpe_initialize(config)
- self.assertEqual(config.get('EAL', 'log_level'), '0')
- self.assertEqual(config.get('PIPELINE0', 'type'), 'MASTER')
- self.assertEqual(config.get('PIPELINE0', 'core'), 's2C0')
- self.assertEqual(config.get('MEMPOOL0', 'pool_size'), '256K')
- self.assertEqual(config.get('MEMPOOL1', 'pool_size'), '2M')
-
- def test_vpe_rxq(self):
- vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
- config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
- config = configparser.ConfigParser()
- config_create.downlink_ports = ['xe0']
- config_create.vpe_rxq(config)
- self.assertEqual(config.get('RXQ0.0', 'mempool'), 'MEMPOOL1')
-
- def test_get_sink_swq(self):
- vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
- config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2)
- config = configparser.ConfigParser()
- config.add_section('PIPELINE0')
- config.set('PIPELINE0', 'key1', 'value1')
- config.set('PIPELINE0', 'key2', 'value2 SINK')
- config.set('PIPELINE0', 'key3', 'TM value3')
- config.set('PIPELINE0', 'key4', 'value4')
- config.set('PIPELINE0', 'key5', 'the SINK value5')
-
- self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key1', 5), 'SWQ-1')
- self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key2', 5), 'SWQ-1 SINK0')
- self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key3', 5), 'SWQ-1 TM5')
- config_create.sw_q += 1
- self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key4', 5), 'SWQ0')
- self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key5', 5), 'SWQ0 SINK1')
-
def test_generate_vpe_script(self):
vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
vpe_config_vnf = vpe_vnf.ConfigCreate(vnfd_helper, 2)
@@ -214,36 +171,6 @@ class TestConfigCreate(unittest.TestCase):
self.assertIsInstance(result, str)
self.assertNotEqual(result, '')
- def test_create_vpe_config(self):
- vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0)
- config_create = vpe_vnf.ConfigCreate(vnfd_helper, 23)
- config_create.uplink_ports = ['xe1']
- with mock.patch.object(config_create, 'vpe_upstream') as mock_up, \
- mock.patch.object(config_create, 'vpe_downstream') as \
- mock_down, \
- mock.patch.object(config_create, 'vpe_tmq') as mock_tmq, \
- mock.patch.object(config_create, 'vpe_initialize') as \
- mock_ini, \
- mock.patch.object(config_create, 'vpe_rxq') as mock_rxq:
- mock_ini_obj = mock.Mock()
- mock_rxq_obj = mock.Mock()
- mock_up_obj = mock.Mock()
- mock_down_obj = mock.Mock()
- mock_tmq_obj = mock.Mock()
- mock_ini.return_value = mock_ini_obj
- mock_rxq.return_value = mock_rxq_obj
- mock_up.return_value = mock_up_obj
- mock_down.return_value = mock_down_obj
- mock_tmq.return_value = mock_tmq_obj
- config_create.create_vpe_config('fake_config_file')
-
- mock_rxq.assert_called_once_with(mock_ini_obj)
- mock_up.assert_called_once_with('fake_config_file', 0)
- mock_down.assert_called_once_with('fake_config_file', 0)
- mock_tmq.assert_called_once_with(mock_down_obj, 0)
- mock_up_obj.write.assert_called_once()
- mock_tmq_obj.write.assert_called_once()
-
class TestVpeApproxVnf(unittest.TestCase):