author    Martin Banszel <martinx.banszel@intel.com>    2017-07-19 19:35:02 +0000
committer Edward MacGillivray <edward.s.macgillivray@intel.com>    2017-09-14 15:46:38 -0700
commit    be6e7ed6f053a4a697af939fa0ddcd5dce54c0c8 (patch)
tree      5bc4b5bed762d9d4ddf79369d3e925acf86596e2 /yardstick
parent    ac0c076ffc701333aed7d65112a0f2e15fda825a (diff)
NSB: fix port topology
Add a new PortPairs class to resolve the topology into lists of public and private ports. Previously we were calculating public/private in multiple locations and using different conventions.

In addition, for all the DPDK tests we need to use the DPDK port number and not rely on interface ordering or interface naming conventions. We used to use xe0 -> 0, xe1 -> 1, etc., but this is not the DPDK port number. Use the new dpdknicbind_helper class to parse the output of dpdk-devbind.py and find the actual DPDK port number at runtime. We then use this DPDK port number to correctly calculate the port_mask_hex. The port mask maps the DPDK port num (PMD ID) to the LINK ID used in the pipeline config.

We also need to make sure we only use the interfaces matched to the topology and not all the interfaces, because in some cases we will have unused interfaces. In particular, TRex always requires an even number of interfaces, so for single-port TRex tests we have to create the second port and not use it. Thus the traffic generator stats code was modified to only dump stats for used ports, not unused ports.

Ixia was using interface ordering to map to Ixia ports; instead we use the dpdk_port_num, which must be hardcoded for Ixia.

Renamed traffic_profile.execute to traffic_profile.execute_traffic so we can trace the code more easily. We pass the ports used by the traffic profile to generate_samples so we don't get stats for unused ports.

Fixed up vPE config creation and bring-up issues. Fixed up CGNAPT and UDP_Replay to work correctly.

Tested with 4-port scale-out.

Change-Id: I2e4f328bff2904108081e92a4bf712333fa73869
Signed-off-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Edward MacGillivray <edward.s.macgillivray@intel.com>
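As a rough illustration of the port_mask_hex calculation described above (a minimal sketch, not the code from this change; the helper name and the LINK ID note are assumptions based on the commit message), the mask sets bit N for every DPDK port number (PMD ID) that the topology actually uses:

# Illustrative sketch only: build the hex port mask from the DPDK port
# numbers (PMD IDs) in use. Bit N set means PMD port N is enabled; the
# pipeline config then assigns LINK IDs to the enabled ports.
def port_mask_hex(dpdk_port_nums):
    mask = 0
    for port_num in dpdk_port_nums:
        mask |= 1 << port_num
    return hex(mask)

# e.g. DPDK ports 0 and 1 in use -> '0x3'; ports 1 and 3 -> '0xa'
assert port_mask_hex([0, 1]) == '0x3'
assert port_mask_hex([1, 3]) == '0xa'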
Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/heat.py                            |  32
-rw-r--r--  yardstick/benchmark/contexts/model.py                           |  79
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py         |  53
-rw-r--r--  yardstick/network_services/helpers/dpdknicbind_helper.py        | 145
-rw-r--r--  yardstick/network_services/helpers/samplevnf_helper.py          | 311
-rw-r--r--  yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py        |   4
-rw-r--r--  yardstick/network_services/nfvi/resource.py                     |  12
-rw-r--r--  yardstick/network_services/traffic_profile/base.py              |   2
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py      |  40
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py           |  31
-rw-r--r--  yardstick/network_services/traffic_profile/traffic_profile.py   |   4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py           |   2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py              |  28
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py        |  53
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py      |  49
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py          |  11
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py        | 182
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_ping.py           |  74
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_prox.py           |   1
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py   |  63
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py   |  38
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/udp_replay.py        |  49
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vfw_vnf.py           |   2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py           |  73
24 files changed, 805 insertions, 533 deletions
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 94a3824a7..a4bec6382 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -83,9 +83,14 @@ class HeatContext(Context):
external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
have_external_network = any(net.get("external_network") for net in networks.values())
- if sorted_networks and not have_external_network:
- # no external net defined, assign it to first network using os.environ
- sorted_networks[0][1]["external_network"] = external_network
+ if not have_external_network:
+ # try looking for mgmt network first
+ try:
+ networks['mgmt']["external_network"] = external_network
+ except KeyError:
+ if sorted_networks:
+ # otherwise assign it to first network using os.environ
+ sorted_networks[0][1]["external_network"] = external_network
return sorted_networks
@@ -328,16 +333,21 @@ class HeatContext(Context):
print("Context '%s' deployed" % self.name)
def add_server_port(self, server):
- # TODO(hafe) can only handle one internal network for now
- # use private ip from first port
- private_port = next(iter(server.ports.values()))
+ # use private ip from first port in first network
+ try:
+ private_port = next(iter(server.ports.values()))[0]
+ except IndexError:
+ LOG.exception("Unable to find first private port in %s", server.ports)
+ raise
server.private_ip = self.stack.outputs[private_port["stack_name"]]
server.interfaces = {}
- for network_name, port in server.ports.items():
- # port['port'] is either port name from mapping or default network_name
- server.interfaces[port['port']] = self.make_interface_dict(network_name, port['port'],
- port['stack_name'],
- self.stack.outputs)
+ for network_name, ports in server.ports.items():
+ for port in ports:
+ # port['port'] is either port name from mapping or default network_name
+ server.interfaces[port['port']] = self.make_interface_dict(network_name,
+ port['port'],
+ port['stack_name'],
+ self.stack.outputs)
def make_interface_dict(self, network_name, port, stack_name, outputs):
private_ip = outputs[stack_name]
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index da2b74e1c..facfab892 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -11,9 +11,15 @@
"""
from __future__ import absolute_import
+
+import six
+import logging
from six.moves import range
+LOG = logging.getLogger(__name__)
+
+
class Object(object):
"""Base class for classes in the logical model
Contains common attributes and methods
@@ -257,44 +263,51 @@ class Server(Object): # pragma: no cover
# if explicit mapping skip unused networks
if self.network_ports:
try:
- port = self.network_ports[network.name]
+ ports = self.network_ports[network.name]
except KeyError:
# no port for this network
continue
+ else:
+ if isinstance(ports, six.string_types):
+ if ports.startswith('-'):
+ LOG.warning("possible YAML error, port name starts with - '%s", ports)
+ ports = [ports]
# otherwise add a port for every network with port name as network name
else:
- port = network.name
- port_name = "{0}-{1}-port".format(server_name, port)
- self.ports[network.name] = {"stack_name": port_name, "port": port}
- # we can't use secgroups if port_security_enabled is False
- if network.port_security_enabled is False:
- sec_group_id = None
- else:
- # if port_security_enabled is None we still need to add to secgroup
- sec_group_id = self.secgroup_name
- # don't refactor to pass in network object, that causes JSON
- # circular ref encode errors
- template.add_port(port_name, network.stack_name, network.subnet_stack_name,
- network.vnic_type, sec_group_id=sec_group_id,
- provider=network.provider,
- allowed_address_pairs=network.allowed_address_pairs)
- port_name_list.append(port_name)
-
- if self.floating_ip:
- external_network = self.floating_ip["external_network"]
- if network.has_route_to(external_network):
- self.floating_ip["stack_name"] = server_name + "-fip"
- template.add_floating_ip(self.floating_ip["stack_name"],
- external_network,
- port_name,
- network.router.stack_if_name,
- sec_group_id)
- self.floating_ip_assoc["stack_name"] = \
- server_name + "-fip-assoc"
- template.add_floating_ip_association(
- self.floating_ip_assoc["stack_name"],
- self.floating_ip["stack_name"],
- port_name)
+ ports = [network.name]
+ for port in ports:
+ port_name = "{0}-{1}-port".format(server_name, port)
+ self.ports.setdefault(network.name, []).append(
+ {"stack_name": port_name, "port": port})
+ # we can't use secgroups if port_security_enabled is False
+ if network.port_security_enabled is False:
+ sec_group_id = None
+ else:
+ # if port_security_enabled is None we still need to add to secgroup
+ sec_group_id = self.secgroup_name
+ # don't refactor to pass in network object, that causes JSON
+ # circular ref encode errors
+ template.add_port(port_name, network.stack_name, network.subnet_stack_name,
+ network.vnic_type, sec_group_id=sec_group_id,
+ provider=network.provider,
+ allowed_address_pairs=network.allowed_address_pairs)
+ port_name_list.append(port_name)
+
+ if self.floating_ip:
+ external_network = self.floating_ip["external_network"]
+ if network.has_route_to(external_network):
+ self.floating_ip["stack_name"] = server_name + "-fip"
+ template.add_floating_ip(self.floating_ip["stack_name"],
+ external_network,
+ port_name,
+ network.router.stack_if_name,
+ sec_group_id)
+ self.floating_ip_assoc["stack_name"] = \
+ server_name + "-fip-assoc"
+ template.add_floating_ip_association(
+ self.floating_ip_assoc["stack_name"],
+ self.floating_ip["stack_name"],
+ port_name)
if self.flavor:
if isinstance(self.flavor, dict):
self.flavor["name"] = \
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 0e6ceab6e..ada92121b 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -25,7 +25,6 @@ import re
from itertools import chain
import six
-from operator import itemgetter
from collections import defaultdict
from yardstick.benchmark.scenarios import base
@@ -134,6 +133,7 @@ class NetworkServiceTestCase(base.Scenario):
self.vnfs = []
self.collector = None
self.traffic_profile = None
+ self.node_netdevs = {}
def _get_ip_flow_range(self, ip_start_range):
@@ -168,15 +168,17 @@ class NetworkServiceTestCase(base.Scenario):
def _get_traffic_flow(self):
flow = {}
try:
+ # TODO: should be .0 or .1 so we can use list
+ # but this also roughly matches private_0, public_0
fflow = self.scenario_cfg["options"]["flow"]
for index, src in enumerate(fflow.get("src_ip", [])):
- flow["src_ip{}".format(index)] = self._get_ip_flow_range(src)
+ flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)
for index, dst in enumerate(fflow.get("dst_ip", [])):
- flow["dst_ip{}".format(index)] = self._get_ip_flow_range(dst)
+ flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)
- for index, publicip in enumerate(fflow.get("publicip", [])):
- flow["public_ip{}".format(index)] = publicip
+ for index, publicip in enumerate(fflow.get("public_ip", [])):
+ flow["public_ip_{}".format(index)] = publicip
flow["count"] = fflow["count"]
except KeyError:
@@ -263,7 +265,6 @@ class NetworkServiceTestCase(base.Scenario):
node0_if["node_name"] = node0_name
node1_if["node_name"] = node1_name
- vld_networks = self.get_vld_networks(self.context_cfg["networks"])
node0_if["vld_id"] = vld["id"]
node1_if["vld_id"] = vld["id"]
@@ -276,6 +277,7 @@ class NetworkServiceTestCase(base.Scenario):
node1_if["peer_ifname"] = node0_if_name
# just load the network
+ vld_networks = self.get_vld_networks(self.context_cfg["networks"])
node0_if["network"] = vld_networks.get(vld["id"], {})
node1_if["network"] = vld_networks.get(vld["id"], {})
@@ -325,16 +327,15 @@ class NetworkServiceTestCase(base.Scenario):
vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
self.context_cfg["nodes"][vnf_name].update(vnfd)
- @staticmethod
- def _sort_dpdk_port_num(netdevs):
- # dpdk_port_num is PCI BUS ID ordering, lowest first
- s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
- for dpdk_port_num, netdev in enumerate(s):
- netdev['dpdk_port_num'] = dpdk_port_num
+ def _probe_netdevs(self, node, node_dict, timeout=120):
+ try:
+ return self.node_netdevs[node]
+ except KeyError:
+ pass
- def _probe_netdevs(self, node, node_dict):
- cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
netdevs = {}
+ cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
+
with SshManager(node_dict) as conn:
if conn:
exit_status = conn.execute(cmd)[0]
@@ -346,6 +347,8 @@ class NetworkServiceTestCase(base.Scenario):
raise IncorrectSetup(
"Cannot find netdev info in sysfs" % node)
netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
+
+ self.node_netdevs[node] = netdevs
return netdevs
@classmethod
@@ -458,10 +461,22 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
(expected_name, classes_found))
@staticmethod
- def update_interfaces_from_node(vnfd, node):
- for intf in vnfd["vdu"][0]["external-interface"]:
- node_intf = node['interfaces'][intf['name']]
- intf['virtual-interface'].update(node_intf)
+ def create_interfaces_from_node(vnfd, node):
+ ext_intfs = vnfd["vdu"][0]["external-interface"] = []
+ # have to sort so xe0 goes first
+ for intf_name, intf in sorted(node['interfaces'].items()):
+ if intf.get('vld_id'):
+ # force dpdk_port_num to int so we can do reverse lookup
+ try:
+ intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
+ except KeyError:
+ pass
+ ext_intf = {
+ "name": intf_name,
+ "virtual-interface": intf,
+ "vnfd-connection-point-ref": intf_name,
+ }
+ ext_intfs.append(ext_intf)
def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
""" Create VNF objects based on YAML descriptors
@@ -491,7 +506,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
vnfd = vnfdgen.generate_vnfd(vnf_model, node)
# TODO: here add extra context_cfg["nodes"] regardless of template
vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
- self.update_interfaces_from_node(vnfd, node)
+ self.create_interfaces_from_node(vnfd, node)
vnf_impl = self.get_vnf_impl(vnfd['id'])
vnf_instance = vnf_impl(node_name, vnfd)
vnfs.append(vnf_instance)
diff --git a/yardstick/network_services/helpers/dpdknicbind_helper.py b/yardstick/network_services/helpers/dpdknicbind_helper.py
new file mode 100644
index 000000000..605d08d38
--- /dev/null
+++ b/yardstick/network_services/helpers/dpdknicbind_helper.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+import itertools
+
+NETWORK_KERNEL = 'network_kernel'
+NETWORK_DPDK = 'network_dpdk'
+NETWORK_OTHER = 'network_other'
+CRYPTO_KERNEL = 'crypto_kernel'
+CRYPTO_DPDK = 'crypto_dpdk'
+CRYPTO_OTHER = 'crypto_other'
+
+
+class DpdkBindHelperException(Exception):
+ pass
+
+
+class DpdkBindHelper(object):
+ DPDK_STATUS_CMD = "{dpdk_nic_bind} --status"
+ DPDK_BIND_CMD = "sudo {dpdk_nic_bind} {force} -b {driver} {vpci}"
+
+ NIC_ROW_RE = re.compile("([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
+ "unused=([^ ]*)(?: (\*Active\*))?")
+ SKIP_RE = re.compile('(====|<none>|^$)')
+ NIC_ROW_FIELDS = ['vpci', 'dev_type', 'iface', 'driver', 'unused', 'active']
+
+ HEADER_DICT_PAIRS = [
+ (re.compile('^Network.*DPDK.*$'), NETWORK_DPDK),
+ (re.compile('^Network.*kernel.*$'), NETWORK_KERNEL),
+ (re.compile('^Other network.*$'), NETWORK_OTHER),
+ (re.compile('^Crypto.*DPDK.*$'), CRYPTO_DPDK),
+ (re.compile('^Crypto.*kernel$'), CRYPTO_KERNEL),
+ (re.compile('^Other crypto.*$'), CRYPTO_OTHER),
+ ]
+
+ def clean_status(self):
+ self.dpdk_status = {
+ NETWORK_KERNEL: [],
+ NETWORK_DPDK: [],
+ CRYPTO_KERNEL: [],
+ CRYPTO_DPDK: [],
+ NETWORK_OTHER: [],
+ CRYPTO_OTHER: [],
+ }
+
+ def __init__(self, ssh_helper):
+ self.dpdk_status = None
+ self.status_nic_row_re = None
+ self._dpdk_nic_bind_attr = None
+ self._status_cmd_attr = None
+
+ self.ssh_helper = ssh_helper
+ self.clean_status()
+
+ def _dpdk_execute(self, *args, **kwargs):
+ res = self.ssh_helper.execute(*args, **kwargs)
+ if res[0] != 0:
+ raise DpdkBindHelperException('{} command failed with rc={}'.format(
+ self._dpdk_nic_bind, res[0]))
+ return res
+
+ @property
+ def _dpdk_nic_bind(self):
+ if self._dpdk_nic_bind_attr is None:
+ self._dpdk_nic_bind_attr = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
+ return self._dpdk_nic_bind_attr
+
+ @property
+ def _status_cmd(self):
+ if self._status_cmd_attr is None:
+ self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind)
+ return self._status_cmd_attr
+
+ def _addline(self, active_list, line):
+ if active_list is None:
+ return
+ res = self.NIC_ROW_RE.match(line)
+ if res is None:
+ return
+ new_data = {k: v for k, v in zip(self.NIC_ROW_FIELDS, res.groups())}
+ new_data['active'] = bool(new_data['active'])
+ self.dpdk_status[active_list].append(new_data)
+
+ @classmethod
+ def _switch_active_dict(cls, a_row, active_dict):
+ for regexp, a_dict in cls.HEADER_DICT_PAIRS:
+ if regexp.match(a_row):
+ return a_dict
+ return active_dict
+
+ def parse_dpdk_status_output(self, input):
+ active_dict = None
+ self.clean_status()
+ for a_row in input.splitlines():
+ if self.SKIP_RE.match(a_row):
+ continue
+ active_dict = self._switch_active_dict(a_row, active_dict)
+ self._addline(active_dict, a_row)
+ return self.dpdk_status
+
+ def _get_bound_pci_addresses(self, active_dict):
+ return [iface['vpci'] for iface in self.dpdk_status[active_dict]]
+
+ @property
+ def dpdk_bound_pci_addresses(self):
+ return self._get_bound_pci_addresses(NETWORK_DPDK)
+
+ @property
+ def kernel_bound_pci_addresses(self):
+ return self._get_bound_pci_addresses(NETWORK_KERNEL)
+
+ @property
+ def interface_driver_map(self):
+ return {interface['vpci']: interface['driver']
+ for interface in itertools.chain(*self.dpdk_status.values())}
+
+ def read_status(self):
+ return self.parse_dpdk_status_output(self._dpdk_execute(self._status_cmd)[1])
+
+ def bind(self, pci, driver, force=True):
+ cmd = self.DPDK_BIND_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind,
+ driver=driver,
+ vpci=' '.join(list(pci)),
+ force='--force' if force else '')
+ self._dpdk_execute(cmd)
+ # update the inner status dict
+ self.read_status()
+
+ def save_used_drivers(self):
+ self.used_drivers = self.interface_driver_map
+
+ def rebind_drivers(self, force=True):
+ for vpci, driver in self.used_drivers.items():
+ self.bind(vpci, driver, force)
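To make the parsing flow above concrete, here is a hedged usage sketch of the new DpdkBindHelper; the dpdk-devbind.py --status sample is invented for illustration, and the helper is constructed without an SSH connection because parsing alone does not need one:

from yardstick.network_services.helpers.dpdknicbind_helper import DpdkBindHelper

SAMPLE_STATUS = """\
Network devices using DPDK-compatible driver
============================================
0000:05:00.0 '82599ES 10-Gigabit SFI/SFP+' drv=igb_uio unused=ixgbe

Network devices using kernel driver
===================================
0000:00:03.0 'Virtio network device' if=ens3 drv=virtio-pci unused=igb_uio *Active*
"""

# Parsing does not touch the remote host, so no real ssh_helper is needed here.
helper = DpdkBindHelper(ssh_helper=None)
helper.parse_dpdk_status_output(SAMPLE_STATUS)

print(helper.dpdk_bound_pci_addresses)    # ['0000:05:00.0']
print(helper.kernel_bound_pci_addresses)  # ['0000:00:03.0']
print(helper.interface_driver_map)        # PCI address -> current driver for every row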
diff --git a/yardstick/network_services/helpers/samplevnf_helper.py b/yardstick/network_services/helpers/samplevnf_helper.py
index 9d89d4188..7054b9232 100644
--- a/yardstick/network_services/helpers/samplevnf_helper.py
+++ b/yardstick/network_services/helpers/samplevnf_helper.py
@@ -19,7 +19,7 @@ import logging
import os
import sys
from collections import OrderedDict, defaultdict
-from itertools import chain
+from itertools import chain, repeat
import six
from six.moves.configparser import ConfigParser
@@ -62,6 +62,97 @@ SCRIPT_TPL = """
"""
+class PortPairs(object):
+
+ PUBLIC = "public"
+ PRIVATE = "private"
+
+ def __init__(self, interfaces):
+ super(PortPairs, self).__init__()
+ self.interfaces = interfaces
+ self._all_ports = None
+ self._priv_ports = None
+ self._pub_ports = None
+ self._networks = None
+ self._port_pair_list = None
+ self._valid_networks = None
+
+ @property
+ def networks(self):
+ if self._networks is None:
+ self._networks = {}
+ for intf in self.interfaces:
+ vintf = intf['virtual-interface']
+ try:
+ vld_id = vintf['vld_id']
+ except KeyError:
+ # probably unused port?
+ LOG.warning("intf without vld_id, %s", vintf)
+ else:
+ self._networks.setdefault(vld_id, []).append(vintf["ifname"])
+ return self._networks
+
+ @classmethod
+ def get_public_id(cls, vld_id):
+ # partition returns a tuple
+ parts = list(vld_id.partition(cls.PRIVATE))
+ if parts[0]:
+ # 'private' was not in or not leftmost in the string
+ return
+ parts[1] = cls.PUBLIC
+ public_id = ''.join(parts)
+ return public_id
+
+ @property
+ # this only works for vnfs that have both private and public visible
+ def valid_networks(self):
+ if self._valid_networks is None:
+ self._valid_networks = []
+ for vld_id in self.networks:
+ public_id = self.get_public_id(vld_id)
+ if public_id in self.networks:
+ self._valid_networks.append((vld_id, public_id))
+ return self._valid_networks
+
+ @property
+ def all_ports(self):
+ if self._all_ports is None:
+ self._all_ports = sorted(set(self.priv_ports + self.pub_ports))
+ return self._all_ports
+
+ @property
+ def priv_ports(self):
+ if self._priv_ports is None:
+ intfs = chain.from_iterable(
+ intfs for vld_id, intfs in self.networks.items() if
+ vld_id.startswith(self.PRIVATE))
+ self._priv_ports = sorted(set(intfs))
+ return self._priv_ports
+
+ @property
+ def pub_ports(self):
+ if self._pub_ports is None:
+ intfs = chain.from_iterable(
+ intfs for vld_id, intfs in self.networks.items() if vld_id.startswith(self.PUBLIC))
+ self._pub_ports = sorted(set(intfs))
+ return self._pub_ports
+
+ @property
+ def port_pair_list(self):
+ if self._port_pair_list is None:
+ self._port_pair_list = []
+
+ for priv, pub in self.valid_networks:
+ for private_intf in self.networks[priv]:
+ # only VNFs have private, public peers
+ peer_intfs = self.networks.get(pub, [])
+ if peer_intfs:
+ for public_intf in peer_intfs:
+ port_pair = private_intf, public_intf
+ self._port_pair_list.append(port_pair)
+ return self._port_pair_list
+
+
class MultiPortConfig(object):
HW_LB = "HW"
@@ -108,7 +199,7 @@ class MultiPortConfig(object):
ip_addr = cls.make_ip_addr(ip_addr, prefixlen)
return ip_addr.ip.exploded, ip_addr.network.prefixlen
- def __init__(self, topology_file, config_tpl, tmp_file, interfaces=None,
+ def __init__(self, topology_file, config_tpl, tmp_file, vnfd_helper,
vnf_type='CGNAT', lb_count=2, worker_threads=3,
worker_config='1C/1T', lb_config='SW', socket=0):
@@ -118,8 +209,7 @@ class MultiPortConfig(object):
self.worker_threads = self.get_worker_threads(worker_threads)
self.vnf_type = vnf_type
self.pipe_line = 0
- self.interfaces = interfaces if interfaces else {}
- self.networks = {}
+ self.vnfd_helper = vnfd_helper
self.write_parser = ConfigParser()
self.read_parser = ConfigParser()
self.read_parser.read(config_tpl)
@@ -138,6 +228,8 @@ class MultiPortConfig(object):
self.start_core = ""
self.pipeline_counter = ""
self.txrx_pipeline = ""
+ self._port_pairs = None
+ self.all_ports = []
self.port_pair_list = []
self.lb_to_port_pair_mapping = {}
self.init_eal()
@@ -145,12 +237,11 @@ class MultiPortConfig(object):
self.lb_index = None
self.mul = 0
self.port_pairs = []
- self.port_pair_list = []
self.ports_len = 0
self.prv_que_handler = None
self.vnfd = None
self.rules = None
- self.pktq_out = ''
+ self.pktq_out = []
@staticmethod
def gen_core(core):
@@ -160,18 +251,19 @@ class MultiPortConfig(object):
return str(core)
def make_port_pairs_iter(self, operand, iterable):
- return (operand(x[-1], y) for y in iterable for x in chain(*self.port_pairs))
+ return (operand(self.vnfd_helper.port_num(x), y) for y in iterable for x in
+ chain.from_iterable(self.port_pairs))
def make_range_port_pairs_iter(self, operand, start, end):
return self.make_port_pairs_iter(operand, range(start, end))
def init_eal(self):
- vpci = [v['virtual-interface']["vpci"] for v in self.interfaces]
+ lines = ['[EAL]\n']
+ vpci = (v['virtual-interface']["vpci"] for v in self.vnfd_helper.interfaces)
+ lines.extend('w = {0}\n'.format(item) for item in vpci)
+ lines.append('\n')
with open(self.tmp_file, 'w') as fh:
- fh.write('[EAL]\n')
- for item in vpci:
- fh.write('w = {0}\n'.format(item))
- fh.write('\n')
+ fh.writelines(lines)
def update_timer(self):
timer_tpl = self.get_config_tpl_data('TIMER')
@@ -226,40 +318,6 @@ class MultiPortConfig(object):
except ValueError:
self.start_core = int(self.start_core[:-1]) + 1
- @staticmethod
- def get_port_pairs(interfaces):
- port_pair_list = []
- networks = {}
- for private_intf in interfaces:
- vintf = private_intf['virtual-interface']
- try:
- vld_id = vintf['vld_id']
- except KeyError:
- pass
- else:
- networks.setdefault(vld_id, []).append(vintf)
-
- for name, net in networks.items():
- # partition returns a tuple
- parts = list(name.partition('private'))
- if parts[0]:
- # 'private' was not in or not leftmost in the string
- continue
- parts[1] = 'public'
- public_id = ''.join(parts)
- for private_intf in net:
- try:
- public_peer_intfs = networks[public_id]
- except KeyError:
- LOG.warning("private network without peer %s, %s not found", name, public_id)
- continue
-
- for public_intf in public_peer_intfs:
- port_pair = private_intf["ifname"], public_intf["ifname"]
- port_pair_list.append(port_pair)
-
- return port_pair_list, networks
-
def get_lb_count(self):
self.lb_count = int(min(len(self.port_pair_list), self.lb_count))
@@ -267,50 +325,51 @@ class MultiPortConfig(object):
self.lb_to_port_pair_mapping = defaultdict(int)
port_pair_count = len(self.port_pair_list)
lb_pair_count = int(port_pair_count / self.lb_count)
- for i in range(self.lb_count):
- self.lb_to_port_pair_mapping[i + 1] = lb_pair_count
- for i in range(port_pair_count % self.lb_count):
- self.lb_to_port_pair_mapping[i + 1] += 1
+ extra = port_pair_count % self.lb_count
+ extra_iter = repeat(lb_pair_count + 1, extra)
+ norm_iter = repeat(lb_pair_count, port_pair_count - extra)
+ new_values = {i: v for i, v in enumerate(chain(extra_iter, norm_iter), 1)}
+ self.lb_to_port_pair_mapping.update(new_values)
def set_priv_to_pub_mapping(self):
- return "".join(str(y) for y in [(int(x[0][-1]), int(x[1][-1])) for x in
- self.port_pair_list])
+ port_nums = [tuple(self.vnfd_helper.port_nums(x)) for x in self.port_pair_list]
+ return "".join(str(y).replace(" ", "") for y in
+ port_nums)
def set_priv_que_handler(self):
# iterated twice, can't be generator
- priv_to_pub_map = [(int(x[0][-1]), int(x[1][-1])) for x in self.port_pairs]
+ priv_to_pub_map = [tuple(self.vnfd_helper.port_nums(x)) for x in self.port_pairs]
# must be list to use .index()
port_list = list(chain.from_iterable(priv_to_pub_map))
priv_ports = (x[0] for x in priv_to_pub_map)
self.prv_que_handler = '({})'.format(
- ",".join((str(port_list.index(x)) for x in priv_ports)))
+ "".join(("{},".format(port_list.index(x)) for x in priv_ports)))
def generate_arp_route_tbl(self):
- arp_config = []
arp_route_tbl_tmpl = "({port0_dst_ip_hex},{port0_netmask_hex},{port_num}," \
"{next_hop_ip_hex})"
- for port_pair in self.port_pair_list:
- for port in port_pair:
- port_num = int(port[-1])
- interface = self.interfaces[port_num]
- # We must use the dst because we are on the VNF and we need to
- # reach the TG.
- dst_port0_ip = \
- ipaddress.ip_interface(six.text_type(
- "%s/%s" % (interface["virtual-interface"]["dst_ip"],
- interface["virtual-interface"]["netmask"])))
- arp_vars = {
- "port0_dst_ip_hex": ip_to_hex(dst_port0_ip.network.network_address.exploded),
- "port0_netmask_hex": ip_to_hex(dst_port0_ip.network.netmask.exploded),
- # this is the port num that contains port0 subnet and next_hop_ip_hex
- "port_num": port_num,
- # next hop is dst in this case
- # must be within subnet
- "next_hop_ip_hex": ip_to_hex(dst_port0_ip.ip.exploded),
- }
- arp_config.append(arp_route_tbl_tmpl.format(**arp_vars))
-
- return ' '.join(arp_config)
+
+ def build_arp_config(port):
+ dpdk_port_num = self.vnfd_helper.port_num(port)
+ interface = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ # We must use the dst because we are on the VNF and we need to
+ # reach the TG.
+ dst_port0_ip = ipaddress.ip_interface(six.text_type(
+ "%s/%s" % (interface["dst_ip"], interface["netmask"])))
+
+ arp_vars = {
+ "port0_dst_ip_hex": ip_to_hex(dst_port0_ip.network.network_address.exploded),
+ "port0_netmask_hex": ip_to_hex(dst_port0_ip.network.netmask.exploded),
+ # this is the port num that contains port0 subnet and next_hop_ip_hex
+ # this is LINKID which should be based on DPDK port number
+ "port_num": dpdk_port_num,
+ # next hop is dst in this case
+ # must be within subnet
+ "next_hop_ip_hex": ip_to_hex(dst_port0_ip.ip.exploded),
+ }
+ return arp_route_tbl_tmpl.format(**arp_vars)
+
+ return ' '.join(build_arp_config(port) for port in self.all_ports)
def generate_arpicmp_data(self):
swq_in_str = self.make_range_str('SWQ{}', self.swq, offset=self.lb_count)
@@ -318,9 +377,11 @@ class MultiPortConfig(object):
swq_out_str = self.make_range_str('SWQ{}', self.swq, offset=self.lb_count)
self.swq += self.lb_count
# ports_mac_list is disabled for some reason
- # mac_iter = (self.interfaces[int(x[-1])]['virtual-interface']['local_mac']
- # for port_pair in self.port_pair_list for x in port_pair)
- pktq_in_iter = ('RXQ{}'.format(float(x[0][-1])) for x in self.port_pair_list)
+
+ # mac_iter = (self.vnfd_helper.find_interface(name=port)['virtual-interface']['local_mac']
+ # for port in self.all_ports)
+ pktq_in_iter = ('RXQ{}.0'.format(self.vnfd_helper.port_num(x[0])) for x in
+ self.port_pair_list)
arpicmp_data = {
'core': self.gen_core(self.start_core),
@@ -505,7 +566,10 @@ class MultiPortConfig(object):
self.vnf_tpl = self.get_config_tpl_data(self.vnf_type)
def generate_config(self):
- self.port_pair_list, self.networks = self.get_port_pairs(self.interfaces)
+ self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
+ self.port_pair_list = self._port_pairs.port_pair_list
+ self.all_ports = self._port_pairs.all_ports
+
self.get_lb_count()
self.generate_lb_to_port_pair_mapping()
self.generate_config_data()
@@ -514,18 +578,16 @@ class MultiPortConfig(object):
self.write_parser.write(tfh)
def generate_link_config(self):
+ def build_args(port):
+ # lookup interface by name
+ virtual_interface = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ local_ip = virtual_interface["local_ip"]
+ netmask = virtual_interface["netmask"]
+ port_num = self.vnfd_helper.port_num(port)
+ port_ip, prefix_len = self.validate_ip_and_prefixlen(local_ip, netmask)
+ return LINK_CONFIG_TEMPLATE.format(port_num, port_ip, prefix_len)
- link_configs = []
- for port_pair in self.port_pair_list:
- for port in port_pair:
- port = port[-1]
- virtual_interface = self.interfaces[int(port)]["virtual-interface"]
- local_ip = virtual_interface["local_ip"]
- netmask = virtual_interface["netmask"]
- port_ip, prefix_len = self.validate_ip_and_prefixlen(local_ip, netmask)
- link_configs.append(LINK_CONFIG_TEMPLATE.format(port, port_ip, prefix_len))
-
- return ''.join(link_configs)
+ return ''.join(build_args(port) for port in self.all_ports)
def get_route_data(self, src_key, data_key, port):
route_list = self.vnfd['vdu'][0].get(src_key, [])
@@ -548,37 +610,38 @@ class MultiPortConfig(object):
def generate_arp_config(self):
arp_config = []
- for port_pair in self.port_pair_list:
- for port in port_pair:
- # ignore gateway, always use TG IP
- # gateway = self.get_ports_gateway(port)
- dst_mac = self.interfaces[int(port[-1])]["virtual-interface"]["dst_mac"]
- dst_ip = self.interfaces[int(port[-1])]["virtual-interface"]["dst_ip"]
- # arp_config.append((port[-1], gateway, dst_mac, self.txrx_pipeline))
- # so dst_mac is the TG dest mac, so we need TG dest IP.
- arp_config.append((port[-1], dst_ip, dst_mac, self.txrx_pipeline))
+ for port in self.all_ports:
+ # ignore gateway, always use TG IP
+ # gateway = self.get_ports_gateway(port)
+ vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ dst_mac = vintf["dst_mac"]
+ dst_ip = vintf["dst_ip"]
+ # arp_config.append(
+ # (self.vnfd_helper.port_num(port), gateway, dst_mac, self.txrx_pipeline))
+ # so dst_mac is the TG dest mac, so we need TG dest IP.
+ # should be dpdk_port_num
+ arp_config.append(
+ (self.vnfd_helper.port_num(port), dst_ip, dst_mac, self.txrx_pipeline))
return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config))
def generate_arp_config6(self):
arp_config6 = []
- for port_pair in self.port_pair_list:
- for port in port_pair:
- # ignore gateway, always use TG IP
- # gateway6 = self.get_ports_gateway6(port)
- dst_mac6 = self.interfaces[int(port[-1])]["virtual-interface"]["dst_mac"]
- dst_ip6 = self.interfaces[int(port[-1])]["virtual-interface"]["dst_ip"]
- # arp_config6.append((port[-1], gateway6, dst_mac6, self.txrx_pipeline))
- arp_config6.append((port[-1], dst_ip6, dst_mac6, self.txrx_pipeline))
+ for port in self.all_ports:
+ # ignore gateway, always use TG IP
+ # gateway6 = self.get_ports_gateway6(port)
+ vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ dst_mac6 = vintf["dst_mac"]
+ dst_ip6 = vintf["dst_ip"]
+ # arp_config6.append(
+ # (self.vnfd_helper.port_num(port), gateway6, dst_mac6, self.txrx_pipeline))
+ arp_config6.append(
+ (self.vnfd_helper.port_num(port), dst_ip6, dst_mac6, self.txrx_pipeline))
return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config6))
def generate_action_config(self):
- port_list = []
- for port_pair in self.port_pair_list:
- for port in port_pair:
- port_list.append(port[-1])
-
+ port_list = (self.vnfd_helper.port_num(p) for p in self.all_ports)
if self.vnf_type == "VFW":
template = FW_ACTION_TEMPLATE
else:
@@ -589,8 +652,9 @@ class MultiPortConfig(object):
def get_ip_from_port(self, port):
# we can't use gateway because in OpenStack gateways interfer with floating ip routing
# return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
- ip = self.interfaces[port]["virtual-interface"]["local_ip"]
- netmask = self.interfaces[port]["virtual-interface"]["netmask"]
+ vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ ip = vintf["local_ip"]
+ netmask = vintf["netmask"]
return self.make_ip_addr(ip, netmask)
def get_network_and_prefixlen_from_ip_of_port(self, port):
@@ -607,12 +671,12 @@ class MultiPortConfig(object):
new_rules = []
new_ipv6_rules = []
pattern = 'p {0} add {1} {2} {3} {4} {5} 0 65535 0 65535 0 0 {6}'
- for port_pair in self.port_pair_list:
- src_port = int(port_pair[0][-1])
- dst_port = int(port_pair[1][-1])
+ for src_intf, dst_intf in self.port_pair_list:
+ src_port = self.vnfd_helper.port_num(src_intf)
+ dst_port = self.vnfd_helper.port_num(dst_intf)
- src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_port)
- dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_port)
+ src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_intf)
+ dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_intf)
# ignore entires with empty values
if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
new_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
@@ -637,7 +701,8 @@ class MultiPortConfig(object):
return ''.join([rules_config, new_rules_config, acl_apply])
def generate_script_data(self):
- self.port_pair_list, self.networks = self.get_port_pairs(self.interfaces)
+ self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
+ self.port_pair_list = self._port_pairs.port_pair_list
self.get_lb_count()
script_data = {
'link_config': self.generate_link_config(),
@@ -675,5 +740,5 @@ set_hash_input_set {0} ipv6-udp src-ipv6 udp-src-port add
set_hash_input_set {1} ipv6-udp dst-ipv6 udp-dst-port add
"""
for port_pair in self.port_pair_list:
- script += hwlb_tpl.format(port_pair[0][-1], port_pair[1][-1])
+ script += hwlb_tpl.format(*(self.vnfd_helper.port_nums(port_pair)))
return script
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
index 38831ee86..1ec00e5bc 100644
--- a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
+++ b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
@@ -113,10 +113,10 @@ class IxNextgen(object):
}
MODE_SEEDS_MAP = {
- 0: ('private_1', ['256', '2048']),
+ 0: ('private_0', ['256', '2048']),
}
- MODE_SEEDS_DEFAULT = 'public_1', ['2048', '256']
+ MODE_SEEDS_DEFAULT = 'public_0', ['2048', '256']
@staticmethod
def find_view_obj(view_name, views):
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index 055fdba7e..fa32a4dcf 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -21,11 +21,11 @@ import os
import os.path
import re
import multiprocessing
-from collections import Sequence
from oslo_config import cfg
from yardstick import ssh
+from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
from yardstick.network_services.utils import get_nsb_option
@@ -45,16 +45,14 @@ class ResourceProfile(object):
def __init__(self, mgmt, interfaces=None, cores=None):
self.enable = True
- self.connection = None
- self.cores = cores if isinstance(cores, Sequence) else []
+ self.cores = validate_non_string_sequence(cores, default=[])
self._queue = multiprocessing.Queue()
self.amqp_client = None
- self.interfaces = interfaces if isinstance(interfaces, Sequence) else []
+ self.interfaces = validate_non_string_sequence(interfaces, default=[])
# why the host or ip?
self.vnfip = mgmt.get("host", mgmt["ip"])
self.connection = ssh.SSH.from_node(mgmt, overrides={"ip": self.vnfip})
-
self.connection.wait()
def check_if_sa_running(self, process):
@@ -111,7 +109,7 @@ class ResourceProfile(object):
@classmethod
def parse_intel_pmu_stats(cls, key, value):
- return {''.join(key): value.split(":")[1]}
+ return {''.join(str(v) for v in key): value.split(":")[1]}
def parse_collectd_result(self, metrics, core_list):
""" convert collectd data into json"""
@@ -234,7 +232,7 @@ class ResourceProfile(object):
connection.execute("sudo rabbitmqctl delete_user guest")
connection.execute("sudo rabbitmqctl add_user admin admin")
connection.execute("sudo rabbitmqctl authenticate_user admin admin")
- connection.execute("sudo rabbitmqctl set_permissions -p / admin \".*\" \".*\" \".*\"")
+ connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
LOG.debug("Start collectd service.....")
connection.execute("sudo %s" % collectd_path)
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index 906498586..611792b94 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -44,7 +44,7 @@ class TrafficProfile(object):
# IMIX = {"10K": 0.1, "100M": 0.5}
self.params = tp_config
- def execute(self, traffic_generator):
+ def execute_traffic(self, traffic_generator):
""" This methods defines the behavior of the traffic generator.
It will be called in a loop until the traffic generator exits.
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index ddb41f3c0..049a81a65 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -23,11 +23,17 @@ LOG = logging.getLogger(__name__)
class IXIARFC2544Profile(TrexProfile):
- def _get_ixia_traffic_profile(self, profile_data, mac={},
- xfile=None, static_traffic={}):
+
+ def _get_ixia_traffic_profile(self, profile_data, mac=None, xfile=None, static_traffic=None):
+ if mac is None:
+ mac = {}
+
+ if static_traffic is None:
+ static_traffic = {}
+
result = {}
if xfile:
- with open(xfile, 'r') as stream:
+ with open(xfile) as stream:
try:
static_traffic = json.load(stream)
except Exception as exc:
@@ -73,7 +79,7 @@ class IXIARFC2544Profile(TrexProfile):
def _ixia_traffic_generate(self, traffic_generator, traffic, ixia_obj):
for key, value in traffic.items():
if "public" in key or "private" in key:
- traffic[key]["iload"] = str(self.rate)
+ value["iload"] = str(self.rate)
ixia_obj.ix_update_frame(traffic)
ixia_obj.ix_update_ether(traffic)
ixia_obj.add_ip_header(traffic, 4)
@@ -81,19 +87,27 @@ class IXIARFC2544Profile(TrexProfile):
self.tmp_drop = 0
self.tmp_throughput = 0
- def update_traffic_profile(self):
- self.profile = 'private_1'
- for key, value in self.params.items():
- if "private" in key or "public" in key:
- self.profile_data = self.params[key]
+ def update_traffic_profile(self, traffic_generator):
+ def port_generator():
+ for vld_id, intfs in sorted(traffic_generator.networks.items()):
+ if not vld_id.startswith(("private", "public")):
+ continue
+ profile_data = self.params.get(vld_id)
+ if not profile_data:
+ continue
+ self.profile_data = profile_data
self.get_streams(self.profile_data)
- self.full_profile.update({key: self.profile_data})
+ self.full_profile.update({vld_id: self.profile_data})
+ for intf in intfs:
+ yield traffic_generator.vnfd_helper.port_num(intf)
+
+ self.ports = [port for port in port_generator()]
- def execute(self, traffic_generator, ixia_obj, mac={}, xfile=None):
+ def execute_traffic(self, traffic_generator, ixia_obj, mac={}, xfile=None):
if self.first_run:
self.full_profile = {}
self.pg_id = 0
- self.update_traffic_profile()
+ self.update_traffic_profile(traffic_generator)
traffic = \
self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
self.max_rate = self.rate
@@ -108,7 +122,7 @@ class IXIARFC2544Profile(TrexProfile):
def start_ixia_latency(self, traffic_generator, ixia_obj,
mac={}, xfile=None):
- self.update_traffic_profile()
+ self.update_traffic_profile(traffic_generator)
traffic = \
self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
self._ixia_traffic_generate(traffic_generator, traffic,
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index a3b803673..6a0ecaf99 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -43,7 +43,7 @@ class RFC2544Profile(TrexProfile):
def register_generator(self, generator):
self.generator = generator
- def execute(self, traffic_generator=None):
+ def execute_traffic(self, traffic_generator=None):
""" Generate the stream and run traffic on the given ports """
if traffic_generator is not None and self.generator is None:
self.generator = traffic_generator
@@ -52,21 +52,18 @@ class RFC2544Profile(TrexProfile):
return
self.ports = []
- priv_ports = self.generator.priv_ports
- pub_ports = self.generator.pub_ports
- # start from 1 for private_1, public_1, etc.
- for index, (priv_port, pub_port) in enumerate(zip(priv_ports, pub_ports), 1):
- profile_data = self.params.get('private_{}'.format(index), '')
- self.ports.append(priv_port)
- # pass profile_data directly, don't use self.profile_data
- self.generator.client.add_streams(self.get_streams(profile_data), ports=priv_port)
- profile_data = self.params.get('public_{}'.format(index), '')
+ for vld_id, intfs in sorted(self.generator.networks.items()):
+ profile_data = self.params.get(vld_id)
+ # no profile for this port
+ if not profile_data:
+ continue
# correlated traffic doesn't use public traffic?
- if not profile_data or self.generator.rfc2544_helper.correlated_traffic:
+ if vld_id.startswith("public") and self.generator.rfc2544_helper.correlated_traffic:
continue
- # just get the pub_port
- self.ports.append(pub_port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=pub_port)
+ for intf in intfs:
+ port = self.generator.vnfd_helper.port_num(intf)
+ self.ports.append(port)
+ self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
self.max_rate = self.rate
self.min_rate = 0
@@ -86,7 +83,7 @@ class RFC2544Profile(TrexProfile):
if generator is None:
generator = self.generator
run_duration = self.generator.RUN_DURATION
- samples = self.generator.generate_samples()
+ samples = self.generator.generate_samples(self.ports)
in_packets = sum([value['in_packets'] for value in samples.values()])
out_packets = sum([value['out_packets'] for value in samples.values()])
@@ -135,8 +132,8 @@ class RFC2544Profile(TrexProfile):
# TODO(esm): why don't we discard results that are out of tolerance?
self.min_rate = self.rate
- generator.clear_client_stats()
- generator.start_client(mult=self.get_multiplier(),
+ generator.clear_client_stats(self.ports)
+ generator.start_client(self.ports, mult=self.get_multiplier(),
duration=run_duration, force=True)
# if correlated traffic update the Throughput
diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/traffic_profile.py
index 4c6595d94..894126c09 100644
--- a/yardstick/network_services/traffic_profile/traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/traffic_profile.py
@@ -188,9 +188,9 @@ class TrexProfile(TrafficProfile):
),
}
- def execute(self, traffic_generator):
+ def execute_traffic(self, traffic_generator):
""" Generate the stream and run traffic on the given ports """
- pass
+ raise NotImplementedError()
def _call_on_range(self, range, single_action, range_action, count=1, to_int=False):
def convert_to_int(val):
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index 5f3c8a0cd..3ba38dec2 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
# ACL should work the same on all systems, we can provide the binary
ACL_PIPELINE_COMMAND = \
- 'sudo {tool_path} -p {ports_len_hex} -f {cfg_file} -s {script}'
+ 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
ACL_COLLECT_KPI = r"""\
ACL TOTAL:[^p]+pkts_processed"?:\s(\d+),[^p]+pkts_drop"?:\s(\d+),[^p]+pkts_received"?:\s(\d+),"""
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index 955f9f03d..e32e5fb50 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -16,6 +16,8 @@
from __future__ import absolute_import
import logging
+from yardstick.network_services.helpers.samplevnf_helper import PortPairs
+
LOG = logging.getLogger(__name__)
@@ -59,6 +61,10 @@ class QueueFileWrapper(object):
class VnfdHelper(dict):
+ def __init__(self, *args, **kwargs):
+ super(VnfdHelper, self).__init__(*args, **kwargs)
+ self.port_pairs = PortPairs(self['vdu'][0]['external-interface'])
+
@property
def mgmt_interface(self):
return self["mgmt-interface"]
@@ -92,6 +98,28 @@ class VnfdHelper(dict):
if interface[key] == value:
return interface
+ # hide dpdk_port_num key so we can abstract
+ def find_interface_by_port(self, port):
+ for interface in self.interfaces:
+ virtual_intf = interface["virtual-interface"]
+ # we have to convert to int to compare
+ if int(virtual_intf['dpdk_port_num']) == port:
+ return interface
+
+ def port_num(self, name):
+ # we need interface name -> DPDK port num (PMD ID) -> LINK ID
+ # LINK ID -> PMD ID is governed by the port mask
+ """
+
+ :rtype: int
+ :type name: str
+ """
+ intf = self.find_interface(name=name)
+ return int(intf["virtual-interface"]["dpdk_port_num"])
+
+ def port_nums(self, intfs):
+ return [self.port_num(i) for i in intfs]
+
class VNFObject(object):
diff --git a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
index f9980b165..45ef757b3 100644
--- a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
@@ -13,16 +13,14 @@
# limitations under the License.
from __future__ import absolute_import
-import time
import logging
-from six.moves import zip
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
LOG = logging.getLogger(__name__)
# CGNAPT should work the same on all systems, we can provide the binary
-CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {ports_len_hex} -f {cfg_file} -s {script}'
+CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
WAIT_FOR_STATIC_NAPT = 4
CGNAPT_COLLECT_KPI = """\
@@ -55,7 +53,7 @@ class CgnaptApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
yield '.'.join(ip_parts)
@staticmethod
- def _update_cgnat_script_file(ip_pipeline_cfg, mcpi, vnf_str):
+ def _update_cgnat_script_file(ip_pipeline_cfg, mcpi):
pipeline_config_str = str(ip_pipeline_cfg)
input_cmds = '\n'.join(mcpi)
icmp_flag = 'link 0 down' in input_cmds
@@ -67,16 +65,13 @@ class CgnaptApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
raise NotImplementedError
def _get_cgnapt_config(self, interfaces=None):
+ # TODO: static CGNAPT is broken, don't use it
if interfaces is None:
interfaces = self.vnfd_helper.interfaces
- gateway_ips = []
-
# fixme: Get private port and gateway from port list
- priv_ports = interfaces[::2]
- for interface in priv_ports:
- gateway_ips.append(self._get_ports_gateway(interface["name"]))
- return gateway_ips
+ priv_ports = self.vnfd_helper.port_pairs.priv_ports
+ return [self._get_ports_gateway(intf["name"]) for intf in priv_ports]
class CgnaptApproxVnf(SampleVNF):
@@ -103,21 +98,23 @@ class CgnaptApproxVnf(SampleVNF):
if self.scenario_helper.options.get('napt', 'static') != 'static':
return
- ip_iter = self.setup_helper._generate_ip_from_pool("152.16.40.10")
- gw_ips = self.setup_helper._get_cgnapt_config()
- if self.scenario_helper.vnf_cfg.get("lb_config", "SW") == 'HW':
- pipeline = self.setup_helper.HW_DEFAULT_CORE
- offset = 3
- else:
- pipeline = self.setup_helper.SW_DEFAULT_CORE - 1
- offset = 0
-
- worker_threads = int(self.scenario_helper.vnf_cfg["worker_threads"])
- cmd_template = "p {0} entry addm {1} 1 {2} 1 0 32 65535 65535 65535"
- for gw, ip in zip(gw_ips, ip_iter):
- cmd = cmd_template.format(pipeline, gw, ip)
- pipeline += worker_threads
- pipeline += offset
- self.vnf_execute(cmd)
-
- time.sleep(WAIT_FOR_STATIC_NAPT)
+ # ip_iter = self.setup_helper._generate_ip_from_pool("152.16.40.10")
+ # gw_ips = self.setup_helper._get_cgnapt_config()
+ # if self.scenario_helper.vnf_cfg.get("lb_config", "SW") == 'HW':
+ # pipeline = self.setup_helper.HW_DEFAULT_CORE
+ # offset = 3
+ # else:
+ # pipeline = self.setup_helper.SW_DEFAULT_CORE - 1
+ # offset = 0
+ #
+ # worker_threads = int(self.scenario_helper.vnf_cfg["worker_threads"])
+ # # p <pipeline id> entry addm <prv_ipv4/6> prvport> <pub_ip> <pub_port> <phy_port> <ttl>
+ # # <no_of_entries> <end_prv_port> <end_pub_port>
+ # cmd_template = "p {0} entry addm {1} 1 {2} 1 0 32 65535 65535 65535"
+ # for gw, ip in zip(gw_ips, ip_iter):
+ # cmd = cmd_template.format(pipeline, gw, ip)
+ # pipeline += worker_threads
+ # pipeline += offset
+ # self.vnf_execute(cmd)
+ #
+ # time.sleep(WAIT_FOR_STATIC_NAPT)
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index d6ec271c9..00ab6c24c 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -639,9 +639,10 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
prox_config.parse()
# Ensure MAC is set "hardware"
- ext_intf = self.vnfd_helper.interfaces
- # we are using enumeration to map logical port numbers to interfaces
- for port_num, intf in enumerate(ext_intf):
+ all_ports = self.vnfd_helper.port_pairs.all_ports
+ # use dpdk port number
+ for port_name in all_ports:
+ port_num = self.vnfd_helper.port_num(port_name)
port_section_name = "port {}".format(port_num)
for section_name, section in sections:
if port_section_name != section_name:
@@ -659,13 +660,15 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
if item_val.startswith("@@dst_mac"):
tx_port_iter = re.finditer(r'\d+', item_val)
tx_port_no = int(next(tx_port_iter).group(0))
- mac = ext_intf[tx_port_no]["virtual-interface"]["dst_mac"]
+ intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
+ mac = intf["virtual-interface"]["dst_mac"]
section_data[1] = mac.replace(":", " ", 6)
if item_key == "dst mac" and item_val.startswith("@@"):
tx_port_iter = re.finditer(r'\d+', item_val)
tx_port_no = int(next(tx_port_iter).group(0))
- mac = ext_intf[tx_port_no]["virtual-interface"]["dst_mac"]
+ intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
+ mac = intf["virtual-interface"]["dst_mac"]
section_data[1] = mac
# if addition file specified in prox config
@@ -714,13 +717,15 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
def generate_prox_lua_file(self):
p = OrderedDict()
- ext_intf = self.vnfd_helper.interfaces
+ all_ports = self.vnfd_helper.port_pairs.all_ports
lua_param = self.LUA_PARAMETER_NAME
- for intf in ext_intf:
+ for port_name in all_ports:
peer = self.LUA_PARAMETER_PEER[lua_param]
- port_num = intf["virtual-interface"]["dpdk_port_num"]
- local_ip = intf["local_ip"]
- dst_ip = intf["dst_ip"]
+ port_num = self.vnfd_helper.port_num(port_name)
+ intf = self.vnfd_helper.find_interface(name=port_name)
+ vintf = intf['virtual-interface']
+ local_ip = vintf["local_ip"]
+ dst_ip = vintf["dst_ip"]
local_ip_hex = ip_to_hex(local_ip, separator=' ')
dst_ip_hex = ip_to_hex(dst_ip, separator=' ')
p.update([
@@ -880,7 +885,7 @@ class ProxResourceHelper(ClientResourceHelper):
self._run_traffic_once(traffic_profile)
def _run_traffic_once(self, traffic_profile):
- traffic_profile.execute(self)
+ traffic_profile.execute_traffic(self)
if traffic_profile.done:
self._queue.put({'done': True})
LOG.debug("tg_prox done")
@@ -922,12 +927,11 @@ class ProxResourceHelper(ClientResourceHelper):
self.sut.stop_all()
def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
- # type: (object, object, object, object) -> object
# do this assert in init? unless we expect interface count to
# change from one run to another run...
- interfaces = self.vnfd_helper.interfaces
- interface_count = len(interfaces)
- assert interface_count in {1, 2, 4}, \
+ ports = self.vnfd_helper.port_pairs.all_ports
+ port_count = len(ports)
+ assert port_count in {1, 2, 4}, \
"Invalid number of ports: 1, 2 or 4 ports only supported at this time"
with self.traffic_context(pkt_size, value):
@@ -942,15 +946,18 @@ class ProxResourceHelper(ClientResourceHelper):
latency = self.get_latency()
deltas = data['delta']
- rx_total, tx_total = self.sut.port_stats(range(interface_count))[6:8]
- pps = value / 100.0 * self.line_rate_to_pps(pkt_size, interface_count)
+ rx_total, tx_total = self.sut.port_stats(range(port_count))[6:8]
+ pps = value / 100.0 * self.line_rate_to_pps(pkt_size, port_count)
samples = {}
# we are currently using enumeration to map logical port num to interface
- for index, iface in enumerate(interfaces):
- port_rx_total, port_tx_total = self.sut.port_stats([index])[6:8]
- samples[iface["name"]] = {"in_packets": port_rx_total,
- "out_packets": port_tx_total}
+ for port_name in ports:
+ port = self.vnfd_helper.port_num(port_name)
+ port_rx_total, port_tx_total = self.sut.port_stats([port])[6:8]
+ samples[port_name] = {
+ "in_packets": port_rx_total,
+ "out_packets": port_tx_total,
+ }
result = ProxTestDataTuple(tolerated_loss, tsc_hz, deltas.rx, deltas.tx,
deltas.tsc, latency, rx_total, tx_total, pps)
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index cb09b43f6..bef7c5a33 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -51,9 +51,7 @@ class ProxApproxVnf(SampleVNF):
try:
return self.resource_helper.execute(cmd, *args, **kwargs)
except OSError as e:
- if ignore_errors and e.errno in {errno.EPIPE, errno.ESHUTDOWN}:
- pass
- else:
+ if not ignore_errors or e.errno not in {errno.EPIPE, errno.ESHUTDOWN}:
raise
def collect_kpi(self):
@@ -66,11 +64,12 @@ class ProxApproxVnf(SampleVNF):
}
return result
- if len(self.vnfd_helper.interfaces) not in {1, 2, 4}:
+ intf_count = len(self.vnfd_helper.interfaces)
+ if intf_count not in {1, 2, 4}:
raise RuntimeError("Failed ..Invalid no of ports .. "
"1, 2 or 4 ports only supported at this time")
- port_stats = self.vnf_execute('port_stats', range(len(self.vnfd_helper.interfaces)))
+ port_stats = self.vnf_execute('port_stats', range(intf_count))
try:
rx_total = port_stats[6]
tx_total = port_stats[7]
@@ -90,7 +89,7 @@ class ProxApproxVnf(SampleVNF):
def _tear_down(self):
# this should be standardized for all VNFs or removed
- self.setup_helper.rebind_drivers()
+ self.setup_helper.tear_down()
def terminate(self):
# try to quit with socket commands
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 1b2533aad..96e703060 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -30,7 +30,9 @@ from six.moves import cStringIO
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
from yardstick.network_services.helpers.cpu import CpuSysCores
+from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
+from yardstick.network_services.helpers.dpdknicbind_helper import DpdkBindHelper
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
@@ -126,15 +128,11 @@ class SetupEnvHelper(object):
class DpdkVnfSetupEnvHelper(SetupEnvHelper):
APP_NAME = 'DpdkVnf'
- DPDK_BIND_CMD = "sudo {dpdk_nic_bind} {force} -b {driver} {vpci}"
- DPDK_UNBIND_CMD = "sudo {dpdk_nic_bind} --force -b {driver} {vpci}"
FIND_NET_CMD = "find /sys/class/net -lname '*{}*' -printf '%f'"
HW_DEFAULT_CORE = 3
SW_DEFAULT_CORE = 2
- DPDK_STATUS_DRIVER_RE = re.compile(r"(\d{2}:\d{2}\.\d).*drv=([-\w]+)")
-
@staticmethod
def _update_packet_type(ip_pipeline_cfg, traffic_options):
match_str = 'pkt_type = ipv4'
@@ -165,15 +163,9 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
super(DpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper)
self.all_ports = None
self.bound_pci = None
- self._dpdk_nic_bind = None
self.socket = None
self.used_drivers = None
-
- @property
- def dpdk_nic_bind(self):
- if self._dpdk_nic_bind is None:
- self._dpdk_nic_bind = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
- return self._dpdk_nic_bind
+ self.dpdk_bind_helper = DpdkBindHelper(ssh_helper)
def _setup_hugepages(self):
cmd = "awk '/Hugepagesize/ { print $2$3 }' < /proc/meminfo"
@@ -190,10 +182,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.ssh_helper.execute("echo %s | sudo tee %s" % (pages, memory_path))
- def _get_dpdk_port_num(self, name):
- interface = self.vnfd_helper.find_interface(name=name)
- return interface['virtual-interface']['dpdk_port_num']
-
def build_config(self):
vnf_cfg = self.scenario_helper.vnf_cfg
task_path = self.scenario_helper.task_path
@@ -216,7 +204,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
multiport = MultiPortConfig(self.scenario_helper.topology,
config_tpl_cfg,
config_basename,
- self.vnfd_helper.interfaces,
+ self.vnfd_helper,
self.VNF_TYPE,
lb_count,
worker_threads,
@@ -234,7 +222,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.ssh_helper.upload_config_file(config_basename, new_config)
self.ssh_helper.upload_config_file(script_basename,
multiport.generate_script(self.vnfd_helper))
- self.all_ports = multiport.port_pair_list
LOG.info("Provision and start the %s", self.APP_NAME)
self._build_pipeline_kwargs()
@@ -242,11 +229,19 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def _build_pipeline_kwargs(self):
tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
- ports_len_hex = hex(2 ** (len(self.all_ports) + 1) - 1)
+ # count the number of actual ports in the list of pairs
+ # remove duplicate ports
+ # this is really a mapping from LINK ID to DPDK PMD ID
+ # e.g. 0x110 maps LINK0 -> PMD_ID_1, LINK1 -> PMD_ID_2
+ # 0x1010 maps LINK0 -> PMD_ID_1, LINK1 -> PMD_ID_3
+ ports = self.vnfd_helper.port_pairs.all_ports
+ port_nums = self.vnfd_helper.port_nums(ports)
+ # create mask from all the dpdk port numbers
+ ports_mask_hex = hex(sum(2 ** num for num in port_nums))
self.pipeline_kwargs = {
'cfg_file': self.CFG_CONFIG,
'script': self.CFG_SCRIPT,
- 'ports_len_hex': ports_len_hex,
+ 'port_mask_hex': ports_mask_hex,
'tool_path': tool_path,
}
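The new port_mask_hex is built from the actual DPDK port numbers (PMD ids) rather than a simple port count, so gaps in the numbering are preserved in the mask. A self-contained sketch of the same computation:
def port_mask_hex(dpdk_port_nums):
    # set one bit per DPDK port number, e.g. [0, 2] -> 0x5 and [1, 3] -> 0xa
    return hex(sum(2 ** num for num in dpdk_port_nums))

# LINK0 -> PMD 1, LINK1 -> PMD 3 gives a mask of 0xa
assert port_mask_hex([1, 3]) == '0xa'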
@@ -285,17 +280,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def _validate_cpu_cfg(self):
return self._get_cpu_sibling_list()
- def _find_used_drivers(self):
- cmd = "{0} -s".format(self.dpdk_nic_bind)
- rc, dpdk_status, _ = self.ssh_helper.execute(cmd)
-
- self.used_drivers = {
- vpci: (index, driver)
- for index, (vpci, driver)
- in enumerate(self.DPDK_STATUS_DRIVER_RE.findall(dpdk_status))
- if any(b.endswith(vpci) for b in self.bound_pci)
- }
-
def setup_vnf_environment(self):
self._setup_dpdk()
resource = self._setup_resources()
@@ -341,65 +325,31 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def _detect_and_bind_drivers(self):
interfaces = self.vnfd_helper.interfaces
- self._find_used_drivers()
- for vpci, (index, _) in self.used_drivers.items():
- try:
- intf1 = next(v for v in interfaces if vpci == v['virtual-interface']['vpci'])
- except StopIteration:
- pass
- else:
- intf1['dpdk_port_num'] = index
-
- for vpci in self.bound_pci:
- self._bind_dpdk('igb_uio', vpci)
- time.sleep(2)
-
- # debug dump after binding
- self.ssh_helper.execute("sudo {} -s".format(self.dpdk_nic_bind))
+ self.dpdk_bind_helper.read_status()
+ self.dpdk_bind_helper.save_used_drivers()
- def rebind_drivers(self, force=True):
- if not self.used_drivers:
- self._find_used_drivers()
- for vpci, (_, driver) in self.used_drivers.items():
- self._bind_dpdk(driver, vpci, force)
+ self.dpdk_bind_helper.bind(self.bound_pci, 'igb_uio')
- def _bind_dpdk(self, driver, vpci, force=True):
- if force:
- force = '--force '
- else:
- force = ''
- cmd = self.DPDK_BIND_CMD.format(force=force,
- dpdk_nic_bind=self.dpdk_nic_bind,
- driver=driver,
- vpci=vpci)
- self.ssh_helper.execute(cmd)
+ sorted_dpdk_pci_addresses = sorted(self.dpdk_bind_helper.dpdk_bound_pci_addresses)
+ for dpdk_port_num, vpci in enumerate(sorted_dpdk_pci_addresses):
+ try:
+ intf = next(v for v in interfaces
+ if vpci == v['virtual-interface']['vpci'])
+ # force to int
+ intf['virtual-interface']['dpdk_port_num'] = int(dpdk_port_num)
+ except StopIteration:
+ pass
+ time.sleep(2)
- def _detect_and_bind_dpdk(self, vpci, driver):
+ def get_local_iface_name_by_vpci(self, vpci):
find_net_cmd = self.FIND_NET_CMD.format(vpci)
- exit_status, _, _ = self.ssh_helper.execute(find_net_cmd)
- if exit_status == 0:
- # already bound
- return None
- self._bind_dpdk(driver, vpci)
exit_status, stdout, _ = self.ssh_helper.execute(find_net_cmd)
- if exit_status != 0:
- # failed to bind
- return None
- return stdout
-
- def _bind_kernel_devices(self):
- # only used by PingSetupEnvHelper?
- for intf in self.vnfd_helper.interfaces:
- vi = intf["virtual-interface"]
- stdout = self._detect_and_bind_dpdk(vi["vpci"], vi["driver"])
- if stdout is not None:
- vi["local_iface_name"] = posixpath.basename(stdout)
+ if exit_status == 0:
+ return stdout
+ return None
def tear_down(self):
- for vpci, (_, driver) in self.used_drivers.items():
- self.ssh_helper.execute(self.DPDK_UNBIND_CMD.format(dpdk_nic_bind=self.dpdk_nic_bind,
- driver=driver,
- vpci=vpci))
+ self.dpdk_bind_helper.rebind_drivers()
class ResourceHelper(object):
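tear_down() now delegates unbinding to DpdkBindHelper, which parses dpdk-devbind.py output instead of re-running the old inline regex. A minimal sketch of the general approach, not the actual DpdkBindHelper API, reusing the regex removed above and the same assumption the diff makes that DPDK port numbers follow ascending PCI address order:
import re

# regex taken from the removed DPDK_STATUS_DRIVER_RE above
STATUS_DRIVER_RE = re.compile(r"(\d{2}:\d{2}\.\d).*drv=([-\w]+)")

def dpdk_port_num_by_vpci(devbind_status_output, dpdk_driver='igb_uio'):
    # sketch only: pair each PCI address with its bound driver, keep the ones
    # bound to the DPDK driver, and enumerate them in sorted PCI order
    bound = sorted(vpci for vpci, driver in STATUS_DRIVER_RE.findall(devbind_status_output)
                   if driver == dpdk_driver)
    return {vpci: port_num for port_num, vpci in enumerate(bound)}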
@@ -458,14 +408,17 @@ class ClientResourceHelper(ResourceHelper):
self.client = None
self.client_started = Value('i', 0)
- self.my_ports = None
+ self.all_ports = None
self._queue = Queue()
self._result = {}
self._terminated = Value('i', 0)
self._vpci_ascending = None
def _build_ports(self):
- self.my_ports = [0, 1]
+ self.networks = self.vnfd_helper.port_pairs.networks
+ self.priv_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.priv_ports)
+ self.pub_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.pub_ports)
+ self.all_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.all_ports)
def get_stats(self, *args, **kwargs):
try:
@@ -474,8 +427,9 @@ class ClientResourceHelper(ResourceHelper):
LOG.exception("TRex client not connected")
return {}
- def generate_samples(self, key=None, default=None):
- last_result = self.get_stats(self.my_ports)
+ def generate_samples(self, ports, key=None, default=None):
+ # needs to be used ports
+ last_result = self.get_stats(ports)
key_value = last_result.get(key, default)
if not isinstance(last_result, Mapping): # added for mock unit test
@@ -483,27 +437,29 @@ class ClientResourceHelper(ResourceHelper):
return {}
samples = {}
- for vpci_idx, vpci in enumerate(self._vpci_ascending):
- name = self.vnfd_helper.find_virtual_interface(vpci=vpci)["name"]
- # fixme: VNFDs KPIs values needs to be mapped to TRex structure
- xe_value = last_result.get(vpci_idx, {})
- samples[name] = {
- "rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
- "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
- "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
- "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
- "in_packets": int(xe_value.get("ipackets", 0)),
- "out_packets": int(xe_value.get("opackets", 0)),
- }
- if key:
- samples[name][key] = key_value
+ # recalculate port for interface and see if it matches ports provided
+ for intf in self.vnfd_helper.interfaces:
+ name = intf["name"]
+ port = self.vnfd_helper.port_num(name)
+ if port in ports:
+ xe_value = last_result.get(port, {})
+ samples[name] = {
+ "rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
+ "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
+ "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
+ "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
+ "in_packets": int(xe_value.get("ipackets", 0)),
+ "out_packets": int(xe_value.get("opackets", 0)),
+ }
+ if key:
+ samples[name][key] = key_value
return samples
def _run_traffic_once(self, traffic_profile):
- traffic_profile.execute(self)
+ traffic_profile.execute_traffic(self)
self.client_started.value = 1
time.sleep(self.RUN_DURATION)
- samples = self.generate_samples()
+ samples = self.generate_samples(traffic_profile.ports)
time.sleep(self.QUEUE_WAIT_TIME)
self._queue.put(samples)
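generate_samples() now takes the ports the traffic profile actually drove, so interfaces that exist only to satisfy TRex's even-port requirement produce no KPI entries. A self-contained sketch of that filtering, using the same stat keys as the hunk above:
def filter_port_samples(interfaces, port_num, last_result, used_ports):
    # interfaces: list of {'name': ...}; port_num: name -> DPDK port id;
    # last_result: {port_id: {'rx_pps': ..., 'tx_pps': ..., ...}}
    samples = {}
    for intf in interfaces:
        name = intf['name']
        port = port_num(name)
        if port not in used_ports:
            continue  # interface exists but the profile never drove it
        xe_value = last_result.get(port, {})
        samples[name] = {
            'rx_throughput_fps': float(xe_value.get('rx_pps', 0.0)),
            'tx_throughput_fps': float(xe_value.get('tx_pps', 0.0)),
        }
    return samples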
@@ -513,14 +469,14 @@ class ClientResourceHelper(ResourceHelper):
try:
self._build_ports()
self.client = self._connect()
- self.client.reset(ports=self.my_ports)
- self.client.remove_all_streams(self.my_ports) # remove all streams
+ self.client.reset(ports=self.all_ports)
+ self.client.remove_all_streams(self.all_ports) # remove all streams
traffic_profile.register_generator(self)
while self._terminated.value == 0:
self._run_traffic_once(traffic_profile)
- self.client.stop(self.my_ports)
+ self.client.stop(self.all_ports)
self.client.disconnect()
self._terminated.value = 0
except STLError:
@@ -534,12 +490,12 @@ class ClientResourceHelper(ResourceHelper):
def clear_stats(self, ports=None):
if ports is None:
- ports = self.my_ports
+ ports = self.all_ports
self.client.clear_stats(ports=ports)
def start(self, ports=None, *args, **kwargs):
if ports is None:
- ports = self.my_ports
+ ports = self.all_ports
self.client.start(ports=ports, *args, **kwargs)
def collect_kpi(self):
@@ -730,7 +686,6 @@ class SampleVNF(GenericVNF):
self.resource_helper = resource_helper_type(self.setup_helper)
- self.all_ports = None
self.context_cfg = None
self.nfvi_context = None
self.pipeline_kwargs = {}
@@ -742,11 +697,17 @@ class SampleVNF(GenericVNF):
self.q_out = Queue()
self.queue_wrapper = None
self.run_kwargs = {}
- self.tg_port_pairs = None
self.used_drivers = {}
self.vnf_port_pairs = None
self._vnf_process = None
+ def _build_ports(self):
+ self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
+ self.networks = self._port_pairs.networks
+ self.priv_ports = self.vnfd_helper.port_nums(self._port_pairs.priv_ports)
+ self.pub_ports = self.vnfd_helper.port_nums(self._port_pairs.pub_ports)
+ self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports)
+
def _get_route_data(self, route_index, route_type):
route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', []))
for _ in range(route_index):
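_build_ports() above resolves the topology through PortPairs into private, public and combined port lists. Purely for illustration, a sketch of that resolution based on the vld_id naming convention visible in the removed vpe_vnf.py code further down in this diff; the real logic lives in yardstick/network_services/helpers/samplevnf_helper.py:
def split_ports_by_vld(interfaces):
    # private* vld_ids feed the upstream side, public* vld_ids the downstream side
    priv_ports = []
    pub_ports = []
    for intf in interfaces:
        vld_id = intf['virtual-interface'].get('vld_id', '')
        if vld_id.startswith('private'):
            priv_ports.append(intf['name'])
        elif vld_id.startswith('public'):
            pub_ports.append(intf['name'])
    return priv_ports, pub_ports, priv_ports + pub_ports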
@@ -825,6 +786,8 @@ class SampleVNF(GenericVNF):
LOG.info("Waiting for %s VNF to start.. ", self.APP_NAME)
time.sleep(1)
+ # put newline to force new prompt?
+ self.q_in.put("\r\n")
def _build_run_kwargs(self):
self.run_kwargs = {
@@ -925,7 +888,6 @@ class SampleVNFTrafficGen(GenericTrafficGen):
self.runs_traffic = True
self.traffic_finished = False
- self.tg_port_pairs = None
self._tg_process = None
self._traffic_process = None
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ping.py b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
index e65296287..9cd9f2574 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_ping.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
@@ -23,6 +23,7 @@ from ipaddress import IPv4Interface
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
LOG = logging.getLogger(__name__)
@@ -59,7 +60,38 @@ class PingParser(object):
class PingSetupEnvHelper(DpdkVnfSetupEnvHelper):
def setup_vnf_environment(self):
- self._bind_kernel_devices()
+ for intf in self.vnfd_helper.interfaces:
+ vi = intf['virtual-interface']
+ vi['local_iface_name'] = self.get_local_iface_name_by_vpci(vi['vpci'])
+
+
+class PingResourceHelper(ClientResourceHelper):
+
+ def __init__(self, setup_helper):
+ super(PingResourceHelper, self).__init__(setup_helper)
+ self._queue = Queue()
+ self._parser = PingParser(self._queue)
+
+ def run_traffic(self, traffic_profile):
+ # drop the connection in order to force a new one
+ self.ssh_helper.drop_connection()
+
+ self.client_started.value = 1
+ cmd_list = [
+ "sudo ip addr flush {local_if_name}",
+ "sudo ip addr add {local_ip}/24 dev {local_if_name}",
+ "sudo ip link set {local_if_name} up",
+ ]
+
+ self.cmd_kwargs['packet_size'] = traffic_profile.params['traffic_profile']['frame_size']
+
+ for cmd in cmd_list:
+ self.ssh_helper.execute(cmd.format(**self.cmd_kwargs))
+
+ ping_cmd = "nohup ping -s {packet_size} {target_ip}&"
+ self.ssh_helper.run(ping_cmd.format(**self.cmd_kwargs),
+ stdout=self._parser,
+ keep_stdin_open=True, pty=True)
class PingTrafficGen(SampleVNFTrafficGen):
@@ -69,16 +101,17 @@ class PingTrafficGen(SampleVNFTrafficGen):
"""
TG_NAME = 'Ping'
+ APP_NAME = 'Ping'
RUN_WAIT = 4
def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = PingSetupEnvHelper
+ if resource_helper_type is None:
+ resource_helper_type = PingResourceHelper
super(PingTrafficGen, self).__init__(name, vnfd, setup_env_helper_type,
resource_helper_type)
- self._queue = Queue()
- self._parser = PingParser(self._queue)
self._result = {}
def scale(self, flavor=""):
@@ -89,12 +122,23 @@ class PingTrafficGen(SampleVNFTrafficGen):
return self._tg_process.is_alive()
def instantiate(self, scenario_cfg, context_cfg):
+ self._start_server()
self._result = {
"packets_received": 0,
"rtt": 0,
}
+ intf = self.vnfd_helper.interfaces[0]["virtual-interface"]
+ self.resource_helper.cmd_kwargs = {
+ 'target_ip': IPv4Interface(intf["dst_ip"]).ip.exploded,
+ 'local_ip': IPv4Interface(intf["local_ip"]).ip.exploded,
+ 'local_if_name': intf["local_iface_name"].split('/')[0],
+ }
+
self.setup_helper.setup_vnf_environment()
+ def wait_for_instantiate(self):
+ pass
+
def listen_traffic(self, traffic_profile):
""" Not needed for ping
@@ -102,27 +146,3 @@ class PingTrafficGen(SampleVNFTrafficGen):
:return:
"""
pass
-
- def _traffic_runner(self, traffic_profile):
- intf = self.vnfd_helper.interfaces[0]["virtual-interface"]
- profile = traffic_profile.params["traffic_profile"]
- cmd_kwargs = {
- 'target_ip': IPv4Interface(intf["dst_ip"]).ip.exploded,
- 'local_ip': IPv4Interface(intf["local_ip"]).ip.exploded,
- 'local_if_name': intf["local_iface_name"].split('/')[0],
- 'packet_size': profile["frame_size"],
- }
-
- cmd_list = [
- "sudo ip addr flush {local_if_name}",
- "sudo ip addr add {local_ip}/24 dev {local_if_name}",
- "sudo ip link set {local_if_name} up",
- ]
-
- for cmd in cmd_list:
- self.ssh_helper.execute(cmd.format(**cmd_kwargs))
-
- ping_cmd = "ping -s {packet_size} {target_ip}"
- self.ssh_helper.run(ping_cmd.format(**cmd_kwargs),
- stdout=self._parser,
- keep_stdin_open=True, pty=True)
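The ping traffic runner now renders its commands from cmd_kwargs built at instantiate() time, with only the packet size coming from the traffic profile. A runnable sketch of that rendering with hypothetical values (interface name and addresses are placeholders, not taken from this change):
# hypothetical values; the real kwargs come from the first VNFD interface
cmd_kwargs = {
    'local_if_name': 'ens4',
    'local_ip': '10.0.0.1',
    'target_ip': '10.0.0.2',
    'packet_size': 64,
}
setup_cmds = [
    "sudo ip addr flush {local_if_name}",
    "sudo ip addr add {local_ip}/24 dev {local_if_name}",
    "sudo ip link set {local_if_name} up",
]
rendered = [cmd.format(**cmd_kwargs) for cmd in setup_cmds]
ping_cmd = "nohup ping -s {packet_size} {target_ip}&".format(**cmd_kwargs)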
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
index c266f2c0f..40eda753f 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
@@ -56,7 +56,6 @@ class ProxTrafficGen(SampleVNFTrafficGen):
self.runs_traffic = True
self.traffic_finished = False
- self.tg_port_pairs = None
self._tg_process = None
self._traffic_process = None
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index a52416dd9..93e496969 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -58,19 +58,12 @@ class IxiaResourceHelper(ClientResourceHelper):
rfc_helper_type = IxiaRfc2544Helper
self.rfc_helper = rfc_helper_type(self.scenario_helper)
- self.tg_port_pairs = []
self.priv_ports = None
self.pub_ports = None
def _connect(self, client=None):
self.client._connect(self.vnfd_helper)
- def _build_ports(self):
- # self.generate_port_pairs(self.topology)
- self.priv_ports = [int(x[0][2:]) for x in self.tg_port_pairs]
- self.pub_ports = [int(x[1][2:]) for x in self.tg_port_pairs]
- self.my_ports = list(set(self.priv_ports).union(set(self.pub_ports)))
-
def get_stats(self, *args, **kwargs):
return self.client.ix_get_statistics()
@@ -79,33 +72,37 @@ class IxiaResourceHelper(ClientResourceHelper):
if self.client and self.client.ixnet:
self.client.ix_stop_traffic()
- def generate_samples(self, key=None, default=None):
+ def generate_samples(self, ports, key=None, default=None):
stats = self.get_stats()
last_result = stats[1]
latency = stats[0]
samples = {}
- for vpci_idx, interface in enumerate(self.vnfd_helper.interfaces):
+ for interface in self.vnfd_helper.interfaces:
try:
- name = "xe{0}".format(vpci_idx)
- samples[name] = {
- "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][vpci_idx]),
- "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][vpci_idx]),
- "rx_throughput_mbps": float(last_result["Rx_Rate_Mbps"][vpci_idx]),
- "tx_throughput_mbps": float(last_result["Tx_Rate_Mbps"][vpci_idx]),
- "in_packets": int(last_result["Valid_Frames_Rx"][vpci_idx]),
- "out_packets": int(last_result["Frames_Tx"][vpci_idx]),
- "RxThroughput": int(last_result["Valid_Frames_Rx"][vpci_idx]) / 30,
- "TxThroughput": int(last_result["Frames_Tx"][vpci_idx]) / 30,
- }
- if key:
- avg_latency = latency["Store-Forward_Avg_latency_ns"][vpci_idx]
- min_latency = latency["Store-Forward_Min_latency_ns"][vpci_idx]
- max_latency = latency["Store-Forward_Max_latency_ns"][vpci_idx]
- samples[name][key] = \
- {"Store-Forward_Avg_latency_ns": avg_latency,
- "Store-Forward_Min_latency_ns": min_latency,
- "Store-Forward_Max_latency_ns": max_latency}
+ name = interface["name"]
+ # this is not DPDK port num, but this is whatever number we gave
+ # when we selected ports and programmed the profile
+ port = self.vnfd_helper.port_num(name)
+ if port in ports:
+ samples[name] = {
+ "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][port]),
+ "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][port]),
+ "rx_throughput_mbps": float(last_result["Rx_Rate_Mbps"][port]),
+ "tx_throughput_mbps": float(last_result["Tx_Rate_Mbps"][port]),
+ "in_packets": int(last_result["Valid_Frames_Rx"][port]),
+ "out_packets": int(last_result["Frames_Tx"][port]),
+ "RxThroughput": int(last_result["Valid_Frames_Rx"][port]) / 30,
+ "TxThroughput": int(last_result["Frames_Tx"][port]) / 30,
+ }
+ if key:
+ avg_latency = latency["Store-Forward_Avg_latency_ns"][port]
+ min_latency = latency["Store-Forward_Min_latency_ns"][port]
+ max_latency = latency["Store-Forward_Max_latency_ns"][port]
+ samples[name][key] = \
+ {"Store-Forward_Avg_latency_ns": avg_latency,
+ "Store-Forward_Min_latency_ns": min_latency,
+ "Store-Forward_Max_latency_ns": max_latency}
except IndexError:
pass
@@ -132,6 +129,7 @@ class IxiaResourceHelper(ClientResourceHelper):
self.client.ix_assign_ports()
mac = {}
+ # TODO: shouldn't this index map to the port number we used to generate the profile?
for index, interface in enumerate(self.vnfd_helper.interfaces, 1):
virt_intf = interface["virtual-interface"]
mac.update({
@@ -145,11 +143,11 @@ class IxiaResourceHelper(ClientResourceHelper):
self.scenario_helper.scenario_cfg["task_path"])
# Generate ixia traffic config...
while not self._terminated.value:
- traffic_profile.execute(self, self.client, mac, ixia_file)
+ traffic_profile.execute_traffic(self, self.client, mac, ixia_file)
self.client_started.value = 1
time.sleep(WAIT_FOR_TRAFFIC)
self.client.ix_stop_traffic()
- samples = self.generate_samples()
+ samples = self.generate_samples(traffic_profile.ports)
self._queue.put(samples)
status, samples = traffic_profile.get_drop_percentage(self, samples, min_tol,
max_tol, self.client, mac,
@@ -167,11 +165,11 @@ class IxiaResourceHelper(ClientResourceHelper):
self._terminated.value = 1
return
- traffic_profile.execute(self, self.client, mac, ixia_file)
+ traffic_profile.execute_traffic(self, self.client, mac, ixia_file)
for _ in range(5):
time.sleep(self.LATENCY_TIME_SLEEP)
self.client.ix_stop_traffic()
- samples = self.generate_samples('latency', {})
+ samples = self.generate_samples(traffic_profile.ports, 'latency', {})
self._queue.put(samples)
traffic_profile.start_ixia_latency(self, self.client, mac, ixia_file)
if self._terminated.value:
@@ -197,7 +195,6 @@ class IxiaTrafficGen(SampleVNFTrafficGen):
resource_helper_type)
self._ixia_traffic_gen = None
self.ixia_file_name = ''
- self.tg_port_pairs = []
self.vnf_port_pairs = []
def _check_status(self):
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index 15c9c0e1d..4e9f4bdc1 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -18,9 +18,7 @@ from __future__ import print_function
import time
import logging
from collections import Mapping
-from itertools import chain
-from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexTrafficGen
from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexResourceHelper
@@ -47,23 +45,15 @@ class TrexRfcResourceHelper(TrexResourceHelper):
rfc_helper_type = TrexRfc2544ResourceHelper
self.rfc2544_helper = rfc_helper_type(self.scenario_helper)
- # self.tg_port_pairs = []
-
- def _build_ports(self):
- self.tg_port_pairs, self.networks = MultiPortConfig.get_port_pairs(
- self.vnfd_helper.interfaces)
- self.priv_ports = [int(x[0][2:]) for x in self.tg_port_pairs]
- self.pub_ports = [int(x[1][2:]) for x in self.tg_port_pairs]
- self.my_ports = list(set(chain(self.priv_ports, self.pub_ports)))
def _run_traffic_once(self, traffic_profile):
if self._terminated.value:
return
- traffic_profile.execute(self)
+ traffic_profile.execute_traffic(self)
self.client_started.value = 1
time.sleep(self.RUN_DURATION)
- self.client.stop(self.my_ports)
+ self.client.stop(traffic_profile.ports)
time.sleep(self.WAIT_TIME)
samples = traffic_profile.get_drop_percentage(self)
self._queue.put(samples)
@@ -71,30 +61,30 @@ class TrexRfcResourceHelper(TrexResourceHelper):
if not self.rfc2544_helper.is_done():
return
- self.client.stop(self.my_ports)
- self.client.reset(ports=self.my_ports)
- self.client.remove_all_streams(self.my_ports)
- traffic_profile.execute_latency(samples=samples)
+ self.client.stop(traffic_profile.ports)
+ self.client.reset(ports=traffic_profile.ports)
+ self.client.remove_all_streams(traffic_profile.ports)
+ traffic_profile.execute_traffic_latency(samples=samples)
multiplier = traffic_profile.calculate_pps(samples)[1]
for _ in range(5):
time.sleep(self.LATENCY_TIME_SLEEP)
- self.client.stop(self.my_ports)
+ self.client.stop(traffic_profile.ports)
time.sleep(self.WAIT_TIME)
- last_res = self.client.get_stats(self.my_ports)
+ last_res = self.client.get_stats(traffic_profile.ports)
if not isinstance(last_res, Mapping):
self._terminated.value = 1
continue
- self.generate_samples('latency', {})
+ self.generate_samples(traffic_profile.ports, 'latency', {})
self._queue.put(samples)
self.client.start(mult=str(multiplier),
- ports=self.my_ports,
+ ports=traffic_profile.ports,
duration=120, force=True)
- def start_client(self, mult, duration, force=True):
- self.client.start(ports=self.my_ports, mult=mult, duration=duration, force=force)
+ def start_client(self, ports, mult=None, duration=None, force=True):
+ self.client.start(ports=ports, mult=mult, duration=duration, force=force)
- def clear_client_stats(self):
- self.client.clear_stats(ports=self.my_ports)
+ def clear_client_stats(self, ports):
+ self.client.clear_stats(ports=ports)
def collect_kpi(self):
self.rfc2544_helper.iteration.value += 1
diff --git a/yardstick/network_services/vnf_generic/vnf/udp_replay.py b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
index a9bc204d5..88773387e 100644
--- a/yardstick/network_services/vnf_generic/vnf/udp_replay.py
+++ b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
@@ -19,14 +19,22 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
+
LOG = logging.getLogger(__name__)
# UDP_Replay should work the same on all systems, we can provide the binary
+
+# we can't match the prompt regexp due to extra noise
+# yardstick.ssh ssh.py:302 DEBUG stdout: UDP_Replay: lcore 0 has nothing to do
+# eplUDP_Replay: -- lcoreid=1 portid=0 rxqueueid=0
+# ay>
+#
+# try decreasing log level to RTE_LOG_NOTICE (5)
REPLAY_PIPELINE_COMMAND = (
- """sudo {tool_path} -c {cpu_mask_hex} -n 4 -w {whitelist} -- """
- """{hw_csum} -p {ports_len_hex} --config='{config}'"""
+ """sudo {tool_path} --log-level=5 -c {cpu_mask_hex} -n 4 -w {whitelist} -- """
+ """{hw_csum} -p {port_mask_hex} --config='{config}'"""
)
-# {tool_path} -p {ports_len_hex} -f {cfg_file} -s {script}'
+# {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
class UdpReplaySetupEnvHelper(DpdkVnfSetupEnvHelper):
@@ -42,7 +50,8 @@ class UdpReplayApproxVnf(SampleVNF):
APP_NAME = "UDP_Replay"
APP_WORD = "UDP_Replay"
- VNF_PROMPT = 'Replay>'
+ # buffering issue?
+ VNF_PROMPT = 'eplay>'
VNF_TYPE = 'UdpReplay'
@@ -60,36 +69,30 @@ class UdpReplayApproxVnf(SampleVNF):
super(UdpReplayApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
resource_helper_type)
- def _start_server(self):
- super(UdpReplayApproxVnf, self)._start_server()
- self.resource_helper.start()
-
- def scale(self, flavor=""):
- """ scale vnfbased on flavor input """
- raise NotImplementedError
-
- def _deploy(self):
- self.generate_port_pairs()
- super(UdpReplayApproxVnf, self)._deploy()
-
def _build_pipeline_kwargs(self):
- all_ports = [i for i, _ in enumerate(self.vnfd_helper.interfaces)]
- number_of_ports = len(all_ports)
+ ports = self.vnfd_helper.port_pairs.all_ports
+ number_of_ports = len(ports)
tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
- ports_mask = 2 ** number_of_ports - 1
- ports_mask_hex = hex(ports_mask)
+ port_nums = self.vnfd_helper.port_nums(ports)
+ ports_mask_hex = hex(sum(2 ** num for num in port_nums))
+ # one core extra for master
cpu_mask_hex = hex(2 ** (number_of_ports + 1) - 1)
hw_csum = ""
if (not self.scenario_helper.options.get('hw_csum', False) or
self.nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES):
hw_csum = '--no-hw-csum'
- config_value = "".join(str((port, 0, port + 1)) for port in all_ports)
+ # tuples of (FLD_PORT, FLD_QUEUE, FLD_LCORE)
+ # [--config (port,queue,lcore)[,(port,queue,lcore)]]
+ # start with lcore = 1 since we use lcore=0 for master
+ config_value = ",".join(
+ str((self.vnfd_helper.port_num(port), 0, core)).replace(" ", "") for core, port in
+ enumerate(self.vnfd_helper.port_pairs.all_ports, 1))
whitelist = " -w ".join(self.setup_helper.bound_pci)
self.pipeline_kwargs = {
- 'ports_len_hex': ports_mask_hex,
+ 'port_mask_hex': ports_mask_hex,
'tool_path': tool_path,
'hw_csum': hw_csum,
'whitelist': whitelist,
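The --config value pairs each DPDK port with queue 0 and its own lcore, starting at lcore 1 because lcore 0 is reserved for the master core. A self-contained sketch of the string the join above produces:
def replay_config_value(dpdk_port_nums):
    # tuples of (FLD_PORT, FLD_QUEUE, FLD_LCORE); lcore 0 is left to the master
    return ",".join("({},{},{})".format(port, 0, core)
                    for core, port in enumerate(dpdk_port_nums, 1))

# e.g. DPDK ports [0, 1] -> "(0,0,1),(1,0,2)"
assert replay_config_value([0, 1]) == "(0,0,1),(1,0,2)"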
@@ -105,7 +108,7 @@ class UdpReplayApproxVnf(SampleVNF):
def get_sum(offset):
return sum(int(i) for i in split_stats[offset::5])
- number_of_ports = len(self.vnfd_helper.interfaces)
+ number_of_ports = len(self.vnfd_helper.port_pairs.all_ports)
stats = self.get_stats()
stats_words = stats.split()
diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
index 32a08c7bd..6c95648ce 100644
--- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
@@ -22,7 +22,7 @@ from yardstick.network_services.yang_model import YangModel
LOG = logging.getLogger(__name__)
# vFW should work the same on all systems, we can provide the binary
-FW_PIPELINE_COMMAND = """sudo {tool_path} -p {ports_len_hex} -f {cfg_file} -s {script}"""
+FW_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
FW_COLLECT_KPI = (r"""VFW TOTAL:[^p]+pkts_received"?:\s(\d+),[^p]+pkts_fw_forwarded"?:\s(\d+),"""
r"""[^p]+pkts_drop_fw"?:\s(\d+),\s""")
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index 310ab67cb..72c1514f1 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -15,6 +15,8 @@
from __future__ import absolute_import
from __future__ import print_function
+
+
import os
import logging
import re
@@ -22,17 +24,17 @@ import posixpath
from six.moves import configparser, zip
+from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.pipeline import PipelineRules
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
LOG = logging.getLogger(__name__)
-VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {ports_len_hex} -f {cfg_file} -s {script}"""
+VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
VPE_COLLECT_KPI = """\
Pkts in:\s(\d+)\r\n\
-\tPkts dropped by Pkts in:\s(\d+)\r\n\
-\tPkts dropped by AH:\s(\d+)\r\n\\
+\tPkts dropped by AH:\s(\d+)\r\n\
\tPkts dropped by other:\s(\d+)\
"""
@@ -92,24 +94,23 @@ class ConfigCreate(object):
pktq = "SWQ{0}{1}".format(self.sw_q, sink)
return pktq
- def vpe_upstream(self, vnf_cfg, intf):
+ def vpe_upstream(self, vnf_cfg, index=0):
parser = configparser.ConfigParser()
parser.read(os.path.join(vnf_cfg, 'vpe_upstream'))
+
for pipeline in parser.sections():
for k, v in parser.items(pipeline):
if k == "pktq_in":
- index = intf['index']
if "RXQ" in v:
- value = "RXQ{0}.0".format(index)
+ value = "RXQ{0}.0".format(self.priv_ports[index])
else:
value = self.get_sink_swq(parser, pipeline, k, index)
parser.set(pipeline, k, value)
elif k == "pktq_out":
- index = intf['peer_intf']['index']
if "TXQ" in v:
- value = "TXQ{0}.0".format(index)
+ value = "TXQ{0}.0".format(self.pub_ports[index])
else:
self.sw_q += 1
value = self.get_sink_swq(parser, pipeline, k, index)
@@ -123,21 +124,19 @@ class ConfigCreate(object):
self.n_pipeline += 1
return parser
- def vpe_downstream(self, vnf_cfg, intf):
+ def vpe_downstream(self, vnf_cfg, index):
parser = configparser.ConfigParser()
parser.read(os.path.join(vnf_cfg, 'vpe_downstream'))
for pipeline in parser.sections():
for k, v in parser.items(pipeline):
- index = intf['dpdk_port_num']
- peer_index = intf['peer_intf']['dpdk_port_num']
if k == "pktq_in":
if "RXQ" not in v:
value = self.get_sink_swq(parser, pipeline, k, index)
elif "TM" in v:
- value = "RXQ{0}.0 TM{1}".format(peer_index, index)
+ value = "RXQ{0}.0 TM{1}".format(self.pub_ports[index], index)
else:
- value = "RXQ{0}.0".format(peer_index)
+ value = "RXQ{0}.0".format(self.pub_ports[index])
parser.set(pipeline, k, value)
@@ -146,9 +145,9 @@ class ConfigCreate(object):
self.sw_q += 1
value = self.get_sink_swq(parser, pipeline, k, index)
elif "TM" in v:
- value = "TXQ{0}.0 TM{1}".format(peer_index, index)
+ value = "TXQ{0}.0 TM{1}".format(self.priv_ports[index], index)
else:
- value = "TXQ{0}.0".format(peer_index)
+ value = "TXQ{0}.0".format(self.priv_ports[index])
parser.set(pipeline, k, value)
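The upstream and downstream pipelines are now wired per port-pair index rather than per interface dict: upstream reads the private port's RXQ and writes the public port's TXQ, and downstream does the reverse. An illustrative sketch of that queue selection, assuming priv_ports and pub_ports are indexable by pair:
def vpe_pair_queues(priv_ports, pub_ports, index):
    # mirrors the RXQ/TXQ choices in the vpe_upstream / vpe_downstream hunks above
    upstream = ("RXQ{0}.0".format(priv_ports[index]),
                "TXQ{0}.0".format(pub_ports[index]))
    downstream = ("RXQ{0}.0".format(pub_ports[index]),
                  "TXQ{0}.0".format(priv_ports[index]))
    return upstream, downstream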
@@ -166,10 +165,10 @@ class ConfigCreate(object):
config = self.vpe_initialize(config)
config = self.vpe_rxq(config)
config.write(cfg_file)
- for index, priv_port in enumerate(self.priv_ports):
- config = self.vpe_upstream(vnf_cfg, priv_port)
+ for index in range(0, len(self.priv_ports)):
+ config = self.vpe_upstream(vnf_cfg, index)
config.write(cfg_file)
- config = self.vpe_downstream(vnf_cfg, priv_port)
+ config = self.vpe_downstream(vnf_cfg, index)
config = self.vpe_tmq(config, index)
config.write(cfg_file)
@@ -199,36 +198,41 @@ class ConfigCreate(object):
return rules.get_string()
+ def generate_tm_cfg(self, vnf_cfg, index=0):
+ vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg")
+ if os.path.exists(vnf_cfg):
+ return open(vnf_cfg).read()
+
class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
+ APP_NAME = 'vPE_vnf'
CFG_CONFIG = "/tmp/vpe_config"
CFG_SCRIPT = "/tmp/vpe_script"
+ TM_CONFIG = "/tmp/full_tm_profile_10G.cfg"
CORES = ['0', '1', '2', '3', '4', '5']
PIPELINE_COMMAND = VPE_PIPELINE_COMMAND
+ def _build_vnf_ports(self):
+ self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
+ self.priv_ports = self._port_pairs.priv_ports
+ self.pub_ports = self._port_pairs.pub_ports
+ self.all_ports = self._port_pairs.all_ports
+
def build_config(self):
vpe_vars = {
"bin_path": self.ssh_helper.bin_path,
"socket": self.socket,
}
- all_ports = []
- priv_ports = []
- pub_ports = []
- for interface in self.vnfd_helper.interfaces:
- all_ports.append(interface['name'])
- vld_id = interface['virtual-interface']['vld_id']
- if vld_id.startswith('private'):
- priv_ports.append(interface)
- elif vld_id.startswith('public'):
- pub_ports.append(interface)
-
- vpe_conf = ConfigCreate(priv_ports, pub_ports, self.socket)
+ self._build_vnf_ports()
+ vpe_conf = ConfigCreate(self.vnfd_helper.port_pairs.priv_ports,
+ self.vnfd_helper.port_pairs.pub_ports, self.socket)
vpe_conf.create_vpe_config(self.scenario_helper.vnf_cfg)
config_basename = posixpath.basename(self.CFG_CONFIG)
script_basename = posixpath.basename(self.CFG_SCRIPT)
+ tm_basename = posixpath.basename(self.TM_CONFIG)
with open(self.CFG_CONFIG) as handle:
vpe_config = handle.read()
@@ -237,6 +241,15 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
self.ssh_helper.upload_config_file(script_basename, vpe_script.format(**vpe_vars))
+ tm_config = vpe_conf.generate_tm_cfg(self.scenario_helper.vnf_cfg)
+ self.ssh_helper.upload_config_file(tm_basename, tm_config)
+
+ LOG.info("Provision and start the %s", self.APP_NAME)
+ LOG.info(self.CFG_CONFIG)
+ LOG.info(self.CFG_SCRIPT)
+ self._build_pipeline_kwargs()
+ return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
+
class VpeApproxVnf(SampleVNF):
""" This class handles vPE VNF model-driver definitions """