Diffstat (limited to 'yardstick/network_services')
-rw-r--r--  yardstick/network_services/collector/subscriber.py  49
-rw-r--r--  yardstick/network_services/constants.py  20
-rw-r--r--  yardstick/network_services/helpers/cpu.py  103
-rw-r--r--  yardstick/network_services/helpers/dpdkbindnic_helper.py  332
-rw-r--r--  yardstick/network_services/helpers/samplevnf_helper.py  121
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/__init__.py (renamed from yardstick/network_services/libs/ixia_libs/IxNet/__init__.py)  0
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py  53
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py  688
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py  68
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py  88
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py  58
-rw-r--r--  yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py  344
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/__init__.py  0
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py  1132
-rw-r--r--  yardstick/network_services/nfvi/resource.py  149
-rw-r--r--  yardstick/network_services/pipeline.py  13
-rw-r--r--  yardstick/network_services/traffic_profile/__init__.py  38
-rw-r--r--  yardstick/network_services/traffic_profile/base.py  70
-rw-r--r--  yardstick/network_services/traffic_profile/http.py  4
-rw-r--r--  yardstick/network_services/traffic_profile/http_ixload.py  171
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py  485
-rw-r--r--  yardstick/network_services/traffic_profile/landslide_profile.py  47
-rw-r--r--  yardstick/network_services/traffic_profile/pktgen.py  61
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py  135
-rw-r--r--  yardstick/network_services/traffic_profile/prox_irq.py  48
-rw-r--r--  yardstick/network_services/traffic_profile/prox_profile.py  25
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py  474
-rw-r--r--  yardstick/network_services/traffic_profile/sip.py  32
-rw-r--r--  yardstick/network_services/traffic_profile/trex_traffic_profile.py (renamed from yardstick/network_services/traffic_profile/traffic_profile.py)  234
-rw-r--r--  yardstick/network_services/traffic_profile/vpp_rfc2544.py  339
-rw-r--r--  yardstick/network_services/utils.py  4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py  216
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py  46
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py  39
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py  10
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/epc_vnf.py  53
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py  498
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py  517
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_irq.py  200
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py  90
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/router_vnf.py  6
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py  536
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py  143
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_ixload.py  75
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_landslide.py  1226
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_ping.py  2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_pktgen.py  88
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_prox.py  31
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py  828
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py  145
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_trex.py  34
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py  178
-rwxr-xr-x  yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py  215
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/udp_replay.py  17
-rwxr-xr-x  yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py  273
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vfw_vnf.py  24
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vims_vnf.py  105
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py  62
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py  195
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpp_helpers.py  751
-rw-r--r--  yardstick/network_services/yang_model.py  108
61 files changed, 10239 insertions, 1857 deletions
diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py
index 4dc5a796e..0c6d97771 100644
--- a/yardstick/network_services/collector/subscriber.py
+++ b/yardstick/network_services/collector/subscriber.py
@@ -11,63 +11,66 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module implements stub for publishing results in yardstick format."""
+
import logging
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.utils import get_nsb_option
+
LOG = logging.getLogger(__name__)
class Collector(object):
"""Class that handles dictionary of results in yardstick-plot format."""
- def __init__(self, vnfs, nodes, traffic_profile, timeout=3600):
+ def __init__(self, vnfs, contexts_nodes, timeout=3600):
super(Collector, self).__init__()
- self.traffic_profile = traffic_profile
self.vnfs = vnfs
- self.nodes = nodes
- self.timeout = timeout
+ self.nodes = contexts_nodes
self.bin_path = get_nsb_option('bin_path', '')
- self.resource_profiles = {node_name: ResourceProfile.make_from_node(node, self.timeout)
- for node_name, node in self.nodes.items()
- if node.get("collectd")}
+ self.resource_profiles = {}
+
+ for ctx_name, nodes in ((ctx_name, nodes) for (ctx_name, nodes)
+ in contexts_nodes.items() if nodes):
+ for node in (node for node in nodes
+ if node and node.get('collectd')):
+ name = ".".join([node['name'], ctx_name])
+ self.resource_profiles.update(
+ {name: ResourceProfile.make_from_node(node, timeout)})
def start(self):
- """Nothing to do, yet"""
for resource in self.resource_profiles.values():
resource.initiate_systemagent(self.bin_path)
resource.start()
resource.amqp_process_for_nfvi_kpi()
+ for vnf in self.vnfs:
+ vnf.start_collect()
+
def stop(self):
- """Nothing to do, yet"""
+ for vnf in self.vnfs:
+ vnf.stop_collect()
+
for resource in self.resource_profiles.values():
resource.stop()
def get_kpi(self):
"""Returns dictionary of results in yardstick-plot format
- :return:
+ :return: (dict) dictionary of kpis collected from the VNFs;
+ the keys are the names of the VNFs.
"""
results = {}
for vnf in self.vnfs:
# Result example:
# {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("collect KPI for %s", vnf.name)
+ LOG.debug("collect KPI for vnf %s", vnf.name)
results[vnf.name] = vnf.collect_kpi()
for node_name, resource in self.resource_profiles.items():
- # Result example:
- # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("collect KPI for %s", node_name)
- if resource.check_if_sa_running("collectd")[0] != 0:
- continue
-
- try:
- results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()}
- LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core'])
- except Exception:
- LOG.exception("")
+ LOG.debug("collect KPI for nfvi_node %s", node_name)
+ results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()}
+ LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core'])
+
return results
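[Editor's sketch] The reworked constructor keys resource profiles as "<node name>.<context name>". A minimal standalone sketch of just that key derivation, with hypothetical node dicts standing in for real context data (ResourceProfile construction elided):

    # Hypothetical input shaped like the contexts_nodes mapping the new
    # Collector.__init__() receives: {context_name: [node_dict, ...]}.
    contexts_nodes = {
        'heat-ctx': [
            {'name': 'vnf_0', 'collectd': {'interval': 5}},
            {'name': 'tg_0'},          # no 'collectd' section -> skipped
        ],
        'node-ctx': None,              # empty context -> skipped entirely
    }

    profiles = {}
    for ctx_name, nodes in ((c, n) for c, n in contexts_nodes.items() if n):
        for node in (n for n in nodes if n and n.get('collectd')):
            # Same "<node name>.<context name>" convention as the code above.
            profiles['.'.join([node['name'], ctx_name])] = node

    print(sorted(profiles))            # ['vnf_0.heat-ctx']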
diff --git a/yardstick/network_services/constants.py b/yardstick/network_services/constants.py
new file mode 100644
index 000000000..5a186be42
--- /dev/null
+++ b/yardstick/network_services/constants.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2016-2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+REMOTE_TMP = "/tmp"
+DEFAULT_VNF_TIMEOUT = 3600
+PROCESS_JOIN_TIMEOUT = 3
+ONE_GIGABIT_IN_BITS = 1000000000
+NIC_GBPS_DEFAULT = 10
+RETRY_TIMEOUT = 5
diff --git a/yardstick/network_services/helpers/cpu.py b/yardstick/network_services/helpers/cpu.py
index 8c21754ff..279af204a 100644
--- a/yardstick/network_services/helpers/cpu.py
+++ b/yardstick/network_services/helpers/cpu.py
@@ -15,11 +15,15 @@
import io
+# Number of threads per core.
+NR_OF_THREADS = 2
+
class CpuSysCores(object):
def __init__(self, connection=""):
self.core_map = {}
+ self.cpuinfo = {}
self.connection = connection
def _open_cpuinfo(self):
@@ -33,11 +37,11 @@ class CpuSysCores(object):
core_lines = {}
for line in lines:
if line.strip():
- name, value = line.split(":", 1)
- core_lines[name.strip()] = value.strip()
+ name, value = line.split(":", 1)
+ core_lines[name.strip()] = value.strip()
else:
- core_details.append(core_lines)
- core_lines = {}
+ core_details.append(core_lines)
+ core_lines = {}
return core_details
@@ -51,7 +55,7 @@ class CpuSysCores(object):
lines = self._open_cpuinfo()
core_details = self._get_core_details(lines)
for core in core_details:
- for k, v in core.items():
+ for k, _ in core.items():
if k == "physical id":
if core["physical id"] not in self.core_map:
self.core_map[core['physical id']] = []
@@ -60,6 +64,17 @@ class CpuSysCores(object):
return self.core_map
+ def get_cpu_layout(self):
+ _, stdout, _ = self.connection.execute("lscpu -p")
+ self.cpuinfo = {}
+ self.cpuinfo['cpuinfo'] = list()
+ for line in stdout.split("\n"):
+ if line and line[0] != "#":
+ self.cpuinfo['cpuinfo'].append(
+ [CpuSysCores._str2int(x) for x in
+ line.split(",")])
+ return self.cpuinfo
+
def validate_cpu_cfg(self, vnf_cfg=None):
if vnf_cfg is None:
vnf_cfg = {
@@ -78,3 +93,81 @@ class CpuSysCores(object):
return -1
return 0
+
+ def is_smt_enabled(self):
+ return CpuSysCores.smt_enabled(self.cpuinfo)
+
+ def cpu_list_per_node(self, cpu_node, smt_used=False):
+ cpu_node = int(cpu_node)
+ cpu_info = self.cpuinfo.get("cpuinfo")
+ if cpu_info is None:
+ raise RuntimeError("Node cpuinfo not available.")
+
+ smt_enabled = self.is_smt_enabled()
+ if not smt_enabled and smt_used:
+ raise RuntimeError("SMT is not enabled.")
+
+ cpu_list = []
+ for cpu in cpu_info:
+ if cpu[3] == cpu_node:
+ cpu_list.append(cpu[0])
+
+ if not smt_enabled or smt_enabled and smt_used:
+ pass
+
+ if smt_enabled and not smt_used:
+ cpu_list_len = len(cpu_list)
+ cpu_list = cpu_list[:int(cpu_list_len / NR_OF_THREADS)]
+
+ return cpu_list
+
+ def cpu_slice_of_list_per_node(self, cpu_node, skip_cnt=0, cpu_cnt=0,
+ smt_used=False):
+ cpu_list = self.cpu_list_per_node(cpu_node, smt_used)
+
+ cpu_list_len = len(cpu_list)
+ if cpu_cnt + skip_cnt > cpu_list_len:
+ raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).")
+
+ if cpu_cnt == 0:
+ cpu_cnt = cpu_list_len - skip_cnt
+
+ if smt_used:
+ cpu_list_0 = cpu_list[:int(cpu_list_len / NR_OF_THREADS)]
+ cpu_list_1 = cpu_list[int(cpu_list_len / NR_OF_THREADS):]
+ cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list_ex = [cpu for cpu in
+ cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list.extend(cpu_list_ex)
+ else:
+ cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]]
+
+ return cpu_list
+
+ def cpu_list_per_node_str(self, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
+ smt_used=False):
+ cpu_list = self.cpu_slice_of_list_per_node(cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=cpu_cnt,
+ smt_used=smt_used)
+ return sep.join(str(cpu) for cpu in cpu_list)
+
+ @staticmethod
+ def _str2int(string):
+ try:
+ return int(string)
+ except ValueError:
+ return 0
+
+ @staticmethod
+ def smt_enabled(cpuinfo):
+ cpu_info = cpuinfo.get("cpuinfo")
+ if cpu_info is None:
+ raise RuntimeError("Node cpuinfo not available.")
+ cpu_mems = [item[-4:] for item in cpu_info]
+ cpu_mems_len = int(len(cpu_mems) / NR_OF_THREADS)
+ count = 0
+ for cpu_mem in cpu_mems[:cpu_mems_len]:
+ if cpu_mem in cpu_mems[cpu_mems_len:]:
+ count += 1
+ return count == cpu_mems_len
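[Editor's sketch] A rough usage example of the new NUMA helpers. The connection object is a stub returning canned "lscpu -p" output (two sockets, two cores each, SMT on); the exact column layout is an assumption about a typical lscpu build:

    from yardstick.network_services.helpers.cpu import CpuSysCores

    class FakeConnection(object):
        # Canned "lscpu -p" output: CPU,Core,Socket,Node,,L1d:L1i:L2:L3
        LSCPU_P = "\n".join([
            "# CPU,Core,Socket,Node,,L1d:L1i:L2:L3",
            "0,0,0,0,,0:0:0:0", "1,1,0,0,,1:1:1:0",
            "2,0,1,1,,2:2:2:1", "3,1,1,1,,3:3:3:1",
            # SMT siblings repeat the Socket/Node/cache columns above
            "4,0,0,0,,0:0:0:0", "5,1,0,0,,1:1:1:0",
            "6,0,1,1,,2:2:2:1", "7,1,1,1,,3:3:3:1",
        ])

        def execute(self, cmd):
            return 0, self.LSCPU_P, ''

    cores = CpuSysCores(FakeConnection())
    cores.get_cpu_layout()
    print(cores.is_smt_enabled())          # True
    print(cores.cpu_list_per_node(0))      # [0, 1] - physical cores only
    print(cores.cpu_list_per_node_str(1, cpu_cnt=1, smt_used=True))  # "2,6"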
diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py
index c07613147..33a5e8c1d 100644
--- a/yardstick/network_services/helpers/dpdkbindnic_helper.py
+++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,11 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-
+import os
import re
-import itertools
+from collections import defaultdict
+from itertools import chain
-import six
+from yardstick.common import exceptions
+from yardstick.common.utils import validate_non_string_sequence
NETWORK_KERNEL = 'network_kernel'
NETWORK_DPDK = 'network_dpdk'
@@ -25,7 +27,6 @@ CRYPTO_KERNEL = 'crypto_kernel'
CRYPTO_DPDK = 'crypto_dpdk'
CRYPTO_OTHER = 'crypto_other'
-
LOG = logging.getLogger(__name__)
@@ -33,15 +34,180 @@ class DpdkBindHelperException(Exception):
pass
+class DpdkInterface(object):
+ TOPOLOGY_REQUIRED_KEYS = frozenset({
+ "vpci", "local_ip", "netmask", "local_mac", "driver"})
+
+ def __init__(self, dpdk_node, interface):
+ super(DpdkInterface, self).__init__()
+ self.dpdk_node = dpdk_node
+ self.interface = interface
+
+ try:
+ assert self.local_mac
+ except (AssertionError, KeyError):
+ raise exceptions.IncorrectConfig(error_msg='')
+
+ @property
+ def local_mac(self):
+ return self.interface['local_mac']
+
+ @property
+ def mac_lower(self):
+ return self.local_mac.lower()
+
+ @property
+ def missing_fields(self):
+ return self.TOPOLOGY_REQUIRED_KEYS.difference(self.interface)
+
+ @staticmethod
+ def _detect_socket(netdev):
+ try:
+ socket = netdev['numa_node']
+ except KeyError:
+ # Where is this documented?
+ # It seems for dual-sockets systems the second socket PCI bridge
+ # will have an address > 0x0f, e.g.
+ # Bridge PCI->PCI (P#524320 busid=0000:80:02.0 id=8086:6f04
+ if netdev['pci_bus_id'][5] == "0":
+ socket = 0
+ else:
+ # this doesn't handle quad-sockets
+ # TODO: fix this for quad-socket
+ socket = 1
+ return socket
+
+ def probe_missing_values(self):
+ try:
+ for netdev in self.dpdk_node.netdevs.values():
+ if netdev['address'].lower() == self.mac_lower:
+ socket = self._detect_socket(netdev)
+ self.interface.update({
+ 'vpci': netdev['pci_bus_id'],
+ 'driver': netdev['driver'],
+ 'socket': socket,
+ # don't need ifindex
+ })
+
+ except KeyError:
+ # if we don't find all the keys then don't update
+ pass
+
+ except (exceptions.IncorrectNodeSetup, exceptions.SSHError,
+ exceptions.SSHTimeout):
+ message = ('Unable to probe missing interface fields "%s", on '
+ 'node %s SSH Error' % (', '.join(self.missing_fields),
+ self.dpdk_node.node_key))
+ raise exceptions.IncorrectConfig(error_msg=message)
+
+
+class DpdkNode(object):
+
+ def __init__(self, node_name, interfaces, ssh_helper, timeout=120):
+ super(DpdkNode, self).__init__()
+ self.interfaces = interfaces
+ self.ssh_helper = ssh_helper
+ self.node_key = node_name
+ self.timeout = timeout
+ self._dpdk_helper = None
+ self.netdevs = {}
+
+ try:
+ self.dpdk_interfaces = {intf['name']: DpdkInterface(self, intf['virtual-interface'])
+ for intf in self.interfaces}
+ except exceptions.IncorrectConfig:
+ template = "MAC address is required for all interfaces, missing on: {}"
+ errors = (intf['name'] for intf in self.interfaces if
+ 'local_mac' not in intf['virtual-interface'])
+ raise exceptions.IncorrectSetup(
+ error_msg=template.format(", ".join(errors)))
+
+ @property
+ def dpdk_helper(self):
+ if not isinstance(self._dpdk_helper, DpdkBindHelper):
+ self._dpdk_helper = DpdkBindHelper(self.ssh_helper)
+ return self._dpdk_helper
+
+ @property
+ def _interface_missing_iter(self):
+ return chain.from_iterable(self._interface_missing_map.values())
+
+ @property
+ def _interface_missing_map(self):
+ return {name: intf.missing_fields for name, intf in self.dpdk_interfaces.items()}
+
+ def _probe_netdevs(self):
+ self.netdevs.update(self.dpdk_helper.find_net_devices())
+
+ def _force_rebind(self):
+ return self.dpdk_helper.force_dpdk_rebind()
+
+ def _probe_dpdk_drivers(self):
+ self.dpdk_helper.probe_real_kernel_drivers()
+ for pci, driver in self.dpdk_helper.real_kernel_interface_driver_map.items():
+ for intf in self.interfaces:
+ vintf = intf['virtual-interface']
+ # stupid substring matches
+ # don't use netdev use interface
+ if vintf['vpci'].endswith(pci):
+ vintf['driver'] = driver
+ # we can't update netdevs because we may not have netdev info
+
+ def _probe_missing_values(self):
+ for intf in self.dpdk_interfaces.values():
+ intf.probe_missing_values()
+
+ def check(self):
+ # only ssh probe if there are missing values
+ # ssh probe won't work on Ixia, so we had better define all our values
+ try:
+ missing_fields_set = set(self._interface_missing_iter)
+
+ # if we are only missing driver then maybe we can get kernel module
+ # this requires vpci
+ if missing_fields_set == {'driver'}:
+ self._probe_dpdk_drivers()
+ # we can't reprobe missing values because we may not have netdev info
+
+ # if there are any other missing then we have to netdev probe
+ if missing_fields_set.difference({'driver'}):
+ self._probe_netdevs()
+ try:
+ self._probe_missing_values()
+ except exceptions.IncorrectConfig:
+ # ignore for now
+ pass
+
+ # check again and verify we have all the fields
+ if set(self._interface_missing_iter):
+ # last chance fallback, rebind everything and probe
+ # this probably won't work
+ self._force_rebind()
+ self._probe_netdevs()
+ self._probe_missing_values()
+
+ errors = ("{} missing: {}".format(name, ", ".join(missing_fields)) for
+ name, missing_fields in self._interface_missing_map.items() if
+ missing_fields)
+ errors = "\n".join(errors)
+ if errors:
+ raise exceptions.IncorrectSetup(error_msg=errors)
+
+ finally:
+ self._dpdk_helper = None
+
+
class DpdkBindHelper(object):
- DPDK_STATUS_CMD = "{dpdk_nic_bind} --status"
- DPDK_BIND_CMD = "sudo {dpdk_nic_bind} {force} -b {driver} {vpci}"
+ DPDK_STATUS_CMD = "{dpdk_devbind} --status"
+ DPDK_BIND_CMD = "sudo {dpdk_devbind} {force} -b {driver} {vpci}"
- NIC_ROW_RE = re.compile("([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
- "unused=([^ ]*)(?: (\*Active\*))?")
+ NIC_ROW_RE = re.compile(r"([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
+ r"unused=([^ ]*)(?: (\*Active\*))?")
SKIP_RE = re.compile('(====|<none>|^$)')
NIC_ROW_FIELDS = ['vpci', 'dev_type', 'iface', 'driver', 'unused', 'active']
+ UIO_DRIVER = "uio"
+
HEADER_DICT_PAIRS = [
(re.compile('^Network.*DPDK.*$'), NETWORK_DPDK),
(re.compile('^Network.*kernel.*$'), NETWORK_KERNEL),
@@ -51,6 +217,42 @@ class DpdkBindHelper(object):
(re.compile('^Other crypto.*$'), CRYPTO_OTHER),
]
+ FIND_NETDEVICE_STRING = r"""\
+find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
+$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
+$1/device/subsystem_vendor $1/device/subsystem_device $1/device/numa_node ; \
+printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
+' sh \{\}/* \;
+"""
+
+ BASE_ADAPTER_RE = re.compile('^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
+ DPDK_DEVBIND = "dpdk-devbind.py"
+
+ @classmethod
+ def parse_netdev_info(cls, stdout):
+ network_devices = defaultdict(dict)
+ match_iter = (match.groups() for match in cls.BASE_ADAPTER_RE.finditer(stdout))
+ for bus_path, interface_name, name, value in match_iter:
+ dir_name, bus_id = os.path.split(bus_path)
+ if 'virtio' in bus_id:
+ # for some stupid reason VMs include virtio1/
+ # in PCI device path
+ bus_id = os.path.basename(dir_name)
+
+ # remove extra 'device/' from 'device/vendor,
+ # device/subsystem_vendor', etc.
+ if 'device' in name:
+ name = name.split('/')[1]
+
+ network_devices[interface_name].update({
+ name: value,
+ 'interface_name': interface_name,
+ 'pci_bus_id': bus_id,
+ })
+
+ # convert back to regular dict
+ return dict(network_devices)
+
def clean_status(self):
self.dpdk_status = {
NETWORK_KERNEL: [],
@@ -61,11 +263,17 @@ class DpdkBindHelper(object):
CRYPTO_OTHER: [],
}
- def __init__(self, ssh_helper):
+ # TODO: add support for driver other than igb_uio
+ def __init__(self, ssh_helper, dpdk_driver="igb_uio"):
+ self.ssh_helper = ssh_helper
+ self.real_kernel_interface_driver_map = {}
+ self.dpdk_driver = dpdk_driver
self.dpdk_status = None
self.status_nic_row_re = None
- self._dpdk_nic_bind_attr = None
+ self.dpdk_devbind = self.ssh_helper.join_bin_path(self.DPDK_DEVBIND)
self._status_cmd_attr = None
+ self.used_drivers = None
+ self.real_kernel_drivers = {}
self.ssh_helper = ssh_helper
self.clean_status()
@@ -73,28 +281,38 @@ class DpdkBindHelper(object):
def _dpdk_execute(self, *args, **kwargs):
res = self.ssh_helper.execute(*args, **kwargs)
if res[0] != 0:
- raise DpdkBindHelperException('{} command failed with rc={}'.format(
- self._dpdk_nic_bind, res[0]))
+ template = '{} command failed with rc={}'
+ LOG.critical("DPDK_DEVBIND Failure %s", res[1])
+ raise DpdkBindHelperException(template.format(self.dpdk_devbind, res[0]))
return res
- @property
- def _dpdk_nic_bind(self):
- if self._dpdk_nic_bind_attr is None:
- self._dpdk_nic_bind_attr = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
- return self._dpdk_nic_bind_attr
+ def load_dpdk_driver(self, dpdk_driver=None):
+ if dpdk_driver is None:
+ dpdk_driver = self.dpdk_driver
+ cmd_template = "sudo modprobe {} && sudo modprobe {}"
+ self.ssh_helper.execute(
+ cmd_template.format(self.UIO_DRIVER, dpdk_driver))
+
+ def check_dpdk_driver(self, dpdk_driver=None):
+ if dpdk_driver is None:
+ dpdk_driver = self.dpdk_driver
+ return \
+ self.ssh_helper.execute("lsmod | grep -i {}".format(dpdk_driver))[0]
@property
def _status_cmd(self):
if self._status_cmd_attr is None:
- self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind)
+ self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_devbind=self.dpdk_devbind)
return self._status_cmd_attr
- def _addline(self, active_list, line):
+ def _add_line(self, active_list, line):
if active_list is None:
return
+
res = self.NIC_ROW_RE.match(line)
if res is None:
return
+
new_data = {k: v for k, v in zip(self.NIC_ROW_FIELDS, res.groups())}
new_data['active'] = bool(new_data['active'])
self.dpdk_status[active_list].append(new_data)
@@ -106,14 +324,14 @@ class DpdkBindHelper(object):
return a_dict
return active_dict
- def parse_dpdk_status_output(self, input):
+ def _parse_dpdk_status_output(self, output):
active_dict = None
self.clean_status()
- for a_row in input.splitlines():
+ for a_row in output.splitlines():
if self.SKIP_RE.match(a_row):
continue
active_dict = self._switch_active_dict(a_row, active_dict)
- self._addline(active_dict, a_row)
+ self._add_line(active_dict, a_row)
return self.dpdk_status
def _get_bound_pci_addresses(self, active_dict):
@@ -130,31 +348,85 @@ class DpdkBindHelper(object):
@property
def interface_driver_map(self):
return {interface['vpci']: interface['driver']
- for interface in itertools.chain.from_iterable(self.dpdk_status.values())}
+ for interface in chain.from_iterable(self.dpdk_status.values())}
def read_status(self):
- return self.parse_dpdk_status_output(self._dpdk_execute(self._status_cmd)[1])
+ return self._parse_dpdk_status_output(self._dpdk_execute(self._status_cmd)[1])
+
+ def find_net_devices(self):
+ exit_status, stdout, _ = self.ssh_helper.execute(self.FIND_NETDEVICE_STRING)
+ if exit_status != 0:
+ return {}
+
+ return self.parse_netdev_info(stdout)
def bind(self, pci_addresses, driver, force=True):
- # accept single PCI or list of PCI
- if isinstance(pci_addresses, six.string_types):
- pci_addresses = [pci_addresses]
- cmd = self.DPDK_BIND_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind,
+ # accept single PCI or sequence of PCI
+ pci_addresses = validate_non_string_sequence(pci_addresses, [pci_addresses])
+
+ cmd = self.DPDK_BIND_CMD.format(dpdk_devbind=self.dpdk_devbind,
driver=driver,
vpci=' '.join(list(pci_addresses)),
force='--force' if force else '')
LOG.debug(cmd)
self._dpdk_execute(cmd)
+
# update the inner status dict
self.read_status()
+ def probe_real_kernel_drivers(self):
+ self.read_status()
+ self.save_real_kernel_interface_driver_map()
+
+ def force_dpdk_rebind(self):
+ self.load_dpdk_driver()
+ self.read_status()
+ self.save_real_kernel_interface_driver_map()
+ self.save_used_drivers()
+
+ real_driver_map = {}
+ # only rebind devices that are bound to DPDK
+ for pci in self.dpdk_bound_pci_addresses:
+ # messy
+ real_driver = self.real_kernel_interface_driver_map[pci]
+ real_driver_map.setdefault(real_driver, []).append(pci)
+ for real_driver, pcis in real_driver_map.items():
+ self.bind(pcis, real_driver, force=True)
+
def save_used_drivers(self):
# invert the map, so we can bind by driver type
self.used_drivers = {}
- # sort for stabililty
+ # sort for stability
for vpci, driver in sorted(self.interface_driver_map.items()):
self.used_drivers.setdefault(driver, []).append(vpci)
+ KERNEL_DRIVER_RE = re.compile(r"Kernel modules: (\S+)", re.M)
+ VIRTIO_DRIVER_RE = re.compile(r"Ethernet.*Virtio network device", re.M)
+ VIRTIO_DRIVER = "virtio-pci"
+
+ def save_real_kernel_drivers(self):
+ # invert the map, so we can bind by driver type
+ self.real_kernel_drivers = {}
+ # sort for stability
+ for vpci, driver in sorted(self.real_kernel_interface_driver_map.items()):
+ self.used_drivers.setdefault(driver, []).append(vpci)
+
+ def get_real_kernel_driver(self, pci):
+ out = self.ssh_helper.execute('lspci -k -s %s' % pci)[1]
+ match = self.KERNEL_DRIVER_RE.search(out)
+ if match:
+ return match.group(1)
+
+ match = self.VIRTIO_DRIVER_RE.search(out)
+ if match:
+ return self.VIRTIO_DRIVER
+
+ return None
+
+ def save_real_kernel_interface_driver_map(self):
+ iter1 = ((pci, self.get_real_kernel_driver(pci)) for pci in self.interface_driver_map)
+ self.real_kernel_interface_driver_map = {pci: driver for pci, driver in iter1 if driver}
+
def rebind_drivers(self, force=True):
for driver, vpcis in self.used_drivers.items():
self.bind(vpcis, driver, force)
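[Editor's sketch] Driving the renamed status parser against abridged, illustrative dpdk-devbind.py output. The SSH helper is stubbed (join_bin_path is the only call the constructor needs), and _parse_dpdk_status_output is invoked directly for the demo even though it is now private:

    from yardstick.network_services.helpers.dpdkbindnic_helper import (
        DpdkBindHelper, NETWORK_DPDK)

    class FakeSSH(object):
        def join_bin_path(self, *args):
            return '/opt/nsb_bin/dpdk-devbind.py'   # hypothetical bin path

    STATUS = "\n".join([
        "Network devices using DPDK-compatible driver",
        "============================================",
        "0000:00:04.0 'Virtio network device' drv=igb_uio unused=virtio-pci",
        "",
        "Network devices using kernel driver",
        "===================================",
        "0000:00:03.0 'Virtio network device' if=ens3 drv=virtio-pci"
        " unused=igb_uio *Active*",
    ])

    helper = DpdkBindHelper(FakeSSH())
    helper._parse_dpdk_status_output(STATUS)
    print(helper.dpdk_status[NETWORK_DPDK])
    # [{'vpci': '0000:00:04.0', 'dev_type': 'Virtio network device',
    #   'iface': None, 'driver': 'igb_uio', 'unused': 'virtio-pci',
    #   'active': False}]
    print(helper.interface_driver_map)
    # {'0000:00:04.0': 'igb_uio', '0000:00:03.0': 'virtio-pci'}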
diff --git a/yardstick/network_services/helpers/samplevnf_helper.py b/yardstick/network_services/helpers/samplevnf_helper.py
index 0ab10d7b7..8e6a3a3ea 100644
--- a/yardstick/network_services/helpers/samplevnf_helper.py
+++ b/yardstick/network_services/helpers/samplevnf_helper.py
@@ -23,8 +23,7 @@ from itertools import chain, repeat
import six
from six.moves.configparser import ConfigParser
-
-from yardstick.common.utils import ip_to_hex
+from yardstick.common import utils
LOG = logging.getLogger(__name__)
@@ -34,19 +33,6 @@ link {0} config {1} {2}
link {0} up
"""
-ACTION_TEMPLATE = """\
-p action add {0} accept
-p action add {0} fwd {0}
-p action add {0} count
-"""
-
-FW_ACTION_TEMPLATE = """\
-p action add {0} accept
-p action add {0} fwd {0}
-p action add {0} count
-p action add {0} conntrack
-"""
-
# This sets up a basic passthrough with no rules
SCRIPT_TPL = """
{link_config}
@@ -59,9 +45,7 @@ SCRIPT_TPL = """
{arp_route_tbl6}
-{actions}
-
-{rules}
+{flows}
"""
@@ -182,26 +166,9 @@ class MultiPortConfig(object):
return parser.get(section, key)
return default
- @staticmethod
- def make_ip_addr(ip, mask):
- """
- :param ip: ip adddress
- :type ip: str
- :param mask: /24 prefix of 255.255.255.0 netmask
- :type mask: str
- :return: interface
- :rtype: IPv4Interface
- """
-
- try:
- return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
- except (TypeError, ValueError):
- # None so we can skip later
- return None
-
@classmethod
def validate_ip_and_prefixlen(cls, ip_addr, prefixlen):
- ip_addr = cls.make_ip_addr(ip_addr, prefixlen)
+ ip_addr = utils.make_ip_addr(ip_addr, prefixlen)
return ip_addr.ip.exploded, ip_addr.network.prefixlen
def __init__(self, topology_file, config_tpl, tmp_file, vnfd_helper,
@@ -245,7 +212,7 @@ class MultiPortConfig(object):
self.ports_len = 0
self.prv_que_handler = None
self.vnfd = None
- self.rules = None
+ self.flows = None
self.pktq_out = []
@staticmethod
@@ -360,7 +327,7 @@ class MultiPortConfig(object):
"%s/%s" % (interface["dst_ip"], interface["netmask"])))
arp_vars = {
- "port_netmask_hex": ip_to_hex(dst_port_ip.network.netmask.exploded),
+ "port_netmask_hex": utils.ip_to_hex(dst_port_ip.network.netmask.exploded),
# this is the port num that contains port0 subnet and next_hop_ip_hex
# this is LINKID which should be based on DPDK port number
"port_num": dpdk_port_num,
@@ -542,7 +509,7 @@ class MultiPortConfig(object):
self.update_write_parser(self.loadb_tpl)
self.start_core += 1
- for i in range(self.worker_threads):
+ for _ in range(self.worker_threads):
vnf_data = self.generate_vnf_data()
if not self.vnf_tpl:
self.vnf_tpl = {}
@@ -637,65 +604,8 @@ class MultiPortConfig(object):
return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config6))
- def generate_action_config(self):
- port_list = (self.vnfd_helper.port_num(p) for p in self.all_ports)
- if self.vnf_type == "VFW":
- template = FW_ACTION_TEMPLATE
- else:
- template = ACTION_TEMPLATE
-
- return ''.join((template.format(port) for port in port_list))
-
- def get_ip_from_port(self, port):
- # we can't use gateway because in OpenStack gateways interfer with floating ip routing
- # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
- vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
- ip = vintf["local_ip"]
- netmask = vintf["netmask"]
- return self.make_ip_addr(ip, netmask)
-
- def get_network_and_prefixlen_from_ip_of_port(self, port):
- ip_addr = self.get_ip_from_port(port)
- # handle cases with no gateway
- if ip_addr:
- return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
- else:
- return None, None
-
- def generate_rule_config(self):
- cmd = 'acl' if self.vnf_type == "ACL" else "vfw"
- rules_config = self.rules if self.rules else ''
- new_rules = []
- new_ipv6_rules = []
- pattern = 'p {0} add {1} {2} {3} {4} {5} 0 65535 0 65535 0 0 {6}'
- for src_intf, dst_intf in self.port_pair_list:
- src_port = self.vnfd_helper.port_num(src_intf)
- dst_port = self.vnfd_helper.port_num(dst_intf)
-
- src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_intf)
- dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_intf)
- # ignore entires with empty values
- if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
- new_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
- dst_net, dst_prefix_len, dst_port))
- new_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
- src_net, src_prefix_len, src_port))
-
- # src_net = self.get_ports_gateway6(port_pair[0])
- # src_prefix_len = self.get_netmask_gateway6(port_pair[0])
- # dst_net = self.get_ports_gateway6(port_pair[1])
- # dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
- # # ignore entires with empty values
- # if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
- # new_ipv6_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
- # dst_net, dst_prefix_len, dst_port))
- # new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
- # src_net, src_prefix_len, src_port))
-
- acl_apply = "\np %s applyruleset" % cmd
- new_rules_config = '\n'.join(pattern.format(*values) for values
- in chain(new_rules, new_ipv6_rules))
- return ''.join([rules_config, new_rules_config, acl_apply])
+ def get_flows_config(self):
+ return self.flows if self.flows else ''
def generate_script_data(self):
self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
@@ -707,24 +617,15 @@ class MultiPortConfig(object):
# disable IPv6 for now
# 'arp_config6': self.generate_arp_config6(),
'arp_config6': "",
- 'arp_config': self.generate_arp_config(),
'arp_route_tbl': self.generate_arp_route_tbl(),
'arp_route_tbl6': "",
- 'actions': '',
- 'rules': '',
+ 'flows': self.get_flows_config()
}
-
- if self.vnf_type in ('ACL', 'VFW'):
- script_data.update({
- 'actions': self.generate_action_config(),
- 'rules': self.generate_rule_config(),
- })
-
return script_data
- def generate_script(self, vnfd, rules=None):
+ def generate_script(self, vnfd, flows=None):
self.vnfd = vnfd
- self.rules = rules
+ self.flows = flows
script_data = self.generate_script_data()
script = SCRIPT_TPL.format(**script_data)
if self.lb_config == self.HW_LB:
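[Editor's sketch] With the ACL/VFW action and rule generators removed, MultiPortConfig now just interpolates the caller-supplied flows string into SCRIPT_TPL, emitting an empty string when flows is None. A tiny illustration of that contract with an abridged stand-in template (a hypothetical VFW rule string, since building a real MultiPortConfig needs topology files):

    # Abridged stand-in for SCRIPT_TPL; only the {flows} slot matters here.
    DEMO_TPL = "{link_config}\n\n{flows}\n"

    for flows in (None,
                  "p vfw add 1 152.16.0.0 24 152.40.0.0 24 "
                  "0 65535 0 65535 0 0 1\np vfw applyruleset"):
        # Mirrors MultiPortConfig.get_flows_config(): '' when flows is None.
        print(DEMO_TPL.format(link_config="link 0 up",
                              flows=flows if flows else ''))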
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/__init__.py b/yardstick/network_services/helpers/vpp_helpers/__init__.py
index e69de29bb..e69de29bb 100644
--- a/yardstick/network_services/libs/ixia_libs/IxNet/__init__.py
+++ b/yardstick/network_services/helpers/vpp_helpers/__init__.py
diff --git a/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py b/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py
new file mode 100644
index 000000000..fced05833
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py;hb=HEAD
+
+
+from abc import ABCMeta, abstractmethod
+
+
+class AbstractSearchAlgorithm(object):
+ """Abstract class defining common API for search algorithms."""
+
+ __metaclass__ = ABCMeta
+
+ def __init__(self, measurer):
+ """Store the rate provider.
+
+ :param measurer: Object able to perform trial or composite measurements.
+ :type measurer: AbstractMeasurer.AbstractMeasurer
+ """
+ # TODO: Type check for AbstractMeasurer?
+ self.measurer = measurer
+
+ @abstractmethod
+ def narrow_down_ndr_and_pdr(
+ self, fail_rate, line_rate, packet_loss_ratio):
+ """Perform measurements to narrow down intervals, return them.
+
+ This will be renamed when custom loss ratio lists are supported.
+
+ :param fail_rate: Minimal target transmit rate [pps].
+ :param line_rate: Maximal target transmit rate [pps].
+ :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
+ :type fail_rate: float
+ :type line_rate: float
+ :type packet_loss_ratio: float
+ :returns: Structure containing narrowed down intervals
+ and their measurements.
+ :rtype: NdrPdrResult.NdrPdrResult
+ """
+ # TODO: Do we agree on arguments related to precision or trial duration?
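[Editor's sketch] A toy subclass, only to show the intended shape of the API; the class name and the fixed 5 Mpps figure are hypothetical, and the measurer is unused in this stub:

    from yardstick.network_services.helpers.vpp_helpers.abstract_search_algorithm \
        import AbstractSearchAlgorithm

    class FixedRateSearch(AbstractSearchAlgorithm):
        """Pretends the device forwards at most 5 Mpps, losslessly."""

        def narrow_down_ndr_and_pdr(self, fail_rate, line_rate,
                                    packet_loss_ratio):
            # A real implementation would drive self.measurer here and
            # return an NdrPdrResult; a bare rate keeps the stub short.
            return min(line_rate, 5e6)

    search = FixedRateSearch(measurer=None)
    print(search.narrow_down_ndr_and_pdr(1e4, 1e7, 0.005))   # 5000000.0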
diff --git a/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py b/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py
new file mode 100644
index 000000000..582e3dc27
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py
@@ -0,0 +1,688 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py;hb=HEAD
+
+import datetime
+import logging
+import math
+import time
+
+from yardstick.network_services.helpers.vpp_helpers.abstract_search_algorithm import \
+ AbstractSearchAlgorithm
+from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \
+ NdrPdrResult
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
+ ReceiveRateInterval
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
+ ReceiveRateMeasurement
+
+LOGGING = logging.getLogger(__name__)
+
+
+class MultipleLossRatioSearch(AbstractSearchAlgorithm):
+ """Optimized binary search algorithm for finding NDR and PDR bounds.
+
+ Traditional binary search algorithm needs initial interval
+ (lower and upper bound), and returns final interval after bisecting
+ (until some exit condition is met).
+ The exit condition is usually related to the interval width,
+ (upper bound value minus lower bound value).
+
+ The optimized algorithm contains several improvements
+ aimed to reduce overall search time.
+
+ One improvement is searching for two intervals at once.
+ The intervals are for NDR (No Drop Rate) and PDR (Partial Drop Rate).
+
+ Next improvement is that the initial interval does not need to be valid.
+ Imagine initial interval (10, 11) where 11 is smaller
+ than the searched value.
+ The algorithm will try (11, 13) interval next, and if 13 is still smaller,
+ (13, 17) and so on, doubling width until the upper bound is valid.
+ The part when interval expands is called external search,
+ the part when interval is bisected is called internal search.
+
+ Next improvement is that trial measurements at small trial duration
+ can be used to find a reasonable interval for full trial duration search.
+ This results in more trials performed, but smaller overall duration
+ in general.
+
+ Next improvement is bisecting in logarithmic quantities,
+ so that exit criteria can be independent of measurement units.
+
+ Next improvement is basing the initial interval on receive rates.
+
+ Final improvement is exiting early if the minimal value
+ is not a valid lower bound.
+
+ The complete search consists of several phases,
+ each phase performing several trial measurements.
+ Initial phase creates initial interval based on receive rates
+ at maximum rate and at maximum receive rate (MRR).
+ Final phase and preceding intermediate phases are performing
+ external and internal search steps,
+ each resulting interval is the starting point for the next phase.
+ The resulting interval of final phase is the result of the whole algorithm.
+
+ Each non-initial phase uses its own trial duration and width goal.
+ Any non-initial phase stops searching (for NDR or PDR independently)
+ when minimum is not a valid lower bound (at current duration),
+ or all of the following is true:
+ Both bounds are valid, both bounds are measured at the current phase
+ trial duration, interval width is less than the width goal
+ for current phase.
+
+ TODO: Review and update this docstring according to rst docs.
+ TODO: Support configurable number of Packet Loss Ratios.
+ """
+
+ class ProgressState(object):
+ """Structure containing data to be passed around in recursion."""
+
+ def __init__(
+ self, result, phases, duration, width_goal, packet_loss_ratio,
+ minimum_transmit_rate, maximum_transmit_rate):
+ """Convert and store the argument values.
+
+ :param result: Current measured NDR and PDR intervals.
+ :param phases: How many intermediate phases to perform
+ before the current one.
+ :param duration: Trial duration to use in the current phase [s].
+ :param width_goal: The goal relative width for the current phase.
+ :param packet_loss_ratio: PDR fraction for the current search.
+ :param minimum_transmit_rate: Minimum target transmit rate
+ for the current search [pps].
+ :param maximum_transmit_rate: Maximum target transmit rate
+ for the current search [pps].
+ :type result: NdrPdrResult.NdrPdrResult
+ :type phases: int
+ :type duration: float
+ :type width_goal: float
+ :type packet_loss_ratio: float
+ :type minimum_transmit_rate: float
+ :type maximum_transmit_rate: float
+ """
+ self.result = result
+ self.phases = int(phases)
+ self.duration = float(duration)
+ self.width_goal = float(width_goal)
+ self.packet_loss_ratio = float(packet_loss_ratio)
+ self.minimum_transmit_rate = float(minimum_transmit_rate)
+ self.maximum_transmit_rate = float(maximum_transmit_rate)
+
+ def __init__(self, measurer, latency=False, pkt_size=64,
+ final_relative_width=0.005,
+ final_trial_duration=30.0, initial_trial_duration=1.0,
+ number_of_intermediate_phases=2, timeout=600.0, doublings=1):
+ """Store the measurer object and additional arguments.
+
+ :param measurer: Rate provider to use by this search object.
+ :param final_relative_width: Final lower bound transmit rate
+ cannot be more distant that this multiple of upper bound [1].
+ :param final_trial_duration: Trial duration for the final phase [s].
+ :param initial_trial_duration: Trial duration for the initial phase
+ and also for the first intermediate phase [s].
+ :param number_of_intermediate_phases: Number of intermediate phases
+ to perform before the final phase [1].
+ :param timeout: The search will fail itself when not finished
+ before this overall time [s].
+ :param doublings: How many doublings to do in external search step.
+ Default 1 is suitable for fairly stable tests,
+ less stable tests might get better overall duration with 2 or more.
+ :type measurer: AbstractMeasurer.AbstractMeasurer
+ :type final_relative_width: float
+ :type final_trial_duration: float
+ :type initial_trial_duration: float
+ :type number_of_intermediate_phases: int
+ :type timeout: float
+ :type doublings: int
+ """
+ super(MultipleLossRatioSearch, self).__init__(measurer)
+ self.latency = latency
+ self.pkt_size = int(pkt_size)
+ self.final_trial_duration = float(final_trial_duration)
+ self.final_relative_width = float(final_relative_width)
+ self.number_of_intermediate_phases = int(number_of_intermediate_phases)
+ self.initial_trial_duration = float(initial_trial_duration)
+ self.timeout = float(timeout)
+ self.doublings = int(doublings)
+
+ self.queue = None
+ self.port_pg_id = None
+ self.ports = []
+ self.test_data = {}
+ self.profiles = {}
+
+ @staticmethod
+ def double_relative_width(relative_width):
+ """Return relative width corresponding to double logarithmic width.
+
+ :param relative_width: The base relative width to double.
+ :type relative_width: float
+ :returns: The relative width of double logarithmic size.
+ :rtype: float
+ """
+ return 1.999 * relative_width - relative_width * relative_width
+ # The number should be 2.0, but we want to avoid rounding errors,
+ # and ensure half of double is not larger than the original value.
+
+ @staticmethod
+ def double_step_down(relative_width, current_bound):
+ """Return rate of double logarithmic width below.
+
+ :param relative_width: The base relative width to double.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type current_bound: float
+ :returns: Transmit rate smaller by logarithmically double width [pps].
+ :rtype: float
+ """
+ return current_bound * (
+ 1.0 - MultipleLossRatioSearch.double_relative_width(
+ relative_width))
+
+ @staticmethod
+ def expand_down(relative_width, doublings, current_bound):
+ """Return rate of expanded logarithmic width below.
+
+ :param relative_width: The base relative width to double.
+ :param doublings: How many doublings to do for expansion.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type doublings: int
+ :type current_bound: float
+ :returns: Transmit rate smaller by logarithmically double width [pps].
+ :rtype: float
+ """
+ for _ in range(doublings):
+ relative_width = MultipleLossRatioSearch.double_relative_width(
+ relative_width)
+ return current_bound * (1.0 - relative_width)
+
+ @staticmethod
+ def double_step_up(relative_width, current_bound):
+ """Return rate of double logarithmic width above.
+
+ :param relative_width: The base relative width to double.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type current_bound: float
+ :returns: Transmit rate larger by logarithmically double width [pps].
+ :rtype: float
+ """
+ return current_bound / (
+ 1.0 - MultipleLossRatioSearch.double_relative_width(
+ relative_width))
+
+ @staticmethod
+ def expand_up(relative_width, doublings, current_bound):
+ """Return rate of expanded logarithmic width above.
+
+ :param relative_width: The base relative width to double.
+ :param doublings: How many doublings to do for expansion.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type doublings: int
+ :type current_bound: float
+ :returns: Transmit rate larger by logarithmically double width [pps].
+ :rtype: float
+ """
+ for _ in range(doublings):
+ relative_width = MultipleLossRatioSearch.double_relative_width(
+ relative_width)
+ return current_bound / (1.0 - relative_width)
+
+ @staticmethod
+ def half_relative_width(relative_width):
+ """Return relative width corresponding to half logarithmic width.
+
+ :param relative_width: The base relative width to halve.
+ :type relative_width: float
+ :returns: The relative width of half logarithmic size.
+ :rtype: float
+ """
+ return 1.0 - math.sqrt(1.0 - relative_width)
+
+ @staticmethod
+ def half_step_up(relative_width, current_bound):
+ """Return rate of half logarithmic width above.
+
+ :param relative_width: The base relative width to halve.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type current_bound: float
+ :returns: Transmit rate larger by logarithmically half width [pps].
+ :rtype: float
+ """
+ return current_bound / (
+ 1.0 - MultipleLossRatioSearch.half_relative_width(
+ relative_width))
+
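[Editor's sketch] The width helpers above operate on relative, logarithmic-style widths, so stepping down by a doubled width and back up round-trips exactly, and halving a doubled width lands just under the original, as the 1.999 factor intends. A quick numeric check (values illustrative):

    from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search \
        import MultipleLossRatioSearch as MLR

    w = 0.005                                # final relative width goal
    dw = MLR.double_relative_width(w)
    print(dw)                                # 0.00997 (just under 2 * w)
    print(MLR.half_relative_width(dw))       # ~0.0049975, never above w

    lo = MLR.double_step_down(w, 1e7)        # step down from 10 Mpps
    print(lo)                                # 9900300.0
    print(MLR.double_step_up(w, lo))         # 10000000.0 - exact round-trip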
+ def init_generator(self, ports, port_pg_id, profiles, test_data, queue):
+ self.ports = ports
+ self.port_pg_id = port_pg_id
+ self.profiles = profiles
+ self.test_data = test_data
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def collect_kpi(self, stats, test_value):
+ samples = self.measurer.generate_samples(stats, self.ports,
+ self.port_pg_id, self.latency)
+ samples.update(self.test_data)
+ LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(),
+ test_value, samples)
+ self.queue.put(samples)
+
+ def narrow_down_ndr_and_pdr(
+ self, minimum_transmit_rate, maximum_transmit_rate,
+ packet_loss_ratio):
+ """Perform initial phase, create state object, proceed with next phases.
+
+ :param minimum_transmit_rate: Minimal target transmit rate [pps].
+ :param maximum_transmit_rate: Maximal target transmit rate [pps].
+ :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
+ :type minimum_transmit_rate: float
+ :type maximum_transmit_rate: float
+ :type packet_loss_ratio: float
+ :returns: Structure containing narrowed down intervals
+ and their measurements.
+ :rtype: NdrPdrResult.NdrPdrResult
+ :raises RuntimeError: If total duration is larger than timeout.
+ """
+ minimum_transmit_rate = float(minimum_transmit_rate)
+ maximum_transmit_rate = float(maximum_transmit_rate)
+ packet_loss_ratio = float(packet_loss_ratio)
+ line_measurement = self.measure(
+ self.initial_trial_duration, maximum_transmit_rate, self.latency)
+ initial_width_goal = self.final_relative_width
+ for _ in range(self.number_of_intermediate_phases):
+ initial_width_goal = self.double_relative_width(initial_width_goal)
+ max_lo = maximum_transmit_rate * (1.0 - initial_width_goal)
+ mrr = max(
+ minimum_transmit_rate,
+ min(max_lo, line_measurement.receive_rate))
+ mrr_measurement = self.measure(
+ self.initial_trial_duration, mrr, self.latency)
+ # Attempt to get narrower width.
+ if mrr_measurement.loss_fraction > 0.0:
+ max2_lo = mrr * (1.0 - initial_width_goal)
+ mrr2 = min(max2_lo, mrr_measurement.receive_rate)
+ else:
+ mrr2 = mrr / (1.0 - initial_width_goal)
+ if mrr2 > minimum_transmit_rate and mrr2 < maximum_transmit_rate:
+ line_measurement = mrr_measurement
+ mrr_measurement = self.measure(
+ self.initial_trial_duration, mrr2, self.latency)
+ if mrr2 > mrr:
+ buf = line_measurement
+ line_measurement = mrr_measurement
+ mrr_measurement = buf
+ starting_interval = ReceiveRateInterval(
+ mrr_measurement, line_measurement)
+ starting_result = NdrPdrResult(starting_interval, starting_interval)
+ state = self.ProgressState(
+ starting_result, self.number_of_intermediate_phases,
+ self.final_trial_duration, self.final_relative_width,
+ packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate)
+ state = self.ndrpdr(state)
+ result = state.result
+ # theor_max_thruput = 0
+ result_samples = {}
+
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'NDR_LOWER', result.ndr_interval.measured_low.transmit_rate,
+ self.pkt_size, result.ndr_interval.measured_low.latency)
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'NDR_UPPER', result.ndr_interval.measured_high.transmit_rate,
+ self.pkt_size)
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'PDR_LOWER', result.pdr_interval.measured_low.transmit_rate,
+ self.pkt_size, result.pdr_interval.measured_low.latency)
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'PDR_UPPER', result.pdr_interval.measured_high.transmit_rate,
+ self.pkt_size)
+ pdr_msg = self.check_ndrpdr_interval_validity(result_samples, "PDR",
+ result.pdr_interval,
+ packet_loss_ratio)
+ ndr_msg = self.check_ndrpdr_interval_validity(result_samples, "NDR",
+ result.ndr_interval)
+ self.queue.put(result_samples)
+
+ LOGGING.debug("result_samples: %s", result_samples)
+ LOGGING.info(pdr_msg)
+ LOGGING.info(ndr_msg)
+
+ self.perform_additional_measurements_based_on_ndrpdr_result(result)
+
+ return result_samples
+
+ def _measure_and_update_state(self, state, transmit_rate):
+ """Perform trial measurement, update bounds, return new state.
+
+ :param state: State before this measurement.
+ :param transmit_rate: Target transmit rate for this measurement [pps].
+ :type state: ProgressState
+ :type transmit_rate: float
+ :returns: State after the measurement.
+ :rtype: ProgressState
+ """
+ # TODO: Implement https://stackoverflow.com/a/24683360
+ # to avoid the string manipulation if log verbosity is too low.
+ LOGGING.info("result before update: %s", state.result)
+ LOGGING.debug(
+ "relative widths in goals: %s", state.result.width_in_goals(
+ self.final_relative_width))
+ measurement = self.measure(state.duration, transmit_rate, self.latency)
+ ndr_interval = self._new_interval(
+ state.result.ndr_interval, measurement, 0.0)
+ pdr_interval = self._new_interval(
+ state.result.pdr_interval, measurement, state.packet_loss_ratio)
+ state.result = NdrPdrResult(ndr_interval, pdr_interval)
+ return state
+
+ @staticmethod
+ def _new_interval(old_interval, measurement, packet_loss_ratio):
+ """Return new interval with bounds updated according to the measurement.
+
+ :param old_interval: The current interval before the measurement.
+ :param measurement: The new measurement to take into account.
+ :param packet_loss_ratio: Fraction for PDR (or zero for NDR).
+ :type old_interval: ReceiveRateInterval.ReceiveRateInterval
+ :type measurement: ReceiveRateMeasurement.ReceiveRateMeasurement
+ :type packet_loss_ratio: float
+ :returns: The updated interval.
+ :rtype: ReceiveRateInterval.ReceiveRateInterval
+ """
+ old_lo, old_hi = old_interval.measured_low, old_interval.measured_high
+ # Priority zero: direct replace if the target Tr is the same.
+ if measurement.target_tr in (old_lo.target_tr, old_hi.target_tr):
+ if measurement.target_tr == old_lo.target_tr:
+ return ReceiveRateInterval(measurement, old_hi)
+ else:
+ return ReceiveRateInterval(old_lo, measurement)
+ # Priority one: invalid lower bound allows only one type of update.
+ if old_lo.loss_fraction > packet_loss_ratio:
+ # We can only expand down, old bound becomes valid upper one.
+ if measurement.target_tr < old_lo.target_tr:
+ return ReceiveRateInterval(measurement, old_lo)
+ else:
+ return old_interval
+ # Lower bound is now valid.
+ # Next priorities depend on target Tr.
+ if measurement.target_tr < old_lo.target_tr:
+ # Lower external measurement, relevant only
+ # if the new measurement has high loss rate.
+ if measurement.loss_fraction > packet_loss_ratio:
+ # Returning the broader interval as old_lo
+ # would be invalid upper bound.
+ return ReceiveRateInterval(measurement, old_hi)
+ elif measurement.target_tr > old_hi.target_tr:
+ # Upper external measurement, only relevant for invalid upper bound.
+ if old_hi.loss_fraction <= packet_loss_ratio:
+ # Old upper bound becomes valid new lower bound.
+ return ReceiveRateInterval(old_hi, measurement)
+ else:
+ # Internal measurement, replaced boundary
+ # depends on measured loss fraction.
+ if measurement.loss_fraction > packet_loss_ratio:
+ # We have found a narrow valid interval,
+ # regardless of whether old upper bound was valid.
+ return ReceiveRateInterval(old_lo, measurement)
+ else:
+ # In ideal world, we would not want to shrink interval
+ # if upper bound is not valid.
+ # In the real world, we want to shrink it for
+ # "invalid upper bound at maximal rate" case.
+ return ReceiveRateInterval(measurement, old_hi)
+ # Fallback, the interval is unchanged by the measurement.
+ return old_interval
+
+ def ndrpdr(self, state):
+ """Pefrom trials for this phase. Return the new state when done.
+
+ :param state: State before this phase.
+ :type state: ProgressState
+ :returns: The updated state.
+ :rtype: ProgressState
+ :raises RuntimeError: If total duration is larger than timeout.
+ """
+ start_time = time.time()
+ if state.phases > 0:
+ # We need to finish preceding intermediate phases first.
+ saved_phases = state.phases
+ state.phases -= 1
+ # Preceding phases have shorter duration.
+ saved_duration = state.duration
+ duration_multiplier = state.duration / self.initial_trial_duration
+ phase_exponent = float(state.phases) / saved_phases
+ state.duration = self.initial_trial_duration * math.pow(
+ duration_multiplier, phase_exponent)
+ # Shorter durations do not need that narrow widths.
+ saved_width = state.width_goal
+ state.width_goal = self.double_relative_width(state.width_goal)
+ # Recurse.
+ state = self.ndrpdr(state)
+ # Restore the state for current phase.
+ state.duration = saved_duration
+ state.width_goal = saved_width
+ state.phases = saved_phases # Not needed, but just in case.
+ LOGGING.info(
+ "starting iterations with duration %s and relative width goal %s",
+ state.duration, state.width_goal)
+ while 1:
+ if time.time() > start_time + self.timeout:
+ raise RuntimeError("Optimized search takes too long.")
+ # Order of priorities: invalid bounds (nl, pl, nh, ph),
+ # then narrowing relative Tr widths.
+ # Durations are not priorities yet,
+ # they will settle on their own hopefully.
+ ndr_lo = state.result.ndr_interval.measured_low
+ ndr_hi = state.result.ndr_interval.measured_high
+ pdr_lo = state.result.pdr_interval.measured_low
+ pdr_hi = state.result.pdr_interval.measured_high
+ ndr_rel_width = max(
+ state.width_goal, state.result.ndr_interval.rel_tr_width)
+ pdr_rel_width = max(
+ state.width_goal, state.result.pdr_interval.rel_tr_width)
+ # If we are hitting maximal or minimal rate, we cannot shift,
+ # but we can re-measure.
+ if ndr_lo.loss_fraction > 0.0:
+ if ndr_lo.target_tr > state.minimum_transmit_rate:
+ new_tr = max(
+ state.minimum_transmit_rate,
+ self.expand_down(
+ ndr_rel_width, self.doublings, ndr_lo.target_tr))
+ LOGGING.info("ndr lo external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif ndr_lo.duration < state.duration:
+ LOGGING.info("ndr lo minimal re-measure")
+ state = self._measure_and_update_state(
+ state, state.minimum_transmit_rate)
+ continue
+ if pdr_lo.loss_fraction > state.packet_loss_ratio:
+ if pdr_lo.target_tr > state.minimum_transmit_rate:
+ new_tr = max(
+ state.minimum_transmit_rate,
+ self.expand_down(
+ pdr_rel_width, self.doublings, pdr_lo.target_tr))
+ LOGGING.info("pdr lo external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif pdr_lo.duration < state.duration:
+ LOGGING.info("pdr lo minimal re-measure")
+ state = self._measure_and_update_state(
+ state, state.minimum_transmit_rate)
+ continue
+ if ndr_hi.loss_fraction <= 0.0:
+ if ndr_hi.target_tr < state.maximum_transmit_rate:
+ new_tr = min(
+ state.maximum_transmit_rate,
+ self.expand_up(
+ ndr_rel_width, self.doublings, ndr_hi.target_tr))
+ LOGGING.info("ndr hi external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif ndr_hi.duration < state.duration:
+ LOGGING.info("ndr hi maximal re-measure")
+ state = self._measure_and_update_state(
+ state, state.maximum_transmit_rate)
+ continue
+ if pdr_hi.loss_fraction <= state.packet_loss_ratio:
+ if pdr_hi.target_tr < state.maximum_transmit_rate:
+ new_tr = min(
+ state.maximum_transmit_rate,
+ self.expand_up(
+ pdr_rel_width, self.doublings, pdr_hi.target_tr))
+ LOGGING.info("pdr hi external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif pdr_hi.duration < state.duration:
+ LOGGING.info("ndr hi maximal re-measure")
+ state = self._measure_and_update_state(
+ state, state.maximum_transmit_rate)
+ continue
+ # If we are hitting maximum_transmit_rate,
+ # it is still worth narrowing width,
+ # hoping large enough loss fraction will happen.
+ # But if we are hitting the minimal rate (at current duration),
+ # no additional measurement will help with that,
+ # so we can stop narrowing in this phase.
+ if (ndr_lo.target_tr <= state.minimum_transmit_rate
+ and ndr_lo.loss_fraction > 0.0):
+ ndr_rel_width = 0.0
+ if (pdr_lo.target_tr <= state.minimum_transmit_rate
+ and pdr_lo.loss_fraction > state.packet_loss_ratio):
+ pdr_rel_width = 0.0
+ if ndr_rel_width > state.width_goal:
+ # We have to narrow NDR width first, as NDR internal search
+ # can invalidate PDR (but not vice versa).
+ new_tr = self.half_step_up(ndr_rel_width, ndr_lo.target_tr)
+ LOGGING.info("Bisecting for NDR at %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ if pdr_rel_width > state.width_goal:
+ # PDR internal search.
+ new_tr = self.half_step_up(pdr_rel_width, pdr_lo.target_tr)
+ LOGGING.info("Bisecting for PDR at %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ # We do not need to improve width, but there still might be
+ # some measurements with smaller duration.
+ # We need to re-measure with full duration, possibly
+ # creating invalid bounds to resolve (thus broadening width).
+ if ndr_lo.duration < state.duration:
+ LOGGING.info("re-measuring NDR lower bound")
+ state = self._measure_and_update_state(state, ndr_lo.target_tr)
+ continue
+ if pdr_lo.duration < state.duration:
+ LOGGING.info("re-measuring PDR lower bound")
+ state = self._measure_and_update_state(state, pdr_lo.target_tr)
+ continue
+ # Except when lower bounds have a high loss fraction;
+ # in that case we do not need to re-measure _upper_ bounds.
+ if ndr_hi.duration < state.duration and ndr_rel_width > 0.0:
+ LOGGING.info("re-measuring NDR upper bound")
+ state = self._measure_and_update_state(state, ndr_hi.target_tr)
+ continue
+ if pdr_hi.duration < state.duration and pdr_rel_width > 0.0:
+ LOGGING.info("re-measuring PDR upper bound")
+ state = self._measure_and_update_state(state, pdr_hi.target_tr)
+ continue
+ # Widths are narrow (or lower bound minimal), bound measurements
+ # are long enough, we can return.
+ LOGGING.info("phase done")
+ break
+ return state
+
+ def measure(self, duration, transmit_rate, latency):
+ duration = float(duration)
+ transmit_rate = float(transmit_rate)
+ # Trex needs target Tr per stream, but reports aggregate Tx and Dx.
+ unit_rate = str(transmit_rate / 2.0) + "pps"
+ stats = self.measurer.send_traffic_on_tg(self.ports, self.port_pg_id,
+ duration, unit_rate,
+ latency=latency)
+ self.measurer.client.reset(ports=self.ports)
+ self.measurer.client.clear_stats(ports=self.ports)
+ self.measurer.client.remove_all_streams(ports=self.ports)
+ for port, profile in self.profiles.items():
+ self.measurer.client.add_streams(profile, ports=[port])
+ self.collect_kpi(stats, unit_rate)
+ transmit_count = int(self.measurer.sent)
+ loss_count = int(self.measurer.loss)
+ measurement = ReceiveRateMeasurement(
+ duration, transmit_rate, transmit_count, loss_count)
+ measurement.latency = self.measurer.latency
+ return measurement
+
+ def perform_additional_measurements_based_on_ndrpdr_result(self, result):
+ duration = 5.0
+ rate = "{}{}".format(result.ndr_interval.measured_low.target_tr / 2.0,
+ 'pps')
+ for _ in range(0, 1):
+ stats = self.measurer.send_traffic_on_tg(self.ports,
+ self.port_pg_id, duration,
+ rate)
+ self.collect_kpi(stats, rate)
+ LOGGING.info('Traffic loss occurred: %s', self.measurer.loss)
+
+ @staticmethod
+ def display_single_bound(result_samples, result_type, rate_total, pkt_size,
+ latency=None):
+ bandwidth_total = float(rate_total) * (pkt_size + 20) * 8 / (10 ** 9)
+
+ result_samples["Result_{}".format(result_type)] = {
+ "rate_total_pps": float(rate_total),
+ "bandwidth_total_Gbps": float(bandwidth_total),
+ }
+
+ if latency:
+ for item in latency:
+ if latency.index(item) == 0:
+ name = "Result_{}_{}".format("stream0", result_type)
+ else:
+ name = "Result_{}_{}".format("stream1", result_type)
+ lat_min, lat_avg, lat_max = item.split('/')
+ result_samples[name] = {
+ "min_latency": float(lat_min),
+ "avg_latency": float(lat_avg),
+ "max_latency": float(lat_max),
+ }
+
+ @staticmethod
+ def check_ndrpdr_interval_validity(result_samples, result_type, interval,
+ packet_loss_ratio=0.0):
+ lower_bound = interval.measured_low
+ lower_bound_lf = lower_bound.loss_fraction
+
+ result_samples["Result_{}_packets_lost".format(result_type)] = {
+ "packet_loss_ratio": float(lower_bound_lf),
+ "packets_lost": float(lower_bound.loss_count),
+ }
+
+ if lower_bound_lf <= packet_loss_ratio:
+ return "Minimal rate loss fraction {} reach target {}".format(
+ lower_bound_lf, packet_loss_ratio)
+ else:
+ message = "Minimal rate loss fraction {} does not reach target {}".format(
+ lower_bound_lf, packet_loss_ratio)
+ if lower_bound_lf >= 1.0:
+ return '{}\nZero packets forwarded!'.format(message)
+ else:
+ return '{}\n{} packets lost.'.format(message,
+ lower_bound.loss_count)
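
The boundary-replacement rule near the top of this hunk is easier to follow with concrete numbers. Below is a minimal, self-contained sketch of the same decision logic (illustrative only, not part of the patch): plain (target_tr, loss_fraction) tuples stand in for ReceiveRateMeasurement, and the lower-external case handled earlier in the function, above this hunk, is omitted.

    # Sketch of the upper-external and internal branches shown above.
    def updated_bounds(old_lo, old_hi, meas, packet_loss_ratio):
        if meas[0] > old_hi[0]:
            # Upper external measurement: only useful when the old upper
            # bound was invalid (its loss did not exceed the ratio).
            if old_hi[1] <= packet_loss_ratio:
                return old_hi, meas   # old upper bound becomes lower bound
            return old_lo, old_hi     # interval unchanged
        # Internal measurement: replaced boundary depends on measured loss.
        if meas[1] > packet_loss_ratio:
            return old_lo, meas       # narrower, valid upper bound
        return meas, old_hi           # lower bound moves up

    # PDR search with 0.5% allowed loss, bounds at 1.0 and 2.0 Mpps:
    print(updated_bounds((1.0e6, 0.0), (2.0e6, 0.02), (1.5e6, 0.001), 0.005))
    # ((1500000.0, 0.001), (2000000.0, 0.02)) -- lower bound moved up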
diff --git a/yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py b/yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py
new file mode 100644
index 000000000..34a97f9fb
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/NdrPdrResult.py;hb=HEAD
+
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
+ ReceiveRateInterval
+
+
+class NdrPdrResult(object):
+ """Two measurement intervals, return value of search algorithms.
+
+ Partial fraction is NOT part of the result. Pdr interval should be valid
+ for all partial fractions implied by the interval."""
+
+ def __init__(self, ndr_interval, pdr_interval):
+ """Store the measured intervals after checking argument types.
+
+ :param ndr_interval: Object containing data for NDR part of the result.
+ :param pdr_interval: Object containing data for PDR part of the result.
+ :type ndr_interval: ReceiveRateInterval.ReceiveRateInterval
+ :type pdr_interval: ReceiveRateInterval.ReceiveRateInterval
+ """
+ # TODO: Type checking is not very pythonic,
+ # perhaps users can fix wrong usage without it?
+ if not isinstance(ndr_interval, ReceiveRateInterval):
+ raise TypeError("ndr_interval, is not a ReceiveRateInterval: "
+ "{ndr!r}".format(ndr=ndr_interval))
+ if not isinstance(pdr_interval, ReceiveRateInterval):
+ raise TypeError("pdr_interval, is not a ReceiveRateInterval: "
+ "{pdr!r}".format(pdr=pdr_interval))
+ self.ndr_interval = ndr_interval
+ self.pdr_interval = pdr_interval
+
+ def width_in_goals(self, relative_width_goal):
+ """Return a debug string related to current widths in logarithmic scale.
+
+ :param relative_width_goal: Upper bound times this is the goal
+ difference between upper bound and lower bound.
+ :type relative_width_goal: float
+ :returns: Message containing NDR and PDR widths in goals.
+ :rtype: str
+ """
+ return "ndr {ndr_in_goals}; pdr {pdr_in_goals}".format(
+ ndr_in_goals=self.ndr_interval.width_in_goals(relative_width_goal),
+ pdr_in_goals=self.pdr_interval.width_in_goals(relative_width_goal))
+
+ def __str__(self):
+ """Return string as tuple of named values."""
+ return "NDR={ndr!s};PDR={pdr!s}".format(
+ ndr=self.ndr_interval, pdr=self.pdr_interval)
+
+ def __repr__(self):
+ """Return string evaluable as a constructor call."""
+ return "NdrPdrResult(ndr_interval={ndr!r},pdr_interval={pdr!r})".format(
+ ndr=self.ndr_interval, pdr=self.pdr_interval)
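
A short usage sketch, assuming the ReceiveRateMeasurement and ReceiveRateInterval classes from the sibling modules added below (rates and counts are made up for illustration):

    from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \
        NdrPdrResult
    from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
        ReceiveRateInterval
    from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
        ReceiveRateMeasurement

    # Two 30 s trials bounding the NDR and two bounding the PDR.
    ndr = ReceiveRateInterval(
        ReceiveRateMeasurement(30, 9.0e6, 270000000, 0),
        ReceiveRateMeasurement(30, 10.0e6, 300000000, 1500))
    pdr = ReceiveRateInterval(
        ReceiveRateMeasurement(30, 10.0e6, 300000000, 1500),
        ReceiveRateMeasurement(30, 12.0e6, 360000000, 9000000))
    result = NdrPdrResult(ndr, pdr)
    print(result)                        # NDR=[...);PDR=[...)
    print(result.width_in_goals(0.005))  # widths as multiples of the goal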
diff --git a/yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py b/yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py
new file mode 100644
index 000000000..517a99c1f
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/ReceiveRateInterval.py;hb=HEAD
+
+import math
+
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
+ ReceiveRateMeasurement
+
+
+class ReceiveRateInterval(object):
+ """Structure defining two Rr measurements, and their relation."""
+
+ def __init__(self, measured_low, measured_high):
+ """Store the bound measurements after checking argument types.
+
+ :param measured_low: Measurement for the lower bound.
+ :param measured_high: Measurement for the upper bound.
+ :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
+ :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
+ """
+ # TODO: Type checking is not very pythonic,
+ # perhaps users can fix wrong usage without it?
+ if not isinstance(measured_low, ReceiveRateMeasurement):
+ raise TypeError("measured_low is not a ReceiveRateMeasurement: "
+ "{low!r}".format(low=measured_low))
+ if not isinstance(measured_high, ReceiveRateMeasurement):
+ raise TypeError("measured_high is not a ReceiveRateMeasurement: "
+ "{high!r}".format(high=measured_high))
+ self.measured_low = measured_low
+ self.measured_high = measured_high
+ # Declare secondary quantities to appease pylint.
+ self.abs_tr_width = None
+ """Absolute width of target transmit rate. Upper minus lower."""
+ self.rel_tr_width = None
+ """Relative width of target transmit rate. Absolute divided by upper."""
+ self.sort()
+
+ def sort(self):
+ """Sort bounds by target Tr, compute secondary quantities."""
+ if self.measured_low.target_tr > self.measured_high.target_tr:
+ self.measured_low, self.measured_high = (
+ self.measured_high, self.measured_low)
+ self.abs_tr_width = (
+ self.measured_high.target_tr - self.measured_low.target_tr)
+ self.rel_tr_width = round(
+ self.abs_tr_width / self.measured_high.target_tr, 5)
+
+ def width_in_goals(self, relative_width_goal):
+ """Return float value.
+
+ Relative width goal is some (negative) value on logarithmic scale.
+ Current relative width is another logarithmic value.
+ Return the latter divided by the former.
+ This is useful when investigating how surprising widths came to be.
+
+ :param relative_width_goal: Upper bound times this is the goal
+ difference between upper bound and lower bound.
+ :type relative_width_goal: float
+ :returns: Current width as logarithmic multiple of goal width [1].
+ :rtype: float
+ """
+ return round(math.log(1.0 - self.rel_tr_width) / math.log(
+ 1.0 - relative_width_goal), 5)
+
+ def __str__(self):
+ """Return string as half-open interval."""
+ return "[{low!s};{high!s})".format(
+ low=self.measured_low, high=self.measured_high)
+
+ def __repr__(self):
+ """Return string evaluable as a constructor call."""
+ return ("ReceiveRateInterval(measured_low={low!r}"
+ ",measured_high={high!r})".format(low=self.measured_low,
+ high=self.measured_high))
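
For example, bounds measured at 9.0 and 10.0 Mpps give abs_tr_width = 1.0 Mpps and rel_tr_width = 0.1. With a relative width goal of 0.005, width_in_goals returns log(1 - 0.1) / log(1 - 0.005) ≈ 21.02: on the logarithmic scale the interval is still about 21 goal-widths wide.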
diff --git a/yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py b/yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py
new file mode 100644
index 000000000..2c59ea104
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py;hb=HEAD
+
+
+class ReceiveRateMeasurement(object):
+ """Structure defining the result of single Rr measurement."""
+
+ def __init__(self, duration, target_tr, transmit_count, loss_count):
+ """Constructor, normalize primary and compute secondary quantities.
+
+ :param duration: Measurement duration [s].
+ :param target_tr: Target transmit rate [pps].
+ If bidirectional traffic is measured, this is bidirectional rate.
+ :param transmit_count: Number of packets transmitted [1].
+ :param loss_count: Number of packets transmitted but not received [1].
+ :type duration: float
+ :type target_tr: float
+ :type transmit_count: int
+ :type loss_count: int
+ """
+ self.duration = float(duration)
+ self.target_tr = float(target_tr)
+ self.transmit_count = int(transmit_count)
+ self.loss_count = int(loss_count)
+ self.receive_count = round(transmit_count - loss_count, 5)
+ self.transmit_rate = round(transmit_count / self.duration, 5)
+ self.loss_rate = round(loss_count / self.duration, 5)
+ self.receive_rate = round(self.receive_count / self.duration, 5)
+ self.loss_fraction = round(
+ float(self.loss_count) / self.transmit_count, 5)
+ # TODO: Do we want to store also the real time (duration + overhead)?
+
+ def __str__(self):
+ """Return string reporting input and loss fraction."""
+ return "d={dur!s},Tr={rate!s},Df={frac!s}".format(
+ dur=self.duration, rate=self.target_tr, frac=self.loss_fraction)
+
+ def __repr__(self):
+ """Return string evaluable as a constructor call."""
+ return ("ReceiveRateMeasurement(duration={dur!r},target_tr={rate!r}"
+ ",transmit_count={trans!r},loss_count={loss!r})".format(
+ dur=self.duration, rate=self.target_tr,
+ trans=self.transmit_count,
+ loss=self.loss_count))
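
The derived quantities are simple ratios; a quick check of the arithmetic (illustrative values only):

    from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
        ReceiveRateMeasurement

    m = ReceiveRateMeasurement(10.0, 1.0e6, 10000000, 5000)
    print(m.transmit_rate)  # 1000000.0 pps (10 M packets over 10 s)
    print(m.receive_count)  # 9995000
    print(m.loss_fraction)  # 0.0005 (5000 / 10000000)
    print(m)                # d=10.0,Tr=1000000.0,Df=0.0005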
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
deleted file mode 100644
index 70ce4ff03..000000000
--- a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import print_function
-import sys
-import logging
-
-import re
-from itertools import product
-
-log = logging.getLogger(__name__)
-
-IP_VERSION_4 = 4
-IP_VERSION_6 = 6
-
-
-class TrafficStreamHelper(object):
-
- TEMPLATE = '{0.traffic_item}/{0.stream}:{0.param_id}/{1}'
-
- def __init__(self, traffic_item, stream, param_id):
- super(TrafficStreamHelper, self).__init__()
- self.traffic_item = traffic_item
- self.stream = stream
- self.param_id = param_id
-
- def __getattr__(self, item):
- return self.TEMPLATE.format(self, item)
-
-
-class FramesizeHelper(object):
-
- def __init__(self):
- super(FramesizeHelper, self).__init__()
- self.weighted_pairs = []
- self.weighted_range_pairs = []
-
- @property
- def weighted_pairs_arg(self):
- return '-weightedPairs', self.weighted_pairs
-
- @property
- def weighted_range_pairs_arg(self):
- return '-weightedRangePairs', self.weighted_range_pairs
-
- def make_args(self, *args):
- return self.weighted_pairs_arg + self.weighted_range_pairs_arg + args
-
- def populate_data(self, framesize_data):
- for key, value in framesize_data.items():
- if value == '0':
- continue
-
- replaced = re.sub('[Bb]', '', key)
- self.weighted_pairs.extend([
- replaced,
- value,
- ])
- pairs = [
- replaced,
- replaced,
- value,
- ]
- self.weighted_range_pairs.append(pairs)
-
-
-class IxNextgen(object):
-
- STATS_NAME_MAP = {
- "traffic_item": 'Traffic Item',
- "Tx_Frames": 'Tx Frames',
- "Rx_Frames": 'Rx Frames',
- "Tx_Frame_Rate": 'Tx Frame Rate',
- "Rx_Frame_Rate": 'Tx Frame Rate',
- "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
- "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
- "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
- }
-
- PORT_STATS_NAME_MAP = {
- "stat_name": 'Stat Name',
- "Frames_Tx": 'Frames Tx.',
- "Valid_Frames_Rx": 'Valid Frames Rx.',
- "Frames_Tx_Rate": 'Frames Tx. Rate',
- "Valid_Frames_Rx_Rate": 'Valid Frames Rx. Rate',
- "Tx_Rate_Kbps": 'Tx. Rate (Kbps)',
- "Rx_Rate_Kbps": 'Rx. Rate (Kbps)',
- "Tx_Rate_Mbps": 'Tx. Rate (Mbps)',
- "Rx_Rate_Mbps": 'Rx. Rate (Mbps)',
- }
-
- LATENCY_NAME_MAP = {
- "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
- "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
- "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
- }
-
- RANDOM_MASK_MAP = {
- IP_VERSION_4: '0.0.0.255',
- IP_VERSION_6: '0:0:0:0:0:0:0:ff',
- }
-
- MODE_SEEDS_MAP = {
- 0: ('uplink', ['256', '2048']),
- }
-
- MODE_SEEDS_DEFAULT = 'downlink', ['2048', '256']
-
- @staticmethod
- def find_view_obj(view_name, views):
- edited_view_name = '::ixNet::OBJ-/statistics/view:"{}"'.format(view_name)
- return next((view for view in views if edited_view_name == view), '')
-
- @staticmethod
- def get_config(tg_cfg):
- card = []
- port = []
- external_interface = tg_cfg["vdu"][0]["external-interface"]
- for intf in external_interface:
- card_port0 = intf["virtual-interface"]["vpci"]
- card0, port0 = card_port0.split(':')[:2]
- card.append(card0)
- port.append(port0)
-
- cfg = {
- 'py_lib_path': tg_cfg["mgmt-interface"]["tg-config"]["py_lib_path"],
- 'machine': tg_cfg["mgmt-interface"]["ip"],
- 'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
- 'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
- 'cards': card,
- 'ports': port,
- 'output_dir': tg_cfg["mgmt-interface"]["tg-config"]["dut_result_dir"],
- 'version': tg_cfg["mgmt-interface"]["tg-config"]["version"],
- 'bidir': True,
- }
-
- return cfg
-
- def __init__(self, ixnet=None):
- self.ixnet = ixnet
- self._objRefs = dict()
- self._cfg = None
- self._logger = logging.getLogger(__name__)
- self._params = None
- self._bidir = None
-
- def iter_over_get_lists(self, x1, x2, y2, offset=0):
- for x in self.ixnet.getList(x1, x2):
- y_list = self.ixnet.getList(x, y2)
- for i, y in enumerate(y_list, offset):
- yield x, y, i
-
- def set_random_ip_multi_attribute(self, ipv4, seed, fixed_bits, random_mask, l3_count):
- self.ixnet.setMultiAttribute(
- ipv4,
- '-seed', str(seed),
- '-fixedBits', str(fixed_bits),
- '-randomMask', str(random_mask),
- '-valueType', 'random',
- '-countValue', str(l3_count))
-
- def set_random_ip_multi_attributes(self, ip, version, seeds, l3):
- try:
- random_mask = self.RANDOM_MASK_MAP[version]
- except KeyError:
- raise ValueError('Unknown version %s' % version)
-
- l3_count = l3['count']
- if "srcIp" in ip:
- fixed_bits = l3['srcip4']
- self.set_random_ip_multi_attribute(ip, seeds[0], fixed_bits, random_mask, l3_count)
- if "dstIp" in ip:
- fixed_bits = l3['dstip4']
- self.set_random_ip_multi_attribute(ip, seeds[1], fixed_bits, random_mask, l3_count)
-
- def add_ip_header(self, params, version):
- for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
- iter1 = (v['outer_l3'] for v in params.values() if str(v['id']) == str(i))
- try:
- l3 = next(iter1, {})
- seeds = self.MODE_SEEDS_MAP.get(i, self.MODE_SEEDS_DEFAULT)[1]
- except (KeyError, IndexError):
- continue
-
- for ip, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
- self.set_random_ip_multi_attributes(ip_bits, version, seeds, l3)
-
- self.ixnet.commit()
-
- def _connect(self, tg_cfg):
- self._cfg = self.get_config(tg_cfg)
-
- sys.path.append(self._cfg["py_lib_path"])
- # Import IxNetwork after getting ixia lib path
- try:
- import IxNetwork
- except ImportError:
- raise
-
- self.ixnet = IxNetwork.IxNet()
-
- machine = self._cfg['machine']
- port = str(self._cfg['port'])
- version = str(self._cfg['version'])
- result = self.ixnet.connect(machine, '-port', port, '-version', version)
- return result
-
- def clear_ixia_config(self):
- self.ixnet.execute('newConfig')
-
- def load_ixia_profile(self, profile):
- self.ixnet.execute('loadConfig', self.ixnet.readFrom(profile))
-
- def ix_load_config(self, profile):
- self.clear_ixia_config()
- self.load_ixia_profile(profile)
-
- def ix_assign_ports(self):
- vports = self.ixnet.getList(self.ixnet.getRoot(), 'vport')
- ports = []
-
- chassis = self._cfg['chassis']
- ports = [(chassis, card, port) for card, port in
- zip(self._cfg['cards'], self._cfg['ports'])]
-
- vport_list = self.ixnet.getList("/", "vport")
- self.ixnet.execute('assignPorts', ports, [], vport_list, True)
- self.ixnet.commit()
-
- for vport in vports:
- if self.ixnet.getAttribute(vport, '-state') != 'up':
- log.error("Both thr ports are down...")
-
- def ix_update_frame(self, params):
- streams = ["configElement"]
-
- for param in params.values():
- framesize_data = FramesizeHelper()
- traffic_items = self.ixnet.getList('/traffic', 'trafficItem')
- param_id = param['id']
- for traffic_item, stream in product(traffic_items, streams):
- helper = TrafficStreamHelper(traffic_item, stream, param_id)
-
- self.ixnet.setMultiAttribute(helper.transmissionControl,
- '-type', '{0}'.format(param.get('traffic_type',
- 'continuous')),
- '-duration', '{0}'.format(param.get('duration',
- "30")))
-
- stream_frame_rate_path = helper.frameRate
- self.ixnet.setMultiAttribute(stream_frame_rate_path, '-rate', param['iload'])
- if param['outer_l2']['framesPerSecond']:
- self.ixnet.setMultiAttribute(stream_frame_rate_path,
- '-type', 'framesPerSecond')
-
- framesize_data.populate_data(param['outer_l2']['framesize'])
-
- make_attr_args = framesize_data.make_args('-incrementFrom', '66',
- '-randomMin', '66',
- '-quadGaussian', [],
- '-type', 'weightedPairs',
- '-presetDistribution', 'cisco',
- '-incrementTo', '1518')
-
- self.ixnet.setMultiAttribute(helper.frameSize, *make_attr_args)
-
- self.ixnet.commit()
-
- def update_ether_multi_attribute(self, ether, mac_addr):
- self.ixnet.setMultiAttribute(ether,
- '-singleValue', mac_addr,
- '-fieldValue', mac_addr,
- '-valueType', 'singleValue')
-
- def update_ether_multi_attributes(self, ether, l2):
- if "ethernet.header.destinationAddress" in ether:
- self.update_ether_multi_attribute(ether, str(l2.get('dstmac', "00:00:00:00:00:02")))
-
- if "ethernet.header.sourceAddress" in ether:
- self.update_ether_multi_attribute(ether, str(l2.get('srcmac', "00:00:00:00:00:01")))
-
- def ix_update_ether(self, params):
- for ti, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
- "configElement", 1):
- iter1 = (v['outer_l2'] for v in params.values() if str(v['id']) == str(index))
- try:
- l2 = next(iter1, {})
- except KeyError:
- continue
-
- for ip, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
- self.update_ether_multi_attributes(ether, l2)
-
- self.ixnet.commit()
-
- def ix_update_udp(self, params):
- pass
-
- def ix_update_tcp(self, params):
- pass
-
- def ix_start_traffic(self):
- tis = self.ixnet.getList('/traffic', 'trafficItem')
- for ti in tis:
- self.ixnet.execute('generate', [ti])
- self.ixnet.execute('apply', '/traffic')
- self.ixnet.execute('start', '/traffic')
-
- def ix_stop_traffic(self):
- tis = self.ixnet.getList('/traffic', 'trafficItem')
- for _ in tis:
- self.ixnet.execute('stop', '/traffic')
-
- def build_stats_map(self, view_obj, name_map):
- return {kl: self.execute_get_column_values(view_obj, kr) for kl, kr in name_map.items()}
-
- def execute_get_column_values(self, view_obj, name):
- return self.ixnet.execute('getColumnValues', view_obj, name)
-
- def ix_get_statistics(self):
- views = self.ixnet.getList('/statistics', 'view')
- stats = {}
- view_obj = self.find_view_obj("Traffic Item Statistics", views)
- stats = self.build_stats_map(view_obj, self.STATS_NAME_MAP)
-
- view_obj = self.find_view_obj("Port Statistics", views)
- ports_stats = self.build_stats_map(view_obj, self.PORT_STATS_NAME_MAP)
-
- view_obj = self.find_view_obj("Flow Statistics", views)
- stats["latency"] = self.build_stats_map(view_obj, self.LATENCY_NAME_MAP)
-
- return stats, ports_stats
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/__init__.py b/yardstick/network_services/libs/ixia_libs/ixnet/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/__init__.py
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
new file mode 100644
index 000000000..89a855480
--- /dev/null
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -0,0 +1,1132 @@
+# Copyright (c) 2016-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ipaddress
+import logging
+import re
+import collections
+
+import IxNetwork
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+log = logging.getLogger(__name__)
+
+IP_VERSION_4 = 4
+IP_VERSION_6 = 6
+
+PROTO_ETHERNET = 'ethernet'
+PROTO_IPV4 = 'ipv4'
+PROTO_IPV6 = 'ipv6'
+PROTO_UDP = 'udp'
+PROTO_TCP = 'tcp'
+PROTO_VLAN = 'vlan'
+
+SINGLE_VALUE = "singleValue"
+
+S_VLAN = 0
+C_VLAN = 1
+
+ETHER_TYPE_802_1ad = '0x88a8'
+
+TRAFFIC_STATUS_STARTED = 'started'
+TRAFFIC_STATUS_STOPPED = 'stopped'
+
+PROTOCOL_STATUS_UP = 'up'
+PROTOCOL_STATUS_DOWN = ['down', 'notStarted']
+
+SUPPORTED_PROTO = [PROTO_UDP]
+
+SUPPORTED_DSCP_CLASSES = [
+ 'defaultPHB',
+ 'classSelectorPHB',
+ 'assuredForwardingPHB',
+ 'expeditedForwardingPHB']
+
+SUPPORTED_TOS_FIELDS = [
+ 'precedence',
+ 'delay',
+ 'throughput',
+ 'reliability'
+]
+
+IP_PRIORITY_PATTERN = r'[^\w+]*.+(Raw priority|' \
+ 'Precedence|' \
+ 'Default PHB|' \
+ 'Class selector PHB|' \
+ 'Assured forwarding selector PHB|' \
+ 'Expedited forwarding PHB)'
+
+
+class Vlan(object):
+ def __init__(self,
+ vlan_id, vlan_id_step=None, vlan_id_direction='increment',
+ prio=None, prio_step=None, prio_direction='increment',
+ tp_id=None):
+ self.vlan_id = vlan_id
+ self.vlan_id_step = vlan_id_step
+ self.vlan_id_direction = vlan_id_direction
+ self.prio = prio
+ self.prio_step = prio_step
+ self.prio_direction = prio_direction
+ self.tp_id = tp_id
+
+
+# NOTE(ralonsoh): this pragma will be removed in the last patch of this series
+class IxNextgen(object): # pragma: no cover
+
+ PORT_STATS_NAME_MAP = {
+ "stat_name": 'Stat Name',
+ "port_name": 'Port Name',
+ "Frames_Tx": 'Frames Tx.',
+ "Valid_Frames_Rx": 'Valid Frames Rx.',
+ "Bytes_Tx": 'Bytes Tx.',
+ "Bytes_Rx": 'Bytes Rx.'
+ }
+
+ LATENCY_NAME_MAP = {
+ "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
+ "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
+ "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
+ }
+
+ FLOWS_STATS_NAME_MAP = {
+ "Tx_Port": 'Tx Port',
+ "VLAN-ID": 'VLAN:VLAN-ID',
+ "IP_Priority": re.compile(IP_PRIORITY_PATTERN),
+ "Flow_Group": 'Flow Group',
+ "Tx_Frames": 'Tx Frames',
+ "Rx_Frames": 'Rx Frames',
+ "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
+ "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
+ "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)'
+ }
+
+ PPPOX_CLIENT_PER_PORT_NAME_MAP = {
+ 'subs_port': 'Port',
+ 'Sessions_Up': 'Sessions Up',
+ 'Sessions_Down': 'Sessions Down',
+ 'Sessions_Not_Started': 'Sessions Not Started',
+ 'Sessions_Total': 'Sessions Total'
+ }
+
+ PORT_STATISTICS = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
+ FLOW_STATISTICS = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
+ PPPOX_CLIENT_PER_PORT = '::ixNet::OBJ-/statistics/view:"PPPoX Client Per Port"'
+
+ PPPOE_SCENARIO_STATS = {
+ 'port_statistics': PORT_STATISTICS,
+ 'flow_statistic': FLOW_STATISTICS,
+ 'pppox_client_per_port': PPPOX_CLIENT_PER_PORT
+ }
+
+ PPPOE_SCENARIO_STATS_MAP = {
+ 'port_statistics': PORT_STATS_NAME_MAP,
+ 'flow_statistic': FLOWS_STATS_NAME_MAP,
+ 'pppox_client_per_port': PPPOX_CLIENT_PER_PORT_NAME_MAP
+ }
+
+ @staticmethod
+ def get_config(tg_cfg):
+ card = []
+ port = []
+ external_interface = tg_cfg["vdu"][0]["external-interface"]
+ for intf in external_interface:
+ card_port0 = intf["virtual-interface"]["vpci"]
+ card0, port0 = card_port0.split(':')[:2]
+ card.append(card0)
+ port.append(port0)
+
+ cfg = {
+ 'machine': tg_cfg["mgmt-interface"]["ip"],
+ 'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
+ 'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
+ 'cards': card,
+ 'ports': port,
+ 'output_dir': tg_cfg["mgmt-interface"]["tg-config"]["dut_result_dir"],
+ 'version': tg_cfg["mgmt-interface"]["tg-config"]["version"],
+ 'bidir': True,
+ }
+
+ return cfg
+
+ def __init__(self): # pragma: no cover
+ self._ixnet = None
+ self._cfg = None
+ self._params = None
+ self._bidir = None
+
+ @property
+ def ixnet(self): # pragma: no cover
+ if self._ixnet:
+ return self._ixnet
+ raise exceptions.IxNetworkClientNotConnected()
+
+ def get_vports(self):
+ """Return the list of assigned ports (vport objects)"""
+ vports = self.ixnet.getList(self.ixnet.getRoot(), 'vport')
+ return vports
+
+ def get_static_interface(self, vport):
+ return self.ixnet.getList(vport, 'interface')
+
+ def _get_config_element_by_flow_group_name(self, flow_group_name):
+ """Get a config element using the flow group name
+
+ Each named flow group contains one config element (by configuration).
+ According to the documentation, "configElements" is a list and "each
+ item in this list is aligned to the sequential order of your endpoint
+ list".
+
+ :param flow_group_name: (str) flow group name; this parameter is
+ always a number (converted to string) starting
+ from "1".
+ :return: (str) config element reference ID or None.
+ """
+ traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')[0]
+ flow_groups = self.ixnet.getList(traffic_item, 'endpointSet')
+ for flow_group in flow_groups:
+ if (str(self.ixnet.getAttribute(flow_group, '-name')) ==
+ flow_group_name):
+ return traffic_item + '/configElement:' + flow_group_name
+
+ def _get_stack_item(self, flow_group_name, protocol_name):
+ """Return the stack item given the flow group name and the proto name
+
+ :param flow_group_name: (str) flow group name
+ :param protocol_name: (str) protocol name, referred to PROTO_*
+ constants
+ :return: list of stack item descriptors
+ """
+ celement = self._get_config_element_by_flow_group_name(flow_group_name)
+ if not celement:
+ raise exceptions.IxNetworkFlowNotPresent(
+ flow_group=flow_group_name)
+ stack_items = self.ixnet.getList(celement, 'stack')
+ return [s_i for s_i in stack_items if protocol_name in s_i]
+
+ def _get_field_in_stack_item(self, stack_item, field_name):
+ """Return the field in a stack item given the name
+
+ :param stack_item: (str) stack item descriptor
+ :param field_name: (str) field name
+ :return: (str) field descriptor
+ """
+ fields = self.ixnet.getList(stack_item, 'field')
+ for field in (field for field in fields if field_name in field):
+ return field
+ raise exceptions.IxNetworkFieldNotPresentInStackItem(
+ field_name=field_name, stack_item=stack_item)
+
+ def _get_traffic_state(self):
+ """Get traffic state"""
+ return self.ixnet.getAttribute(self.ixnet.getRoot() + 'traffic',
+ '-state')
+
+ def _get_protocol_status(self, proto):
+ """Get protocol status
+
+ :param proto: IxNet protocol str representation, e.g.:
+ '::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14'
+ :return: (list) protocol status: list of sessions protocol
+ statuses which include states 'up', 'down' and 'notStarted'
+ """
+ return self.ixnet.getAttribute(proto, '-sessionStatus')
+
+ def get_topology_device_groups(self, topology):
+ """Get list of device groups in topology
+
+ :param topology: (str) topology descriptor
+ :return: (list) list of device groups descriptors
+ """
+ return self.ixnet.getList(topology, 'deviceGroup')
+
+ def is_traffic_running(self):
+ """Returns true if traffic state == TRAFFIC_STATUS_STARTED"""
+ return self._get_traffic_state() == TRAFFIC_STATUS_STARTED
+
+ def is_traffic_stopped(self):
+ """Returns true if traffic state == TRAFFIC_STATUS_STOPPED"""
+ return self._get_traffic_state() == TRAFFIC_STATUS_STOPPED
+
+ def is_protocols_running(self, protocols):
+ """Returns true if all protocols statuses are PROTOCOL_STATUS_UP
+
+ :param protocols: list of protocols str representations, e.g.:
+ ['::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14', ...]
+ :return: (bool) True if all protocols status is 'up', False if any
+ protocol status is 'down' or 'notStarted'
+ """
+ return all(session_status == PROTOCOL_STATUS_UP for proto in protocols
+ for session_status in self._get_protocol_status(proto))
+
+ def is_protocols_stopped(self, protocols):
+ """Returns true if all protocols statuses are in PROTOCOL_STATUS_DOWN
+
+ :param protocols: list of protocols str representations, e.g.:
+ ['::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14', ...]
+ :return: (bool) True if all protocols status is 'down' or 'notStarted',
+ False if any protocol status is 'up'
+ """
+ return all(session_status in PROTOCOL_STATUS_DOWN for proto in protocols
+ for session_status in self._get_protocol_status(proto))
+
+ @staticmethod
+ def _parse_framesize(framesize):
+ """Parse "framesize" config param. to return a list of weighted pairs
+
+ :param framesize: dictionary of frame sizes and weights
+ :return: list of paired frame sizes and weights
+ """
+ weighted_range_pairs = []
+ for size, weight in ((s, w) for (s, w) in framesize.items()
+ if int(w) != 0):
+ size = int(size.upper().replace('B', ''))
+ weighted_range_pairs.append([size, size, int(weight)])
+ return weighted_range_pairs
+
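For instance, a framesize setting of {'64B': '10', '128B': '0', '512B': '90'} parses to [[64, 64, 10], [512, 512, 90]]: zero-weight sizes are dropped and each remaining size becomes a fixed [min, max, weight] range pair.
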
+ def iter_over_get_lists(self, x1, x2, y2, offset=0):
+ for x in self.ixnet.getList(x1, x2):
+ y_list = self.ixnet.getList(x, y2)
+ for i, y in enumerate(y_list, offset):
+ yield x, y, i
+
+ def connect(self, tg_cfg):
+ self._cfg = self.get_config(tg_cfg)
+ self._ixnet = IxNetwork.IxNet()
+
+ machine = self._cfg['machine']
+ port = str(self._cfg['port'])
+ version = str(self._cfg['version'])
+ return self.ixnet.connect(machine, '-port', port,
+ '-version', version)
+
+ def clear_config(self):
+ """Wipe out any possible configuration present in the client"""
+ self.ixnet.execute('newConfig')
+
+ def assign_ports(self):
+ """Create and assign vports for each physical port defined in config
+
+ This configuration is present in the IXIA profile file. E.g.:
+ name: trafficgen_1
+ role: IxNet
+ interfaces:
+ xe0:
+ vpci: "2:15" # Card:port
+ driver: "none"
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.0.0"
+ local_mac: "00:98:10:64:14:00"
+ xe1:
+ ...
+ """
+ chassis_ip = self._cfg['chassis']
+ ports = [(chassis_ip, card, port) for card, port in
+ zip(self._cfg['cards'], self._cfg['ports'])]
+
+ log.info('Create and assign vports: %s', ports)
+
+ vports = []
+ for _ in ports:
+ vports.append(self.ixnet.add(self.ixnet.getRoot(), 'vport'))
+ self.ixnet.commit()
+
+ self.ixnet.execute('assignPorts', ports, [], vports, True)
+ self.ixnet.commit()
+
+ for vport in vports:
+ if self.ixnet.getAttribute(vport, '-state') != 'up':
+ log.warning('Port %s is down', vport)
+
+ def _create_traffic_item(self, traffic_type='raw'):
+ """Create the traffic item to hold the flow groups
+
+ The traffic item tracking by "Traffic Item" is enabled to retrieve the
+ latency statistics.
+ """
+ log.info('Create the traffic item "RFC2544"')
+ traffic_item = self.ixnet.add(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')
+ self.ixnet.setMultiAttribute(traffic_item, '-name', 'RFC2544',
+ '-trafficType', traffic_type)
+ self.ixnet.commit()
+
+ traffic_item_id = self.ixnet.remapIds(traffic_item)[0]
+ self.ixnet.setAttribute(traffic_item_id + '/tracking',
+ '-trackBy', 'trafficGroupId0')
+ self.ixnet.commit()
+
+ def _create_flow_groups(self, uplink, downlink):
+ """Create the flow groups between the endpoints"""
+ traffic_item_id = self.ixnet.getList(self.ixnet.getRoot() + 'traffic',
+ 'trafficItem')[0]
+ log.info('Create the flow groups')
+
+ index = 0
+ for up, down in zip(uplink, downlink):
+ log.info('FGs: %s <--> %s', up, down)
+ endpoint_set_1 = self.ixnet.add(traffic_item_id, 'endpointSet')
+ endpoint_set_2 = self.ixnet.add(traffic_item_id, 'endpointSet')
+ self.ixnet.setMultiAttribute(
+ endpoint_set_1, '-name', str(index + 1),
+ '-sources', [up],
+ '-destinations', [down])
+ self.ixnet.setMultiAttribute(
+ endpoint_set_2, '-name', str(index + 2),
+ '-sources', [down],
+ '-destinations', [up])
+ self.ixnet.commit()
+ index += 2
+
+ def _append_procotol_to_stack(self, protocol_name, previous_element):
+ """Append a new element in the packet definition stack"""
+ protocol = (self.ixnet.getRoot() +
+ '/traffic/protocolTemplate:"{}"'.format(protocol_name))
+ self.ixnet.execute('append', previous_element, protocol)
+
+ def is_qinq(self, flow_data):
+ for traffic_type in flow_data:
+ if flow_data[traffic_type]['outer_l2'].get('QinQ'):
+ return True
+ return False
+
+ def _flows_settings(self, cfg):
+ flows_data = []
+ res = [key for key in cfg.keys() if key.split('_')[0] in ['uplink', 'downlink']]
+ for i in range(len(res)):
+ uplink = 'uplink_{}'.format(i)
+ downlink = 'downlink_{}'.format(i)
+ if uplink in res:
+ flows_data.append(cfg[uplink])
+ if downlink in res:
+ flows_data.append(cfg[downlink])
+ return flows_data
+
+ def _setup_config_elements(self, traffic_profile, add_default_proto=True):
+ """Setup the config elements
+
+ The traffic item is configured to allow individual configurations per
+ config element. The default frame configuration is applied:
+ Ethernet II: added by default
+ IPv4: element to add
+ UDP: element to add
+ Payload: added by default
+ Ethernet II (Trailer): added by default
+ :return:
+ """
+ traffic_item_id = self.ixnet.getList(self.ixnet.getRoot() + 'traffic',
+ 'trafficItem')[0]
+ log.info('Split the frame rate distribution per config element')
+ config_elements = self.ixnet.getList(traffic_item_id, 'configElement')
+ flows = self._flows_settings(traffic_profile.params)
+ # TODO: check length of both lists, it should be equal!!!
+ for config_element, flow_data in zip(config_elements, flows):
+ self.ixnet.setAttribute(config_element + '/frameRateDistribution',
+ '-portDistribution', 'splitRateEvenly')
+ self.ixnet.setAttribute(config_element + '/frameRateDistribution',
+ '-streamDistribution', 'splitRateEvenly')
+ self.ixnet.commit()
+ if add_default_proto:
+ self._append_procotol_to_stack(
+ PROTO_UDP, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_IPV4, config_element + '/stack:"ethernet-1"')
+ if self.is_qinq(flow_data):
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+
+ def create_traffic_model(self, uplink_ports, downlink_ports, traffic_profile):
+ """Create a traffic item and the needed flow groups
+
+ Each flow group inside the traffic item (only one is present)
+ represents the traffic between two ports:
+ (uplink) (downlink)
+ FlowGroup1: port1 -> port2
+ FlowGroup2: port1 <- port2
+ FlowGroup3: port3 -> port4
+ FlowGroup4: port3 <- port4
+ """
+ self._create_traffic_item('raw')
+ uplink_endpoints = [port + '/protocols' for port in uplink_ports]
+ downlink_endpoints = [port + '/protocols' for port in downlink_ports]
+ self._create_flow_groups(uplink_endpoints, downlink_endpoints)
+ self._setup_config_elements(traffic_profile=traffic_profile)
+
+ def create_ipv4_traffic_model(self, uplink_endpoints, downlink_endpoints,
+ traffic_profile):
+ """Create a traffic item and the needed flow groups
+
+ Each flow group inside the traffic item (only one is present)
+ represents the traffic between two topologies:
+ (uplink) (downlink)
+ FlowGroup1: uplink1 -> downlink1
+ FlowGroup2: uplink1 <- downlink1
+ FlowGroup3: uplink2 -> downlink2
+ FlowGroup4: uplink2 <- downlink2
+ """
+ self._create_traffic_item('ipv4')
+ self._create_flow_groups(uplink_endpoints, downlink_endpoints)
+ self._setup_config_elements(traffic_profile=traffic_profile,
+ add_default_proto=False)
+
+ def _update_frame_mac(self, ethernet_descriptor, field, mac_address):
+ """Set the MAC address in a config element stack Ethernet field
+
+ :param ethernet_descriptor: (str) ethernet descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"
+ :param field: (str) field name, e.g.: destinationAddress
+ :param mac_address: (str) MAC address
+ """
+ field_descriptor = self._get_field_in_stack_item(ethernet_descriptor,
+ field)
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-singleValue', mac_address,
+ '-fieldValue', mac_address,
+ '-valueType', 'singleValue')
+ self.ixnet.commit()
+
+ def update_frame(self, traffic, duration):
+ """Update the L2 frame
+
+ This function updates the L2 frame options:
+ - Traffic type: "continuous", "fixedDuration".
+ - Duration: in case of traffic_type="fixedDuration", amount of seconds
+ to inject traffic.
+ - Rate: in frames per seconds or percentage.
+ - Type of rate: "framesPerSecond" or "percentLineRate" ("bitsPerSecond"
+ is not used).
+ - Frame size: custom IMIX [1] definition; a list of packet size in
+ bytes and the weight. E.g.:
+ [[64, 64, 10], [128, 128, 15], [512, 512, 5]]
+
+ [1] https://en.wikipedia.org/wiki/Internet_Mix
+
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group.
+ :param duration: (int) injection time in seconds.
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ config_element = self._get_config_element_by_flow_group_name(fg_id)
+ if not config_element:
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ type = traffic_param.get('traffic_type', 'fixedDuration')
+ rate_unit = (
+ 'framesPerSecond' if traffic_param['rate_unit'] ==
+ tp_base.TrafficProfileConfig.RATE_FPS else 'percentLineRate')
+ weighted_range_pairs = self._parse_framesize(
+ traffic_param['outer_l2'].get('framesize', {}))
+ srcmac = str(traffic_param['outer_l2'].get('srcmac', '00:00:00:00:00:01'))
+ dstmac = str(traffic_param['outer_l2'].get('dstmac', '00:00:00:00:00:02'))
+
+ if traffic_param['outer_l2'].get('QinQ'):
+ s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
+ c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'etherType')
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', ETHER_TYPE_802_1ad,
+ '-fieldValue', ETHER_TYPE_802_1ad,
+ '-valueType', SINGLE_VALUE)
+
+ self._update_vlan_tag(fg_id, s_vlan, S_VLAN)
+ self._update_vlan_tag(fg_id, c_vlan, C_VLAN)
+
+ self.ixnet.setMultiAttribute(
+ config_element + '/transmissionControl',
+ '-type', type, '-duration', duration)
+
+ self.ixnet.setMultiAttribute(
+ config_element + '/frameRate',
+ '-rate', traffic_param['rate'], '-type', rate_unit)
+
+ if len(weighted_range_pairs):
+ self.ixnet.setMultiAttribute(
+ config_element + '/frameSize',
+ '-type', 'weightedPairs',
+ '-weightedRangePairs', weighted_range_pairs)
+
+ self.ixnet.commit()
+
+ if dstmac:
+ self._update_frame_mac(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'destinationAddress', dstmac)
+ if srcmac:
+ self._update_frame_mac(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'sourceAddress', srcmac)
+
+ def _update_vlan_tag(self, fg_id, params, vlan=0):
+ field_to_param_map = {
+ 'vlanUserPriority': 'priority',
+ 'cfi': 'cfi',
+ 'vlanID': 'id'
+ }
+ for field, param in field_to_param_map.items():
+ value = params.get(param)
+ if value:
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_VLAN)[vlan],
+ field)
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', value,
+ '-fieldValue', value,
+ '-valueType', SINGLE_VALUE)
+
+ self.ixnet.commit()
+
+ def _update_ipv4_address(self, ip_descriptor, field, ip_address, seed,
+ mask, count):
+ """Set the IPv4 address in a config element stack IP field
+
+ :param ip_descriptor: (str) IP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"
+ :param field: (str) field name, e.g.: srcIp, dstIp
+ :param ip_address: (str) IP address
+ :param seed: (int) seed length
+ :param mask: (int) IP address mask length
+ :param count: (int) number of random IPs to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(ip_descriptor,
+ field)
+ random_mask = str(ipaddress.IPv4Address(
+ 2**(ipaddress.IPV4LENGTH - mask) - 1).compressed)
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-seed', seed,
+ '-fixedBits', ip_address,
+ '-randomMask', random_mask,
+ '-valueType', 'random',
+ '-countValue', count)
+ self.ixnet.commit()
+
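As a concrete example, mask=24 yields 2**(32 - 24) - 1 = 255, so random_mask becomes '0.0.0.255': the upper 24 bits stay fixed at ip_address while the low 8 bits are randomized across the count generated values.
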
+ def update_ip_packet(self, traffic):
+ """Update the IP packet
+
+ NOTE: Only IPv4 is currently supported.
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group.
+ """
+ # NOTE(ralonsoh): L4 configuration is not set.
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ if traffic_param['outer_l3']:
+ count = traffic_param['outer_l3']['count']
+ srcip = traffic_param['outer_l3']['srcip']
+ dstip = traffic_param['outer_l3']['dstip']
+ srcseed = traffic_param['outer_l3']['srcseed']
+ dstseed = traffic_param['outer_l3']['dstseed']
+ srcmask = traffic_param['outer_l3']['srcmask'] \
+ or ipaddress.IPV4LENGTH
+ dstmask = traffic_param['outer_l3']['dstmask'] \
+ or ipaddress.IPV4LENGTH
+ priority = traffic_param['outer_l3']['priority']
+
+ if srcip:
+ self._update_ipv4_address(
+ self._get_stack_item(fg_id, PROTO_IPV4)[0],
+ 'srcIp', str(srcip), srcseed, srcmask, count)
+ if dstip:
+ self._update_ipv4_address(
+ self._get_stack_item(fg_id, PROTO_IPV4)[0],
+ 'dstIp', str(dstip), dstseed, dstmask, count)
+ if priority:
+ self._update_ipv4_priority(
+ self._get_stack_item(fg_id, PROTO_IPV4)[0], priority)
+
+ def _update_ipv4_priority(self, ip_descriptor, priority):
+ """Set the IPv4 priority in a config element stack IP field
+
+ :param ip_descriptor: (str) IP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"
+ :param priority: (dict) priority configuration from traffic profile, e.g.:
+ {'tos': {'precedence': [1, 4, 7]}}
+ """
+ if priority.get('raw'):
+ priority_field = self._get_field_in_stack_item(ip_descriptor,
+ 'priority.raw')
+ self._set_priority_field(priority_field, priority['raw'])
+
+ elif priority.get('dscp'):
+ for field, value in priority['dscp'].items():
+ if field in SUPPORTED_DSCP_CLASSES:
+ priority_field = self._get_field_in_stack_item(
+ ip_descriptor,
+ 'priority.ds.phb.{field}.{field}'.format(field=field))
+ self._set_priority_field(priority_field, value)
+
+ elif priority.get('tos'):
+ for field, value in priority['tos'].items():
+ if field in SUPPORTED_TOS_FIELDS:
+ priority_field = self._get_field_in_stack_item(
+ ip_descriptor, 'priority.tos.' + field)
+ self._set_priority_field(priority_field, value)
+
+ def _set_priority_field(self, field_descriptor, value):
+ """Set the priority field described by field_descriptor
+
+ :param field_descriptor: (str) field descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/ \
+ field:"ipv4.header.priority.raw-3
+ :param value: (list, int) list of integers or single integer value
+ """
+ if isinstance(value, list):
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-valueList', value,
+ '-activeFieldChoice', 'true',
+ '-valueType', 'valueList')
+ else:
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-activeFieldChoice', 'true',
+ '-singleValue', str(value))
+ self.ixnet.commit()
+
+ def update_l4(self, traffic):
+ """Update the L4 headers
+
+ NOTE: Only UDP is currently supported
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ proto = traffic_param['outer_l3'].get('proto')
+ if not (proto and traffic_param['outer_l4']):
+ continue
+
+ if proto not in SUPPORTED_PROTO:
+ raise exceptions.IXIAUnsupportedProtocol(protocol=proto)
+
+ count = traffic_param['outer_l4']['count']
+ seed = traffic_param['outer_l4']['seed']
+
+ srcport = traffic_param['outer_l4']['srcport']
+ srcmask = traffic_param['outer_l4']['srcportmask']
+
+ dstport = traffic_param['outer_l4']['dstport']
+ dstmask = traffic_param['outer_l4']['dstportmask']
+
+ if proto == PROTO_UDP:
+ if srcport:
+ self._update_udp_port(
+ self._get_stack_item(fg_id, proto)[0],
+ 'srcPort', srcport, seed, srcmask, count)
+ if dstport:
+ self._update_udp_port(
+ self._get_stack_item(fg_id, proto)[0],
+ 'dstPort', dstport, seed, dstmask, count)
+
+ def _update_udp_port(self, descriptor, field, value,
+ seed=1, mask=0, count=1):
+ """Set the UDP port in a config element stack UDP field
+
+ :param descriptor: (str) UDP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"udp-3"
+ :param field: (str) field name, e.g.: srcPort, dstPort
+ :param value: (int) UDP port fixed bits
+ :param seed: (int) seed length
+ :param mask: (int) UDP port mask
+ :param count: (int) number of random ports to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(descriptor, field)
+
+ if mask == 0:
+ seed = count = 1
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-seed', seed,
+ '-fixedBits', value,
+ '-randomMask', mask,
+ '-valueType', 'random',
+ '-countValue', count)
+
+ self.ixnet.commit()
+
+ def _build_stats_map(self, view_obj, name_map):
+ return {data_yardstick: self.ixnet.execute(
+ 'getColumnValues', view_obj, data_ixia)
+ for data_yardstick, data_ixia in name_map.items()}
+
+ def _get_view_page_stats(self, view_obj):
+ """Get full view page stats
+
+ :param view_obj: view object, e.g.
+ '::ixNet::OBJ-/statistics/view:"Port Statistics"'
+ :return: (list) List of dicts. Each dict represents view page row
+ """
+ view = view_obj + '/page'
+ column_headers = self.ixnet.getAttribute(view, '-columnCaptions')
+ view_rows = self.ixnet.getAttribute(view, '-rowValues')
+ view_page = [dict(zip(column_headers, row[0])) for row in view_rows]
+ return view_page
+
+ def _set_egress_flow_tracking(self, encapsulation, offset):
+ """Set egress flow tracking options
+
+ :param encapsulation: encapsulation type
+ :type encapsulation: str, e.g. 'Ethernet'
+ :param offset: offset type
+ :type offset: str, e.g. 'IPv4 TOS Precedence (3 bits)'
+ """
+ traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')[0]
+ # Enable Egress Tracking
+ self.ixnet.setAttribute(traffic_item, '-egressEnabled', True)
+ self.ixnet.commit()
+
+ # Set encapsulation type
+ enc_obj = self.ixnet.getList(traffic_item, 'egressTracking')[0]
+ self.ixnet.setAttribute(enc_obj, '-encapsulation', encapsulation)
+
+ # Set offset
+ self.ixnet.setAttribute(enc_obj, '-offset', offset)
+ self.ixnet.commit()
+
+ def set_flow_tracking(self, track_by):
+ """Set flow tracking options
+
+ :param track_by: list of tracking fields
+ :type track_by: list, e.g. ['vlanVlanId0','ipv4Precedence0']
+ """
+ traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')[0]
+ self.ixnet.setAttribute(traffic_item + '/tracking', '-trackBy', track_by)
+ self.ixnet.commit()
+
+ def get_statistics(self):
+ """Retrieve port and flow statistics
+
+ "Port Statistics" parameters are stored in self.PORT_STATS_NAME_MAP.
+ "Flow Statistics" parameters are stored in self.LATENCY_NAME_MAP.
+
+ :return: dictionary with the statistics; the keys of this dictionary
+ are PORT_STATS_NAME_MAP and LATENCY_NAME_MAP keys.
+ """
+ stats = self._build_stats_map(self.PORT_STATISTICS,
+ self.PORT_STATS_NAME_MAP)
+ stats.update(self._build_stats_map(self.FLOW_STATISTICS,
+ self.LATENCY_NAME_MAP))
+ return stats
+
+ def get_pppoe_scenario_statistics(self):
+ """Retrieve port, flow and PPPoE subscribers statistics"""
+ stats = collections.defaultdict(list)
+ result = collections.defaultdict(list)
+ for stat, view in self.PPPOE_SCENARIO_STATS.items():
+ # Get view total pages number
+ total_pages = self.ixnet.getAttribute(
+ view + '/page', '-totalPages')
+ # Collect stats from all view pages
+ for page in range(1, int(total_pages) + 1):
+ current_page = int(self.ixnet.getAttribute(
+ view + '/page', '-currentPage'))
+ if page != int(current_page):
+ self.ixnet.setAttribute(view + '/page', '-currentPage',
+ str(page))
+ self.ixnet.commit()
+ page_data = self._get_view_page_stats(view)
+ stats[stat].extend(page_data)
+ # Filter collected views stats
+ for stat in stats:
+ for view_row in stats[stat]:
+ filtered_row = {}
+ for key, value in self.PPPOE_SCENARIO_STATS_MAP[stat].items():
+ if isinstance(value, str):
+ filtered_row.update({key: view_row[value]})
+ # Handle keys whose values are matched by a regex
+ else:
+ for k in view_row.keys():
+ if value.match(k):
+ value = value.match(k).group()
+ filtered_row.update({key: view_row[value]})
+ break
+ result[stat].append(filtered_row)
+ return result
+
+ def start_protocols(self):
+ self.ixnet.execute('startAllProtocols')
+
+ def stop_protocols(self):
+ self.ixnet.execute('stopAllProtocols')
+
+ def start_traffic(self):
+ """Start the traffic injection in the traffic item
+
+ By configuration, there is only one traffic item. This function returns
+ when the traffic state is TRAFFIC_STATUS_STARTED.
+ """
+ traffic_items = self.ixnet.getList('/traffic', 'trafficItem')
+ if self.is_traffic_running():
+ self.ixnet.execute('stop', '/traffic')
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.is_traffic_stopped())
+
+ self.ixnet.execute('generate', traffic_items)
+ self.ixnet.execute('apply', '/traffic')
+ self.ixnet.execute('start', '/traffic')
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.is_traffic_running())
+
+ def add_topology(self, name, vports):
+ log.debug("add_topology: name='%s' ports='%s'", name, vports)
+ obj = self.ixnet.add(self.ixnet.getRoot(), 'topology')
+ self.ixnet.setMultiAttribute(obj, '-name', name, '-vports', vports)
+ self.ixnet.commit()
+ return obj
+
+ def add_device_group(self, topology, name, multiplier):
+ log.debug("add_device_group: tpl='%s', name='%s', multiplier='%s'",
+ topology, name, multiplier)
+
+ obj = self.ixnet.add(topology, 'deviceGroup')
+ self.ixnet.setMultiAttribute(obj, '-name', name, '-multiplier',
+ multiplier)
+ self.ixnet.commit()
+ return obj
+
+ def add_ethernet(self, dev_group, name):
+ log.debug(
+ "add_ethernet: device_group='%s' name='%s'", dev_group, name)
+ obj = self.ixnet.add(dev_group, 'ethernet')
+ self.ixnet.setMultiAttribute(obj, '-name', name)
+ self.ixnet.commit()
+ return obj
+
+ def _create_vlans(self, ethernet, count):
+ self.ixnet.setMultiAttribute(ethernet, '-useVlans', 'true')
+ self.ixnet.setMultiAttribute(ethernet, '-vlanCount', count)
+ self.ixnet.commit()
+
+ def _configure_vlans(self, ethernet, vlans):
+ vlans_obj = self.ixnet.getList(ethernet, 'vlan')
+ for i, vlan_obj in enumerate(vlans_obj):
+ if vlans[i].vlan_id_step is not None:
+ vlan_id_obj = self.ixnet.getAttribute(vlan_obj, '-vlanId')
+ self.ixnet.setMultiAttribute(vlan_id_obj, '-clearOverlays',
+ 'true', '-pattern', 'counter')
+ vlan_id_counter = self.ixnet.add(vlan_id_obj, 'counter')
+ self.ixnet.setMultiAttribute(vlan_id_counter, '-start',
+ vlans[i].vlan_id, '-step',
+ vlans[i].vlan_id_step,
+ '-direction',
+ vlans[i].vlan_id_direction)
+ else:
+ vlan_id_obj = self.ixnet.getAttribute(vlan_obj, '-vlanId')
+ self.ixnet.setMultiAttribute(vlan_id_obj + '/singleValue',
+ '-value', vlans[i].vlan_id)
+
+ if vlans[i].prio_step is not None:
+ prio_obj = self.ixnet.getAttribute(vlan_obj, '-priority')
+ self.ixnet.setMultiAttribute(prio_obj, '-clearOverlays', 'true',
+ '-pattern', 'counter')
+ prio_counter = self.ixnet.add(prio_obj, 'counter')
+ self.ixnet.setMultiAttribute(prio_counter,
+ '-start', vlans[i].prio,
+ '-step', vlans[i].prio_step,
+ '-direction', vlans[i].prio_direction)
+ elif vlans[i].prio is not None:
+ prio_obj = self.ixnet.getAttribute(vlan_obj, '-priority')
+ self.ixnet.setMultiAttribute(prio_obj + '/singleValue',
+ '-value', vlans[i].prio)
+
+ if vlans[i].tp_id is not None:
+ tp_id_obj = self.ixnet.getAttribute(vlan_obj, '-tpid')
+ self.ixnet.setMultiAttribute(tp_id_obj + '/singleValue',
+ '-value', vlans[i].tp_id)
+
+ self.ixnet.commit()
+
+ def add_vlans(self, ethernet, vlans):
+ log.debug("add_vlans: ethernet='%s'", ethernet)
+
+ if vlans is None or len(vlans) == 0:
+ raise RuntimeError(
+ "Invalid 'vlans' argument. Expected list of Vlan instances.")
+
+ self._create_vlans(ethernet, len(vlans))
+ self._configure_vlans(ethernet, vlans)
+
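
A usage sketch for add_vlans, assuming api is an instance of this class and that the Vlan objects (defined elsewhere in Yardstick) expose the attributes read above, i.e. vlan_id, vlan_id_step, vlan_id_direction, prio, prio_step, prio_direction and tp_id:

    # Hypothetical stand-in carrying only the attributes that
    # _configure_vlans() reads; the real Vlan class lives elsewhere.
    class VlanStub(object):
        def __init__(self, **kwargs):
            defaults = dict(vlan_id=1, vlan_id_step=None,
                            vlan_id_direction='increment', prio=None,
                            prio_step=None, prio_direction='increment',
                            tp_id=None)
            defaults.update(kwargs)
            self.__dict__.update(defaults)

    # Outer tag counts up from 100 per device, inner tag is fixed.
    vlans = [VlanStub(vlan_id=100, vlan_id_step=1),
             VlanStub(vlan_id=2000, prio=5)]
    api.add_vlans(ethernet_obj, vlans)  # ethernet_obj from add_ethernet()
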
+ def add_ipv4(self, ethernet, name='',
+ addr=None, addr_step=None, addr_direction='increment',
+ prefix=None, prefix_step=None, prefix_direction='increment',
+ gateway=None, gw_step=None, gw_direction='increment'):
+ log.debug("add_ipv4: ethernet='%s' name='%s'", ethernet, name)
+ obj = self.ixnet.add(ethernet, 'ipv4')
+ if name != '':
+ self.ixnet.setAttribute(obj, '-name', name)
+ self.ixnet.commit()
+
+ if addr_step is not None:
+ # handle counter pattern
+ _address = self.ixnet.getAttribute(obj, '-address')
+ self.ixnet.setMultiAttribute(_address, '-clearOverlays', 'true',
+ '-pattern', 'counter')
+
+ address_counter = self.ixnet.add(_address, 'counter')
+ self.ixnet.setMultiAttribute(address_counter,
+ '-start', addr,
+ '-step', addr_step,
+ '-direction', addr_direction)
+ elif addr is not None:
+ # handle single value
+ _address = self.ixnet.getAttribute(obj, '-address')
+ self.ixnet.setMultiAttribute(_address + '/singleValue', '-value',
+ addr)
+
+ if prefix_step is not None:
+ # handle counter pattern
+ _prefix = self.ixnet.getAttribute(obj, '-prefix')
+ self.ixnet.setMultiAttribute(_prefix, '-clearOverlays', 'true',
+ '-pattern', 'counter')
+ prefix_counter = self.ixnet.add(_prefix, 'counter')
+ self.ixnet.setMultiAttribute(prefix_counter,
+ '-start', prefix,
+ '-step', prefix_step,
+ '-direction', prefix_direction)
+ elif prefix is not None:
+ # handle single value
+ _prefix = self.ixnet.getAttribute(obj, '-prefix')
+ self.ixnet.setMultiAttribute(_prefix + '/singleValue', '-value',
+ prefix)
+
+ if gw_step is not None:
+ # handle counter pattern
+ _gateway = self.ixnet.getAttribute(obj, '-gatewayIp')
+ self.ixnet.setMultiAttribute(_gateway, '-clearOverlays', 'true',
+ '-pattern', 'counter')
+
+ gateway_counter = self.ixnet.add(_gateway, 'counter')
+ self.ixnet.setMultiAttribute(gateway_counter,
+ '-start', gateway,
+ '-step', gw_step,
+ '-direction', gw_direction)
+ elif gateway is not None:
+ # handle single value
+ _gateway = self.ixnet.getAttribute(obj, '-gatewayIp')
+ self.ixnet.setMultiAttribute(_gateway + '/singleValue', '-value',
+ gateway)
+
+ self.ixnet.commit()
+ return obj
+
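
A usage sketch for add_ipv4: giving a step turns the field into a counter pattern, otherwise a single value is set (api and eth are placeholders for the helper instance and an add_ethernet() result):

    # Addresses counting up from 10.0.0.2, one per simulated device,
    # with a fixed prefix and gateway.
    ipv4_obj = api.add_ipv4(eth, name='uplink ip',
                            addr='10.0.0.2', addr_step='0.0.0.1',
                            addr_direction='increment',
                            prefix='24', gateway='10.0.0.1')
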
+ def add_pppox_client(self, xproto, auth, user, pwd, enable_redial=True):
+ log.debug(
+ "add_pppox_client: xproto='%s', auth='%s', user='%s', pwd='%s'",
+ xproto, auth, user, pwd)
+ obj = self.ixnet.add(xproto, 'pppoxclient')
+ self.ixnet.commit()
+
+ if auth == 'pap':
+ auth_type = self.ixnet.getAttribute(obj, '-authType')
+ self.ixnet.setMultiAttribute(auth_type + '/singleValue', '-value',
+ auth)
+ pap_user = self.ixnet.getAttribute(obj, '-papUser')
+ self.ixnet.setMultiAttribute(pap_user + '/singleValue', '-value',
+ user)
+ pap_pwd = self.ixnet.getAttribute(obj, '-papPassword')
+ self.ixnet.setMultiAttribute(pap_pwd + '/singleValue', '-value',
+ pwd)
+ else:
+ raise NotImplementedError()
+
+ if enable_redial:
+ redial = self.ixnet.getAttribute(obj, '-enableRedial')
+ self.ixnet.setAttribute(redial + '/singleValue', '-value', 'true')
+
+ self.ixnet.commit()
+ return obj
+
+ def add_bgp(self, ipv4, dut_ip, local_as, bgp_type=None):
+ """Add BGP protocol"""
+ log.debug("add_bgp: ipv4='%s', dut_ip='%s', local_as='%s'", ipv4,
+ dut_ip, local_as)
+ obj = self.ixnet.add(ipv4, 'bgpIpv4Peer')
+ self.ixnet.commit()
+
+ # Set DUT IP address
+ dut_ip_addr = self.ixnet.getAttribute(obj, '-dutIp')
+ self.ixnet.setAttribute(dut_ip_addr + '/singleValue',
+ '-value', dut_ip)
+
+ # Set local AS number
+ local_as_number = self.ixnet.getAttribute(obj, '-localAs2Bytes')
+ self.ixnet.setAttribute(local_as_number + '/singleValue',
+ '-value', local_as)
+
+ if bgp_type:
+            # Set BGP type. If not specified, the default value is used.
+            # The default type is "internal".
+ bgp_type_field = self.ixnet.getAttribute(obj, '-type')
+ self.ixnet.setAttribute(bgp_type_field + '/singleValue',
+ '-value', bgp_type)
+ self.ixnet.commit()
+ return obj
+
+ def add_interface(self, vport, ip, mac=None, gateway=None):
+ """Add protocol interface to the vport"""
+ log.debug("add_interface: mac='%s', ip='%s', gateway='%s'", mac, ip,
+ gateway)
+ obj = self.ixnet.add(vport, 'interface')
+ self.ixnet.commit()
+
+ if mac is not None:
+ self.ixnet.setMultiAttribute(obj + '/ethernet', '-macAddress', mac)
+
+ ipv4 = self.ixnet.add(obj, 'ipv4')
+ self.ixnet.setMultiAttribute(ipv4, '-ip', ip)
+
+ if gateway is not None:
+ self.ixnet.setMultiAttribute(ipv4, '-gateway', gateway)
+
+ self.ixnet.commit()
+
+ self.ixnet.setMultiAttribute(obj, '-enabled', 'true')
+ self.ixnet.commit()
+
+ return obj
+
+ def add_static_ipv4(self, iface, vport, start_ip, count, mask='24'):
+ """Add static IP range to the interface"""
+ log.debug("add_static_ipv4: start_ip:'%s', count:'%s'",
+ start_ip, count)
+ obj = self.ixnet.add(vport + '/protocols/static', 'ip')
+
+ self.ixnet.setMultiAttribute(obj, '-protocolInterface', iface,
+ '-ipStart', start_ip, '-count', count,
+ '-mask', mask, '-enabled', 'true')
+ self.ixnet.commit()
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index adf4d8ae6..ba49ab5b4 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -13,27 +13,25 @@
# limitations under the License.
""" Resource collection definitions """
-from __future__ import absolute_import
-from __future__ import print_function
-
-import logging
-from itertools import chain
-
import errno
-import jinja2
+from itertools import chain
+import logging
+import multiprocessing
import os
import os.path
import re
-import multiprocessing
-import pkg_resources
+import jinja2
+import pkg_resources
from oslo_config import cfg
from oslo_utils.encodeutils import safe_decode
from yardstick import ssh
+from yardstick.common.exceptions import ResourceCommandError
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
+from yardstick.benchmark.contexts import heat
LOG = logging.getLogger(__name__)
@@ -50,12 +48,14 @@ class ResourceProfile(object):
This profile adds a resource at the beginning of the test session
"""
COLLECTD_CONF = "collectd.conf"
+ BAR_COLLECTD_CONF_PATH = "/opt/collectd/etc/collectd.conf.d/"
AMPQ_PORT = 5672
DEFAULT_INTERVAL = 25
DEFAULT_TIMEOUT = 3600
OVS_SOCKET_PATH = "/usr/local/var/run/openvswitch/db.sock"
- def __init__(self, mgmt, port_names=None, plugins=None, interval=None, timeout=None):
+ def __init__(self, mgmt, port_names=None, plugins=None,
+ interval=None, timeout=None, reset_mq_flag=True):
if plugins is None:
self.plugins = {}
@@ -80,6 +80,7 @@ class ResourceProfile(object):
# we need to save mgmt so we can connect to port 5672
self.mgmt = mgmt
self.connection = ssh.AutoConnectSSH.from_node(mgmt)
+ self._reset_mq_flag = reset_mq_flag
@classmethod
def make_from_node(cls, node, timeout):
@@ -90,9 +91,12 @@ class ResourceProfile(object):
plugins = collectd_options.get("plugins", {})
interval = collectd_options.get("interval")
- return cls(node, plugins=plugins, interval=interval, timeout=timeout)
+        reset_mq_flag = (node.get("ctx_type") !=
+                         heat.HeatContext.__context_type__)
+ return cls(node, plugins=plugins, interval=interval,
+ timeout=timeout, reset_mq_flag=reset_mq_flag)
- def check_if_sa_running(self, process):
+ def check_if_system_agent_running(self, process):
""" verify if system agent is running """
try:
err, pid, _ = self.connection.execute("pgrep -f %s" % process)
@@ -101,7 +105,7 @@ class ResourceProfile(object):
except OSError as e:
if e.errno in {errno.ECONNRESET}:
# if we can't connect to check, then we won't be able to connect to stop it
- LOG.exception("can't connect to host to check collectd status")
+ LOG.exception("Can't connect to host to check %s status", process)
return 1, None
raise
@@ -213,11 +217,14 @@ class ResourceProfile(object):
if not self.enable:
return {}
+ if self.check_if_system_agent_running("collectd")[0] != 0:
+ return {}
+
metric = {}
while not self._queue.empty():
metric.update(self._queue.get())
- msg = self.parse_collectd_result(metric)
- return msg
+
+ return self.parse_collectd_result(metric)
def _provide_config_file(self, config_file_path, nfvi_cfg, template_kwargs):
template = pkg_resources.resource_string("yardstick.network_services.nfvi",
@@ -242,6 +249,8 @@ class ResourceProfile(object):
"plugins": self.plugins,
}
self._provide_config_file(config_file_path, self.COLLECTD_CONF, kwargs)
+ self._provide_config_file(self.BAR_COLLECTD_CONF_PATH,
+ self.COLLECTD_CONF, kwargs)
def _setup_ovs_stats(self, connection):
try:
@@ -253,59 +262,93 @@ class ResourceProfile(object):
if status != 0:
LOG.error("cannot find OVS socket %s", socket_path)
+ def _reset_rabbitmq(self, connection):
+ # Reset amqp queue
+ LOG.debug("reset and setup amqp to collect data from collectd")
+ # ensure collectd.conf.d exists to avoid error/warning
+ cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d",
+ "sudo service rabbitmq-server restart",
+ "sudo rabbitmqctl stop_app",
+ "sudo rabbitmqctl reset",
+ "sudo rabbitmqctl start_app",
+ "sudo rabbitmqctl add_user admin admin",
+ "sudo rabbitmqctl authenticate_user admin admin",
+ "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+ ]
+
+ for cmd in cmd_list:
+ exit_status, _, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
+ def _check_rabbitmq_user(self, connection, user='admin'):
+ exit_status, stdout, _ = connection.execute("sudo rabbitmqctl list_users")
+ if exit_status == 0:
+ for line in stdout.split('\n')[1:]:
+ if line.split('\t')[0] == user:
+ return True
+
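
_check_rabbitmq_user parses the plain text emitted by rabbitmqctl; a standalone sketch of the same parsing, with sample output (format assumed from rabbitmqctl 3.x):

    stdout = ('Listing users ...\n'
              'admin\t[administrator]\n'
              'guest\t[administrator]\n')

    def user_listed(stdout, user='admin'):
        # Skip the "Listing users ..." banner, keep the first tab field.
        return any(line.split('\t')[0] == user
                   for line in stdout.split('\n')[1:])

    assert user_listed(stdout)
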
+ def _set_rabbitmq_admin_user(self, connection):
+ LOG.debug("add admin user to amqp")
+ cmd_list = ["sudo rabbitmqctl add_user admin admin",
+ "sudo rabbitmqctl authenticate_user admin admin",
+ "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+ ]
+
+ for cmd in cmd_list:
+ exit_status, stdout, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stdout=stdout, stderr=stderr)
+
+ def _start_rabbitmq(self, connection):
+ if self._reset_mq_flag:
+ self._reset_rabbitmq(connection)
+ else:
+ if not self._check_rabbitmq_user(connection):
+ self._set_rabbitmq_admin_user(connection)
+
+ # check stdout for "sudo rabbitmqctl status" command
+ cmd = "sudo rabbitmqctl status"
+ _, stdout, stderr = connection.execute(cmd)
+ if not re.search("RabbitMQ", stdout):
+            LOG.error("rabbitmqctl status does not list RabbitMQ among running apps")
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
def _start_collectd(self, connection, bin_path):
LOG.debug("Starting collectd to collect NFVi stats")
- connection.execute('sudo pkill -x -9 collectd')
collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
config_file_path = os.path.join(bin_path, "collectd", "etc")
- exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
+ self._prepare_collectd_conf(config_file_path)
+
+ connection.execute('sudo pkill -x -9 collectd')
+ cmd = "which %s > /dev/null 2>&1" % collectd_path
+ exit_status, _, stderr = connection.execute(cmd)
if exit_status != 0:
- LOG.warning("%s is not present disabling", collectd_path)
- # disable auto-provisioning because it requires Internet access
- # collectd_installer = os.path.join(bin_path, "collectd.sh")
- # provision_tool(connection, collectd)
- # http_proxy = os.environ.get('http_proxy', '')
- # https_proxy = os.environ.get('https_proxy', '')
- # connection.execute("sudo %s '%s' '%s'" % (
- # collectd_installer, http_proxy, https_proxy))
- return
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
if "ovs_stats" in self.plugins:
self._setup_ovs_stats(connection)
LOG.debug("Starting collectd to collect NFVi stats")
- # ensure collectd.conf.d exists to avoid error/warning
- connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
- self._prepare_collectd_conf(config_file_path)
-
- # Reset amqp queue
- LOG.debug("reset and setup amqp to collect data from collectd")
- connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
- connection.execute("sudo service rabbitmq-server start")
- connection.execute("sudo rabbitmqctl stop_app")
- connection.execute("sudo rabbitmqctl reset")
- connection.execute("sudo rabbitmqctl start_app")
- connection.execute("sudo service rabbitmq-server restart")
-
- LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
- connection.execute("sudo rabbitmqctl delete_user guest")
- connection.execute("sudo rabbitmqctl add_user admin admin")
- connection.execute("sudo rabbitmqctl authenticate_user admin admin")
- connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
-
LOG.debug("Start collectd service..... %s second timeout", self.timeout)
# intel_pmu plug requires large numbers of files open, so try to set
# ulimit -n to a large value
- connection.execute("sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path,
- timeout=self.timeout)
+
+ cmd = "sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path
+ exit_status, _, stderr = connection.execute(cmd, timeout=self.timeout)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
LOG.debug("Done")
def initiate_systemagent(self, bin_path):
""" Start system agent for NFVi collection on host """
if self.enable:
try:
+ self._start_rabbitmq(self.connection)
self._start_collectd(self.connection, bin_path)
- except Exception:
- LOG.exception("Exception during collectd start")
+ except ResourceCommandError as e:
+ LOG.exception("Exception during collectd and rabbitmq start: %s", str(e))
raise
def start(self):
@@ -327,7 +370,7 @@ class ResourceProfile(object):
self.amqp_client.terminate()
LOG.debug("Check if %s is running", agent)
- status, pid = self.check_if_sa_running(agent)
+ status, pid = self.check_if_system_agent_running(agent)
LOG.debug("status %s pid %s", status, pid)
if status != 0:
return
@@ -335,5 +378,7 @@ class ResourceProfile(object):
if pid:
self.connection.execute('sudo kill -9 "%s"' % pid)
self.connection.execute('sudo pkill -9 "%s"' % agent)
- self.connection.execute('sudo service rabbitmq-server stop')
- self.connection.execute("sudo rabbitmqctl stop_app")
+
+ if self._reset_mq_flag:
+ self.connection.execute('sudo service rabbitmq-server stop')
+ self.connection.execute("sudo rabbitmqctl stop_app")
diff --git a/yardstick/network_services/pipeline.py b/yardstick/network_services/pipeline.py
index d781ba0cd..4fbe7967f 100644
--- a/yardstick/network_services/pipeline.py
+++ b/yardstick/network_services/pipeline.py
@@ -18,9 +18,11 @@ import itertools
from six.moves import zip
+from yardstick.common import utils
+
FIREWALL_ADD_DEFAULT = "p {0} firewall add default 1"
FIREWALL_ADD_PRIO = """\
-p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 6 0xFF port 0"""
+p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 17 0xFF port 0"""
FLOW_ADD_QINQ_RULES = """\
p {0} flow add qinq 128 512 port 0 id 1
@@ -59,8 +61,7 @@ class PipelineRules(object):
self.add_rule(FIREWALL_ADD_PRIO, ip)
def add_firewall_script(self, ip):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for i in range(256):
ip_addr[-2] = str(i)
@@ -87,8 +88,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_MPLS, ip, mac_addr, index)
def add_route_script(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for index in range(0, 256, 8):
ip_addr[-2] = str(index)
@@ -101,8 +101,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_QINQ, ip, mask, mac_addr, index)
def add_route_script2(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
mask = 24
for i in range(0, 256):
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py
index e69de29bb..85b3d54a0 100644
--- a/yardstick/network_services/traffic_profile/__init__.py
+++ b/yardstick/network_services/traffic_profile/__init__.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+
+
+def register_modules():
+ modules = [
+ 'yardstick.network_services.traffic_profile.trex_traffic_profile',
+ 'yardstick.network_services.traffic_profile.fixed',
+ 'yardstick.network_services.traffic_profile.http',
+ 'yardstick.network_services.traffic_profile.http_ixload',
+ 'yardstick.network_services.traffic_profile.ixia_rfc2544',
+ 'yardstick.network_services.traffic_profile.prox_ACL',
+ 'yardstick.network_services.traffic_profile.prox_irq',
+ 'yardstick.network_services.traffic_profile.prox_binsearch',
+ 'yardstick.network_services.traffic_profile.prox_profile',
+ 'yardstick.network_services.traffic_profile.prox_ramp',
+ 'yardstick.network_services.traffic_profile.rfc2544',
+ 'yardstick.network_services.traffic_profile.pktgen',
+ 'yardstick.network_services.traffic_profile.landslide_profile',
+ 'yardstick.network_services.traffic_profile.vpp_rfc2544',
+ 'yardstick.network_services.traffic_profile.sip',
+ ]
+
+ for module in modules:
+ importlib.import_module(module)
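
register_modules makes sure every profile module is imported before TrafficProfile.get() scans the TrafficProfile subclasses; without the import, a subclass is invisible to the lookup. A minimal sketch of the pattern, with hypothetical names:

    import importlib

    class Base(object):
        @classmethod
        def get(cls, name):
            # Only subclasses whose defining module has been imported
            # show up in __subclasses__().
            return next(c for c in cls.__subclasses__()
                        if c.__name__ == name)

    # In Yardstick, register_modules() performs the equivalent of
    # importlib.import_module(...) for each listed module; here the
    # subclass is defined inline to keep the sketch runnable.
    class RFC2544Profile(Base):
        pass

    assert Base.get('RFC2544Profile') is RFC2544Profile
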
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index ad256b444..2fdf6ce4a 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -11,10 +11,61 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class for the generic traffic profile implementation """
-from __future__ import absolute_import
-from yardstick.common.utils import import_modules_from_package, itersubclasses
+import re
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+
+
+class TrafficProfileConfig(object):
+ """Class to contain the TrafficProfile class information
+
+ This object will parse and validate the traffic profile information.
+ """
+ DEFAULT_SCHEMA = 'nsb:traffic_profile:0.1'
+ DEFAULT_FRAME_RATE = '100'
+ DEFAULT_DURATION = 30
+ RATE_FPS = 'fps'
+ RATE_PERCENTAGE = '%'
+ RATE_REGEX = re.compile(r'([0-9]*\.[0-9]+|[0-9]+)\s*(fps|%)*(.*)')
+
+ def __init__(self, tp_config):
+ self.schema = tp_config.get('schema', self.DEFAULT_SCHEMA)
+ self.name = tp_config.get('name')
+ self.description = tp_config.get('description')
+ tprofile = tp_config['traffic_profile']
+ self.traffic_type = tprofile.get('traffic_type')
+ self.frame_rate, self.rate_unit = self.parse_rate(
+ tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE))
+ self.test_precision = tprofile.get('test_precision')
+ self.packet_sizes = tprofile.get('packet_sizes')
+ self.duration = tprofile.get('duration', self.DEFAULT_DURATION)
+ self.lower_bound = tprofile.get('lower_bound')
+ self.upper_bound = tprofile.get('upper_bound')
+ self.step_interval = tprofile.get('step_interval')
+ self.enable_latency = tprofile.get('enable_latency', False)
+
+ def parse_rate(self, rate):
+ """Parse traffic profile rate
+
+        The line rate can be defined in fps or as a percentage of the maximum
+        line rate:
+ - frame_rate = 5000 (by default, unit is 'fps')
+ - frame_rate = 5000fps
+ - frame_rate = 25%
+
+ :param rate: (string, int) line rate in fps or %
+ :return: (tuple: int, string) line rate number and unit
+ """
+ match = self.RATE_REGEX.match(str(rate))
+ if not match:
+            raise exceptions.TrafficProfileRate()
+ rate = float(match.group(1))
+ unit = match.group(2) if match.group(2) else self.RATE_FPS
+ if match.group(3):
+ raise exceptions.TrafficProfileRate()
+ return rate, unit
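
For example, the accepted forms behave as follows (a sketch, with config being a TrafficProfileConfig instance; trailing text after the unit raises TrafficProfileRate):

    config.parse_rate('5000')     # -> (5000.0, 'fps'), default unit
    config.parse_rate('5000fps')  # -> (5000.0, 'fps')
    config.parse_rate('25%')      # -> (25.0, '%')
    config.parse_rate('25 kbps')  # raises TrafficProfileRate
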
class TrafficProfile(object):
@@ -33,20 +84,23 @@ class TrafficProfile(object):
:return:
"""
profile_class = tp_config["traffic_profile"]["traffic_type"]
- import_modules_from_package(
- "yardstick.network_services.traffic_profile")
try:
- return next(c for c in itersubclasses(TrafficProfile)
+ return next(c for c in utils.itersubclasses(TrafficProfile)
if c.__name__ == profile_class)(tp_config)
except StopIteration:
- raise RuntimeError("No implementation for %s", profile_class)
+ raise exceptions.TrafficProfileNotImplemented(
+ profile_class=profile_class)
def __init__(self, tp_config):
# e.g. RFC2544 start_ip, stop_ip, drop_rate,
# IMIX = {"10K": 0.1, "100M": 0.5}
self.params = tp_config
+ self.config = TrafficProfileConfig(tp_config)
+
+ def is_ended(self):
+ return False
- def execute_traffic(self, traffic_generator):
+    def execute_traffic(self, traffic_generator, **kwargs):
""" This methods defines the behavior of the traffic generator.
It will be called in a loop until the traffic generator exits.
diff --git a/yardstick/network_services/traffic_profile/http.py b/yardstick/network_services/traffic_profile/http.py
index 2d00fb849..31ab17ef7 100644
--- a/yardstick/network_services/traffic_profile/http.py
+++ b/yardstick/network_services/traffic_profile/http.py
@@ -24,6 +24,10 @@ class TrafficProfileGenericHTTP(TrafficProfile):
def __init__(self, TrafficProfile):
super(TrafficProfileGenericHTTP, self).__init__(TrafficProfile)
+ def get_links_param(self):
+ return {k: v for k, v in self.params.items() if
+ "uplink" in k or "downlink" in k}
+
def execute(self, traffic_generator):
''' send run traffic for a selected traffic generator'''
pass
diff --git a/yardstick/network_services/traffic_profile/http_ixload.py b/yardstick/network_services/traffic_profile/http_ixload.py
index 348056551..ec0762500 100644
--- a/yardstick/network_services/traffic_profile/http_ixload.py
+++ b/yardstick/network_services/traffic_profile/http_ixload.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from __future__ import print_function
-
import sys
import os
import logging
@@ -38,6 +35,10 @@ class ErrorClass(object):
raise AttributeError
+class InvalidRxfFile(Exception):
+ message = 'Loaded rxf file has unexpected format'
+
+
try:
from IxLoad import IxLoad, StatCollectorUtils
except ImportError:
@@ -93,7 +94,7 @@ def validate_non_string_sequence(value, default=None, raise_exc=None):
if isinstance(value, collections.Sequence) and not isinstance(value, str):
return value
if raise_exc:
- raise raise_exc
+ raise raise_exc # pylint: disable=raising-bad-type
return default
@@ -117,8 +118,10 @@ class IXLOADHttpTest(object):
self.chassis = None
self.card = None
self.ports_to_reassign = None
+ self.links_param = None
self.test_input = jsonutils.loads(test_input)
self.parse_run_test()
+ self.test = None
@staticmethod
def format_ports_for_reassignment(ports):
@@ -182,6 +185,145 @@ class IXLOADHttpTest(object):
LOG.error('Error: IxLoad config file not found: %s', config_file)
raise
+ def update_network_address(self, net_traffic, address, gateway, prefix):
+        """Update the IP address and gateway for a net_traffic object
+
+        This function updates the fields that configure the source addresses
+        for the traffic described by the net_traffic object.
+        It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param address: (str) Ipv4 range start address
+ :param gateway: (str) Ipv4 address of gateway
+ :param prefix: (int) subnet prefix
+ :return:
+ """
+ try:
+ ethernet = net_traffic.network.getL1Plugin()
+ ix_net_l2_ethernet_plugin = ethernet.childrenList[0]
+ ix_net_ip_v4_v6_plugin = ix_net_l2_ethernet_plugin.childrenList[0]
+ ix_net_ip_v4_v6_range = ix_net_ip_v4_v6_plugin.rangeList[0]
+
+ ix_net_ip_v4_v6_range.config(
+ prefix=prefix,
+ ipAddress=address,
+ gatewayAddress=gateway)
+ except Exception:
+ raise InvalidRxfFile
+
+ def update_network_mac_address(self, net_traffic, mac):
+        """Update the MAC address for a net_traffic object
+
+        This function updates the fields that configure the MAC addresses
+        for the traffic described by the net_traffic object.
+        If mac == "auto", an auto-generated MAC address is configured.
+        It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param mac: (str) MAC
+ :return:
+ """
+ try:
+ ethernet = net_traffic.network.getL1Plugin()
+ ix_net_l2_ethernet_plugin = ethernet.childrenList[0]
+ ix_net_ip_v4_v6_plugin = ix_net_l2_ethernet_plugin.childrenList[0]
+ ix_net_ip_v4_v6_range = ix_net_ip_v4_v6_plugin.rangeList[0]
+
+ if str(mac).lower() == "auto":
+ ix_net_ip_v4_v6_range.config(autoMacGeneration=True)
+ else:
+ ix_net_ip_v4_v6_range.config(autoMacGeneration=False)
+ mac_range = ix_net_ip_v4_v6_range.getLowerRelatedRange(
+ "MacRange")
+ mac_range.config(mac=mac)
+ except Exception:
+ raise InvalidRxfFile
+
+ def update_network_param(self, net_traffic, param):
+        """Update net_traffic with the parameters specified in param"""
+
+ self.update_network_address(net_traffic, param["address"],
+ param["gateway"], param["subnet_prefix"])
+
+ self.update_network_mac_address(net_traffic, param["mac"])
+
+ def update_config(self):
+        """Update some fields with parameters from the traffic profile"""
+
+ net_traffics = {}
+        # self.test.communityList is an IxLoadObjectProxy to a tcl object
+        # which contains all net_traffic objects in the scenario.
+        # Each net_traffic item has a name in the format "activity_name@item_name"
+ try:
+ for item in self.test.communityList:
+ net_traffics[item.name.split('@')[1]] = item
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ for name, net_traffic in net_traffics.items():
+ try:
+ param = self.links_param[name]
+ except KeyError:
+ LOG.debug('There is no param for net_traffic %s', name)
+ continue
+
+ self.update_network_param(net_traffic, param["ip"])
+ if "uplink" in name:
+ self.update_http_client_param(net_traffic, param["http_client"])
+
+ def update_http_client_param(self, net_traffic, param):
+        """Update the http client object in net_traffic
+
+        Update the http client object in net_traffic with the parameters
+        specified in param.
+        It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param param: (dict) http_client section from traffic profile
+ :return:
+ """
+ page = param.get("page_object")
+ if page:
+ self.update_page_size(net_traffic, page)
+ users = param.get("simulated_users")
+ if users:
+ self.update_user_count(net_traffic, users)
+
+ def update_page_size(self, net_traffic, page_object):
+        """Update the page_object field of the http client object in net_traffic
+
+        This function updates the field that configures the page object
+        to be loaded from the server.
+        It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param page_object: (str) path to object on server e.g. "/4k.html"
+ :return:
+ """
+ try:
+ activity = net_traffic.activityList[0]
+ ix_http_command = activity.agent.actionList[0]
+ ix_http_command.config(pageObject=page_object)
+ except Exception:
+ raise InvalidRxfFile
+
+ def update_user_count(self, net_traffic, user_count):
+        """Update the userObjectiveValue field of the activity object in net_traffic
+
+        This function updates the field that configures the number of users
+        the client will simulate.
+        It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param user_count: (int) number of simulated users
+ :return:
+ """
+ try:
+ activity = net_traffic.activityList[0]
+ activity.config(userObjectiveValue=user_count)
+ except Exception:
+ raise InvalidRxfFile
+
def start_http_test(self):
self.ix_load = IxLoad()
@@ -208,17 +350,19 @@ class IXLOADHttpTest(object):
# Get the first test on the testList
test_name = repository.testList[0].cget("name")
- test = repository.testList.getItem(test_name)
+ self.test = repository.testList.getItem(test_name)
self.set_results_dir(test_controller, self.results_on_windows)
- test.config(statsRequired=1, enableResetPorts=1, csvInterval=2,
- enableForceOwnership=True)
+ self.test.config(statsRequired=1, enableResetPorts=1, csvInterval=2,
+ enableForceOwnership=True)
+
+ self.update_config()
# ---- Remap ports ----
try:
- self.reassign_ports(test, repository, self.ports_to_reassign)
- except Exception:
+ self.reassign_ports(self.test, repository, self.ports_to_reassign)
+ except Exception: # pylint: disable=broad-except
LOG.exception("Exception occurred during reassign_ports")
# -----------------------------------------------------------------------
@@ -257,7 +401,7 @@ class IXLOADHttpTest(object):
self.stat_utils.StartCollector(self.IxL_StatCollectorCommand)
- test_controller.run(test)
+ test_controller.run(self.test)
self.ix_load.waitForTestFinish()
test_controller.releaseConfigWaitFinish()
@@ -269,7 +413,7 @@ class IXLOADHttpTest(object):
test_controller.generateReport(detailedReport=1, format="PDF;HTML")
test_controller.releaseConfigWaitFinish()
- self.ix_load.delete(test)
+ self.ix_load.delete(self.test)
self.ix_load.delete(test_controller)
self.ix_load.delete(logger)
self.ix_load.delete(log_engine)
@@ -307,6 +451,9 @@ class IXLOADHttpTest(object):
LOG.debug("Ports to be reassigned: %s", self.ports_to_reassign)
+ self.links_param = self.test_input["links_param"]
+ LOG.debug("Links param to be applied: %s", self.links_param)
+
def main(args):
# Get the args from cmdline and parse and run the test
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 7881131a7..ca45b500d 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,83 +12,149 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import logging
+import collections
+
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-from yardstick.network_services.traffic_profile.traffic_profile import \
- TrexProfile
LOG = logging.getLogger(__name__)
-class IXIARFC2544Profile(TrexProfile):
+class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
UPLINK = 'uplink'
DOWNLINK = 'downlink'
+ DROP_PERCENT_ROUND = 6
+ STATUS_SUCCESS = "Success"
+ STATUS_FAIL = "Failure"
+
+ def __init__(self, yaml_data):
+ super(IXIARFC2544Profile, self).__init__(yaml_data)
+ self.rate = self.config.frame_rate
+ self.rate_unit = self.config.rate_unit
+ self.iteration = 0
+ self.full_profile = {}
+
+ def _get_ip_and_mask(self, ip_range):
+ _ip_range = ip_range.split('-')
+ if len(_ip_range) == 1:
+ return _ip_range[0], None
+
+ mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
+ return _ip_range[0], mask
+
+ def _get_fixed_and_mask(self, port_range):
+ _port_range = str(port_range).split('-')
+ if len(_port_range) == 1:
+ return int(_port_range[0]), 0
- def _get_ixia_traffic_profile(self, profile_data, mac=None, xfile=None, static_traffic=None):
- if mac is None:
- mac = {}
+ return int(_port_range[0]), int(_port_range[1])
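
A sketch of what the two range helpers return (profile being an IXIARFC2544Profile instance; the /24 value assumes utils.get_mask_from_ip_range derives the longest common prefix of the two addresses):

    profile._get_ip_and_mask('10.0.0.1')             # ('10.0.0.1', None)
    profile._get_ip_and_mask('10.0.0.1-10.0.0.255')  # ('10.0.0.1', 24)
    profile._get_fixed_and_mask(4000)                # (4000, 0)
    profile._get_fixed_and_mask('4000-4010')         # (4000, 4010)
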
+ def _get_ixia_traffic_profile(self, profile_data, mac=None):
+ mac = {} if mac is None else mac
result = {}
for traffickey, values in profile_data.items():
if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
continue
+ # values should be single-item dict, so just grab the first item
try:
- # values should be single-item dict, so just grab the first item
- try:
- key, value = next(iter(values.items()))
- except StopIteration:
- result[traffickey] = {}
- continue
-
- port_id = value.get('id', 1)
- port_index = port_id - 1
- try:
- ip = value['outer_l3v6']
- except KeyError:
- ip = value['outer_l3v4']
- src_key, dst_key = 'srcip4', 'dstip4'
- else:
- src_key, dst_key = 'srcip6', 'dstip6'
-
- result[traffickey] = {
- 'bidir': False,
- 'iload': '100',
- 'id': port_id,
- 'outer_l2': {
- 'framesize': value['outer_l2']['framesize'],
- 'framesPerSecond': True,
- 'srcmac': mac['src_mac_{}'.format(port_index)],
- 'dstmac': mac['dst_mac_{}'.format(port_index)],
- },
- 'outer_l3': {
- 'count': ip['count'],
- 'dscp': ip['dscp'],
- 'ttl': ip['ttl'],
- src_key: ip[src_key].split("-")[0],
- dst_key: ip[dst_key].split("-")[0],
- 'type': key,
- 'proto': ip['proto'],
- },
- 'outer_l4': value['outer_l4'],
- }
- except Exception:
+ key, value = next(iter(values.items()))
+ except StopIteration:
+ result[traffickey] = {}
continue
+ port_id = value.get('id', 1)
+ port_index = port_id - 1
+
+ result[traffickey] = {
+ 'bidir': False,
+ 'id': port_id,
+ 'rate': self.rate,
+ 'rate_unit': self.rate_unit,
+ 'outer_l2': {},
+ 'outer_l3': {},
+ 'outer_l4': {},
+ }
+
+ frame_rate = value.get('frame_rate')
+ if frame_rate:
+ flow_rate, flow_rate_unit = self.config.parse_rate(frame_rate)
+ result[traffickey]['rate'] = flow_rate
+ result[traffickey]['rate_unit'] = flow_rate_unit
+
+ outer_l2 = value.get('outer_l2')
+ if outer_l2:
+ result[traffickey]['outer_l2'].update({
+ 'framesize': outer_l2.get('framesize'),
+ 'framesPerSecond': True,
+ 'QinQ': outer_l2.get('QinQ'),
+ 'srcmac': mac.get('src_mac_{}'.format(port_index)),
+ 'dstmac': mac.get('dst_mac_{}'.format(port_index)),
+ })
+
+ if value.get('outer_l3v4'):
+ outer_l3 = value['outer_l3v4']
+ src_key, dst_key = 'srcip4', 'dstip4'
+ else:
+ outer_l3 = value.get('outer_l3v6')
+ src_key, dst_key = 'srcip6', 'dstip6'
+ if outer_l3:
+ srcip = srcmask = dstip = dstmask = None
+ if outer_l3.get(src_key):
+ srcip, srcmask = self._get_ip_and_mask(outer_l3[src_key])
+ if outer_l3.get(dst_key):
+ dstip, dstmask = self._get_ip_and_mask(outer_l3[dst_key])
+
+ result[traffickey]['outer_l3'].update({
+ 'count': outer_l3.get('count', 1),
+ 'dscp': outer_l3.get('dscp'),
+ 'ttl': outer_l3.get('ttl'),
+ 'srcseed': outer_l3.get('srcseed', 1),
+ 'dstseed': outer_l3.get('dstseed', 1),
+ 'srcip': srcip,
+ 'dstip': dstip,
+ 'srcmask': srcmask,
+ 'dstmask': dstmask,
+ 'type': key,
+ 'proto': outer_l3.get('proto'),
+ 'priority': outer_l3.get('priority')
+ })
+
+ outer_l4 = value.get('outer_l4')
+ if outer_l4:
+ src_port = src_port_mask = dst_port = dst_port_mask = None
+ if outer_l4.get('srcport'):
+ src_port, src_port_mask = (
+ self._get_fixed_and_mask(outer_l4['srcport']))
+
+ if outer_l4.get('dstport'):
+ dst_port, dst_port_mask = (
+ self._get_fixed_and_mask(outer_l4['dstport']))
+
+ result[traffickey]['outer_l4'].update({
+ 'srcport': src_port,
+ 'dstport': dst_port,
+ 'srcportmask': src_port_mask,
+ 'dstportmask': dst_port_mask,
+ 'count': outer_l4.get('count', 1),
+ 'seed': outer_l4.get('seed', 1),
+ })
+
return result
- def _ixia_traffic_generate(self, traffic_generator, traffic, ixia_obj):
- for key, value in traffic.items():
- if key.startswith((self.UPLINK, self.DOWNLINK)):
- value["iload"] = str(self.rate)
- ixia_obj.ix_update_frame(traffic)
- ixia_obj.ix_update_ether(traffic)
- ixia_obj.add_ip_header(traffic, 4)
- ixia_obj.ix_start_traffic()
- self.tmp_drop = 0
- self.tmp_throughput = 0
+ def _ixia_traffic_generate(self, traffic, ixia_obj, traffic_gen):
+ ixia_obj.update_frame(traffic, self.config.duration)
+ ixia_obj.update_ip_packet(traffic)
+ ixia_obj.update_l4(traffic)
+ self._update_traffic_tracking_options(traffic_gen)
+ ixia_obj.start_traffic()
+
+ def _update_traffic_tracking_options(self, traffic_gen):
+ traffic_gen.update_tracking_options()
def update_traffic_profile(self, traffic_generator):
def port_generator():
@@ -99,86 +165,261 @@ class IXIARFC2544Profile(TrexProfile):
if not profile_data:
continue
self.profile_data = profile_data
- self.get_streams(self.profile_data)
self.full_profile.update({vld_id: self.profile_data})
for intf in intfs:
yield traffic_generator.vnfd_helper.port_num(intf)
self.ports = [port for port in port_generator()]
- def execute_traffic(self, traffic_generator, ixia_obj, mac=None, xfile=None):
- if mac is None:
- mac = {}
+ def execute_traffic(self, traffic_generator, ixia_obj=None, mac=None):
+ mac = {} if mac is None else mac
+ first_run = self.first_run
if self.first_run:
- self.full_profile = {}
+ self.first_run = False
self.pg_id = 0
- self.update_traffic_profile(traffic_generator)
- traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
self.max_rate = self.rate
- self.min_rate = 0
- self.get_multiplier()
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
-
- def get_multiplier(self):
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def start_ixia_latency(self, traffic_generator, ixia_obj,
- mac=None, xfile=None):
- if mac is None:
- mac = {}
- self.update_traffic_profile(traffic_generator)
- traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
-
- def get_drop_percentage(self, traffic_generator, samples, tol_min,
- tolerance, ixia_obj, mac=None, xfile=None):
- if mac is None:
- mac = {}
- status = 'Running'
+ self.min_rate = 0.0
+ else:
+ self.rate = self._get_next_rate()
+
+ self.iteration = traffic_generator.rfc_helper.iteration.value
+ traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
+ self._ixia_traffic_generate(traffic, ixia_obj, traffic_generator)
+ return first_run
+
+ # pylint: disable=unused-argument
+ def get_drop_percentage(self, samples, tol_min, tolerance, precision,
+ resolution, first_run=False, tc_rfc2544_opts=None):
+ completed = False
+ drop_percent = 100.0
+ num_ifaces = len(samples)
+ duration = self.config.duration
+ in_packets_sum = sum(
+ [samples[iface]['InPackets'] for iface in samples])
+ out_packets_sum = sum(
+ [samples[iface]['OutPackets'] for iface in samples])
+ in_bytes_sum = sum(
+ [samples[iface]['InBytes'] for iface in samples])
+ out_bytes_sum = sum(
+ [samples[iface]['OutBytes'] for iface in samples])
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
+ # Rx throughput in Bps
+ rx_throughput_bps = round(float(in_bytes_sum) / duration, 3)
+ # Tx throughput in Bps
+ tx_throughput_bps = round(float(out_bytes_sum) / duration, 3)
+ packet_drop = abs(out_packets_sum - in_packets_sum)
+
+ try:
+ drop_percent = round(
+ (packet_drop / float(out_packets_sum)) * 100,
+ self.DROP_PERCENT_ROUND)
+ except ZeroDivisionError:
+ LOG.info('No traffic is flowing')
+
+ if first_run:
+            completed = drop_percent <= tolerance
+ if (first_run and
+ self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
+ self.rate = float(out_packets_sum) / duration / num_ifaces
+
+ if drop_percent > tolerance:
+ self.max_rate = self.rate
+ elif drop_percent < tol_min:
+ self.min_rate = self.rate
+ else:
+ completed = True
+
+ last_rate = self.rate
+ next_rate = self._get_next_rate()
+ if abs(next_rate - self.rate) < resolution:
+ LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate,
+ next_rate, resolution)
+            # stop the test if the difference between the transmission rates
+            # of two consecutive iterations is smaller than the resolution
+ completed = True
+
+ LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
+ "completed=%s", tolerance, precision, drop_percent,
+ completed)
+
+ latency_ns_avg = float(sum(
+ [samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces
+ latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples])
+ latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples])
+
+ samples['Status'] = self.STATUS_FAIL
+ if round(drop_percent, precision) <= tolerance:
+ samples['Status'] = self.STATUS_SUCCESS
+
+ samples['TxThroughput'] = tx_throughput
+ samples['RxThroughput'] = rx_throughput
+ samples['TxThroughputBps'] = tx_throughput_bps
+ samples['RxThroughputBps'] = rx_throughput_bps
+ samples['DropPercentage'] = drop_percent
+ samples['LatencyAvg'] = latency_ns_avg
+ samples['LatencyMin'] = latency_ns_min
+ samples['LatencyMax'] = latency_ns_max
+ samples['Rate'] = last_rate
+ samples['PktSize'] = self._get_framesize()
+ samples['Iteration'] = self.iteration
+
+ return completed, samples
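
The convergence above leans on _get_next_rate(), inherited from the Trex base profile; the usual midpoint step looks roughly like this (an assumed sketch, not the verbatim implementation):

    def _get_next_rate(self):
        # Midpoint of the current search window; the caller stops once
        # |next_rate - rate| drops below the configured resolution.
        return round(float(self.max_rate + self.min_rate) / 2.0, 5)
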
+
+
+class IXIARFC2544PppoeScenarioProfile(IXIARFC2544Profile):
+ """Class handles BNG PPPoE scenario tests traffic profile"""
+
+ def __init__(self, yaml_data):
+ super(IXIARFC2544PppoeScenarioProfile, self).__init__(yaml_data)
+ self.full_profile = collections.OrderedDict()
+
+ def _get_flow_groups_params(self):
+ flows_data = [key for key in self.params.keys()
+ if key.split('_')[0] in [self.UPLINK, self.DOWNLINK]]
+ for i in range(len(flows_data)):
+ uplink = '_'.join([self.UPLINK, str(i)])
+ downlink = '_'.join([self.DOWNLINK, str(i)])
+ if uplink in flows_data:
+ self.full_profile.update({uplink: self.params[uplink]})
+ if downlink in flows_data:
+ self.full_profile.update({downlink: self.params[downlink]})
+
+ def update_traffic_profile(self, traffic_generator):
+
+ networks = collections.OrderedDict()
+
+ # Sort network interfaces pairs
+ for i in range(len(traffic_generator.networks)):
+ uplink = '_'.join([self.UPLINK, str(i)])
+ downlink = '_'.join([self.DOWNLINK, str(i)])
+ if uplink in traffic_generator.networks:
+ networks[uplink] = traffic_generator.networks[uplink]
+ if downlink in traffic_generator.networks:
+ networks[downlink] = traffic_generator.networks[downlink]
+
+ def port_generator():
+ for intfs in networks.values():
+ for intf in intfs:
+ yield traffic_generator.vnfd_helper.port_num(intf)
+
+ self._get_flow_groups_params()
+ self.ports = [port for port in port_generator()]
+
+ def _get_prio_flows_drop_percentage(self, stats):
drop_percent = 100
- in_packets = sum([samples[iface]['in_packets'] for iface in samples])
- out_packets = sum([samples[iface]['out_packets'] for iface in samples])
- rx_throughput = \
- sum([samples[iface]['RxThroughput'] for iface in samples])
- tx_throughput = \
- sum([samples[iface]['TxThroughput'] for iface in samples])
- packet_drop = abs(out_packets - in_packets)
+ for prio_id in stats:
+ prio_flow = stats[prio_id]
+ sum_packet_drop = abs(prio_flow['OutPackets'] - prio_flow['InPackets'])
+ try:
+ drop_percent = round(
+ (sum_packet_drop / float(prio_flow['OutPackets'])) * 100,
+ self.DROP_PERCENT_ROUND)
+ except ZeroDivisionError:
+ LOG.info('No traffic is flowing')
+ prio_flow['DropPercentage'] = drop_percent
+ return stats
+
+ def _get_summary_pppoe_subs_counters(self, samples):
+ result = {}
+ keys = ['SessionsUp',
+ 'SessionsDown',
+ 'SessionsNotStarted',
+ 'SessionsTotal']
+ for key in keys:
+ result[key] = \
+ sum([samples[port][key] for port in samples
+ if key in samples[port]])
+ return result
+
+ def get_drop_percentage(self, samples, tol_min, tolerance, precision,
+ resolution, first_run=False, tc_rfc2544_opts=None):
+ completed = False
+ sum_drop_percent = 100
+ num_ifaces = len(samples)
+ duration = self.config.duration
+ last_rate = self.rate
+ priority_stats = samples.pop('priority_stats')
+ priority_stats = self._get_prio_flows_drop_percentage(priority_stats)
+ summary_subs_stats = self._get_summary_pppoe_subs_counters(samples)
+ in_packets_sum = sum(
+ [samples[iface]['InPackets'] for iface in samples])
+ out_packets_sum = sum(
+ [samples[iface]['OutPackets'] for iface in samples])
+ in_bytes_sum = sum(
+ [samples[iface]['InBytes'] for iface in samples])
+ out_bytes_sum = sum(
+ [samples[iface]['OutBytes'] for iface in samples])
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
+ # Rx throughput in Bps
+ rx_throughput_bps = round(float(in_bytes_sum) / duration, 3)
+ # Tx throughput in Bps
+ tx_throughput_bps = round(float(out_bytes_sum) / duration, 3)
+ sum_packet_drop = abs(out_packets_sum - in_packets_sum)
+
try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 2)
+ sum_drop_percent = round(
+ (sum_packet_drop / float(out_packets_sum)) * 100,
+ self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
- samples['TxThroughput'] = round(tx_throughput / 1.0, 2)
- samples['RxThroughput'] = round(rx_throughput / 1.0, 2)
- samples['CurrentDropPercentage'] = drop_percent
- samples['Throughput'] = self.tmp_throughput
- samples['DropPercentage'] = self.tmp_drop
- if drop_percent > tolerance and self.tmp_throughput == 0:
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
- if self.first_run:
- max_supported_rate = out_packets / 30.0
- self.rate = max_supported_rate
- self.first_run = False
- if drop_percent <= tolerance:
- status = 'Completed'
+
+ latency_ns_avg = float(sum(
+ [samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces
+ latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples])
+ latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples])
+
+ samples['TxThroughput'] = tx_throughput
+ samples['RxThroughput'] = rx_throughput
+ samples['TxThroughputBps'] = tx_throughput_bps
+ samples['RxThroughputBps'] = rx_throughput_bps
+ samples['DropPercentage'] = sum_drop_percent
+ samples['LatencyAvg'] = latency_ns_avg
+ samples['LatencyMin'] = latency_ns_min
+ samples['LatencyMax'] = latency_ns_max
+ samples['Priority'] = priority_stats
+ samples['Rate'] = last_rate
+ samples['PktSize'] = self._get_framesize()
+ samples['Iteration'] = self.iteration
+ samples.update(summary_subs_stats)
+
+ if tc_rfc2544_opts:
+ priority = tc_rfc2544_opts.get('priority')
+ if priority:
+ drop_percent = samples['Priority'][priority]['DropPercentage']
+ else:
+ drop_percent = sum_drop_percent
+ else:
+ drop_percent = sum_drop_percent
+
+ if first_run:
+            completed = drop_percent <= tolerance
+ if (first_run and
+ self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
+ self.rate = float(out_packets_sum) / duration / num_ifaces
+
if drop_percent > tolerance:
self.max_rate = self.rate
elif drop_percent < tol_min:
self.min_rate = self.rate
- if drop_percent >= self.tmp_drop:
- self.tmp_drop = drop_percent
- self.tmp_throughput = round((rx_throughput / 1.0), 2)
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
else:
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
- return status, samples
- self.get_multiplier()
- traffic = self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
- return status, samples
+ completed = True
+
+ next_rate = self._get_next_rate()
+ if abs(next_rate - self.rate) < resolution:
+ LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate,
+ next_rate, resolution)
+            # stop the test if the difference between the transmission rates
+            # of two consecutive iterations is smaller than the resolution
+ completed = True
+
+ LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
+ "completed=%s", tolerance, precision, drop_percent,
+ completed)
+
+ samples['Status'] = self.STATUS_FAIL
+ if round(drop_percent, precision) <= tolerance:
+ samples['Status'] = self.STATUS_SUCCESS
+
+ return completed, samples
diff --git a/yardstick/network_services/traffic_profile/landslide_profile.py b/yardstick/network_services/traffic_profile/landslide_profile.py
new file mode 100644
index 000000000..f79226fb4
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/landslide_profile.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Spirent Landslide traffic profile definitions """
+
+from yardstick.network_services.traffic_profile import base
+
+
+class LandslideProfile(base.TrafficProfile):
+ """
+ This traffic profile handles attributes of Landslide data stream
+ """
+
+ def __init__(self, tp_config):
+ super(LandslideProfile, self).__init__(tp_config)
+
+ # for backward compatibility support dict and list of dicts
+ if isinstance(tp_config["dmf_config"], dict):
+ self.dmf_config = [tp_config["dmf_config"]]
+ else:
+ self.dmf_config = tp_config["dmf_config"]
+
+ def execute(self, traffic_generator):
+ pass
+
+ def update_dmf(self, options):
+ if 'dmf' in options:
+ if isinstance(options['dmf'], dict):
+ _dmfs = [options['dmf']]
+ else:
+ _dmfs = options['dmf']
+
+ for index, _dmf in enumerate(_dmfs):
+ try:
+ self.dmf_config[index].update(_dmf)
+ except IndexError:
+ pass
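
A usage sketch for update_dmf, assuming a profile built from a single-dict dmf_config:

    profile = LandslideProfile({
        'traffic_profile': {'traffic_type': 'LandslideProfile'},
        'dmf_config': {'dmf': {'library': 'test'}, 'transactionRate': 5}})
    profile.update_dmf({'dmf': {'transactionRate': 100}})
    # profile.dmf_config[0]['transactionRate'] is now 100
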
diff --git a/yardstick/network_services/traffic_profile/pktgen.py b/yardstick/network_services/traffic_profile/pktgen.py
new file mode 100644
index 000000000..30f81b794
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/pktgen.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+class PktgenTrafficProfile(tp_base.TrafficProfile):
+ """This class handles Pktgen Trex Traffic profile execution"""
+
+ def __init__(self, tp_config): # pragma: no cover
+ super(PktgenTrafficProfile, self).__init__(tp_config)
+ self._host = None
+ self._port = None
+
+ def init(self, host, port): # pragma: no cover
+ """Initialize control parameters
+
+ :param host: (str) ip or host name
+ :param port: (int) TCP socket port number for Lua commands
+ """
+ self._host = host
+ self._port = port
+
+ def start(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.start("0")') != 0:
+ raise exceptions.PktgenActionError(action='start')
+
+ def stop(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.stop("0")') != 0:
+ raise exceptions.PktgenActionError(action='stop')
+
+ def rate(self, rate):
+ command = 'pktgen.set("0", "rate", ' + str(rate) + ')'
+ if utils.send_socket_command(self._host, self._port, command) != 0:
+ raise exceptions.PktgenActionError(action='rate')
+
+ def clear_all_stats(self):
+ if utils.send_socket_command(self._host, self._port, 'clr') != 0:
+ raise exceptions.PktgenActionError(action='clear all stats')
+
+ def help(self):
+ if utils.send_socket_command(self._host, self._port, 'help') != 0:
+ raise exceptions.PktgenActionError(action='help')
+
+ def execute_traffic(self, *args, **kwargs): # pragma: no cover
+ pass
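
A usage sketch, assuming a pktgen Lua command socket is listening (host and port are placeholders):

    profile = PktgenTrafficProfile(tp_config)  # tp_config as parsed from YAML
    profile.init('10.10.10.10', 22022)         # pktgen socket, port assumed
    profile.clear_all_stats()
    profile.rate(50)                           # rate on port "0"
    profile.start()
    # ... traffic runs; stop when the measurement window ends
    profile.stop()
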
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 1fd6ec41a..402bf741c 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -16,11 +16,23 @@
from __future__ import absolute_import
import logging
+import datetime
+import time
from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+from yardstick.network_services import constants
+from yardstick.common import constants as overall_constants
LOG = logging.getLogger(__name__)
+STATUS_SUCCESS = "Success"
+STATUS_FAIL = "Failure"
+STATUS_RESULT = "Result"
+STEP_CONFIRM = "Confirm retry"
+STEP_INCREASE_LOWER = "Increase lower"
+STEP_DECREASE_LOWER = "Decrease lower"
+STEP_DECREASE_UPPER = "Decrease upper"
+
class ProxBinSearchProfile(ProxProfile):
"""
@@ -54,6 +66,9 @@ class ProxBinSearchProfile(ProxProfile):
yield test_value
test_value = self.mid_point
+ def is_ended(self):
+ return self.done.is_set()
+
def run_test_with_pkt_size(self, traffic_gen, pkt_size, duration):
"""Run the test for a single packet size.
@@ -81,19 +96,111 @@ class ProxBinSearchProfile(ProxProfile):
# success, the binary search will complete on an integer multiple
# of the precision, rather than on a fraction of it.
+ theor_max_thruput = 0.0
+
+ result_samples = {}
+
+ test_data = {
+ "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
+ "test_precision": self.params["traffic_profile"]["test_precision"],
+ "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
+ "duration": duration
+ }
+ self.prev_time = time.time()
+
# throughput and packet loss from the most recent successful test
successful_pkt_loss = 0.0
- for test_value in self.bounds_iterator(LOG):
- result, port_samples = self._profile_helper.run_test(pkt_size, duration,
- test_value, self.tolerated_loss)
-
- if result.success:
- LOG.debug("Success! Increasing lower bound")
- self.current_lower = test_value
- successful_pkt_loss = result.pkt_loss
- else:
- LOG.debug("Failure... Decreasing upper bound")
- self.current_upper = test_value
-
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- self.queue.put(samples)
+ line_speed = traffic_gen.scenario_helper.all_options.get(
+ "interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
+
+ ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
+ for step_id, test_value in enumerate(self.bounds_iterator(LOG)):
+ pos_retry = 0
+ neg_retry = 0
+ total_retry = 0
+
+            LOG.info("Checking MAX %s MIN %s TEST %s", self.current_upper,
+                     self.current_lower, test_value)
+
+ while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
+
+ total_retry = total_retry + 1
+
+ result, port_samples = self._profile_helper.run_test(pkt_size, duration,
+ test_value,
+ self.tolerated_loss,
+ line_speed)
+
+                if (total_retry > (ok_retry * 3)) and (ok_retry != 0):
+ status = STATUS_FAIL
+ next_step = STEP_DECREASE_LOWER
+ successful_pkt_loss = result.pkt_loss
+ self.current_upper = test_value
+ neg_retry = total_retry
+ elif result.success:
+                    if (pos_retry < ok_retry) and (ok_retry != 0):
+ status = STATUS_SUCCESS
+ next_step = STEP_CONFIRM
+ successful_pkt_loss = result.pkt_loss
+ neg_retry = 0
+ else:
+ status = STATUS_SUCCESS
+ next_step = STEP_INCREASE_LOWER
+ self.current_lower = test_value
+ successful_pkt_loss = result.pkt_loss
+
+ pos_retry = pos_retry + 1
+
+ else:
+                    if (neg_retry < ok_retry) and (ok_retry != 0):
+ status = STATUS_FAIL
+ next_step = STEP_CONFIRM
+ pos_retry = 0
+ else:
+ status = STATUS_FAIL
+ next_step = STEP_DECREASE_UPPER
+ self.current_upper = test_value
+
+ neg_retry = neg_retry + 1
+
+ LOG.info(
+ "Status = '%s' Next_Step = '%s'", status, next_step)
+
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+                if theor_max_thruput < samples["RequestedTxThroughput"]:
+                    theor_max_thruput = samples["RequestedTxThroughput"]
+ samples['theor_max_throughput'] = theor_max_thruput
+
+ samples["rx_total"] = int(result.rx_total)
+ samples["tx_total"] = int(result.tx_total)
+ samples["can_be_lost"] = int(result.can_be_lost)
+ samples["drop_total"] = int(result.drop_total)
+ samples["RxThroughput_gbps"] = \
+ (samples["RxThroughput"] / 1000) * ((pkt_size + 20) * 8)
+ samples['Status'] = status
+ samples['Next_Step'] = next_step
+ samples["MAX_Rate"] = self.current_upper
+ samples["MIN_Rate"] = self.current_lower
+ samples["Test_Rate"] = test_value
+ samples["Step_Id"] = step_id
+ samples["Confirmation_Retry"] = total_retry
+
+ samples.update(test_data)
+
+ if status == STATUS_SUCCESS and next_step == STEP_INCREASE_LOWER:
+ # Store success samples for result samples
+ result_samples = samples
+
+ LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+
+ self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
+
+ LOG.info(
+ ">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
+ pkt_size, theor_max_thruput, result_samples.get("RxThroughput", 0.0))
+ result_samples["Status"] = STATUS_RESULT
+ result_samples["Next_Step"] = ""
+ result_samples["Actual_throughput"] = result_samples.get("RxThroughput", 0.0)
+ result_samples["theor_max_throughput"] = theor_max_thruput
+ self.queue.put(result_samples)
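
The confirmation logic above is easier to follow in isolation. With "confirmation: N" set in the runner configuration, the verdict for one bisection step only becomes final after N consecutive runs agree, and a step that keeps flapping between success and failure is abandoned as a failure once 3 * N runs have been spent. A minimal sketch with a stubbed result sequence (the inputs are made up; the real loop also updates bounds and pushes samples):

def confirm(results, ok_retry):
    """Return the final verdict for one bisection step (sketch only)."""
    pos_retry = neg_retry = total_retry = 0
    for success in results:
        total_retry += 1
        if total_retry > ok_retry * 3 and ok_retry != 0:
            return 'Failure'                 # flapping: give up on this step
        if success:
            if pos_retry >= ok_retry or ok_retry == 0:
                return 'Success'             # confirmed: raise lower bound
            neg_retry = 0                    # a success resets the fail count
            pos_retry += 1
        else:
            if neg_retry >= ok_retry or ok_retry == 0:
                return 'Failure'             # confirmed: lower upper bound
            pos_retry = 0                    # a failure resets the pass count
            neg_retry += 1

print(confirm([True, True, True], ok_retry=2))           # Success
print(confirm([True, False, True, False], ok_retry=1))   # Failure (flapping)
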
diff --git a/yardstick/network_services/traffic_profile/prox_irq.py b/yardstick/network_services/traffic_profile/prox_irq.py
new file mode 100644
index 000000000..0ea294914
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/prox_irq.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2016-2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Fixed traffic profile definitions """
+
+import logging
+import time
+
+from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+
+LOG = logging.getLogger(__name__)
+
+
+class ProxIrqProfile(ProxProfile):
+ """
+ This profile adds a single stream at the beginning of the traffic session
+ """
+
+ def __init__(self, tp_config):
+ super(ProxIrqProfile, self).__init__(tp_config)
+
+ def init(self, queue):
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def execute_traffic(self, traffic_generator):
+ LOG.debug("Prox_IRQ Execute Traffic....")
+ time.sleep(5)
+
+ def is_ended(self):
+ return False
+
+ def run_test(self):
+ """Run the test
+ """
+
+ LOG.info("Prox_IRQ ....")
diff --git a/yardstick/network_services/traffic_profile/prox_profile.py b/yardstick/network_services/traffic_profile/prox_profile.py
index 170dfd96f..be450c9f7 100644
--- a/yardstick/network_services/traffic_profile/prox_profile.py
+++ b/yardstick/network_services/traffic_profile/prox_profile.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@
from __future__ import absolute_import
import logging
+import multiprocessing
+import time
from yardstick.network_services.traffic_profile.base import TrafficProfile
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper
@@ -29,8 +31,22 @@ class ProxProfile(TrafficProfile):
"""
@staticmethod
+ def sort_vpci(traffic_gen):
+ """Return the list of external interfaces ordered by vpci and name
+
+ :param traffic_gen: (ProxTrafficGen) traffic generator
+ :return: list of ordered interfaces
+ """
+ def key_func(interface):
+ return interface['virtual-interface']['vpci'], interface['name']
+
+ return sorted(traffic_gen.vnfd_helper['vdu'][0]['external-interface'],
+ key=key_func)
+
+ @staticmethod
def fill_samples(samples, traffic_gen):
- for vpci_idx, intf in enumerate(traffic_gen.vpci_if_name_ascending):
+ vpci_if_name_ascending = ProxProfile.sort_vpci(traffic_gen)
+ for vpci_idx, intf in enumerate(vpci_if_name_ascending):
name = intf[1]
# TODO: VNFDs KPIs values needs to be mapped to TRex structure
xe_port = traffic_gen.resource_helper.sut.port_stats([vpci_idx])
@@ -42,7 +58,7 @@ class ProxProfile(TrafficProfile):
def __init__(self, tp_config):
super(ProxProfile, self).__init__(tp_config)
self.queue = None
- self.done = False
+ self.done = multiprocessing.Event()
self.results = []
# TODO: get init values from tp_config
@@ -102,7 +118,8 @@ class ProxProfile(TrafficProfile):
try:
pkt_size = next(self.pkt_size_iterator)
except StopIteration:
- self.done = True
+ time.sleep(5)
+ self.done.set()
return
# Adjust packet size upwards if it's less than the minimum
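
The ordering used by sort_vpci() above is a plain tuple sort key over (vpci, name); a standalone check with two made-up interface records:

interfaces = [
    {'name': 'xe1', 'virtual-interface': {'vpci': '0000:00:05.0'}},
    {'name': 'xe0', 'virtual-interface': {'vpci': '0000:00:04.0'}},
]

ordered = sorted(interfaces,
                 key=lambda i: (i['virtual-interface']['vpci'], i['name']))
print([i['name'] for i in ordered])  # ['xe0', 'xe1']
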
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index b1ca8a345..aaa491b75 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -11,190 +11,352 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" RFC2544 Throughput implemenation """
-from __future__ import absolute_import
-from __future__ import division
import logging
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
-from yardstick.network_services.traffic_profile.traffic_profile \
- import TrexProfile
+from yardstick.common import constants
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-LOGGING = logging.getLogger(__name__)
+LOG = logging.getLogger(__name__)
+SRC_PORT = 'sport'
+DST_PORT = 'dport'
-class RFC2544Profile(TrexProfile):
- """ This class handles rfc2544 implemenation. """
- def __init__(self, traffic_generator):
- super(RFC2544Profile, self).__init__(traffic_generator)
- self.generator = None
- self.max_rate = None
- self.min_rate = None
- self.ports = None
- self.rate = 100
- self.drop_percent_at_max_tx = None
- self.throughput_max = None
+class PortPgIDMap(object):
+ """Port and pg_id mapping class
- def register_generator(self, generator):
- self.generator = generator
+    "pg_id" is the identifier the STL library assigns to each stream. In the
+    RFC2544Profile class, the traffic has one STLProfile per port, which contains
+ one or several streams, one per packet size defined in the IMIX test case
+ description.
- def execute_traffic(self, traffic_generator=None):
- """ Generate the stream and run traffic on the given ports """
- if traffic_generator is not None and self.generator is None:
- self.generator = traffic_generator
+ Example of port <-> pg_id map:
+ self._port_pg_id_map = {
+ 0: [1, 2, 3, 4],
+ 1: [5, 6, 7, 8]
+ }
+ """
- if self.ports is not None:
- return
+ def __init__(self):
+ self._pg_id = 0
+ self._last_port = None
+ self._port_pg_id_map = {}
- self.ports = []
- for vld_id, intfs in sorted(self.generator.networks.items()):
- profile_data = self.params.get(vld_id)
- # no profile for this port
- if not profile_data:
- continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
- continue
- for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.max_rate = self.rate
- self.min_rate = 0
- self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
- duration=30, force=True)
- self.drop_percent_at_max_tx = 0
- self.throughput_max = 0
-
- def get_multiplier(self):
- """ Get the rate at which next iteration to run """
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def get_drop_percentage(self, generator=None):
- """ Calculate the drop percentage and run the traffic """
- if generator is None:
- generator = self.generator
- run_duration = self.generator.RUN_DURATION
- samples = self.generator.generate_samples(self.ports)
-
- in_packets = sum([value['in_packets'] for value in samples.values()])
- out_packets = sum([value['out_packets'] for value in samples.values()])
-
- packet_drop = abs(out_packets - in_packets)
- drop_percent = 100.0
- try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
- except ZeroDivisionError:
- LOGGING.info('No traffic is flowing')
+ def add_port(self, port):
+ self._last_port = port
+ self._port_pg_id_map[port] = []
- # TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
- tolerance_low = generator.rfc2544_helper.tolerance_low
- tolerance_high = generator.rfc2544_helper.tolerance_high
+ def get_pg_ids(self, port):
+ return self._port_pg_id_map.get(port, [])
- tx_rate = out_packets / run_duration
- rx_rate = in_packets / run_duration
+ def increase_pg_id(self, port=None):
+ port = self._last_port if not port else port
+ if port is None:
+ return
+ pg_id_list = self._port_pg_id_map.get(port)
+ if not pg_id_list:
+ self.add_port(port)
+ pg_id_list = self._port_pg_id_map[port]
+ self._pg_id += 1
+ pg_id_list.append(self._pg_id)
+ return self._pg_id
- throughput_max = self.throughput_max
- drop_percent_at_max_tx = self.drop_percent_at_max_tx
- if self.drop_percent_at_max_tx is None:
- self.rate = tx_rate
- self.first_run = False
+class RFC2544Profile(trex_traffic_profile.TrexProfile):
+ """TRex RFC2544 traffic profile"""
- if drop_percent > tolerance_high:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.max_rate = self.rate
- if throughput_max == 0:
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= tolerance_low:
- # TODO(esm): why do we update the samples dict in this case
- # and not update our tracking values?
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= self.drop_percent_at_max_tx:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
- self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
- self.throughput_max = throughput_max = rx_rate
+ TOLERANCE_LIMIT = 0.01
+ STATUS_SUCCESS = "Success"
+ STATUS_FAIL = "Failure"
- else:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
-
- generator.clear_client_stats(self.ports)
- generator.start_client(self.ports, mult=self.get_multiplier(),
- duration=run_duration, force=True)
-
- # if correlated traffic update the Throughput
- if generator.rfc2544_helper.correlated_traffic:
- throughput_max *= 2
+ def __init__(self, traffic_generator):
+ super(RFC2544Profile, self).__init__(traffic_generator)
+ self.generator = None
+ self.iteration = 0
+ self.rate = self.config.frame_rate
+ self.max_rate = self.config.frame_rate
+ self.min_rate = 0
- samples.update({
- 'TxThroughput': tx_rate,
- 'RxThroughput': rx_rate,
- 'CurrentDropPercentage': drop_percent,
- 'Throughput': throughput_max,
- 'DropPercentage': drop_percent_at_max_tx,
- })
+ def register_generator(self, generator):
+ self.generator = generator
- return samples
+ def stop_traffic(self, traffic_generator=None):
+        """Stop traffic injection, reset counters and remove streams"""
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- def execute_latency(self, generator=None, samples=None):
- if generator is not None and self.generator is None:
- self.generator = generator
+ self.generator.client.stop()
+ self.generator.client.reset()
+ self.generator.client.remove_all_streams()
- if samples is None:
- samples = self.generator.generate_samples()
+ def execute_traffic(self, traffic_generator=None):
+ """Generate the stream and run traffic on the given ports
+
+ :param traffic_generator: (TrexTrafficGenRFC) traffic generator
+ :return ports: (list of int) indexes of ports
+ port_pg_id: (dict) port indexes and pg_id [1] map
+ [1] https://trex-tgn.cisco.com/trex/doc/cp_stl_docs/api/
+ profile_code.html#stlstream-modes
+ """
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- self.pps, multiplier = self.calculate_pps(samples)
- self.ports = []
- self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
+ port_pg_id = PortPgIDMap()
+ ports = []
for vld_id, intfs in sorted(self.generator.networks.items()):
profile_data = self.params.get(vld_id)
if not profile_data:
continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
continue
for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.generator.start_client(ports=self.ports, mult=str(multiplier),
- duration=120, force=True)
- self.first_run = False
-
- def calculate_pps(self, samples):
- pps = round(samples['Throughput'] / 2, 2)
- multiplier = round(self.rate / self.pps, 2)
- return pps, multiplier
-
- def create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if pps:
- stl_mode = STLTXCont(pps=pps)
+ port_num = int(self.generator.port_num(intf))
+ ports.append(port_num)
+ port_pg_id.add_port(port_num)
+ profile = self._create_profile(profile_data,
+ self.rate, port_pg_id,
+ self.config.enable_latency)
+ self.generator.client.add_streams(profile, ports=[port_num])
+
+ self.generator.client.start(ports=ports,
+ duration=self.config.duration,
+ force=True)
+ self.iteration = self.generator.rfc2544_helper.iteration.value
+ return ports, port_pg_id
+
+ def _create_profile(self, profile_data, rate, port_pg_id, enable_latency):
+ """Create a STL profile (list of streams) for a port"""
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ imix_data = self._create_imix_data(imix)
+ self._create_vm(profile_data[packet_name])
+ _streams = self._create_streams(imix_data, rate, port_pg_id,
+ enable_latency)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _create_imix_data(self, imix,
+ weight_mode=constants.DISTRIBUTION_IN_BYTES):
+ """Generate the IMIX distribution for a STL profile
+
+ The input information is the framesize dictionary in a test case
+ traffic profile definition. E.g.:
+ downlink_0:
+ ipv4:
+ id: 2
+ outer_l2:
+ framesize:
+ 64B: 10
+ 128B: 20
+ ...
+
+ This function normalizes the sum of framesize weights to 100 and
+ returns a dictionary of frame sizes in bytes and weight in percentage.
+ E.g.:
+ imix_count = {64: 25, 128: 75}
+
+ The weight mode is described in [1]. There are two ways to describe the
+ weight of the packets:
+ - Distribution in packets: the weight defines the percentage of
+ packets sent per packet size. IXIA uses this definition.
+ - Distribution in bytes: the weight defines the percentage of bytes
+ sent per packet size.
+
+ Packet size # packets D. in packets Bytes D. in bytes
+ 40 7 58.33% 280 7%
+ 576 4 33.33% 2304 56%
+ 1500 1 8.33% 1500 37%
+
+ [1] https://en.wikipedia.org/wiki/Internet_Mix
+
+ :param imix: (dict) IMIX size and weight
+ """
+ imix_count = {}
+ if not imix:
+ return imix_count
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ imix_count = {64: 100}
+ imix_sum = 100
+
+ weight_normalize = float(imix_sum) / 100
+ imix_dip = {size: float(weight) / weight_normalize
+ for size, weight in imix_count.items()}
+
+ if weight_mode == constants.DISTRIBUTION_IN_PACKETS:
+ return imix_dip
+
+ byte_total = sum([int(size) * weight
+ for size, weight in imix_count.items()])
+ return {size: float(int(size) * weight * 100) / byte_total
+ for size, weight in imix_count.items()}
+
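
The distribution table in the docstring above can be reproduced with a few lines of standalone Python; this is only a sanity check of the arithmetic (Python 3 division), not code from the patch:

imix_pkts = {40: 7, 576: 4, 1500: 1}        # frame size (bytes) -> packets

total_pkts = sum(imix_pkts.values())                      # 12
dist_packets = {s: 100.0 * n / total_pkts
                for s, n in imix_pkts.items()}            # 58.33 / 33.33 / 8.33

total_bytes = sum(s * n for s, n in imix_pkts.items())    # 4084
dist_bytes = {s: 100.0 * s * n / total_bytes
              for s, n in imix_pkts.items()}              # ~7 / 56 / 37 (rounded)

print(dist_packets)
print(dist_bytes)
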
+ def _create_vm(self, packet_definition):
+ """Create the STL Raw instructions"""
+ self.ether_packet = Pkt.Ether()
+ self.ip_packet = Pkt.IP()
+ self.ip6_packet = None
+ self.udp_packet = Pkt.UDP()
+ self.udp[DST_PORT] = 'UDP.dport'
+ self.udp[SRC_PORT] = 'UDP.sport'
+ self.qinq = False
+ self.vm_flow_vars = []
+ outer_l2 = packet_definition.get('outer_l2')
+ outer_l3v4 = packet_definition.get('outer_l3v4')
+ outer_l3v6 = packet_definition.get('outer_l3v6')
+ outer_l4 = packet_definition.get('outer_l4')
+ if outer_l2:
+ self._set_outer_l2_fields(outer_l2)
+ if outer_l3v4:
+ self._set_outer_l3v4_fields(outer_l3v4)
+ if outer_l3v6:
+ self._set_outer_l3v6_fields(outer_l3v6)
+ if outer_l4:
+ self._set_outer_l4_fields(outer_l4)
+ self.trex_vm = trex_stl_packet_builder_scapy.STLScVmRaw(
+ self.vm_flow_vars)
+
+ def _create_single_packet(self, size=64):
+ size -= 4
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ udp_packet = self.udp_packet
+ if self.qinq:
+ qinq_packet = self.qinq_packet
+ base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
else:
- stl_mode = STLTXCont(pps=self.pps)
- if self.pg_id:
- LOGGING.debug("pg_id: %s", self.pg_id)
- stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
- flow_stats=stl_flow_stats)
- self.pg_id += 1
+ base_pkt = ether_packet / ip_packet / udp_packet
+ pad = max(0, size - len(base_pkt)) * 'x'
+ return trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / pad, vm=self.trex_vm)
+
+ def _create_streams(self, imix_data, rate, port_pg_id, enable_latency):
+ """Create a list of streams per packet size
+
+ The STL TX mode speed of the generated streams will depend on the frame
+ weight and the frame rate. Both the frame weight and the total frame
+ rate are normalized to 100. The STL TX mode speed, defined in
+        percentage, is the combination of both percentages. E.g.:
+ frame weight = 100
+ rate = 90
+ --> STLTXmode percentage = 10 (%)
+
+ frame weight = 80
+ rate = 50
+ --> STLTXmode percentage = 40 (%)
+
+ :param imix_data: (dict) IMIX size and weight
+        :param rate: (float) normalized [0..100] total rate
+        :param port_pg_id: (PortPgIDMap) port / pg_id (list) map
+ """
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ packet = self._create_single_packet(size)
+ pg_id = port_pg_id.increase_pg_id()
+ stl_flow = (trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id) if
+ enable_latency else None)
+ mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
+ streams.append(trex_stl_client.STLStream(
+ packet=packet, flow_stats=stl_flow, mode=mode))
+ return streams
+
+ def get_drop_percentage(self, samples, tol_low, tol_high,
+ correlated_traffic, resolution): # pylint: disable=unused-argument
+ """Calculate the drop percentage and run the traffic"""
+ completed = False
+ status = self.STATUS_FAIL
+ out_pkt_end = sum(port['out_packets'] for port in samples[-1].values())
+ in_pkt_end = sum(port['in_packets'] for port in samples[-1].values())
+ out_pkt_ini = sum(port['out_packets'] for port in samples[0].values())
+ in_pkt_ini = sum(port['in_packets'] for port in samples[0].values())
+ in_bytes_ini = sum(port['in_bytes'] for port in samples[0].values())
+ out_bytes_ini = sum(port['out_bytes'] for port in samples[0].values())
+ in_bytes_end = sum(port['in_bytes'] for port in samples[-1].values())
+ out_bytes_end = sum(port['out_bytes'] for port in samples[-1].values())
+ time_diff = (list(samples[-1].values())[0]['timestamp'] -
+ list(samples[0].values())[0]['timestamp']).total_seconds()
+ out_packets = out_pkt_end - out_pkt_ini
+ in_packets = in_pkt_end - in_pkt_ini
+ out_bytes = out_bytes_end - out_bytes_ini
+ in_bytes = in_bytes_end - in_bytes_ini
+ tx_rate_fps = float(out_packets) / time_diff
+ rx_rate_fps = float(in_packets) / time_diff
+ drop_percent = 100.0
+
+ # https://tools.ietf.org/html/rfc2544#section-26.3
+ if out_packets:
+ drop_percent = round(
+ (float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
+
+ tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+ tol_low = min(tol_low, self.TOLERANCE_LIMIT)
+ if drop_percent > tol_high:
+ self.max_rate = self.rate
+ elif drop_percent < tol_low:
+ self.min_rate = self.rate
else:
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
- return stream
+ status = self.STATUS_SUCCESS
+ completed = True
+
+ last_rate = self.rate
+ self.rate = self._get_next_rate()
+ if abs(last_rate - self.rate) < resolution:
+ # stop test if the difference between the rate transmission
+ # in two iterations is smaller than the value of the resolution
+ completed = True
+ LOG.debug("rate=%s, next_rate=%s, resolution=%s, completed=%s",
+ last_rate, self.rate, resolution, completed)
+
+ ports = samples[-1].keys()
+ num_ports = len(ports)
+
+ output = {}
+ for port in ports:
+ output[port] = {}
+ first = samples[0][port]
+ last = samples[-1][port]
+ output[port]['InPackets'] = last['in_packets'] - first['in_packets']
+ output[port]['OutPackets'] = last['out_packets'] - first['out_packets']
+ output[port]['InBytes'] = last['in_bytes'] - first['in_bytes']
+ output[port]['OutBytes'] = last['out_bytes'] - first['out_bytes']
+ if self.config.enable_latency:
+ output[port]['LatencyAvg'] = float(sum(
+ [last['latency'][id]['average'] for id in
+ last['latency']]) * 1000) / len(last['latency'])
+ output[port]['LatencyMin'] = min(
+ [last['latency'][id]['total_min'] for id in
+ last['latency']]) * 1000
+ output[port]['LatencyMax'] = max(
+ [last['latency'][id]['total_max'] for id in
+ last['latency']]) * 1000
+
+ output['TxThroughput'] = tx_rate_fps
+ output['RxThroughput'] = rx_rate_fps
+ output['RxThroughputBps'] = round(float(in_bytes) / time_diff, 3)
+ output['TxThroughputBps'] = round(float(out_bytes) / time_diff, 3)
+ output['DropPercentage'] = drop_percent
+ output['Rate'] = last_rate
+ output['PktSize'] = self._get_framesize()
+ output['Iteration'] = self.iteration
+ output['Status'] = status
+
+ if self.config.enable_latency:
+ output['LatencyAvg'] = float(
+ sum([output[port]['LatencyAvg'] for port in ports])) / num_ports
+ output['LatencyMin'] = min([output[port]['LatencyMin'] for port in ports])
+ output['LatencyMax'] = max([output[port]['LatencyMax'] for port in ports])
+
+ return completed, output
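
A condensed sketch of the search loop that get_drop_percentage() implements: the tolerance window is clamped by TOLERANCE_LIMIT, the rate is bisected between min_rate and max_rate, and the search stops once the step falls below the configured resolution. The measure() callback and its loss curve are stand-ins for a real traffic run, not part of the patch:

TOLERANCE_LIMIT = 0.01

def search(measure, rate=100.0, min_rate=0.0, max_rate=100.0,
           tol_low=0.0, tol_high=0.005, resolution=0.1):
    tol_high = max(tol_high, TOLERANCE_LIMIT)
    tol_low = min(tol_low, TOLERANCE_LIMIT)
    while True:
        drop_percent = measure(rate)
        if drop_percent > tol_high:
            max_rate = rate                  # too lossy: lower the ceiling
        elif drop_percent < tol_low:
            min_rate = rate                  # clean run: raise the floor
        else:
            return rate, 'Success'           # inside the tolerance window
        last, rate = rate, round((max_rate + min_rate) / 2.0, 5)
        if abs(last - rate) < resolution:
            return rate, 'Failure'           # converged without success

# A DUT that drops ~0.02 % of traffic above 60 % of line rate:
print(search(lambda r: 0.02 if r > 60 else 0.0))   # (50.0, 'Success')
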
diff --git a/yardstick/network_services/traffic_profile/sip.py b/yardstick/network_services/traffic_profile/sip.py
new file mode 100644
index 000000000..d18574090
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/sip.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.network_services.traffic_profile import base
+
+
+class SipProfile(base.TrafficProfile):
+    """ SIPp traffic profile """
+
+ def __init__(self, yaml_data):
+ super(SipProfile, self).__init__(yaml_data)
+ self.generator = None
+
+ def execute_traffic(self, traffic_generator=None):
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
+
+ def is_ended(self):
+ if self.generator is not None:
+ return self.generator.is_ended()
+ return False
diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
index 2f97945c0..cf538d488 100644
--- a/yardstick/network_services/traffic_profile/traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
@@ -11,29 +11,24 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex Traffic Profile definitions """
-from __future__ import absolute_import
import struct
import socket
import logging
from random import SystemRandom
-import six
import ipaddress
-from yardstick.network_services.traffic_profile.base import TrafficProfile
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
-from trex_stl_lib.trex_stl_streams import STLProfile
+import six
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
from trex_stl_lib import api as Pkt
+from yardstick.common import exceptions as y_exc
+from yardstick.network_services.traffic_profile import base
+
+
SRC = 'src'
DST = 'dst'
ETHERNET = 'Ethernet'
@@ -48,7 +43,7 @@ TYPE_OF_SERVICE = 'tos'
LOG = logging.getLogger(__name__)
-class TrexProfile(TrafficProfile):
+class TrexProfile(base.TrafficProfile):
""" This class handles Trex Traffic profile generation and execution """
PROTO_MAP = {
@@ -57,6 +52,7 @@ class TrexProfile(TrafficProfile):
IPv6: ('ip6_packet', Pkt.IPv6),
UDP: ('udp_packet', Pkt.UDP),
}
+ RATE_ROUND = 5
def _general_single_action_partial(self, protocol):
def f(field):
@@ -70,6 +66,7 @@ class TrexProfile(TrafficProfile):
def _ethernet_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="mac_{}".format(direction),
min_value=1,
max_value=30,
@@ -77,30 +74,32 @@ class TrexProfile(TrafficProfile):
op='inc',
step=1)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_{}'.format(direction),
- pkt_offset='Ether.{}'.format(direction))
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='mac_{}'.format(direction),
+ pkt_offset='Ether.{}'.format(direction))
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
def _ip_range_action_partial(self, direction, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
- ip1 = int(ipaddress.IPv4Address(min_value))
- ip2 = int(ipaddress.IPv4Address(max_value))
- actual_count = (ip2 - ip1)
+ _, _, actual_count = self._count_ip(min_value, max_value)
if not actual_count:
count = 1
elif actual_count < int(count):
count = actual_count
- stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_{}".format(direction),
- min_value=min_value,
- max_value=max_value,
- size=4,
- limit=int(count),
- seed=0x1235)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(
+ name="ip4_{}".format(direction),
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_{}'.format(direction),
- pkt_offset='IP.{}'.format(direction))
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='ip4_{}'.format(direction),
+ pkt_offset='IP.{}'.format(direction))
self.vm_flow_vars.append(stl_vm_wr_flow_var)
stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
self.vm_flow_vars.append(stl_vm_fix_ipv4)
@@ -108,7 +107,8 @@ class TrexProfile(TrafficProfile):
def _ip6_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
- min_value, max_value = self._get_start_end_ipv6(min_value, max_value)
+ # pylint: disable=unused-argument
+ min_value, max_value, _ = self._count_ip(min_value, max_value)
stl_vm_flow_var = STLVmFlowVar(name="ip6_{}".format(direction),
min_value=min_value,
max_value=max_value,
@@ -116,14 +116,16 @@ class TrexProfile(TrafficProfile):
op='random',
step=1)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip6_{}'.format(direction),
- pkt_offset='IPv6.{}'.format(direction),
- offset_fixup=8)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='ip6_{}'.format(direction),
+ pkt_offset='IPv6.{}'.format(direction),
+ offset_fixup=8)
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
- def _dscp_range_action_partial(self, *_):
+ def _dscp_range_action_partial(self, *args):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="dscp",
min_value=min_value,
max_value=max_value,
@@ -134,8 +136,10 @@ class TrexProfile(TrafficProfile):
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dscp',
pkt_offset='IP.tos')
self.vm_flow_vars.append(stl_vm_wr_flow_var)
+ return partial
def _udp_range_action_partial(self, field, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
actual_count = int(max_value) - int(min_value)
if not actual_count:
@@ -143,15 +147,17 @@ class TrexProfile(TrafficProfile):
elif int(count) > actual_count:
count = actual_count
- stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_{}".format(field),
- min_value=min_value,
- max_value=max_value,
- size=2,
- limit=int(count),
- seed=0x1235)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(
+ name="port_{}".format(field),
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_{}'.format(field),
- pkt_offset=self.udp[field])
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='port_{}'.format(field),
+ pkt_offset=self.udp[field])
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
@@ -181,6 +187,8 @@ class TrexProfile(TrafficProfile):
self.qinq = False
self.vm_flow_vars = []
self.packets = []
+ self.max_rate = 0
+ self.min_rate = 0
self._map_proto_actions = {
# the tuple is (single value function, range value function, if the values should be
@@ -332,130 +340,38 @@ class TrexProfile(TrafficProfile):
if 'dstport' in outer_l4:
self._set_proto_addr(UDP, DST_PORT, outer_l4['dstport'], outer_l4['count'])
- def generate_imix_data(self, packet_definition):
- """ generate packet size for a given traffic profile """
- imix_count = {}
- imix_data = {}
- if not packet_definition:
- return imix_count
- imix = packet_definition.get('framesize')
- if imix:
- for size in imix:
- data = imix[size]
- imix_data[int(size[:-1])] = int(data)
- imix_sum = sum(imix_data.values())
- if imix_sum > 100:
- raise SystemExit("Error in IMIX data")
- elif imix_sum < 100:
- imix_data[64] = imix_data.get(64, 0) + (100 - imix_sum)
-
- avg_size = 0.0
- for size in imix_data:
- count = int(imix_data[size])
- if count:
- avg_size += round(size * count / 100, 2)
- pps = round(self.pps * count / 100, 0)
- imix_count[size] = pps
- self.rate = round(1342177280 / avg_size, 0) * 2
- logging.debug("Imax: %s rate: %s", imix_count, self.rate)
- return imix_count
-
- def get_streams(self, profile_data):
- """ generate trex stream
- :param profile_data:
- :type profile_data:
- """
- self.streams = []
- self.pps = self.params['traffic_profile'].get('frame_rate', 100)
- for packet_name in profile_data:
- outer_l2 = profile_data[packet_name].get('outer_l2')
- imix_data = self.generate_imix_data(outer_l2)
- if not imix_data:
- imix_data = {64: self.pps}
- self.generate_vm(profile_data[packet_name])
- for size in imix_data:
- self._generate_streams(size, imix_data[size])
- self._generate_profile()
- return self.profile
-
- def generate_vm(self, packet_definition):
- """ generate trex vm with flows setup """
- self.ether_packet = Pkt.Ether()
- self.ip_packet = Pkt.IP()
- self.ip6_packet = None
- self.udp_packet = Pkt.UDP()
- self.udp[DST_PORT] = 'UDP.dport'
- self.udp[SRC_PORT] = 'UDP.sport'
- self.qinq = False
- self.vm_flow_vars = []
- outer_l2 = packet_definition.get('outer_l2', None)
- outer_l3v4 = packet_definition.get('outer_l3v4', None)
- outer_l3v6 = packet_definition.get('outer_l3v6', None)
- outer_l4 = packet_definition.get('outer_l4', None)
- if outer_l2:
- self._set_outer_l2_fields(outer_l2)
- if outer_l3v4:
- self._set_outer_l3v4_fields(outer_l3v4)
- if outer_l3v6:
- self._set_outer_l3v6_fields(outer_l3v6)
- if outer_l4:
- self._set_outer_l4_fields(outer_l4)
- self.trex_vm = STLScVmRaw(self.vm_flow_vars)
-
- def generate_packets(self):
- """ generate packets from trex TG """
- base_pkt = self.base_pkt
- size = self.fsize - 4
- pad = max(0, size - len(base_pkt)) * 'x'
- self.packets = [STLPktBuilder(pkt=base_pkt / pad,
- vm=vm) for vm in self.vms]
-
- def _create_single_packet(self, size=64):
- size = size - 4
- ether_packet = self.ether_packet
- ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
- udp_packet = self.udp_packet
- if self.qinq:
- qinq_packet = self.qinq_packet
- base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
- else:
- base_pkt = ether_packet / ip_packet / udp_packet
- pad = max(0, size - len(base_pkt)) * 'x'
- packet = STLPktBuilder(pkt=base_pkt / pad, vm=self.trex_vm)
- return packet
-
- def _create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if self.pg_id:
- self.pg_id += 1
- stl_flow = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps),
- flow_stats=stl_flow)
- else:
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps))
- return stream
-
- def _generate_streams(self, packet_size, pps):
- self.streams.append(self._create_single_stream(packet_size, pps))
-
- def _generate_profile(self):
- self.profile = STLProfile(self.streams)
+ def _get_next_rate(self):
+ rate = round(float(self.max_rate + self.min_rate)/2.0, self.RATE_ROUND)
+ return rate
+
+ def _get_framesize(self):
+ framesizes = []
+ for traffickey, value in self.params.items():
+ if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
+ continue
+ for _, data in value.items():
+ framesize = data['outer_l2']['framesize']
+ for size in (s for s, w in framesize.items() if int(w) != 0):
+ framesizes.append(size)
+ if len(set(framesizes)) == 0:
+ return ''
+ elif len(set(framesizes)) == 1:
+ return framesizes[0]
+ return 'IMIX'
@classmethod
- def _get_start_end_ipv6(cls, start_ip, end_ip):
- try:
- ip1 = socket.inet_pton(socket.AF_INET6, start_ip)
- ip2 = socket.inet_pton(socket.AF_INET6, end_ip)
- hi1, lo1 = struct.unpack('!QQ', ip1)
- hi2, lo2 = struct.unpack('!QQ', ip2)
- if ((hi1 << 64) | lo1) > ((hi2 << 64) | lo2):
- raise SystemExit("IPv6: start_ip is greater then end_ip")
- max_p1 = abs(int(lo1) - int(lo2))
- base_p1 = lo1
- except Exception as ex_error:
- raise SystemExit(ex_error)
- else:
- return base_p1, max_p1 + base_p1
+ def _count_ip(cls, start_ip, end_ip):
+ start = ipaddress.ip_address(six.u(start_ip))
+ end = ipaddress.ip_address(six.u(end_ip))
+ if start.version == 4:
+ return start, end, int(end) - int(start)
+ elif start.version == 6:
+ if int(start) > int(end):
+ raise y_exc.IPv6RangeError(start_ip=str(start),
+ end_ip=str(end))
+ _, lo1 = struct.unpack('!QQ', start.packed)
+ _, lo2 = struct.unpack('!QQ', end.packed)
+ return lo1, lo2, lo2 - lo1
@classmethod
def _get_random_value(cls, min_port, max_port):
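
A standalone rendering of the _count_ip() arithmetic above (stdlib only; the sample addresses are made up, and the IPv6 range check that raises IPv6RangeError is omitted). IPv4 returns the parsed endpoints plus the address count; IPv6 returns the low 64 bits of each address, which is the only part the flow variable varies:

import ipaddress
import struct

def count_ip(start_ip, end_ip):
    start = ipaddress.ip_address(start_ip)
    end = ipaddress.ip_address(end_ip)
    if start.version == 4:
        return start, end, int(end) - int(start)
    _, lo1 = struct.unpack('!QQ', start.packed)   # low 64 bits of IPv6
    _, lo2 = struct.unpack('!QQ', end.packed)
    return lo1, lo2, lo2 - lo1

print(count_ip('10.0.0.1', '10.0.0.255'))        # (..., ..., 254)
print(count_ip('2001:db8::1', '2001:db8::ff'))   # (1, 255, 254)
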
diff --git a/yardstick/network_services/traffic_profile/vpp_rfc2544.py b/yardstick/network_services/traffic_profile/vpp_rfc2544.py
new file mode 100644
index 000000000..412e4e69a
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/vpp_rfc2544.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import ipaddress
+import logging
+import random
+import string
+
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
+
+from yardstick.common import constants
+from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \
+ MultipleLossRatioSearch
+from yardstick.network_services.traffic_profile.rfc2544 import RFC2544Profile, \
+ PortPgIDMap
+from yardstick.network_services.traffic_profile.trex_traffic_profile import IP, \
+ DST
+
+LOGGING = logging.getLogger(__name__)
+
+
+class VppRFC2544Profile(RFC2544Profile):
+
+ def __init__(self, traffic_generator):
+ super(VppRFC2544Profile, self).__init__(traffic_generator)
+
+ tp_cfg = traffic_generator["traffic_profile"]
+ self.number_of_intermediate_phases = tp_cfg.get("intermediate_phases",
+ 2)
+
+ self.duration = self.config.duration
+ self.precision = self.config.test_precision
+ self.lower_bound = self.config.lower_bound
+ self.upper_bound = self.config.upper_bound
+ self.step_interval = self.config.step_interval
+ self.enable_latency = self.config.enable_latency
+
+ self.pkt_size = None
+ self.flow = None
+
+ self.tolerance_low = 0
+ self.tolerance_high = 0
+
+ self.queue = None
+ self.port_pg_id = None
+
+ self.current_lower = self.lower_bound
+ self.current_upper = self.upper_bound
+
+ self.ports = []
+ self.profiles = {}
+
+ @property
+ def delta(self):
+ return self.current_upper - self.current_lower
+
+ @property
+ def mid_point(self):
+ return (self.current_lower + self.current_upper) / 2
+
+ @staticmethod
+ def calculate_frame_size(imix):
+ if not imix:
+ return 64, 100
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ return 64, 100
+ packets_total = sum([int(size) * weight
+ for size, weight in imix_count.items()])
+ return packets_total / imix_sum, imix_sum
+
+ @staticmethod
+ def _gen_payload(length):
+ payload = ""
+ for _ in range(length):
+ payload += random.choice(string.ascii_letters)
+
+ return payload
+
+ def bounds_iterator(self, logger=None):
+ self.current_lower = self.lower_bound
+ self.current_upper = self.upper_bound
+
+ test_value = self.current_upper
+ while abs(self.delta) >= self.precision:
+ if logger:
+                logger.debug("New interval [%s, %s), precision: %s",
+                             self.current_lower,
+                             self.current_upper, self.precision)
+ logger.info("Testing with value %s", test_value)
+
+ yield test_value
+ test_value = self.mid_point
+
+ def register_generator(self, generator):
+ super(VppRFC2544Profile, self).register_generator(generator)
+ self.init_traffic_params(generator)
+
+ def init_queue(self, queue):
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def init_traffic_params(self, generator):
+ if generator.rfc2544_helper.latency:
+ self.enable_latency = True
+ self.tolerance_low = generator.rfc2544_helper.tolerance_low
+ self.tolerance_high = generator.rfc2544_helper.tolerance_high
+        self.max_rate = generator.scenario_helper.all_options.get(
+            'vpp_config', {}).get('max_rate', self.rate)
+
+ def create_profile(self, profile_data, current_port):
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ self.pkt_size, imix_sum = self.calculate_frame_size(imix)
+ self._create_vm(profile_data[packet_name])
+ if self.max_rate > 100:
+ imix_data = self._create_imix_data(imix,
+ constants.DISTRIBUTION_IN_PACKETS)
+ else:
+ imix_data = self._create_imix_data(imix)
+ _streams = self._create_single_stream(current_port, imix_data,
+ imix_sum)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _set_outer_l3v4_fields(self, outer_l3v4):
+ """ setup outer l3v4 fields from traffic profile """
+ ip_params = {}
+ if 'proto' in outer_l3v4:
+ ip_params['proto'] = outer_l3v4['proto']
+ self._set_proto_fields(IP, **ip_params)
+
+ self.flow = int(outer_l3v4['count'])
+ src_start_ip, _ = outer_l3v4['srcip4'].split('-')
+ dst_start_ip, _ = outer_l3v4['dstip4'].split('-')
+
+ self.ip_packet = Pkt.IP(src=src_start_ip,
+ dst=dst_start_ip,
+ proto=outer_l3v4['proto'])
+ if self.flow > 1:
+ dst_start_int = int(ipaddress.ip_address(str(dst_start_ip)))
+ dst_end_ip_new = ipaddress.ip_address(
+ dst_start_int + self.flow - 1)
+ # self._set_proto_addr(IP, SRC, outer_l3v4['srcip4'], outer_l3v4['count'])
+ self._set_proto_addr(IP, DST,
+ "{start_ip}-{end_ip}".format(
+ start_ip=dst_start_ip,
+ end_ip=str(dst_end_ip_new)),
+ self.flow)
+
+ def _create_single_packet(self, size=64):
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ base_pkt = ether_packet / ip_packet
+ payload_len = max(0, size - len(base_pkt) - 4)
+ packet = trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / self._gen_payload(payload_len),
+ vm=self.trex_vm)
+ packet_lat = trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / self._gen_payload(payload_len))
+
+ return packet, packet_lat
+
+ def _create_single_stream(self, current_port, imix_data, imix_sum,
+ isg=0.0):
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ if current_port == 1:
+ isg += 10.0
+ if self.max_rate > 100:
+ mode = trex_stl_streams.STLTXCont(
+ pps=int(weight * imix_sum / 100))
+ mode_lat = mode
+ else:
+ mode = trex_stl_streams.STLTXCont(
+ percentage=weight * self.max_rate / 100)
+ mode_lat = trex_stl_streams.STLTXCont(pps=9000)
+
+ packet, packet_lat = self._create_single_packet(size)
+ streams.append(
+ trex_stl_client.STLStream(isg=isg, packet=packet, mode=mode))
+ if self.enable_latency:
+ pg_id = self.port_pg_id.increase_pg_id(current_port)
+ stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ stream_lat = trex_stl_client.STLStream(isg=isg,
+ packet=packet_lat,
+ mode=mode_lat,
+ flow_stats=stl_flow)
+ streams.append(stream_lat)
+ return streams
+
+ def execute_traffic(self, traffic_generator=None):
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
+
+ self.ports = []
+ self.profiles = {}
+ self.port_pg_id = PortPgIDMap()
+ for vld_id, intfs in sorted(self.generator.networks.items()):
+ profile_data = self.params.get(vld_id)
+ if not profile_data:
+ continue
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
+ continue
+ for intf in intfs:
+ current_port = int(self.generator.port_num(intf))
+ self.port_pg_id.add_port(current_port)
+ profile = self.create_profile(profile_data, current_port)
+ self.generator.client.add_streams(profile,
+ ports=[current_port])
+
+ self.ports.append(current_port)
+ self.profiles[current_port] = profile
+
+ timeout = self.generator.scenario_helper.scenario_cfg["runner"][
+ "duration"]
+ test_data = {
+ "test_duration": timeout,
+ "test_precision": self.precision,
+ "tolerated_loss": self.tolerance_high,
+ "duration": self.duration,
+ "packet_size": self.pkt_size,
+ "flow": self.flow
+ }
+
+ if self.max_rate > 100:
+ self.binary_search_with_optimized(self.generator, self.duration,
+ timeout, test_data)
+ else:
+ self.binary_search(self.generator, self.duration,
+ self.tolerance_high, test_data)
+
+ def binary_search_with_optimized(self, traffic_generator, duration,
+ timeout, test_data):
+ self.queue.cancel_join_thread()
+ algorithm = MultipleLossRatioSearch(
+ measurer=traffic_generator, latency=self.enable_latency,
+ pkt_size=self.pkt_size,
+ final_trial_duration=duration,
+ final_relative_width=self.step_interval / 100,
+ number_of_intermediate_phases=self.number_of_intermediate_phases,
+ initial_trial_duration=1,
+ timeout=timeout)
+ algorithm.init_generator(self.ports, self.port_pg_id, self.profiles,
+ test_data, self.queue)
+ return algorithm.narrow_down_ndr_and_pdr(10000, self.max_rate,
+ self.tolerance_high)
+
+ def binary_search(self, traffic_generator, duration, tolerance_value,
+ test_data):
+ theor_max_thruput = 0
+ result_samples = {}
+
+ for test_value in self.bounds_iterator(LOGGING):
+ stats = traffic_generator.send_traffic_on_tg(self.ports,
+ self.port_pg_id,
+ duration,
+ str(
+ test_value / self.max_rate / 2),
+ latency=self.enable_latency)
+ traffic_generator.client.reset(ports=self.ports)
+ traffic_generator.client.clear_stats(ports=self.ports)
+ traffic_generator.client.remove_all_streams(ports=self.ports)
+ for port, profile in self.profiles.items():
+ traffic_generator.client.add_streams(profile, ports=[port])
+
+ loss_ratio = (float(traffic_generator.loss) / float(
+ traffic_generator.sent)) * 100
+
+ samples = traffic_generator.generate_samples(stats, self.ports,
+ self.port_pg_id,
+ self.enable_latency)
+ samples.update(test_data)
+ LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(),
+ test_value, samples)
+ self.queue.put(samples)
+
+ if float(loss_ratio) > float(tolerance_value):
+ LOGGING.debug("Failure... Decreasing upper bound")
+ self.current_upper = test_value
+ else:
+ LOGGING.debug("Success! Increasing lower bound")
+ self.current_lower = test_value
+
+ rate_total = float(traffic_generator.sent) / float(duration)
+ bandwidth_total = float(rate_total) * (
+ float(self.pkt_size) + 20) * 8 / (10 ** 9)
+
+ success_samples = {'Result_' + key: value for key, value in
+ samples.items()}
+ success_samples["Result_{}".format('PDR')] = {
+ "rate_total_pps": float(rate_total),
+ "bandwidth_total_Gbps": float(bandwidth_total),
+ "packet_loss_ratio": float(loss_ratio),
+ "packets_lost": int(traffic_generator.loss),
+ }
+ self.queue.put(success_samples)
+
+ # Store Actual throughput for result samples
+ for intf in traffic_generator.vnfd_helper.interfaces:
+ name = intf["name"]
+ result_samples[name] = {
+ "Result_Actual_throughput": float(
+ success_samples["Result_{}".format(name)][
+ "rx_throughput_bps"]),
+ }
+
+ for intf in traffic_generator.vnfd_helper.interfaces:
+ name = intf["name"]
+ if theor_max_thruput < samples[name]["tx_throughput_bps"]:
+ theor_max_thruput = samples[name]['tx_throughput_bps']
+ self.queue.put({'theor_max_throughput': theor_max_thruput})
+
+ result_samples["Result_theor_max_throughput"] = theor_max_thruput
+ self.queue.put(result_samples)
+ return result_samples
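
The weighted-average arithmetic in calculate_frame_size() above, unrolled for one made-up mix (Python 3 division; the sizes and weights are illustrative only):

imix = {'64B': 10, '128B': 20, '1518B': 5}

counts = {int(size.upper().replace('B', '')): int(w)
          for size, w in imix.items()}
weight_sum = sum(counts.values())                       # 35
byte_sum = sum(size * w for size, w in counts.items())  # 640 + 2560 + 7590
print(byte_sum / weight_sum, weight_sum)                # ~308.29 35
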
diff --git a/yardstick/network_services/utils.py b/yardstick/network_services/utils.py
index 7a1815eb9..9c64fecde 100644
--- a/yardstick/network_services/utils.py
+++ b/yardstick/network_services/utils.py
@@ -36,6 +36,9 @@ OPTS = [
cfg.StrOpt('trex_client_lib',
default=os.path.join(NSB_ROOT, 'trex_client/stl'),
help='trex python library path.'),
+ cfg.StrOpt('jre_path_i386',
+ default='',
+ help='path to installation of 32-bit Java 1.7+.'),
]
CONF.register_opts(OPTS, group="nsb")
@@ -121,7 +124,6 @@ def provision_tool(connection, tool_path, tool_file=None):
tool_path = get_nsb_option('tool_path')
if tool_file:
tool_path = os.path.join(tool_path, tool_file)
- bin_path = get_nsb_option("bin_path")
exit_status = connection.execute("which %s > /dev/null 2>&1" % tool_path)[0]
if exit_status == 0:
return encodeutils.safe_decode(tool_path, incoming='utf-8').rstrip()
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index 1390dd02e..69d29bf76 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,19 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from __future__ import print_function
import logging
+import ipaddress
+import six
+from yardstick.common import utils
+from yardstick.common import exceptions
-from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
-from yardstick.network_services.yang_model import YangModel
+from yardstick.network_services.helpers.samplevnf_helper import PortPairs
+from itertools import chain
LOG = logging.getLogger(__name__)
# ACL should work the same on all systems, we can provide the binary
ACL_PIPELINE_COMMAND = \
- 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
+ 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}'
ACL_COLLECT_KPI = r"""\
ACL TOTAL:[^p]+pkts_processed"?:\s(\d+),[^p]+pkts_drop"?:\s(\d+),[^p]+pkts_received"?:\s(\d+),"""
@@ -40,6 +42,196 @@ class AclApproxSetupEnvSetupEnvHelper(DpdkVnfSetupEnvHelper):
SW_DEFAULT_CORE = 5
DEFAULT_CONFIG_TPL_CFG = "acl.cfg"
VNF_TYPE = "ACL"
+ RULE_CMD = "acl"
+
+ DEFAULT_PRIORITY = 1
+ DEFAULT_PROTOCOL = 0
+ DEFAULT_PROTOCOL_MASK = 0
+    # Default actions to be applied to SampleVNF. Please note that
+    # this list is extended with the `fwd` action when default
+    # actions are generated.
+ DEFAULT_FWD_ACTIONS = ["accept", "count"]
+
+ def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
+ super(AclApproxSetupEnvSetupEnvHelper, self).__init__(vnfd_helper,
+ ssh_helper,
+ scenario_helper)
+ self._action_id = 0
+
+ def get_ip_from_port(self, port):
+ # we can't use gateway because in OpenStack gateways interfere with floating ip routing
+ # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
+ vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ return utils.make_ip_addr(vintf["local_ip"], vintf["netmask"])
+
+ def get_network_and_prefixlen_from_ip_of_port(self, port):
+ ip_addr = self.get_ip_from_port(port)
+ # handle cases with no gateway
+ if ip_addr:
+ return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
+ else:
+ return None, None
+
+ @property
+ def new_action_id(self):
+ """Get new action id"""
+ self._action_id += 1
+ return self._action_id
+
+ def get_default_flows(self):
+ """Get default actions/rules
+ Returns: (<actions>, <rules>)
+ <actions>:
+ { <action_id>: [ <list of actions> ]}
+ Example:
+        { 0 : [ "accept", "count", {"fwd": {"port": 0}} ], ... }
+        <rules>:
+        [ {"src_ip": "x.x.x.x", "src_ip_mask": 24, ...}, ... ]
+ Note:
+ See `generate_rule_cmds()` to get list of possible map keys.
+ """
+ actions, rules = {}, []
+ _port_pairs = PortPairs(self.vnfd_helper.interfaces)
+ port_pair_list = _port_pairs.port_pair_list
+ for src_intf, dst_intf in port_pair_list:
+ # get port numbers of the interfaces
+ src_port = self.vnfd_helper.port_num(src_intf)
+ dst_port = self.vnfd_helper.port_num(dst_intf)
+ # get interface addresses and prefixes
+ src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_intf)
+ dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_intf)
+ # ignore entries with empty values
+ if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
+ # flow: src_net:dst_net -> dst_port
+ action_id = self.new_action_id
+ actions[action_id] = self.DEFAULT_FWD_ACTIONS[:]
+ actions[action_id].append({"fwd": {"port": dst_port}})
+ rules.append({"priority": 1, 'cmd': self.RULE_CMD,
+ "src_ip": src_net, "src_ip_mask": src_prefix_len,
+ "dst_ip": dst_net, "dst_ip_mask": dst_prefix_len,
+ "src_port_from": 0, "src_port_to": 65535,
+ "dst_port_from": 0, "dst_port_to": 65535,
+ "protocol": 0, "protocol_mask": 0,
+ "action_id": action_id})
+ # flow: dst_net:src_net -> src_port
+ action_id = self.new_action_id
+ actions[action_id] = self.DEFAULT_FWD_ACTIONS[:]
+ actions[action_id].append({"fwd": {"port": src_port}})
+                rules.append({"cmd": self.RULE_CMD, "priority": 1,
+ "src_ip": dst_net, "src_ip_mask": dst_prefix_len,
+ "dst_ip": src_net, "dst_ip_mask": src_prefix_len,
+ "src_port_from": 0, "src_port_to": 65535,
+ "dst_port_from": 0, "dst_port_to": 65535,
+ "protocol": 0, "protocol_mask": 0,
+ "action_id": action_id})
+ return actions, rules
+
+ def get_flows(self, options):
+ """Get actions/rules based on provided options.
+ The `options` is a dict representing the ACL rules configuration
+ file. Result is the same as described in `get_default_flows()`.
+ """
+ actions, rules = {}, []
+ for ace in options['access-list-entries']:
+ # Generate list of actions
+ action_id = self.new_action_id
+ actions[action_id] = ace['actions']
+            # Destination network
+ matches = ace['matches']
+ dst_ipv4_net = matches['destination-ipv4-network']
+ dst_ipv4_net_ip = ipaddress.ip_interface(six.text_type(dst_ipv4_net))
+ # Source network
+ src_ipv4_net = matches['source-ipv4-network']
+ src_ipv4_net_ip = ipaddress.ip_interface(six.text_type(src_ipv4_net))
+ # Append the rule
+ rules.append({'action_id': action_id, 'cmd': self.RULE_CMD,
+ 'dst_ip': dst_ipv4_net_ip.network.network_address.exploded,
+ 'dst_ip_mask': dst_ipv4_net_ip.network.prefixlen,
+ 'src_ip': src_ipv4_net_ip.network.network_address.exploded,
+ 'src_ip_mask': src_ipv4_net_ip.network.prefixlen,
+ 'dst_port_from': matches['destination-port-range']['lower-port'],
+ 'dst_port_to': matches['destination-port-range']['upper-port'],
+ 'src_port_from': matches['source-port-range']['lower-port'],
+ 'src_port_to': matches['source-port-range']['upper-port'],
+ 'priority': matches.get('priority', self.DEFAULT_PRIORITY),
+ 'protocol': matches.get('protocol', self.DEFAULT_PROTOCOL),
+ 'protocol_mask': matches.get('protocol_mask',
+ self.DEFAULT_PROTOCOL_MASK)
+ })
+ return actions, rules
+
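As an aside, a minimal sketch of the `options` shape this method expects, with hypothetical addresses and an assumed `helper` instance of this setup-env helper class (the keys mirror the `matches` lookups above):

    options = {
        "access-list-entries": [{
            # passed through to generate_action_cmds() as-is
            "actions": ["count", {"fwd": {"port": 0}}],
            "matches": {
                "destination-ipv4-network": "152.16.40.20/24",
                "source-ipv4-network": "0.0.0.0/0",
                "destination-port-range": {"lower-port": 0, "upper-port": 65535},
                "source-port-range": {"lower-port": 0, "upper-port": 65535},
            },
        }],
    }
    actions, rules = helper.get_flows(options)
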
+ def generate_rule_cmds(self, rules, apply_rules=False):
+ """Convert rules into list of SampleVNF CLI commands"""
+ rule_template = ("p {cmd} add {priority} {src_ip} {src_ip_mask} "
+ "{dst_ip} {dst_ip_mask} {src_port_from} {src_port_to} "
+ "{dst_port_from} {dst_port_to} {protocol} "
+ "{protocol_mask} {action_id}")
+ rule_cmd_list = []
+ for rule in rules:
+ rule_cmd_list.append(rule_template.format(**rule))
+ if apply_rules:
+ # add command to apply all rules at the end
+ rule_cmd_list.append("p {cmd} applyruleset".format(cmd=self.RULE_CMD))
+ return rule_cmd_list
+
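For illustration, a sketch of how one rule dict expands under the template above; the field values are hypothetical and `"acl"` stands in for whatever `RULE_CMD` is set to:

    rule = {"cmd": "acl", "priority": 1,
            "src_ip": "10.0.0.0", "src_ip_mask": 24,
            "dst_ip": "20.0.0.0", "dst_ip_mask": 24,
            "src_port_from": 0, "src_port_to": 65535,
            "dst_port_from": 0, "dst_port_to": 65535,
            "protocol": 0, "protocol_mask": 0, "action_id": 1}
    # rule_template.format(**rule) yields:
    # "p acl add 1 10.0.0.0 24 20.0.0.0 24 0 65535 0 65535 0 0 1"
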
+ def generate_action_cmds(self, actions):
+ """Convert actions into list of SampleVNF CLI commands.
+        This method doesn't validate the provided list of actions. The
+        supported actions are limited by SampleVNF, so the user is
+        responsible for specifying correct action name(s). Yardstick takes
+        the actions provided by the user and applies them to SampleVNF.
+        Note that some actions require additional parameters to be
+        specified. When the `fwd` or `nat` action is used, the port
+        attribute has to be specified.
+ """
+ _action_template_map = {
+ "fwd": "p action add {action_id} fwd {port}",
+ "nat": "p action add {action_id} nat {port}"
+ }
+ action_cmd_list = []
+ for action_id, actions in actions.items():
+ for action in actions:
+ if isinstance(action, dict):
+ for action_name in action.keys():
+                    # user provided an action name with additional options
+ # e.g.: {"fwd": {"port": 0}}
+ # format action CLI command and add it to the list
+ if action_name not in _action_template_map.keys():
+ raise exceptions.AclUnknownActionTemplate(
+ action_name=action_name)
+ template = _action_template_map[action_name]
+ try:
+ action_cmd_list.append(template.format(
+ action_id=action_id, **action[action_name]))
+ except KeyError as exp:
+ raise exceptions.AclMissingActionArguments(
+ action_name=action_name,
+ action_param=exp.args[0])
+ else:
+                # user provided an action name without additional options
+ # e.g.: "accept", "count"
+ action_cmd_list.append(
+ "p action add {action_id} {action}".format(
+ action_id=action_id, action=action))
+ return action_cmd_list
+
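A short sketch of the action-to-command mapping implemented above, using the default action list plus a `fwd` action:

    actions = {1: ["accept", "count", {"fwd": {"port": 0}}]}
    # generate_action_cmds(actions) returns:
    #   ["p action add 1 accept",
    #    "p action add 1 count",
    #    "p action add 1 fwd 0"]
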
+ def get_flows_config(self, options=None):
+ """Get action/rules configuration commands (string) to be
+ applied to SampleVNF to configure ACL rules (flows).
+ """
+ action_cmd_list, rule_cmd_list = [], []
+ if options:
+            # if ACL rules were provided, generate actions/rules from them
+ actions, rules = self.get_flows(options)
+ action_cmd_list = self.generate_action_cmds(actions)
+ rule_cmd_list = self.generate_rule_cmds(rules)
+ # default actions/rules
+ dft_actions, dft_rules = self.get_default_flows()
+ dft_action_cmd_list = self.generate_action_cmds(dft_actions)
+ dft_rule_cmd_list = self.generate_rule_cmds(dft_rules, apply_rules=True)
+ # generate multi-line commands to add actions/rules
+ return '\n'.join(chain(action_cmd_list, dft_action_cmd_list,
+ rule_cmd_list, dft_rule_cmd_list))
class AclApproxVnf(SampleVNF):
@@ -59,11 +251,7 @@ class AclApproxVnf(SampleVNF):
setup_env_helper_type = AclApproxSetupEnvSetupEnvHelper
super(AclApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
- self.acl_rules = None
-
- def _start_vnf(self):
- yang_model_path = find_relative_file(self.scenario_helper.options['rules'],
- self.scenario_helper.task_path)
- yang_model = YangModel(yang_model_path)
- self.acl_rules = yang_model.get_rules()
- super(AclApproxVnf, self)._start_vnf()
+
+ def wait_for_instantiate(self):
+ """Wait for VNF to initialize"""
+ self.wait_for_initialize()
diff --git a/yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py b/yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py
new file mode 100644
index 000000000..d1d9667db
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2018-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from yardstick.network_services.vnf_generic.vnf import base
+
+LOG = logging.getLogger(__name__)
+
+
+class AgnosticVnf(base.GenericVNF):
+ """ AgnosticVnf implementation. """
+ def __init__(self, name, vnfd):
+ super(AgnosticVnf, self).__init__(name, vnfd)
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ pass
+
+ def wait_for_instantiate(self):
+ pass
+
+ def terminate(self):
+ pass
+
+ def scale(self, flavor=""):
+ pass
+
+ def collect_kpi(self):
+ pass
+
+ def start_collect(self):
+ pass
+
+ def stop_collect(self):
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index a776b0989..8ef96b744 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class implementation for generic vnf implementation """
import abc
@@ -95,7 +94,7 @@ class VnfdHelper(dict):
for interface in self.interfaces:
virtual_intf = interface["virtual-interface"]
if virtual_intf[key] == value:
- return interface
+ return virtual_intf
raise KeyError()
def find_interface(self, **kwargs):
@@ -195,10 +194,22 @@ class GenericVNF(object):
:return: {"kpi": value, "kpi2": value}
"""
+ @abc.abstractmethod
+ def start_collect(self):
+ """Start KPI collection
+ :return: None
+ """
+
+ @abc.abstractmethod
+ def stop_collect(self):
+ """Stop KPI collection
+ :return: None
+ """
+
@six.add_metaclass(abc.ABCMeta)
class GenericTrafficGen(GenericVNF):
- """ Class providing file-like API for generic traffic generator """
+ """Class providing file-like API for generic traffic generator"""
def __init__(self, name, vnfd):
super(GenericTrafficGen, self).__init__(name, vnfd)
@@ -254,3 +265,23 @@ class GenericTrafficGen(GenericVNF):
:return: True/False
"""
pass
+
+ def start_collect(self):
+ """Start KPI collection.
+
+ Traffic measurements are always collected during injection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
+
+ def stop_collect(self):
+ """Stop KPI collection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
index 53f73b4d7..ee4a581b1 100644
--- a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, Dpd
LOG = logging.getLogger(__name__)
# CGNAPT should work the same on all systems, we can provide the binary
-CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
+CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}'
WAIT_FOR_STATIC_NAPT = 4
-CGNAPT_COLLECT_KPI = """\
+CGNAPT_COLLECT_KPI = r"""\
CG-NAPT(.*\n)*\
Received\s(\d+),\
Missed\s(\d+),\
@@ -120,3 +120,7 @@ class CgnaptApproxVnf(SampleVNF):
self.vnf_execute(cmd)
time.sleep(WAIT_FOR_STATIC_NAPT)
+
+ def wait_for_instantiate(self):
+ """Wait for VNF to initialize"""
+ self.wait_for_initialize()
diff --git a/yardstick/network_services/vnf_generic/vnf/epc_vnf.py b/yardstick/network_services/vnf_generic/vnf/epc_vnf.py
new file mode 100644
index 000000000..8112963e9
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/epc_vnf.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2018-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from yardstick.network_services.vnf_generic.vnf import base
+
+LOG = logging.getLogger(__name__)
+
+
+class EPCVnf(base.GenericVNF):
+
+ def __init__(self, name, vnfd):
+ super(EPCVnf, self).__init__(name, vnfd)
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ """Prepare VNF for operation and start the VNF process/VM
+
+ :param scenario_cfg: Scenario config
+ :param context_cfg: Context config
+ """
+ pass
+
+ def wait_for_instantiate(self):
+ """Wait for VNF to start"""
+ pass
+
+ def terminate(self):
+ """Kill all VNF processes"""
+ pass
+
+ def scale(self, flavor=""):
+ pass
+
+ def collect_kpi(self):
+ pass
+
+ def start_collect(self):
+ pass
+
+ def stop_collect(self):
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py b/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py
new file mode 100644
index 000000000..1961ac1b1
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py
@@ -0,0 +1,498 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+import time
+from collections import Counter
+from enum import Enum
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.common.process import check_if_process_failed
+from yardstick.network_services import constants
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.vnf_generic.vnf.vpp_helpers import \
+ VppSetupEnvHelper, VppConfigGenerator
+
+LOG = logging.getLogger(__name__)
+
+
+class CryptoAlg(Enum):
+ """Encryption algorithms."""
+ AES_CBC_128 = ('aes-cbc-128', 'AES-CBC', 16)
+ AES_CBC_192 = ('aes-cbc-192', 'AES-CBC', 24)
+ AES_CBC_256 = ('aes-cbc-256', 'AES-CBC', 32)
+ AES_GCM_128 = ('aes-gcm-128', 'AES-GCM', 20)
+
+ def __init__(self, alg_name, scapy_name, key_len):
+ self.alg_name = alg_name
+ self.scapy_name = scapy_name
+ self.key_len = key_len
+
+
+class IntegAlg(Enum):
+ """Integrity algorithms."""
+ SHA1_96 = ('sha1-96', 'HMAC-SHA1-96', 20)
+ SHA_256_128 = ('sha-256-128', 'SHA2-256-128', 32)
+ SHA_384_192 = ('sha-384-192', 'SHA2-384-192', 48)
+ SHA_512_256 = ('sha-512-256', 'SHA2-512-256', 64)
+ AES_GCM_128 = ('aes-gcm-128', 'AES-GCM', 20)
+
+ def __init__(self, alg_name, scapy_name, key_len):
+ self.alg_name = alg_name
+ self.scapy_name = scapy_name
+ self.key_len = key_len
+
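Each enum member carries a tuple that `__init__` unpacks into attributes, so algorithm metadata can be read straight off the member; a quick sketch:

    alg = CryptoAlg.AES_CBC_256
    # alg.alg_name == 'aes-cbc-256'; alg.scapy_name == 'AES-CBC'
    # alg.key_len == 32 (key length in bytes)
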
+
+class VipsecApproxSetupEnvHelper(VppSetupEnvHelper):
+ DEFAULT_IPSEC_VNF_CFG = {
+ 'crypto_type': 'SW_cryptodev',
+ 'rxq': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1,
+ }
+
+ def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
+ super(VipsecApproxSetupEnvHelper, self).__init__(
+ vnfd_helper, ssh_helper, scenario_helper)
+
+ def _get_crypto_type(self):
+ vnf_cfg = self.scenario_helper.options.get('vnf_config',
+ self.DEFAULT_IPSEC_VNF_CFG)
+ return vnf_cfg.get('crypto_type', 'SW_cryptodev')
+
+ def _get_crypto_algorithms(self):
+ vpp_cfg = self.scenario_helper.all_options.get('vpp_config', {})
+ return vpp_cfg.get('crypto_algorithms', 'aes-gcm')
+
+ def _get_n_tunnels(self):
+ vpp_cfg = self.scenario_helper.all_options.get('vpp_config', {})
+ return vpp_cfg.get('tunnels', 1)
+
+ def _get_n_connections(self):
+ try:
+ flow_cfg = self.scenario_helper.all_options['flow']
+ return flow_cfg['count']
+ except KeyError:
+ raise KeyError("Missing flow definition in scenario section" +
+ " of the task definition file")
+
+ def _get_flow_src_start_ip(self):
+ node_name = self.find_encrypted_data_interface()["node_name"]
+ try:
+ flow_cfg = self.scenario_helper.all_options['flow']
+ src_ips = flow_cfg['src_ip']
+ dst_ips = flow_cfg['dst_ip']
+ except KeyError:
+ raise KeyError("Missing flow definition in scenario section" +
+ " of the task definition file")
+
+ for src, dst in zip(src_ips, dst_ips):
+ flow_src_start_ip, _ = src.split('-')
+ flow_dst_start_ip, _ = dst.split('-')
+
+ if node_name == "vnf__0":
+ return flow_src_start_ip
+ elif node_name == "vnf__1":
+ return flow_dst_start_ip
+
+ def _get_flow_dst_start_ip(self):
+ node_name = self.find_encrypted_data_interface()["node_name"]
+ try:
+ flow_cfg = self.scenario_helper.all_options['flow']
+ src_ips = flow_cfg['src_ip']
+ dst_ips = flow_cfg['dst_ip']
+ except KeyError:
+ raise KeyError("Missing flow definition in scenario section" +
+ " of the task definition file")
+
+ for src, dst in zip(src_ips, dst_ips):
+ flow_src_start_ip, _ = src.split('-')
+ flow_dst_start_ip, _ = dst.split('-')
+
+ if node_name == "vnf__0":
+ return flow_dst_start_ip
+ elif node_name == "vnf__1":
+ return flow_src_start_ip
+
+ def build_config(self):
+ vnf_cfg = self.scenario_helper.options.get('vnf_config',
+ self.DEFAULT_IPSEC_VNF_CFG)
+ rxq = vnf_cfg.get('rxq', 1)
+ phy_cores = vnf_cfg.get('worker_threads', 1)
+ # worker_config = vnf_cfg.get('worker_config', '1C/1T').split('/')[1].lower()
+
+ vpp_cfg = self.create_startup_configuration_of_vpp()
+ self.add_worker_threads_and_rxqueues(vpp_cfg, phy_cores, rxq)
+ self.add_pci_devices(vpp_cfg)
+
+ frame_size_cfg = self.scenario_helper.all_options.get('framesize', {})
+ uplink_cfg = frame_size_cfg.get('uplink', {})
+ downlink_cfg = frame_size_cfg.get('downlink', {})
+ framesize = min(self.calculate_frame_size(uplink_cfg),
+ self.calculate_frame_size(downlink_cfg))
+ if framesize < 1522:
+ vpp_cfg.add_dpdk_no_multi_seg()
+
+ crypto_algorithms = self._get_crypto_algorithms()
+ if crypto_algorithms == 'aes-gcm':
+ self.add_dpdk_cryptodev(vpp_cfg, 'aesni_gcm', phy_cores)
+ elif crypto_algorithms == 'cbc-sha1':
+ self.add_dpdk_cryptodev(vpp_cfg, 'aesni_mb', phy_cores)
+
+ vpp_cfg.add_dpdk_dev_default_rxd(2048)
+ vpp_cfg.add_dpdk_dev_default_txd(2048)
+ self.apply_config(vpp_cfg, True)
+ self.update_vpp_interface_data()
+
+ def setup_vnf_environment(self):
+ resource = super(VipsecApproxSetupEnvHelper,
+ self).setup_vnf_environment()
+
+ self.start_vpp_service()
+ # for QAT device DH895xCC, the number of VFs is required as 32
+ if self._get_crypto_type() == 'HW_cryptodev':
+ sriov_numvfs = self.get_sriov_numvfs(
+ self.find_encrypted_data_interface()["vpci"])
+ if sriov_numvfs != 32:
+ self.crypto_device_init(
+ self.find_encrypted_data_interface()["vpci"], 32)
+
+ self._update_vnfd_helper(self.sys_cores.get_cpu_layout())
+ self.update_vpp_interface_data()
+ self.iface_update_numa()
+
+ return resource
+
+ @staticmethod
+ def calculate_frame_size(frame_cfg):
+ if not frame_cfg:
+ return 64
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in frame_cfg.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ return 64
+ packets_total = sum([int(size) * weight
+ for size, weight in imix_count.items()])
+ return packets_total / imix_sum
+
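A worked example of the weighted average computed above, using a classic IMIX-style distribution (hypothetical weights):

    cfg = {'64B': 28, '570B': 16, '1518B': 4}
    # imix_sum = 48; packets_total = 64*28 + 570*16 + 1518*4 = 16984
    # calculate_frame_size(cfg) == 16984 / 48 ≈ 353.8 bytes
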
+ def check_status(self):
+ ipsec_created = False
+ cmd = "vppctl show int"
+ _, stdout, _ = self.ssh_helper.execute(cmd)
+ entries = re.split(r"\n+", stdout)
+ tmp = [re.split(r"\s\s+", entry, 5) for entry in entries]
+
+ for item in tmp:
+ if isinstance(item, list):
+ if item[0] and item[0] != 'local0':
+ if "ipsec" in item[0] and not ipsec_created:
+ ipsec_created = True
+ if len(item) > 2 and item[2] == 'down':
+ return False
+ return ipsec_created
+
+ def get_vpp_statistics(self):
+ cmd = "vppctl show int {intf}"
+ result = {}
+ for interface in self.vnfd_helper.interfaces:
+ iface_name = self.get_value_by_interface_key(
+ interface["virtual-interface"]["ifname"], "vpp_name")
+ command = cmd.format(intf=iface_name)
+ _, stdout, _ = self.ssh_helper.execute(command)
+ result.update(
+ self.parser_vpp_stats(interface["virtual-interface"]["ifname"],
+ iface_name, stdout))
+ self.ssh_helper.execute("vppctl clear interfaces")
+ return result
+
+ @staticmethod
+ def parser_vpp_stats(interface, iface_name, stats):
+ packets_in = 0
+ packets_fwd = 0
+ packets_dropped = 0
+ result = {}
+
+ entries = re.split(r"\n+", stats)
+ tmp = [re.split(r"\s\s+", entry, 5) for entry in entries]
+
+ for item in tmp:
+ if isinstance(item, list):
+ if item[0] == iface_name and len(item) >= 5:
+ if item[3] == 'rx packets':
+ packets_in = int(item[4])
+ elif item[4] == 'rx packets':
+ packets_in = int(item[5])
+ elif len(item) == 3:
+ if item[1] == 'tx packets':
+ packets_fwd = int(item[2])
+ elif item[1] == 'drops' or item[1] == 'rx-miss':
+ packets_dropped = int(item[2])
+ if packets_dropped == 0 and packets_in > 0 and packets_fwd > 0:
+ packets_dropped = abs(packets_fwd - packets_in)
+
+ result[interface] = {
+ 'packets_in': packets_in,
+ 'packets_fwd': packets_fwd,
+ 'packets_dropped': packets_dropped,
+ }
+
+ return result
+
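A minimal sketch of the parsing above, fed a simplified double-space-delimited snippet shaped like `vppctl show int` output (real VPP output carries more columns and padding):

    sample = ("xe0  1  up  rx packets  1000\n"
              "  tx packets  900\n"
              "  drops  100\n")
    VipsecApproxSetupEnvHelper.parser_vpp_stats("xe0", "xe0", sample)
    # -> {'xe0': {'packets_in': 1000, 'packets_fwd': 900,
    #             'packets_dropped': 100}}
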
+ def create_ipsec_tunnels(self):
+ self.initialize_ipsec()
+
+ # TODO generate the same key
+ crypto_algorithms = self._get_crypto_algorithms()
+ if crypto_algorithms == 'aes-gcm':
+ encr_alg = CryptoAlg.AES_GCM_128
+ auth_alg = IntegAlg.AES_GCM_128
+ encr_key = 'LNYZXMBQDKESNLREHJMS'
+ auth_key = 'SWGLDTYZSQKVBZZMPIEV'
+ elif crypto_algorithms == 'cbc-sha1':
+ encr_alg = CryptoAlg.AES_CBC_128
+ auth_alg = IntegAlg.SHA1_96
+ encr_key = 'IFEMSHYLCZIYFUTT'
+ auth_key = 'PEALEIPSCPTRHYJSDXLY'
+
+ self.execute_script("enable_dpdk_traces.vat", json_out=False)
+ self.execute_script("enable_vhost_user_traces.vat", json_out=False)
+ self.execute_script("enable_memif_traces.vat", json_out=False)
+
+ node_name = self.find_encrypted_data_interface()["node_name"]
+ n_tunnels = self._get_n_tunnels()
+ n_connections = self._get_n_connections()
+ flow_dst_start_ip = self._get_flow_dst_start_ip()
+ if node_name == "vnf__0":
+ self.vpp_create_ipsec_tunnels(
+ self.find_encrypted_data_interface()["local_ip"],
+ self.find_encrypted_data_interface()["peer_intf"]["local_ip"],
+ self.find_encrypted_data_interface()["ifname"],
+ n_tunnels, n_connections, encr_alg, encr_key, auth_alg,
+ auth_key, flow_dst_start_ip)
+ elif node_name == "vnf__1":
+ self.vpp_create_ipsec_tunnels(
+ self.find_encrypted_data_interface()["local_ip"],
+ self.find_encrypted_data_interface()["peer_intf"]["local_ip"],
+ self.find_encrypted_data_interface()["ifname"],
+ n_tunnels, n_connections, encr_alg, encr_key, auth_alg,
+ auth_key, flow_dst_start_ip, 20000, 10000)
+
+ def find_raw_data_interface(self):
+ try:
+ return self.vnfd_helper.find_virtual_interface(vld_id="uplink_0")
+ except KeyError:
+ return self.vnfd_helper.find_virtual_interface(vld_id="downlink_0")
+
+ def find_encrypted_data_interface(self):
+ return self.vnfd_helper.find_virtual_interface(vld_id="ciphertext")
+
+ def create_startup_configuration_of_vpp(self):
+ vpp_config_generator = VppConfigGenerator()
+ vpp_config_generator.add_unix_log()
+ vpp_config_generator.add_unix_cli_listen()
+ vpp_config_generator.add_unix_nodaemon()
+ vpp_config_generator.add_unix_coredump()
+ vpp_config_generator.add_dpdk_socketmem('1024,1024')
+ vpp_config_generator.add_dpdk_no_tx_checksum_offload()
+ vpp_config_generator.add_dpdk_log_level('debug')
+ for interface in self.vnfd_helper.interfaces:
+ vpp_config_generator.add_dpdk_uio_driver(
+ interface["virtual-interface"]["driver"])
+ vpp_config_generator.add_heapsize('4G')
+ # TODO Enable configuration depend on VPP version
+ vpp_config_generator.add_statseg_size('4G')
+ vpp_config_generator.add_plugin('disable', ['default'])
+ vpp_config_generator.add_plugin('enable', ['dpdk_plugin.so'])
+ vpp_config_generator.add_ip6_hash_buckets('2000000')
+ vpp_config_generator.add_ip6_heap_size('4G')
+ vpp_config_generator.add_ip_heap_size('4G')
+ return vpp_config_generator
+
+ def add_worker_threads_and_rxqueues(self, vpp_cfg, phy_cores,
+ rx_queues=None):
+ thr_count_int = phy_cores
+ cpu_count_int = phy_cores
+ num_mbufs_int = 32768
+
+ numa_list = []
+
+ if_list = [self.find_encrypted_data_interface()["ifname"],
+ self.find_raw_data_interface()["ifname"]]
+ for if_key in if_list:
+ try:
+ numa_list.append(
+ self.get_value_by_interface_key(if_key, 'numa_node'))
+ except KeyError:
+ pass
+ numa_cnt_mc = Counter(numa_list).most_common()
+
+ if numa_cnt_mc and numa_cnt_mc[0][0] is not None and \
+ numa_cnt_mc[0][0] != -1:
+ numa = numa_cnt_mc[0][0]
+ elif len(numa_cnt_mc) > 1 and numa_cnt_mc[0][0] == -1:
+ numa = numa_cnt_mc[1][0]
+ else:
+ numa = 0
+
+ try:
+ smt_used = self.sys_cores.is_smt_enabled()
+ except KeyError:
+ smt_used = False
+
+ cpu_main = self.sys_cores.cpu_list_per_node_str(numa, skip_cnt=1,
+ cpu_cnt=1)
+ cpu_wt = self.sys_cores.cpu_list_per_node_str(numa, skip_cnt=2,
+ cpu_cnt=cpu_count_int,
+ smt_used=smt_used)
+
+ if smt_used:
+ thr_count_int = 2 * cpu_count_int
+
+ if rx_queues is None:
+ rxq_count_int = int(thr_count_int / 2)
+ else:
+ rxq_count_int = rx_queues
+
+ if rxq_count_int == 0:
+ rxq_count_int = 1
+
+ num_mbufs_int = num_mbufs_int * rxq_count_int
+
+ vpp_cfg.add_cpu_main_core(cpu_main)
+ vpp_cfg.add_cpu_corelist_workers(cpu_wt)
+ vpp_cfg.add_dpdk_dev_default_rxq(rxq_count_int)
+ vpp_cfg.add_dpdk_num_mbufs(num_mbufs_int)
+
+ def add_pci_devices(self, vpp_cfg):
+ pci_devs = [self.find_encrypted_data_interface()["vpci"],
+ self.find_raw_data_interface()["vpci"]]
+ vpp_cfg.add_dpdk_dev(*pci_devs)
+
+ def add_dpdk_cryptodev(self, vpp_cfg, sw_pmd_type, count):
+ crypto_type = self._get_crypto_type()
+ smt_used = self.sys_cores.is_smt_enabled()
+ cryptodev = self.find_encrypted_data_interface()["vpci"]
+ socket_id = self.get_value_by_interface_key(
+ self.find_encrypted_data_interface()["ifname"], "numa_node")
+
+ if smt_used:
+ thr_count_int = count * 2
+ if crypto_type == 'HW_cryptodev':
+ vpp_cfg.add_dpdk_cryptodev(thr_count_int, cryptodev)
+ else:
+ vpp_cfg.add_dpdk_sw_cryptodev(sw_pmd_type, socket_id,
+ thr_count_int)
+ else:
+ thr_count_int = count
+ if crypto_type == 'HW_cryptodev':
+ vpp_cfg.add_dpdk_cryptodev(thr_count_int, cryptodev)
+ else:
+ vpp_cfg.add_dpdk_sw_cryptodev(sw_pmd_type, socket_id,
+ thr_count_int)
+
+ def initialize_ipsec(self):
+ flow_src_start_ip = self._get_flow_src_start_ip()
+
+ self.set_interface_state(
+ self.find_encrypted_data_interface()["ifname"], 'up')
+ self.set_interface_state(self.find_raw_data_interface()["ifname"],
+ 'up')
+ self.vpp_interfaces_ready_wait()
+ self.vpp_set_interface_mtu(
+ self.find_encrypted_data_interface()["ifname"])
+ self.vpp_set_interface_mtu(self.find_raw_data_interface()["ifname"])
+ self.vpp_interfaces_ready_wait()
+
+ self.set_ip(self.find_encrypted_data_interface()["ifname"],
+ self.find_encrypted_data_interface()["local_ip"], 24)
+ self.set_ip(self.find_raw_data_interface()["ifname"],
+ self.find_raw_data_interface()["local_ip"],
+ 24)
+
+ self.add_arp_on_dut(self.find_encrypted_data_interface()["ifname"],
+ self.find_encrypted_data_interface()["peer_intf"][
+ "local_ip"],
+ self.find_encrypted_data_interface()["peer_intf"][
+ "local_mac"])
+ self.add_arp_on_dut(self.find_raw_data_interface()["ifname"],
+ self.find_raw_data_interface()["peer_intf"][
+ "local_ip"],
+ self.find_raw_data_interface()["peer_intf"][
+ "local_mac"])
+
+ self.vpp_route_add(flow_src_start_ip, 8,
+ self.find_raw_data_interface()["peer_intf"][
+ "local_ip"],
+ self.find_raw_data_interface()["ifname"])
+
+
+class VipsecApproxVnf(SampleVNF):
+ """ This class handles vIPSEC VNF model-driver definitions """
+
+ APP_NAME = 'vIPSEC'
+ APP_WORD = 'vipsec'
+ WAIT_TIME = 20
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VipsecApproxSetupEnvHelper
+ super(VipsecApproxVnf, self).__init__(
+ name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+
+ def _run(self):
+ # we can't share ssh paramiko objects to force new connection
+ self.ssh_helper.drop_connection()
+ # kill before starting
+ self.setup_helper.kill_vnf()
+ self._build_config()
+ self.setup_helper.create_ipsec_tunnels()
+
+ def wait_for_instantiate(self):
+ time.sleep(self.WAIT_TIME)
+ while True:
+ status = self.setup_helper.check_status()
+ if not self._vnf_process.is_alive() and not status:
+ raise RuntimeError("%s VNF process died." % self.APP_NAME)
+ LOG.info("Waiting for %s VNF to start.. ", self.APP_NAME)
+ time.sleep(self.WAIT_TIME_FOR_SCRIPT)
+ status = self.setup_helper.check_status()
+ if status:
+ LOG.info("%s VNF is up and running.", self.APP_NAME)
+ self._vnf_up_post()
+ return self._vnf_process.exitcode
+
+ def terminate(self):
+ self.setup_helper.kill_vnf()
+ self._tear_down()
+ self.resource_helper.stop_collect()
+ if self._vnf_process is not None:
+ # be proper and join first before we kill
+ LOG.debug("joining before terminate %s", self._vnf_process.name)
+ self._vnf_process.join(constants.PROCESS_JOIN_TIMEOUT)
+ self._vnf_process.terminate()
+
+ def collect_kpi(self):
+ # we can't get KPIs if the VNF is down
+ check_if_process_failed(self._vnf_process, 0.01)
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+ result = {"physical_node": physical_node}
+ result["collect_stats"] = self.setup_helper.get_vpp_statistics()
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+ return result
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index 285ead3b6..3507315f2 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ import re
import select
import socket
import time
+
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from itertools import repeat, chain
@@ -30,12 +31,12 @@ import six
from six.moves import cStringIO
from six.moves import zip, StringIO
-from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
from yardstick.common import utils
from yardstick.common.utils import SocketTopology, join_non_strings, try_int
from yardstick.network_services.helpers.iniparser import ConfigParser
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+from yardstick.network_services import constants
PROX_PORT = 8474
@@ -44,8 +45,9 @@ SECTION_CONTENTS = 1
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
+LOG_RESULT = logging.getLogger('yardstick')
+LOG_RESULT.setLevel(logging.DEBUG)
-TEN_GIGABIT = 1e10
BITS_PER_BYTE = 8
RETRY_SECONDS = 60
RETRY_INTERVAL = 1
@@ -124,7 +126,8 @@ class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')):
class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,'
'delta_tx,delta_tsc,'
- 'latency,rx_total,tx_total,pps')):
+ 'latency,rx_total,tx_total,'
+ 'requested_pps')):
@property
def pkt_loss(self):
try:
@@ -133,11 +136,16 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
return 100.0
@property
- def mpps(self):
+ def tx_mpps(self):
# calculate the effective throughput in Mpps
return float(self.delta_tx) * self.tsc_hz / self.delta_tsc / 1e6
@property
+ def rx_mpps(self):
+ # calculate the effective throughput in Mpps
+ return float(self.delta_rx) * self.tsc_hz / self.delta_tsc / 1e6
+
+ @property
def can_be_lost(self):
return int(self.tx_total * self.tolerated / 1e2)
@@ -163,11 +171,12 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
]
samples = {
- "Throughput": self.mpps,
+ "Throughput": self.rx_mpps,
+ "RxThroughput": self.rx_mpps,
"DropPackets": pkt_loss,
"CurrentDropPackets": pkt_loss,
- "TxThroughput": self.pps / 1e6,
- "RxThroughput": self.mpps,
+ "RequestedTxThroughput": self.requested_pps / 1e6,
+ "TxThroughput": self.tx_mpps,
"PktSize": pkt_size,
}
if port_samples:
@@ -178,11 +187,12 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
def log_data(self, logger=None):
if logger is None:
- logger = LOG
+ logger = LOG_RESULT
template = "RX: %d; TX: %d; dropped: %d (tolerated: %d)"
- logger.debug(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
- logger.debug("Mpps configured: %f; Mpps effective %f", self.pps / 1e6, self.mpps)
+ logger.info(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
+ logger.info("Mpps configured: %f; Mpps generated %f; Mpps received %f",
+ self.requested_pps / 1e6, self.tx_mpps, self.rx_mpps)
class PacketDump(object):
@@ -289,7 +299,7 @@ class ProxSocketHelper(object):
if mode != 'pktdump':
# Regular 1-line message. Stop reading from the socket.
LOG.debug("Regular response read")
- return ret_str
+ return ret_str, True
LOG.debug("Packet dump header read: [%s]", ret_str)
@@ -310,13 +320,34 @@ class ProxSocketHelper(object):
# Return boolean instead of string to signal
# successful reception of the packet dump.
LOG.debug("Packet dump stored, returning")
- return True
+ return True, False
index = data_end + 1
- return ret_str
+ return ret_str, False
- def get_data(self, pkt_dump_only=False, timeout=1):
+ def get_string(self, pkt_dump_only=False, timeout=0.01):
+
+ def is_ready_string():
+ # recv() is blocking, so avoid calling it when no data is waiting.
+ ready = select.select([self._sock], [], [], timeout)
+ return bool(ready[0])
+
+ status = False
+ ret_str = ""
+        while not status:
+ for status in iter(is_ready_string, False):
+ decoded_data = self._sock.recv(256).decode('utf-8')
+ ret_str, done = self._parse_socket_data(decoded_data,
+ pkt_dump_only)
+                if done:
+ status = True
+ break
+
+ LOG.debug("Received data from socket: [%s]", ret_str)
+ return status, ret_str
+
+ def get_data(self, pkt_dump_only=False, timeout=10.0):
""" read data from the socket """
# This method behaves slightly differently depending on whether it is
@@ -353,7 +384,9 @@ class ProxSocketHelper(object):
ret_str = ""
for status in iter(is_ready, False):
decoded_data = self._sock.recv(256).decode('utf-8')
- ret_str = self._parse_socket_data(decoded_data, pkt_dump_only)
+ ret_str, done = self._parse_socket_data(decoded_data, pkt_dump_only)
+            if done:
+ break
LOG.debug("Received data from socket: [%s]", ret_str)
return ret_str if status else ''
@@ -383,13 +416,17 @@ class ProxSocketHelper(object):
""" stop all cores on the remote instance """
LOG.debug("Stop all")
self.put_command("stop all\n")
- time.sleep(3)
def stop(self, cores, task=''):
""" stop specific cores on the remote instance """
- LOG.debug("Stopping cores %s", cores)
- self.put_command("stop {} {}\n".format(join_non_strings(',', cores), task))
- time.sleep(3)
+
+ tmpcores = []
+ for core in cores:
+ if core not in tmpcores:
+ tmpcores.append(core)
+
+ LOG.debug("Stopping cores %s", tmpcores)
+ self.put_command("stop {} {}\n".format(join_non_strings(',', tmpcores), task))
def start_all(self):
""" start all cores on the remote instance """
@@ -398,15 +435,19 @@ class ProxSocketHelper(object):
def start(self, cores):
""" start specific cores on the remote instance """
- LOG.debug("Starting cores %s", cores)
- self.put_command("start {}\n".format(join_non_strings(',', cores)))
- time.sleep(3)
+
+ tmpcores = []
+ for core in cores:
+ if core not in tmpcores:
+ tmpcores.append(core)
+
+ LOG.debug("Starting cores %s", tmpcores)
+ self.put_command("start {}\n".format(join_non_strings(',', tmpcores)))
def reset_stats(self):
""" reset the statistics on the remote instance """
LOG.debug("Reset stats")
self.put_command("reset stats\n")
- time.sleep(1)
def _run_template_over_cores(self, template, cores, *args):
for core in cores:
@@ -417,7 +458,6 @@ class ProxSocketHelper(object):
LOG.debug("Set packet size for core(s) %s to %d", cores, pkt_size)
pkt_size -= 4
self._run_template_over_cores("pkt_size {} 0 {}\n", cores, pkt_size)
- time.sleep(1)
def set_value(self, cores, offset, value, length):
""" set value on the remote instance """
@@ -467,13 +507,14 @@ class ProxSocketHelper(object):
core_data['current'] = core_data[key1] + core_data[key2]
self.set_speed(core_data['cores'], core_data['current'])
- def set_pps(self, cores, pps, pkt_size):
+ def set_pps(self, cores, pps, pkt_size,
+ line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
""" set packets per second for specific cores on the remote instance """
msg = "Set packets per sec for core(s) %s to %g%% of line rate (packet size: %d)"
LOG.debug(msg, cores, pps, pkt_size)
# speed in percent of line-rate
- speed = float(pps) * (pkt_size + 20) / TEN_GIGABIT / BITS_PER_BYTE
+ speed = float(pps) * (pkt_size + 20) / line_speed / BITS_PER_BYTE
self._run_template_over_cores("speed {} 0 {}\n", cores, speed)
def lat_stats(self, cores, task=0):
@@ -520,6 +561,174 @@ class ProxSocketHelper(object):
tsc = int(ret[3])
return rx, tx, drop, tsc
+ def irq_core_stats(self, cores_tasks):
+ """ get IRQ stats per core"""
+
+ stat = {}
+ core = 0
+ task = 0
+ for core, task in cores_tasks:
+ self.put_command("stats task.core({}).task({}).max_irq,task.core({}).task({}).irq(0),"
+ "task.core({}).task({}).irq(1),task.core({}).task({}).irq(2),"
+ "task.core({}).task({}).irq(3),task.core({}).task({}).irq(4),"
+ "task.core({}).task({}).irq(5),task.core({}).task({}).irq(6),"
+ "task.core({}).task({}).irq(7),task.core({}).task({}).irq(8),"
+ "task.core({}).task({}).irq(9),task.core({}).task({}).irq(10),"
+ "task.core({}).task({}).irq(11),task.core({}).task({}).irq(12)"
+ "\n".format(core, task, core, task, core, task, core, task,
+ core, task, core, task, core, task, core, task,
+ core, task, core, task, core, task, core, task,
+ core, task, core, task))
+ in_data_str = self.get_data().split(",")
+ ret = [try_int(s, 0) for s in in_data_str]
+ key = "core_" + str(core)
+ try:
+                stat[key] = {"cpu": core, "max_irq": ret[0], "bucket_0": ret[1],
+                             "bucket_1": ret[2], "bucket_2": ret[3],
+                             "bucket_3": ret[4], "bucket_4": ret[5],
+                             "bucket_5": ret[6], "bucket_6": ret[7],
+                             "bucket_7": ret[8], "bucket_8": ret[9],
+                             "bucket_9": ret[10], "bucket_10": ret[11],
+                             "bucket_11": ret[12], "bucket_12": ret[13],
+                             "overflow": ret[10] + ret[11] + ret[12] + ret[13]}
+ except (KeyError, IndexError):
+ LOG.error("Corrupted PACKET %s", in_data_str)
+
+ return stat
+
+ def multi_port_stats(self, ports):
+ """get counter values from all ports at once"""
+
+ ports_str = ",".join(map(str, ports))
+ ports_all_data = []
+ tot_result = [0] * len(ports)
+
+ port_index = 0
+        while len(ports) != len(ports_all_data):
+ self.put_command("multi port stats {}\n".format(ports_str))
+ status, ports_all_data_str = self.get_string()
+
+ if not status:
+ return False, []
+
+ ports_all_data = ports_all_data_str.split(";")
+
+            if len(ports) == len(ports_all_data):
+ for port_data_str in ports_all_data:
+
+ tmpdata = []
+ try:
+ tmpdata = [try_int(s, 0) for s in port_data_str.split(",")]
+ except (IndexError, TypeError):
+ LOG.error("Unpacking data error %s", port_data_str)
+ return False, []
+
+ if (len(tmpdata) < 6) or tmpdata[0] not in ports:
+ LOG.error("Corrupted PACKET %s - retrying", port_data_str)
+ return False, []
+ else:
+ tot_result[port_index] = tmpdata
+ port_index = port_index + 1
+ else:
+                LOG.error("Empty or excess port data - retrying -%s-", ports_all_data)
+ return False, []
+
+ LOG.debug("Multi port packet ..OK.. %s", tot_result)
+ return True, tot_result
+
+ @staticmethod
+ def multi_port_stats_tuple(stats, ports):
+ """
+ Create a statistics tuple from port stats.
+
+        Returns a dict that contains the port stats indexed by port name.
+
+        :param stats: (List) - list of per-port stats lists, in pps
+        :param ports: (Iterator) - iterator over (port_name, port_num) pairs
+
+ :return: (Dict) of port stats indexed by port_name
+ """
+
+ samples = {}
+ port_names = {}
+ try:
+ port_names = {port_num: port_name for port_name, port_num in ports}
+ except (TypeError, IndexError, KeyError):
+            LOG.critical("Ports are not initialized or the number of ports is zero")
+ return {}
+
+ try:
+ for stat in stats:
+ port_num = stat[0]
+ samples[port_names[port_num]] = {
+ "in_packets": stat[1],
+ "out_packets": stat[2]}
+ except (TypeError, IndexError, KeyError):
+            LOG.error("Ports data and samples data are incompatible")
+ return {}
+
+ return samples
+
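A quick sketch of the indexing performed above, with one hypothetical port:

    stats = [[0, 1500, 1400, 0, 0, 123456]]   # port 0: rx=1500, tx=1400
    ProxSocketHelper.multi_port_stats_tuple(stats, [("xe0", 0)])
    # -> {'xe0': {'in_packets': 1500, 'out_packets': 1400}}
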
+ @staticmethod
+ def multi_port_stats_diff(prev_stats, new_stats, hz):
+ """
+        Compute the difference between previous and current port stats
+        and return the result as rates in pps.
+
+ :param prev_stats: (List) - Previous List of port statistics
+ :param new_stats: (List) - Current List of port statistics
+        :param hz: (float) - system clock frequency in Hz
+
+        :return: (List) - per-port difference between prev_stats and
+                 new_stats, expressed in pps
+ """
+
+ RX_TOTAL_INDEX = 1
+ TX_TOTAL_INDEX = 2
+ TSC_INDEX = 5
+
+ stats = []
+
+        if len(prev_stats) != len(new_stats):
+ for port_index, stat in enumerate(new_stats):
+ stats.append([port_index, float(0), float(0), 0, 0, 0])
+ return stats
+
+ try:
+ for port_index, stat in enumerate(new_stats):
+ if stat[RX_TOTAL_INDEX] > prev_stats[port_index][RX_TOTAL_INDEX]:
+ rx_total = stat[RX_TOTAL_INDEX] - \
+ prev_stats[port_index][RX_TOTAL_INDEX]
+ else:
+ rx_total = stat[RX_TOTAL_INDEX]
+
+ if stat[TX_TOTAL_INDEX] > prev_stats[port_index][TX_TOTAL_INDEX]:
+ tx_total = stat[TX_TOTAL_INDEX] - prev_stats[port_index][TX_TOTAL_INDEX]
+ else:
+ tx_total = stat[TX_TOTAL_INDEX]
+
+ if stat[TSC_INDEX] > prev_stats[port_index][TSC_INDEX]:
+ tsc = stat[TSC_INDEX] - prev_stats[port_index][TSC_INDEX]
+ else:
+ tsc = stat[TSC_INDEX]
+
+                if tsc == 0:
+                    rx_total = tx_total = float(0)
+                else:
+                    if hz == 0:
+                        LOG.error("hz is zero, cannot compute rates")
+                        rx_total = tx_total = float(0)
+ else:
+ rx_total = float(rx_total * hz / tsc)
+ tx_total = float(tx_total * hz / tsc)
+
+ stats.append([port_index, rx_total, tx_total, 0, 0, tsc])
+ except (TypeError, IndexError, KeyError):
+ stats = []
+            LOG.info("Current port stats incompatible with previous port stats - discarded")
+
+ return stats
+
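A worked example of the rate computation above: with a 2 GHz clock and a TSC delta worth one second of ticks, the packet deltas become pps directly (all numbers hypothetical):

    prev = [[0, 1000, 900, 0, 0, 2 * 10**9]]
    curr = [[0, 3000, 2700, 0, 0, 4 * 10**9]]
    ProxSocketHelper.multi_port_stats_diff(prev, curr, hz=2 * 10**9)
    # delta_rx=2000, delta_tx=1800, delta_tsc=2e9 ticks (1 s at 2 GHz)
    # -> [[0, 2000.0, 1800.0, 0, 0, 2000000000]]
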
def port_stats(self, ports):
"""get counter values from a specific port"""
tot_result = [0] * 12
@@ -580,7 +789,6 @@ class ProxSocketHelper(object):
self.put_command("quit_force\n")
time.sleep(3)
-
_LOCAL_OBJECT = object()
@@ -662,6 +870,30 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
file_str[1] = self.additional_files[base_name]
return '"'.join(file_str)
+    def _make_core_list(self, input_str):
+
+        my_input = input_str.split("core ", 1)[1]
+ ok_list = set()
+
+ substrs = [x.strip() for x in my_input.split(',')]
+ for i in substrs:
+ try:
+ ok_list.add(int(i))
+
+ except ValueError:
+ try:
+ substr = [int(k.strip()) for k in i.split('-')]
+ if len(substr) > 1:
+ startstr = substr[0]
+ endstr = substr[len(substr) - 1]
+ for z in range(startstr, endstr + 1):
+ ok_list.add(z)
+ except ValueError:
+                    LOG.error("Invalid core range in cores list - returning cores parsed so far")
+ return ok_list
+
+ return ok_list
+
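A short sketch of the range expansion above, assuming `helper` is a ProxDpdkVnfSetupEnvHelper instance:

    helper._make_core_list("core 1,3-5")
    # -> {1, 3, 4, 5} (single ids and inclusive ranges, returned as a set)
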
def generate_prox_config_file(self, config_path):
sections = []
prox_config = ConfigParser(config_path, sections)
@@ -681,6 +913,18 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
if section_data[0] == "mac":
section_data[1] = "hardware"
+ # adjust for range of cores
+ new_sections = []
+ for section_name, section in sections:
+ if section_name.startswith('core') and section_name.find('$') == -1:
+ core_list = self._make_core_list(section_name)
+ for core in core_list:
+ new_sections.append(["core " + str(core), section])
+ else:
+ new_sections.append([section_name, section])
+
+ sections = new_sections
+
# search for dst mac
for _, section in sections:
for section_data in section:
@@ -699,6 +943,20 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
mac = intf["virtual-interface"]["dst_mac"]
section_data[1] = mac
+ if item_val.startswith("@@src_mac"):
+ tx_port_iter = re.finditer(r'\d+', item_val)
+ tx_port_no = int(next(tx_port_iter).group(0))
+ intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
+ mac = intf["virtual-interface"]["local_mac"]
+ section_data[1] = mac.replace(":", " ", 6)
+
+ if item_key == "src mac" and item_val.startswith("@@"):
+ tx_port_iter = re.finditer(r'\d+', item_val)
+ tx_port_no = int(next(tx_port_iter).group(0))
+ intf = self.vnfd_helper.find_interface_by_port(tx_port_no)
+ mac = intf["virtual-interface"]["local_mac"]
+ section_data[1] = mac
+
# if addition file specified in prox config
if not self.additional_files:
return sections
@@ -798,7 +1056,7 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
options = self.scenario_helper.options
config_path = options['prox_config']
config_file = os.path.basename(config_path)
- config_path = find_relative_file(config_path, task_path)
+ config_path = utils.find_relative_file(config_path, task_path)
self.additional_files = {}
try:
@@ -815,7 +1073,7 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
prox_files = [prox_files]
for key_prox_file in prox_files:
base_prox_file = os.path.basename(key_prox_file)
- key_prox_path = find_relative_file(key_prox_file, task_path)
+ key_prox_path = utils.find_relative_file(key_prox_file, task_path)
remote_prox_file = self.copy_to_target(key_prox_path, base_prox_file)
self.additional_files[base_prox_file] = remote_prox_file
@@ -873,6 +1131,8 @@ class ProxResourceHelper(ClientResourceHelper):
self.step_delta = 1
self.step_time = 0.5
self._test_type = None
+ self.prev_multi_port = []
+ self.prev_hz = 0
@property
def sut(self):
@@ -901,7 +1161,7 @@ class ProxResourceHelper(ClientResourceHelper):
def _run_traffic_once(self, traffic_profile):
traffic_profile.execute_traffic(self)
- if traffic_profile.done:
+ if traffic_profile.done.is_set():
self._queue.put({'done': True})
LOG.debug("tg_prox done")
self._terminated.value = 1
@@ -911,11 +1171,40 @@ class ProxResourceHelper(ClientResourceHelper):
def collect_collectd_kpi(self):
return self._collect_resource_kpi()
+ def collect_live_stats(self):
+ ports = []
+ for _, port_num in self.vnfd_helper.ports_iter():
+ ports.append(port_num)
+
+ ok, curr_port_stats = self.sut.multi_port_stats(ports)
+ if not ok:
+ return False, {}
+
+ hz = self.sut.hz()
+        if hz == 0:
+ hz = self.prev_hz
+ else:
+ self.prev_hz = hz
+
+ new_all_port_stats = \
+ self.sut.multi_port_stats_diff(self.prev_multi_port, curr_port_stats, hz)
+
+ self.prev_multi_port = curr_port_stats
+
+ live_stats = self.sut.multi_port_stats_tuple(new_all_port_stats,
+ self.vnfd_helper.ports_iter())
+ return True, live_stats
+
def collect_kpi(self):
result = super(ProxResourceHelper, self).collect_kpi()
# add in collectd kpis manually
if result:
result['collect_stats'] = self._collect_resource_kpi()
+
+ ok, live_stats = self.collect_live_stats()
+ if ok:
+ result.update({'live_stats': live_stats})
+
return result
def terminate(self):
@@ -929,6 +1218,7 @@ class ProxResourceHelper(ClientResourceHelper):
func = getattr(self.sut, cmd, None)
if func:
return func(*args, **kwargs)
+ return None
def _connect(self, client=None):
"""Run and connect to prox on the remote system """
@@ -967,12 +1257,13 @@ class ProxResourceHelper(ClientResourceHelper):
class ProxDataHelper(object):
- def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss):
+ def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss, line_speed):
super(ProxDataHelper, self).__init__()
self.vnfd_helper = vnfd_helper
self.sut = sut
self.pkt_size = pkt_size
self.value = value
+ self.line_speed = line_speed
self.tolerated_loss = tolerated_loss
self.port_count = len(self.vnfd_helper.port_pairs.all_ports)
self.tsc_hz = None
@@ -984,32 +1275,71 @@ class ProxDataHelper(object):
@property
def totals_and_pps(self):
if self._totals_and_pps is None:
- rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8]
- pps = self.value / 100.0 * self.line_rate_to_pps()
- self._totals_and_pps = rx_total, tx_total, pps
+ rx_total = tx_total = 0
+ ok = False
+ timeout = time.time() + constants.RETRY_TIMEOUT
+ while not ok:
+ ok, all_ports = self.sut.multi_port_stats([
+ self.vnfd_helper.port_num(port_name)
+ for port_name in self.vnfd_helper.port_pairs.all_ports])
+ if time.time() > timeout:
+ break
+ if ok:
+ for port in all_ports:
+ rx_total = rx_total + port[1]
+ tx_total = tx_total + port[2]
+ requested_pps = self.value / 100.0 * self.line_rate_to_pps()
+ self._totals_and_pps = rx_total, tx_total, requested_pps
return self._totals_and_pps
@property
def rx_total(self):
- return self.totals_and_pps[0]
+ try:
+ ret_val = self.totals_and_pps[0]
+ except (AttributeError, ValueError, TypeError, LookupError):
+ ret_val = 0
+ return ret_val
@property
def tx_total(self):
- return self.totals_and_pps[1]
+ try:
+ ret_val = self.totals_and_pps[1]
+ except (AttributeError, ValueError, TypeError, LookupError):
+ ret_val = 0
+ return ret_val
@property
- def pps(self):
- return self.totals_and_pps[2]
+ def requested_pps(self):
+ try:
+ ret_val = self.totals_and_pps[2]
+ except (AttributeError, ValueError, TypeError, LookupError):
+ ret_val = 0
+ return ret_val
@property
def samples(self):
samples = {}
+ ports = []
+ port_names = {}
for port_name, port_num in self.vnfd_helper.ports_iter():
- port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8]
- samples[port_name] = {
- "in_packets": port_rx_total,
- "out_packets": port_tx_total,
- }
+ ports.append(port_num)
+ port_names[port_num] = port_name
+
+ ok = False
+ timeout = time.time() + constants.RETRY_TIMEOUT
+ while not ok:
+ ok, results = self.sut.multi_port_stats(ports)
+ if time.time() > timeout:
+ break
+ if ok:
+ for result in results:
+ port_num = result[0]
+ try:
+ samples[port_names[port_num]] = {
+ "in_packets": result[1],
+ "out_packets": result[2]}
+ except (IndexError, KeyError):
+ pass
return samples
def __enter__(self):
@@ -1032,7 +1362,7 @@ class ProxDataHelper(object):
self.latency,
self.rx_total,
self.tx_total,
- self.pps,
+ self.requested_pps,
)
self.result_tuple.log_data()
@@ -1051,9 +1381,7 @@ class ProxDataHelper(object):
self.tsc_hz = float(self.sut.hz())
def line_rate_to_pps(self):
- # NOTE: to fix, don't hardcode 10Gb/s
- return self.port_count * TEN_GIGABIT / BITS_PER_BYTE / (self.pkt_size + 20)
-
+ return self.port_count * self.line_speed / BITS_PER_BYTE / (self.pkt_size + 20)
class ProxProfileHelper(object):
@@ -1113,6 +1441,7 @@ class ProxProfileHelper(object):
self.sut.set_pkt_size(self.test_cores, pkt_size)
self.sut.set_speed(self.test_cores, value)
self.sut.start_all()
+ time.sleep(1)
yield
finally:
self.sut.stop_all()
@@ -1127,15 +1456,37 @@ class ProxProfileHelper(object):
for key, value in section:
if key == "mode" and value == mode:
core_tuple = CoreSocketTuple(section_name)
- core = core_tuple.find_in_topology(self.cpu_topology)
+ core = core_tuple.core_id
cores.append(core)
return cores
- def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
- data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)
+ def pct_10gbps(self, percent, line_speed):
+ """Get rate in percent of 10 Gbps.
+
+ Returns the rate in percent of 10 Gbps.
+ For instance 100.0 = 10 Gbps; 400.0 = 40 Gbps.
+
+        This helper method is required when setting the interface_speed
+        option in the test case, because NSB/PROX considers 10 Gbps to be
+        100% of line rate; this means the line rate must be expressed as
+        a percentage of 10 Gbps.
+
+ :param percent: (float) Percent of line rate (100.0 = line rate).
+ :param line_speed: (int) line rate speed, in bits per second.
+
+ :return: (float) Represents the rate in percent of 10Gbps.
+ """
+ return (percent * line_speed / (
+ constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT))
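A worked example, assuming `constants.ONE_GIGABIT_IN_BITS` is 10**9 and `constants.NIC_GBPS_DEFAULT` is 10, so the baseline is 10 Gbps (`helper` is a hypothetical ProxProfileHelper instance):

    # full line rate on a 25 Gbps interface:
    helper.pct_10gbps(100.0, 25 * 10**9)   # -> 250.0
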
- with data_helper, self.traffic_context(pkt_size, value):
+ def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
+ line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
+ data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
+ value, tolerated_loss, line_speed)
+
+ with data_helper, self.traffic_context(pkt_size,
+ self.pct_10gbps(value, line_speed)):
with data_helper.measure_tot_stats():
time.sleep(duration)
# Getting statistics to calculate PPS at right speed....
@@ -1149,6 +1500,10 @@ class ProxProfileHelper(object):
:return: return lat_min, lat_max, lat_avg
:rtype: list
"""
+
+ if not self._latency_cores:
+ self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
+
if self._latency_cores:
return self.sut.lat_stats(self._latency_cores)
return []
@@ -1198,12 +1553,12 @@ class ProxMplsProfileHelper(ProxProfileHelper):
if item_value.startswith("tag"):
core_tuple = CoreSocketTuple(section_name)
- core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ core_tag = core_tuple.core_id
cores_tagged.append(core_tag)
elif item_value.startswith("udp"):
core_tuple = CoreSocketTuple(section_name)
- core_udp = core_tuple.find_in_topology(self.cpu_topology)
+ core_udp = core_tuple.core_id
cores_plain.append(core_udp)
return cores_tagged, cores_plain
@@ -1219,6 +1574,7 @@ class ProxMplsProfileHelper(ProxProfileHelper):
ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
self.sut.set_speed(self.plain_cores, value * ratio)
self.sut.start_all()
+ time.sleep(1)
yield
finally:
self.sut.stop_all()
@@ -1276,23 +1632,23 @@ class ProxBngProfileHelper(ProxProfileHelper):
if item_value.startswith("cpe"):
core_tuple = CoreSocketTuple(section_name)
- cpe_core = core_tuple.find_in_topology(self.cpu_topology)
+ cpe_core = core_tuple.core_id
cpe_cores.append(cpe_core)
elif item_value.startswith("inet"):
core_tuple = CoreSocketTuple(section_name)
- inet_core = core_tuple.find_in_topology(self.cpu_topology)
+ inet_core = core_tuple.core_id
inet_cores.append(inet_core)
elif item_value.startswith("arp"):
core_tuple = CoreSocketTuple(section_name)
- arp_core = core_tuple.find_in_topology(self.cpu_topology)
+ arp_core = core_tuple.core_id
arp_cores.append(arp_core)
# We check the tasks/core separately
if item_value.startswith("arp_task"):
core_tuple = CoreSocketTuple(section_name)
- arp_task_core = core_tuple.find_in_topology(self.cpu_topology)
+ arp_task_core = core_tuple.core_id
arp_tasks_core.append(arp_task_core)
return cpe_cores, inet_cores, arp_cores, arp_tasks_core
@@ -1385,10 +1741,13 @@ class ProxBngProfileHelper(ProxProfileHelper):
time.sleep(3)
self.sut.stop(self.all_rx_cores)
- def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
- data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)
+ def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
+ line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
+ data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
+ value, tolerated_loss, line_speed)
- with data_helper, self.traffic_context(pkt_size, value):
+ with data_helper, self.traffic_context(pkt_size,
+ self.pct_10gbps(value, line_speed)):
with data_helper.measure_tot_stats():
time.sleep(duration)
# Getting statistics to calculate PPS at right speed....
@@ -1455,12 +1814,12 @@ class ProxVpeProfileHelper(ProxProfileHelper):
if item_value.startswith("cpe"):
core_tuple = CoreSocketTuple(section_name)
- core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ core_tag = core_tuple.core_id
cpe_cores.append(core_tag)
elif item_value.startswith("inet"):
core_tuple = CoreSocketTuple(section_name)
- inet_core = core_tuple.find_in_topology(self.cpu_topology)
+ inet_core = core_tuple.core_id
inet_cores.append(inet_core)
return cpe_cores, inet_cores
@@ -1572,10 +1931,13 @@ class ProxVpeProfileHelper(ProxProfileHelper):
time.sleep(3)
self.sut.stop(self.all_rx_cores)
- def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
- data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)
+ def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
+ line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
+ data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
+ value, tolerated_loss, line_speed)
- with data_helper, self.traffic_context(pkt_size, value):
+ with data_helper, self.traffic_context(pkt_size,
+ self.pct_10gbps(value, line_speed)):
with data_helper.measure_tot_stats():
time.sleep(duration)
# Getting statistics to calculate PPS at right speed....
@@ -1639,7 +2001,7 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper):
continue
core_tuple = CoreSocketTuple(section_name)
- core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ core_tag = core_tuple.core_id
for item_value in (v for k, v in section if k == 'name'):
if item_value.startswith('tun'):
tun_cores.append(core_tag)
@@ -1761,10 +2123,13 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper):
time.sleep(3)
self.sut.stop(self.all_rx_cores)
- def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
- data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)
+ def run_test(self, pkt_size, duration, value, tolerated_loss=0.0,
+ line_speed=(constants.ONE_GIGABIT_IN_BITS * constants.NIC_GBPS_DEFAULT)):
+ data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size,
+ value, tolerated_loss, line_speed)
- with data_helper, self.traffic_context(pkt_size, value):
+ with data_helper, self.traffic_context(pkt_size,
+ self.pct_10gbps(value, line_speed)):
with data_helper.measure_tot_stats():
time.sleep(duration)
# Getting statistics to calculate PPS at right speed....
@@ -1772,3 +2137,15 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper):
data_helper.latency = self.get_latency()
return data_helper.result_tuple, data_helper.samples
+
+
+class ProxIrqProfileHelper(ProxProfileHelper):
+
+ __prox_profile_type__ = "IRQ Query"
+
+ def __init__(self, resource_helper):
+ super(ProxIrqProfileHelper, self).__init__(resource_helper)
+ self._cores_tuple = None
+ self._ports_tuple = None
+ self.step_delta = 5
+ self.step_time = 0.5
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_irq.py b/yardstick/network_services/vnf_generic/vnf/prox_irq.py
new file mode 100644
index 000000000..614066e46
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/prox_irq.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2018-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import copy
+import time
+
+from yardstick.common.process import check_if_process_failed
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
+from yardstick.benchmark.contexts.base import Context
+from yardstick.network_services.vnf_generic.vnf.prox_helpers import CoreSocketTuple
+LOG = logging.getLogger(__name__)
+
+
+class ProxIrq(SampleVNFTrafficGen):
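+ """Wrapper around a ProxApproxVnf that exposes the IRQ-mode cores
+ defined in the generated PROX configuration."""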
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ vnfd_cpy = copy.deepcopy(vnfd)
+ super(ProxIrq, self).__init__(name, vnfd_cpy)
+
+ self._vnf_wrapper = ProxApproxVnf(
+ name, vnfd, setup_env_helper_type, resource_helper_type)
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.name = self._vnf_wrapper.name
+ self.ssh_helper = self._vnf_wrapper.ssh_helper
+ self.setup_helper = self._vnf_wrapper.setup_helper
+ self.resource_helper = self._vnf_wrapper.resource_helper
+ self.scenario_helper = self._vnf_wrapper.scenario_helper
+ self.irq_cores = None
+
+ def terminate(self):
+ self._vnf_wrapper.terminate()
+ super(ProxIrq, self).terminate()
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._vnf_wrapper.instantiate(scenario_cfg, context_cfg)
+ self._tg_process = self._vnf_wrapper._vnf_process
+
+ def wait_for_instantiate(self):
+ self._vnf_wrapper.wait_for_instantiate()
+
+ def get_irq_cores(self):
+ cores = []
+ mode = "irq"
+
+ for section_name, section in self.setup_helper.prox_config_data:
+ if not section_name.startswith("core"):
+ continue
+ irq_mode = task_present = False
+ task_present_task = 0
+ for key, value in section:
+ if key == "mode" and value == mode:
+ irq_mode = True
+ if key == "task":
+ task_present = True
+ task_present_task = int(value)
+
+ if irq_mode:
+ if not task_present:
+ task_present_task = 0
+ core_tuple = CoreSocketTuple(section_name)
+ core = core_tuple.core_id
+ cores.append((core, task_present_task))
+
+ return cores
+
+
+class ProxIrqVNF(ProxIrq, SampleVNFTrafficGen):
+
+ APP_NAME = 'ProxIrqVNF'
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ ProxIrq.__init__(self, name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+
+ self.start_test_time = None
+ self.end_test_time = None
+
+ def vnf_execute(self, cmd, *args, **kwargs):
+ ignore_errors = kwargs.pop("_ignore_errors", False)
+ try:
+ return self.resource_helper.execute(cmd, *args, **kwargs)
+ except OSError as e:
+ if e.errno in {errno.EPIPE, errno.ESHUTDOWN, errno.ECONNRESET}:
+ if ignore_errors:
+ LOG.debug("ignoring vnf_execute exception %s for command %s", e, cmd)
+ else:
+ raise
+ else:
+ raise
+
+ def collect_kpi(self):
+ # check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
+ for proc in (self._tg_process, self._traffic_process):
+ check_if_process_failed(proc)
+
+ if self.resource_helper is None:
+ return result
+
+ if self.irq_cores is None:
+ self.setup_helper.build_config_file()
+ self.irq_cores = self.get_irq_cores()
+
+ data = self.vnf_execute('irq_core_stats', self.irq_cores)
+ new_data = copy.deepcopy(data)
+
+ self.end_test_time = time.time()
+ self.vnf_execute('reset_stats')
+
+ if self.start_test_time is None:
+ new_data = {}
+ else:
+ test_time = self.end_test_time - self.start_test_time
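+ # Convert bucket/overflow counters into per-second rates over
+ # the measurement window; counters that stayed at zero are
+ # dropped from the reported sample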
+ for index, item in data.items():
+ for counter, value in item.items():
+ if counter.startswith("bucket_")or \
+ counter.startswith("overflow"):
+ if value is 0:
+ del new_data[index][counter]
+ else:
+ new_data[index][counter] = float(value) / test_time
+
+ self.start_test_time = time.time()
+
+ result["collect_stats"] = new_data
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+
+ return result
+
+
+class ProxIrqGen(ProxIrq, SampleVNFTrafficGen):
+
+ APP_NAME = 'ProxIrqGen'
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ ProxIrq.__init__(self, name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+ self.start_test_time = None
+ self.end_test_time = None
+
+ def collect_kpi(self):
+ # check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
+ for proc in (self._tg_process, self._traffic_process):
+ check_if_process_failed(proc)
+
+ if self.resource_helper is None:
+ return result
+
+ if self.irq_cores is None:
+ self.setup_helper.build_config_file()
+ self.irq_cores = self.get_irq_cores()
+
+ data = self.resource_helper.sut.irq_core_stats(self.irq_cores)
+ new_data = copy.deepcopy(data)
+
+ self.end_test_time = time.time()
+ self.resource_helper.sut.reset_stats()
+
+ if self.start_test_time is None:
+ new_data = {}
+ else:
+ test_time = self.end_test_time - self.start_test_time
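+ # Convert bucket/overflow counters into per-second rates over
+ # the measurement window; counters that stayed at zero are
+ # dropped from the reported sample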
+ for index, item in data.items():
+ for counter, value in item.items():
+ if counter.startswith("bucket_") or \
+ counter.startswith("overflow"):
+ if value == 0:
+ del new_data[index][counter]
+ else:
+ new_data[index][counter] = float(value) / test_time
+
+ self.start_test_time = time.time()
+
+ result["collect_stats"] = new_data
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+
+ return result
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index b7d295eee..c9abc757e 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
import errno
import logging
-
+import datetime
+import time
from yardstick.common.process import check_if_process_failed
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, PROCESS_JOIN_TIMEOUT
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services import constants
+from yardstick.benchmark.contexts import base as context_base
LOG = logging.getLogger(__name__)
@@ -39,6 +42,10 @@ class ProxApproxVnf(SampleVNF):
if resource_helper_type is None:
resource_helper_type = ProxResourceHelper
+ self.prev_packets_in = 0
+ self.prev_packets_sent = 0
+ self.prev_tsc = 0
+ self.tsc_hz = 0
super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
resource_helper_type)
@@ -62,41 +69,90 @@ class ProxApproxVnf(SampleVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
- check_if_process_failed(self._vnf_process)
+ check_if_process_failed(self._vnf_process, 0.01)
+
+ physical_node = context_base.Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
if self.resource_helper is None:
- result = {
+ result.update({
"packets_in": 0,
"packets_dropped": 0,
"packets_fwd": 0,
+ "curr_packets_in": 0,
+ "curr_packets_fwd": 0,
"collect_stats": {"core": {}},
- }
+ })
return result
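+ # Cache the TSC frequency on the first call; it is used below to
+ # convert TSC cycle deltas into elapsed seconds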
+ if self.tsc_hz == 0:
+ self.tsc_hz = float(self.resource_helper.sut.hz())
+ LOG.debug("TSC = %f", self.tsc_hz)
+ if self.tsc_hz == 0:
+ raise RuntimeError("Unable to retrieve TSC")
+
# use all_ports so we only use ports matched in topology
port_count = len(self.vnfd_helper.port_pairs.all_ports)
if port_count not in {1, 2, 4}:
raise RuntimeError("Failed ..Invalid no of ports .. "
"1, 2 or 4 ports only supported at this time")
- port_stats = self.vnf_execute('port_stats', range(port_count))
- try:
- rx_total = port_stats[6]
- tx_total = port_stats[7]
- except IndexError:
- LOG.error("port_stats parse fail %s", port_stats)
- # return empty dict so we don't mess up existing KPIs
+ tmp_ports = [self.vnfd_helper.port_num(port_name)
+ for port_name in self.vnfd_helper.port_pairs.all_ports]
+ ok = False
+ timeout = time.time() + constants.RETRY_TIMEOUT
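+ # Poll PROX for multi_port_stats until it returns valid data or
+ # the retry timeout expires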
+ while not ok:
+ ok, all_port_stats = self.vnf_execute('multi_port_stats', tmp_ports)
+ if time.time() > timeout:
+ break
+
+ if ok:
+ rx_total = tx_total = tsc = 0
+ try:
+ for single_port_stats in all_port_stats:
+ rx_total = rx_total + single_port_stats[1]
+ tx_total = tx_total + single_port_stats[2]
+ tsc = tsc + single_port_stats[5]
+ except (TypeError, IndexError):
+ LOG.error("Invalid data ...")
+ return {}
+ else:
return {}
- result = {
+ tsc = tsc / port_count
+
+ result.update({
"packets_in": rx_total,
"packets_dropped": max((tx_total - rx_total), 0),
"packets_fwd": tx_total,
# we share ProxResourceHelper with TG, but we want to collect
# collectd KPIs here and not TG KPIs, so use a different method name
"collect_stats": self.resource_helper.collect_collectd_kpi(),
- }
- LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+ })
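+ # Per-interval rates: packet delta divided by elapsed time, where
+ # elapsed time is the TSC cycle delta divided by the TSC frequency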
+ try:
+ curr_packets_in = int(((rx_total - self.prev_packets_in) * self.tsc_hz)
+ / (tsc - self.prev_tsc))
+ except ZeroDivisionError:
+ LOG.error("Error.... Divide by Zero")
+ curr_packets_in = 0
+
+ try:
+ curr_packets_fwd = int(((tx_total - self.prev_packets_sent) * self.tsc_hz)
+ / (tsc - self.prev_tsc))
+ except ZeroDivisionError:
+ LOG.error("Error.... Divide by Zero")
+ curr_packets_fwd = 0
+
+ result["curr_packets_in"] = curr_packets_in
+ result["curr_packets_fwd"] = curr_packets_fwd
+
+ self.prev_packets_in = rx_total
+ self.prev_packets_sent = tx_total
+ self.prev_tsc = tsc
+
+ LOG.debug("%s collect KPIs %s %s", self.APP_NAME, datetime.datetime.now(), result)
return result
def _tear_down(self):
@@ -119,5 +175,5 @@ class ProxApproxVnf(SampleVNF):
self._tear_down()
if self._vnf_process is not None:
LOG.debug("joining before terminate %s", self._vnf_process.name)
- self._vnf_process.join(PROCESS_JOIN_TIMEOUT)
+ self._vnf_process.join(constants.PROCESS_JOIN_TIMEOUT)
self._vnf_process.terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/router_vnf.py b/yardstick/network_services/vnf_generic/vnf/router_vnf.py
index aea27ffa6..f1486bdb4 100644
--- a/yardstick/network_services/vnf_generic/vnf/router_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/router_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -47,7 +47,6 @@ class RouterVNF(SampleVNF):
def instantiate(self, scenario_cfg, context_cfg):
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
self.configure_routes(self.name, scenario_cfg, context_cfg)
def wait_for_instantiate(self):
@@ -107,8 +106,11 @@ class RouterVNF(SampleVNF):
stdout = self.ssh_helper.execute(ip_link_stats)[1]
link_stats = self.get_stats(stdout)
# get RX/TX from link_stats and assign to results
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
result = {
+ "physical_node": physical_node,
"packets_in": 0,
"packets_dropped": 0,
"packets_fwd": 0,
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 5eeb6c889..a369a3ae6 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,91 +11,43 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class implementation for generic vnf implementation """
-from __future__ import absolute_import
-
-import posixpath
-import time
import logging
+import decimal
+from multiprocessing import Queue, Value, Process, JoinableQueue
import os
+import posixpath
import re
import subprocess
-from collections import Mapping
-from multiprocessing import Queue, Value, Process
+import time
-from six.moves import cStringIO
+from trex_stl_lib.trex_stl_client import LoggerApi
+from trex_stl_lib.trex_stl_client import STLClient
+from trex_stl_lib.trex_stl_exceptions import STLError
from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
from yardstick.common import exceptions as y_exceptions
from yardstick.common.process import check_if_process_failed
-from yardstick.network_services.helpers.samplevnf_helper import PortPairs
+from yardstick.common import utils
+from yardstick.common import yaml_loader
+from yardstick.network_services import constants
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
-from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper
from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
-from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
-from yardstick.network_services.utils import get_nsb_option
-
-from trex_stl_lib.trex_stl_client import STLClient
-from trex_stl_lib.trex_stl_client import LoggerApi
-from trex_stl_lib.trex_stl_exceptions import STLError
-
-from yardstick.ssh import AutoConnectSSH
-
-DPDK_VERSION = "dpdk-16.07"
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+from yardstick.benchmark.contexts.node import NodeContext
LOG = logging.getLogger(__name__)
-REMOTE_TMP = "/tmp"
-DEFAULT_VNF_TIMEOUT = 3600
-PROCESS_JOIN_TIMEOUT = 3
-
-
-class VnfSshHelper(AutoConnectSSH):
-
- def __init__(self, node, bin_path, wait=None):
- self.node = node
- kwargs = self.args_from_node(self.node)
- if wait:
- kwargs.setdefault('wait', wait)
-
- super(VnfSshHelper, self).__init__(**kwargs)
- self.bin_path = bin_path
-
- @staticmethod
- def get_class():
- # must return static class name, anything else refers to the calling class
- # i.e. the subclass, not the superclass
- return VnfSshHelper
-
- def copy(self):
- # this copy constructor is different from SSH classes, since it uses node
- return self.get_class()(self.node, self.bin_path)
-
- def upload_config_file(self, prefix, content):
- cfg_file = os.path.join(REMOTE_TMP, prefix)
- LOG.debug(content)
- file_obj = cStringIO(content)
- self.put_file_obj(file_obj, cfg_file)
- return cfg_file
-
- def join_bin_path(self, *args):
- return os.path.join(self.bin_path, *args)
-
- def provision_tool(self, tool_path=None, tool_file=None):
- if tool_path is None:
- tool_path = self.bin_path
- return super(VnfSshHelper, self).provision_tool(tool_path, tool_file)
-
-
class SetupEnvHelper(object):
- CFG_CONFIG = os.path.join(REMOTE_TMP, "sample_config")
- CFG_SCRIPT = os.path.join(REMOTE_TMP, "sample_script")
+ CFG_CONFIG = os.path.join(constants.REMOTE_TMP, "sample_config")
+ CFG_SCRIPT = os.path.join(constants.REMOTE_TMP, "sample_script")
DEFAULT_CONFIG_TPL_CFG = "sample.cfg"
PIPELINE_COMMAND = ''
VNF_TYPE = "SAMPLE"
@@ -105,6 +57,7 @@ class SetupEnvHelper(object):
self.vnfd_helper = vnfd_helper
self.ssh_helper = ssh_helper
self.scenario_helper = scenario_helper
+ self.collectd_options = {}
def build_config(self):
raise NotImplementedError
@@ -123,6 +76,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
APP_NAME = 'DpdkVnf'
FIND_NET_CMD = "find /sys/class/net -lname '*{}*' -printf '%f'"
+ NR_HUGEPAGES_PATH = '/proc/sys/vm/nr_hugepages'
@staticmethod
def _update_packet_type(ip_pipeline_cfg, traffic_options):
@@ -158,25 +112,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.used_drivers = None
self.dpdk_bind_helper = DpdkBindHelper(ssh_helper)
- def _setup_hugepages(self):
- cmd = "awk '/Hugepagesize/ { print $2$3 }' < /proc/meminfo"
- hugepages = self.ssh_helper.execute(cmd)[1].rstrip()
-
- memory_path = \
- '/sys/kernel/mm/hugepages/hugepages-%s/nr_hugepages' % hugepages
- self.ssh_helper.execute("awk -F: '{ print $1 }' < %s" % memory_path)
-
- if hugepages == "2048kB":
- pages = 8192
- else:
- pages = 16
-
- self.ssh_helper.execute("echo %s | sudo tee %s" % (pages, memory_path))
-
def build_config(self):
vnf_cfg = self.scenario_helper.vnf_cfg
task_path = self.scenario_helper.task_path
+ config_file = vnf_cfg.get('file')
lb_count = vnf_cfg.get('lb_count', 3)
lb_config = vnf_cfg.get('lb_config', 'SW')
worker_config = vnf_cfg.get('worker_config', '1C/1T')
@@ -189,7 +129,15 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
'vnf_type': self.VNF_TYPE,
}
- config_tpl_cfg = find_relative_file(self.DEFAULT_CONFIG_TPL_CFG, task_path)
+ # read actions/rules from file
+ acl_options = None
+ acl_file_name = self.scenario_helper.options.get('rules')
+ if acl_file_name:
+ with utils.open_relative_file(acl_file_name, task_path) as infile:
+ acl_options = yaml_loader.yaml_load(infile)
+
+ config_tpl_cfg = utils.find_relative_file(self.DEFAULT_CONFIG_TPL_CFG,
+ task_path)
config_basename = posixpath.basename(self.CFG_CONFIG)
script_basename = posixpath.basename(self.CFG_SCRIPT)
multiport = MultiPortConfig(self.scenario_helper.topology,
@@ -204,21 +152,34 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.socket)
multiport.generate_config()
- with open(self.CFG_CONFIG) as handle:
- new_config = handle.read()
-
- new_config = self._update_traffic_type(new_config, traffic_options)
- new_config = self._update_packet_type(new_config, traffic_options)
-
+ if config_file:
+ with utils.open_relative_file(config_file, task_path) as infile:
+ new_config = ['[EAL]']
+ vpci = []
+ for port in self.vnfd_helper.port_pairs.all_ports:
+ interface = self.vnfd_helper.find_interface(name=port)
+ vpci.append(interface['virtual-interface']["vpci"])
+ new_config.extend('w = {0}'.format(item) for item in vpci)
+ new_config = '\n'.join(new_config) + '\n' + infile.read()
+ else:
+ with open(self.CFG_CONFIG) as handle:
+ new_config = handle.read()
+ new_config = self._update_traffic_type(new_config, traffic_options)
+ new_config = self._update_packet_type(new_config, traffic_options)
self.ssh_helper.upload_config_file(config_basename, new_config)
self.ssh_helper.upload_config_file(script_basename,
- multiport.generate_script(self.vnfd_helper))
+ multiport.generate_script(self.vnfd_helper,
+ self.get_flows_config(acl_options)))
LOG.info("Provision and start the %s", self.APP_NAME)
self._build_pipeline_kwargs()
return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
- def _build_pipeline_kwargs(self):
+ def get_flows_config(self, options=None): # pylint: disable=unused-argument
+ """No actions/rules (flows) by default"""
+ return None
+
+ def _build_pipeline_kwargs(self, cfg_file=None, script=None):
tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
# count the number of actual ports in the list of pairs
# remove duplicate ports
@@ -229,16 +190,24 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
port_nums = self.vnfd_helper.port_nums(ports)
# create mask from all the dpdk port numbers
ports_mask_hex = hex(sum(2 ** num for num in port_nums))
+
+ vnf_cfg = self.scenario_helper.vnf_cfg
+ lb_config = vnf_cfg.get('lb_config', 'SW')
+ worker_threads = vnf_cfg.get('worker_threads', 3)
+ hwlb = ''
+ if lb_config == 'HW':
+ hwlb = ' --hwlb %s' % worker_threads
+
self.pipeline_kwargs = {
- 'cfg_file': self.CFG_CONFIG,
- 'script': self.CFG_SCRIPT,
+ 'cfg_file': cfg_file if cfg_file else self.CFG_CONFIG,
+ 'script': script if script else self.CFG_SCRIPT,
'port_mask_hex': ports_mask_hex,
'tool_path': tool_path,
+ 'hwlb': hwlb,
}
def setup_vnf_environment(self):
self._setup_dpdk()
- self.bound_pci = [v['virtual-interface']["vpci"] for v in self.vnfd_helper.interfaces]
self.kill_vnf()
# bind before _setup_resources so we can use dpdk_port_num
self._detect_and_bind_drivers()
@@ -254,26 +223,17 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.ssh_helper.execute("sudo killall %s" % self.APP_NAME)
def _setup_dpdk(self):
- """ setup dpdk environment needed for vnf to run """
-
- self._setup_hugepages()
- self.ssh_helper.execute("sudo modprobe uio && sudo modprobe igb_uio")
+ """Setup DPDK environment needed for VNF to run"""
+ hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16)
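+ # setup_hugepages() takes the amount in KiB, hence the conversion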
+ utils.setup_hugepages(self.ssh_helper, hugepages_gb * 1024 * 1024)
+ self.dpdk_bind_helper.load_dpdk_driver()
- exit_status = self.ssh_helper.execute("lsmod | grep -i igb_uio")[0]
+ exit_status = self.dpdk_bind_helper.check_dpdk_driver()
if exit_status == 0:
return
-
- dpdk = self.ssh_helper.join_bin_path(DPDK_VERSION)
- dpdk_setup = self.ssh_helper.provision_tool(tool_file="nsb_setup.sh")
- exit_status = self.ssh_helper.execute("which {} >/dev/null 2>&1".format(dpdk))[0]
- if exit_status != 0:
- self.ssh_helper.execute("bash %s dpdk >/dev/null 2>&1" % dpdk_setup)
-
- def get_collectd_options(self):
- options = self.scenario_helper.all_options.get("collectd", {})
- # override with specific node settings
- options.update(self.scenario_helper.options.get("collectd", {}))
- return options
+ else:
+ LOG.critical("DPDK Driver not installed")
+ return
def _setup_resources(self):
# what is this magic? how do we know which socket is for which port?
@@ -287,16 +247,29 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
# this won't work because we don't have DPDK port numbers yet
ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num)
port_names = (intf["name"] for intf in ports)
- collectd_options = self.get_collectd_options()
- plugins = collectd_options.get("plugins", {})
+ plugins = self.collectd_options.get("plugins", {})
+ interval = self.collectd_options.get("interval")
# we must set timeout to be the same as the VNF otherwise KPIs will die before VNF
return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names,
- plugins=plugins, interval=collectd_options.get("interval"),
+ plugins=plugins, interval=interval,
timeout=self.scenario_helper.timeout)
+ def _check_interface_fields(self):
+ num_nodes = len(self.scenario_helper.nodes)
+ # OpenStack instance creation time is probably proportional to the number
+ # of instances
+ timeout = 120 * num_nodes
+ dpdk_node = DpdkNode(self.scenario_helper.name, self.vnfd_helper.interfaces,
+ self.ssh_helper, timeout)
+ dpdk_node.check()
+
def _detect_and_bind_drivers(self):
interfaces = self.vnfd_helper.interfaces
+ self._check_interface_fields()
+ # check for bound after probe
+ self.bound_pci = [v['virtual-interface']["vpci"] for v in interfaces]
+
self.dpdk_bind_helper.read_status()
self.dpdk_bind_helper.save_used_drivers()
@@ -337,6 +310,7 @@ class ResourceHelper(object):
self.resource = None
self.setup_helper = setup_helper
self.ssh_helper = setup_helper.ssh_helper
+ self._enable = True
def setup(self):
self.resource = self.setup_helper.setup_vnf_environment()
@@ -344,22 +318,33 @@ class ResourceHelper(object):
def generate_cfg(self):
pass
+ def update_from_context(self, context, attr_name):
+ """Disable resource helper in case of baremetal context.
+
+ And update appropriate node collectd options in context
+ """
+ if isinstance(context, NodeContext):
+ self._enable = False
+ context.update_collectd_options_for_node(self.setup_helper.collectd_options,
+ attr_name)
+
def _collect_resource_kpi(self):
result = {}
- status = self.resource.check_if_sa_running("collectd")[0]
- if status == 0:
+ status = self.resource.check_if_system_agent_running("collectd")[0]
+ if status == 0 and self._enable:
result = self.resource.amqp_collect_nfvi_kpi()
result = {"core": result}
return result
def start_collect(self):
- self.resource.initiate_systemagent(self.ssh_helper.bin_path)
- self.resource.start()
- self.resource.amqp_process_for_nfvi_kpi()
+ if self._enable:
+ self.resource.initiate_systemagent(self.ssh_helper.bin_path)
+ self.resource.start()
+ self.resource.amqp_process_for_nfvi_kpi()
def stop_collect(self):
- if self.resource:
+ if self.resource and self._enable:
self.resource.stop()
def collect_kpi(self):
@@ -400,42 +385,17 @@ class ClientResourceHelper(ResourceHelper):
try:
return self.client.get_stats(*args, **kwargs)
except STLError:
- LOG.exception("TRex client not connected")
- return {}
-
- def generate_samples(self, ports, key=None, default=None):
- # needs to be used ports
- last_result = self.get_stats(ports)
- key_value = last_result.get(key, default)
-
- if not isinstance(last_result, Mapping): # added for mock unit test
- self._terminated.value = 1
+ LOG.error('TRex client not connected')
return {}
- samples = {}
- # recalculate port for interface and see if it matches ports provided
- for intf in self.vnfd_helper.interfaces:
- name = intf["name"]
- port = self.vnfd_helper.port_num(name)
- if port in ports:
- xe_value = last_result.get(port, {})
- samples[name] = {
- "rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
- "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
- "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
- "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
- "in_packets": int(xe_value.get("ipackets", 0)),
- "out_packets": int(xe_value.get("opackets", 0)),
- }
- if key:
- samples[name][key] = key_value
- return samples
+ def _get_samples(self, ports, port_pg_id=False):
+ raise NotImplementedError()
def _run_traffic_once(self, traffic_profile):
traffic_profile.execute_traffic(self)
self.client_started.value = 1
time.sleep(self.RUN_DURATION)
- samples = self.generate_samples(traffic_profile.ports)
+ samples = self._get_samples(traffic_profile.ports)
time.sleep(self.QUEUE_WAIT_TIME)
self._queue.put(samples)
@@ -448,12 +408,17 @@ class ClientResourceHelper(ResourceHelper):
try:
self._build_ports()
self.client = self._connect()
+ if self.client is None:
+ LOG.critical("Failure to Connect ... unable to continue")
+ return
+
self.client.reset(ports=self.all_ports)
self.client.remove_all_streams(self.all_ports) # remove all streams
traffic_profile.register_generator(self)
while self._terminated.value == 0:
- self._run_traffic_once(traffic_profile)
+ if self._run_traffic_once(traffic_profile):
+ self._terminated.value = 1
self.client.stop(self.all_ports)
self.client.disconnect()
@@ -496,22 +461,35 @@ class ClientResourceHelper(ResourceHelper):
server=self.vnfd_helper.mgmt_interface["ip"],
verbose_level=LoggerApi.VERBOSE_QUIET)
- # try to connect with 5s intervals, 30s max
+ # try to connect with 5s intervals
for idx in range(6):
try:
client.connect()
- break
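+ # connect() returning is not enough: poll is_connected() to
+ # confirm the session is fully established before using it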
+ for idx2 in range(6):
+ if client.is_connected():
+ return client
+ LOG.info("Waiting to confirm connection %s .. Attempt %s",
+ idx, idx2)
+ time.sleep(1)
+ client.disconnect(stop_traffic=True, release_ports=True)
except STLError:
LOG.info("Unable to connect to Trex Server.. Attempt %s", idx)
time.sleep(5)
- return client
+ if client.is_connected():
+ return client
+ else:
+ LOG.critical("Connection failure ..TRex username: %s server: %s",
+ self.vnfd_helper.mgmt_interface["user"],
+ self.vnfd_helper.mgmt_interface["ip"])
+ return None
class Rfc2544ResourceHelper(object):
DEFAULT_CORRELATED_TRAFFIC = False
DEFAULT_LATENCY = False
DEFAULT_TOLERANCE = '0.0001 - 0.0001'
+ DEFAULT_RESOLUTION = '0.1'
def __init__(self, scenario_helper):
super(Rfc2544ResourceHelper, self).__init__()
@@ -522,6 +500,8 @@ class Rfc2544ResourceHelper(object):
self._rfc2544 = None
self._tolerance_low = None
self._tolerance_high = None
+ self._tolerance_precision = None
+ self._resolution = None
@property
def rfc2544(self):
@@ -542,6 +522,12 @@ class Rfc2544ResourceHelper(object):
return self._tolerance_high
@property
+ def tolerance_precision(self):
+ if self._tolerance_precision is None:
+ self.get_rfc_tolerance()
+ return self._tolerance_precision
+
+ @property
def correlated_traffic(self):
if self._correlated_traffic is None:
self._correlated_traffic = \
@@ -555,14 +541,25 @@ class Rfc2544ResourceHelper(object):
self._latency = self.get_rfc2544('latency', self.DEFAULT_LATENCY)
return self._latency
+ @property
+ def resolution(self):
+ if self._resolution is None:
+ self._resolution = float(self.get_rfc2544('resolution',
+ self.DEFAULT_RESOLUTION))
+ return self._resolution
+
def get_rfc2544(self, name, default=None):
return self.rfc2544.get(name, default)
def get_rfc_tolerance(self):
tolerance_str = self.get_rfc2544('allowed_drop_rate', self.DEFAULT_TOLERANCE)
- tolerance_iter = iter(sorted(float(t.strip()) for t in tolerance_str.split('-')))
- self._tolerance_low = next(tolerance_iter)
- self._tolerance_high = next(tolerance_iter, self.tolerance_low)
+ tolerance_iter = iter(sorted(
+ decimal.Decimal(t.strip()) for t in tolerance_str.split('-')))
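+ # Decimal preserves the number of decimal places of the configured
+ # tolerance, which is reported below as the tolerance precision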
+ tolerance_low = next(tolerance_iter)
+ tolerance_high = next(tolerance_iter, tolerance_low)
+ self._tolerance_precision = abs(tolerance_high.as_tuple().exponent)
+ self._tolerance_high = float(tolerance_high)
+ self._tolerance_low = float(tolerance_low)
class SampleVNFDeployHelper(object):
@@ -638,8 +635,10 @@ class ScenarioHelper(object):
@property
def timeout(self):
- return self.options.get('timeout', DEFAULT_VNF_TIMEOUT)
-
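+ # Use the larger of the runner duration and the configured timeout
+ # so the VNF is not torn down before the test completes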
+ test_duration = self.scenario_cfg.get('runner', {}).get('duration',
+ self.options.get('timeout', constants.DEFAULT_VNF_TIMEOUT))
+ test_timeout = self.options.get('timeout', constants.DEFAULT_VNF_TIMEOUT)
+ return max(test_duration, test_timeout)
class SampleVNF(GenericVNF):
""" Class providing file-like API for generic VNF implementation """
@@ -672,7 +671,6 @@ class SampleVNF(GenericVNF):
self.resource_helper = resource_helper_type(self.setup_helper)
self.context_cfg = None
- self.nfvi_context = None
self.pipeline_kwargs = {}
self.uplink_ports = None
self.downlink_ports = None
@@ -686,49 +684,6 @@ class SampleVNF(GenericVNF):
self.vnf_port_pairs = None
self._vnf_process = None
- def _build_ports(self):
- self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
- self.networks = self._port_pairs.networks
- self.uplink_ports = self.vnfd_helper.port_nums(self._port_pairs.uplink_ports)
- self.downlink_ports = self.vnfd_helper.port_nums(self._port_pairs.downlink_ports)
- self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports)
-
- def _get_route_data(self, route_index, route_type):
- route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', []))
- for _ in range(route_index):
- next(route_iter, '')
- return next(route_iter, {}).get(route_type, '')
-
- def _get_port0localip6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0localip6 : %s", return_value)
- return return_value
-
- def _get_port1localip6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1localip6 : %s", return_value)
- return return_value
-
- def _get_port0prefixlen6(self):
- return_value = self._get_route_data(0, 'netmask')
- LOG.info("_get_port0prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port1prefixlen6(self):
- return_value = self._get_route_data(1, 'netmask')
- LOG.info("_get_port1prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port0gateway6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0gateway6 : %s", return_value)
- return return_value
-
- def _get_port1gateway6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1gateway6 : %s", return_value)
- return return_value
-
def _start_vnf(self):
self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT)
name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid())
@@ -739,10 +694,13 @@ class SampleVNF(GenericVNF):
pass
def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
- # self.nfvi_context = None
+ self.resource_helper.update_from_context(
+ Context.get_context_from_server(self.scenario_helper.nodes[self.name]),
+ self.scenario_helper.nodes[self.name]
+ )
# vnf deploy is unsupported, use ansible playbooks
if self.scenario_helper.options.get("vnf_deploy", False):
@@ -750,6 +708,54 @@ class SampleVNF(GenericVNF):
self.resource_helper.setup()
self._start_vnf()
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ """Update collectd configuration options
+ This function retrieves all collectd options contained in the test case
+
+ definition builds a single dictionary combining them. The following fragment
+ represents a test case with the collectd options and priorities (1 highest, 3 lowest):
+ ---
+ schema: yardstick:task:0.1
+ scenarios:
+ - type: NSPerf
+ nodes:
+ tg__0: trafficgen_0.yardstick
+ vnf__0: vnf_0.yardstick
+ options:
+ collectd:
+ <options> # COLLECTD priority 3
+ vnf__0:
+ collectd:
+ plugins:
+ load
+ <options> # COLLECTD priority 2
+ context:
+ type: Node
+ name: yardstick
+ nfvi_type: baremetal
+ file: /etc/yardstick/nodes/pod_ixia.yaml # COLLECTD priority 1
+ """
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ """Update collectd options and plugins dictionary"""
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
def wait_for_instantiate(self):
buf = []
time.sleep(self.WAIT_TIME) # Give some time for config to load
@@ -765,7 +771,6 @@ class SampleVNF(GenericVNF):
LOG.info("%s VNF is up and running.", self.APP_NAME)
self._vnf_up_post()
self.queue_wrapper.clear()
- self.resource_helper.start_collect()
return self._vnf_process.exitcode
if "PANIC" in message:
@@ -778,6 +783,59 @@ class SampleVNF(GenericVNF):
# by other VNF output
self.q_in.put('\r\n')
+ def wait_for_initialize(self):
+ buf = []
+ vnf_prompt_found = False
+ prompt_command = '\r\n'
+ script_name = 'non_existent_script_name'
+ done_string = 'Cannot open file "{}"'.format(script_name)
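+ # Asking the VNF to run a non-existent script produces this error
+ # string, but only once the VNF has finished initializing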
+ time.sleep(self.WAIT_TIME) # Give some time for config to load
+ while True:
+ if not self._vnf_process.is_alive():
+ raise RuntimeError("%s VNF process died." % self.APP_NAME)
+ while self.q_out.qsize() > 0:
+ buf.append(self.q_out.get())
+ message = ''.join(buf)
+
+ if self.VNF_PROMPT in message and not vnf_prompt_found:
+ # Getting the VNF prompt does not mean that the VNF is
+ # up and running/initialized completely. But we can run
+ # an additional (any) VNF command and wait for it to
+ # complete, as it will finish ONLY at the end of the VNF
+ # initialization. So, this approach can be used to
+ # identify that the VNF is completely initialized.
+ LOG.info("Got %s VNF prompt.", self.APP_NAME)
+ prompt_command = "run {}\r\n".format(script_name)
+ self.q_in.put(prompt_command)
+ # Cut the buffer since we are no longer interested in
+ # finding the VNF prompt
+ prompt_pos = message.find(self.VNF_PROMPT)
+ buf = [message[prompt_pos + len(self.VNF_PROMPT):]]
+ vnf_prompt_found = True
+ continue
+
+ if done_string in message:
+ LOG.info("%s VNF is up and running.", self.APP_NAME)
+ self._vnf_up_post()
+ self.queue_wrapper.clear()
+ return self._vnf_process.exitcode
+
+ if "PANIC" in message:
+ raise RuntimeError("Error starting %s VNF." %
+ self.APP_NAME)
+
+ LOG.info("Waiting for %s VNF to start.. ", self.APP_NAME)
+ time.sleep(self.WAIT_TIME_FOR_SCRIPT)
+ # Send command again to display the expected prompt in case the
+ # expected text was corrupted by other VNF output
+ self.q_in.put(prompt_command)
+
+ def start_collect(self):
+ self.resource_helper.start_collect()
+
+ def stop_collect(self):
+ self.resource_helper.stop_collect()
+
def _build_run_kwargs(self):
self.run_kwargs = {
'stdin': self.queue_wrapper,
@@ -823,7 +881,7 @@ class SampleVNF(GenericVNF):
if self._vnf_process is not None:
# be proper and join first before we kill
LOG.debug("joining before terminate %s", self._vnf_process.name)
- self._vnf_process.join(PROCESS_JOIN_TIMEOUT)
+ self._vnf_process.join(constants.PROCESS_JOIN_TIMEOUT)
self._vnf_process.terminate()
# no terminate children here because we share processes with tg
@@ -840,18 +898,21 @@ class SampleVNF(GenericVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
- check_if_process_failed(self._vnf_process)
+ check_if_process_failed(self._vnf_process, 0.01)
stats = self.get_stats()
m = re.search(self.COLLECT_KPI, stats, re.MULTILINE)
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
if m:
- result = {k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()}
+ result.update({k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()})
result["collect_stats"] = self.resource_helper.collect_kpi()
else:
- result = {
- "packets_in": 0,
- "packets_fwd": 0,
- "packets_dropped": 0,
- }
+ result.update({"packets_in": 0,
+ "packets_fwd": 0,
+ "packets_dropped": 0})
+
LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
return result
@@ -890,6 +951,39 @@ class SampleVNFTrafficGen(GenericTrafficGen):
self.traffic_finished = False
self._tg_process = None
self._traffic_process = None
+ self._tasks_queue = JoinableQueue()
+ self._result_queue = Queue()
+
+ def _test_runner(self, traffic_profile, tasks, results):
+ self.resource_helper.run_test(traffic_profile, tasks, results)
+
+ def _init_traffic_process(self, traffic_profile):
+ name = '{}-{}-{}-{}'.format(self.name, self.APP_NAME,
+ traffic_profile.__class__.__name__,
+ os.getpid())
+ self._traffic_process = Process(name=name, target=self._test_runner,
+ args=(
+ traffic_profile, self._tasks_queue,
+ self._result_queue))
+
+ self._traffic_process.start()
+ while self.resource_helper.client_started.value == 0:
+ time.sleep(1)
+ if not self._traffic_process.is_alive():
+ break
+
+ def run_traffic_once(self, traffic_profile):
+ if self.resource_helper.client_started.value == 0:
+ self._init_traffic_process(traffic_profile)
+
+ # continue test - run next iteration
+ LOG.info("Run next iteration ...")
+ self._tasks_queue.put('RUN_TRAFFIC')
+
+ def wait_on_traffic(self):
+ self._tasks_queue.join()
+ result = self._result_queue.get()
+ return result
def _start_server(self):
# we can't share ssh paramiko objects to force new connection
@@ -897,6 +991,13 @@ class SampleVNFTrafficGen(GenericTrafficGen):
def instantiate(self, scenario_cfg, context_cfg):
self.scenario_helper.scenario_cfg = scenario_cfg
+ self.resource_helper.update_from_context(
+ Context.get_context_from_server(self.scenario_helper.nodes[self.name]),
+ self.scenario_helper.nodes[self.name]
+ )
+
+ self.resource_helper.context_cfg = context_cfg
+
self.resource_helper.setup()
# must generate_cfg after DPDK bind because we need port number
self.resource_helper.generate_cfg()
@@ -951,9 +1052,14 @@ class SampleVNFTrafficGen(GenericTrafficGen):
def collect_kpi(self):
# check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
for proc in (self._tg_process, self._traffic_process):
check_if_process_failed(proc)
- result = self.resource_helper.collect_kpi()
+
+ result["collect_stats"] = self.resource_helper.collect_kpi()
LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
return result
@@ -967,12 +1073,12 @@ class SampleVNFTrafficGen(GenericTrafficGen):
if self._traffic_process is not None:
# be proper and try to join before terminating
LOG.debug("joining before terminate %s", self._traffic_process.name)
- self._traffic_process.join(PROCESS_JOIN_TIMEOUT)
+ self._traffic_process.join(constants.PROCESS_JOIN_TIMEOUT)
self._traffic_process.terminate()
if self._tg_process is not None:
# be proper and try to join before terminating
LOG.debug("joining before terminate %s", self._tg_process.name)
- self._tg_process.join(PROCESS_JOIN_TIMEOUT)
+ self._tg_process.join(constants.PROCESS_JOIN_TIMEOUT)
self._tg_process.terminate()
# no terminate children here because we share processes with vnf
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py b/yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py
new file mode 100644
index 000000000..70557b848
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from collections import deque
+
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+
+LOG = logging.getLogger(__name__)
+
+
+class SippSetupEnvHelper(sample_vnf.SetupEnvHelper):
+ APP_NAME = "ImsbenchSipp"
+
+
+class SippResourceHelper(sample_vnf.ClientResourceHelper):
+ pass
+
+
+class SippVnf(sample_vnf.SampleVNFTrafficGen):
+ """
+ This class calls the test script on the TG machine, then fetches the
+ result file from the IMS machine. After that, the result file is
+ processed line by line and the results are pushed to the database.
+ """
+
+ APP_NAME = "ImsbenchSipp"
+ APP_WORD = "ImsbenchSipp"
+ VNF_TYPE = "ImsbenchSipp"
+ HW_OFFLOADING_NFVI_TYPES = {'baremetal', 'sriov'}
+ RESULT = "/tmp/final_result.dat"
+ SIPP_RESULT = "/tmp/sipp_dat_files/final_result.dat"
+ LOCAL_PATH = "/tmp"
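+ # CMD arguments: SIPp IP, media IP, P-CSCF IP and the
+ # semicolon-separated test parameters string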
+ CMD = "./SIPp_benchmark.bash {} {} {} '{}'"
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if resource_helper_type is None:
+ resource_helper_type = SippResourceHelper
+ if setup_env_helper_type is None:
+ setup_env_helper_type = SippSetupEnvHelper
+ super(SippVnf, self).__init__(
+ name, vnfd, setup_env_helper_type, resource_helper_type)
+ self.params = ""
+ self.pcscf_ip = self.vnfd_helper.interfaces[0]["virtual-interface"]\
+ ["peer_intf"]["local_ip"]
+ self.sipp_ip = self.vnfd_helper.interfaces[0]["virtual-interface"]\
+ ["local_ip"]
+ self.media_ip = self.vnfd_helper.interfaces[1]["virtual-interface"]\
+ ["local_ip"]
+ self.queue = ""
+ self.count = 0
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ super(SippVnf, self).instantiate(scenario_cfg, context_cfg)
+ _params = [("port", 5060), ("start_user", 1), ("end_user", 10000),
+ ("init_reg_cps", 50), ("init_reg_max", 5000), ("reg_cps", 50),
+ ("reg_step", 10), ("rereg_cps", 10), ("rereg_step", 5),
+ ("dereg_cps", 10), ("dereg_step", 5), ("msgc_cps", 10),
+ ("msgc_step", 2), ("run_mode", "rtp"), ("call_cps", 10),
+ ("hold_time", 15), ("call_step", 5)]
+
+ self.params = ';'.join([str(scenario_cfg.get("options", {}).get(k, v))
+ for k, v in _params])
+
+ def wait_for_instantiate(self):
+ pass
+
+ def get_result_files(self):
+ self.ssh_helper.get(self.SIPP_RESULT, self.LOCAL_PATH, True)
+
+ # Example of result file:
+ # cat /tmp/final_result.dat
+ # timestamp:1000 reg:100 reg_saps:0
+ # timestamp:2000 reg:100 reg_saps:50
+ # timestamp:3000 reg:100 reg_saps:50
+ # timestamp:4000 reg:100 reg_saps:50
+ # ...
+ # reg_Requested_prereg:50
+ # reg_Effective_prereg:49.49
+ # reg_DOC:0
+ # ...
+ @staticmethod
+ def handle_result_files(filename):
+ with open(filename, 'r') as f:
+ content = f.readlines()
+ result = [{k: round(float(v), 2) for k, v in [i.split(":", 1) for i in x.split()]}
+ for x in content if x]
+ return deque(result)
+
+ def run_traffic(self, traffic_profile):
+ traffic_profile.execute_traffic(self)
+ cmd = self.CMD.format(self.sipp_ip, self.media_ip,
+ self.pcscf_ip, self.params)
+ self.ssh_helper.execute(cmd, None, 3600, False)
+ self.get_result_files()
+ self.queue = self.handle_result_files(self.RESULT)
+
+ def collect_kpi(self):
+ result = {}
+ try:
+ result = self.queue.popleft()
+ except IndexError:
+ pass
+ return result
+
+ @staticmethod
+ def count_line_num(fname):
+ try:
+ with open(fname, 'r') as f:
+ return sum(1 for line in f)
+ except IOError:
+ return 0
+
+ def is_ended(self):
+ """
+ The test ends when all results have been pushed into the database.
+ It does not depend on the "duration" value, so that value should be
+ set large enough to ensure the test finishes before the duration
+ expires.
+ """
+ num_lines = self.count_line_num(self.RESULT)
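+ # self.count tracks polling iterations; once it reaches the number
+ # of result lines, every result has been reported via collect_kpi()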
+ if self.count == num_lines:
+ LOG.debug('TG IS ENDED.....................')
+ self.count = 0
+ return True
+ self.count += 1
+ return False
+
+ def terminate(self):
+ LOG.debug('TERMINATE:.....................')
+ self.resource_helper.terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
index 61c045405..38b00a4b2 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
@@ -12,20 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
+import collections
import csv
import glob
import logging
import os
import shutil
+import subprocess
+
+from oslo_serialization import jsonutils
+
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
-from collections import OrderedDict
-from subprocess import call
-from yardstick.common.utils import makedirs
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
-from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
LOG = logging.getLogger(__name__)
@@ -45,7 +45,8 @@ IXLOAD_CONFIG_TEMPLATE = '''\
},
"remote_server": "%s",
"result_dir": "%s",
- "ixload_cfg": "C:/Results/%s"
+ "ixload_cfg": "C:/Results/%s",
+ "links_param": %s
}'''
IXLOAD_CMD = "{ixloadpy} {http_ixload} {args}"
@@ -61,11 +62,11 @@ class ResourceDataHelper(list):
}
-class IxLoadResourceHelper(ClientResourceHelper):
+class IxLoadResourceHelper(sample_vnf.ClientResourceHelper):
RESULTS_MOUNT = "/mnt/Results"
- KPI_LIST = OrderedDict((
+ KPI_LIST = collections.OrderedDict((
('http_throughput', 'HTTP Total Throughput (Kbps)'),
('simulated_users', 'HTTP Simulated Users'),
('concurrent_connections', 'HTTP Concurrent Connections'),
@@ -75,7 +76,8 @@ class IxLoadResourceHelper(ClientResourceHelper):
def __init__(self, setup_helper):
super(IxLoadResourceHelper, self).__init__(setup_helper)
- self.result = OrderedDict((key, ResourceDataHelper()) for key in self.KPI_LIST)
+ self.result = collections.OrderedDict((key, ResourceDataHelper())
+ for key in self.KPI_LIST)
self.resource_file_name = ''
self.data = None
@@ -91,19 +93,20 @@ class IxLoadResourceHelper(ClientResourceHelper):
self.result[key].append(value)
def setup(self):
- # TODO: fixupt scenario_helper to hanlde ixia
+ # NOTE: fixup scenario_helper to handle ixia
self.resource_file_name = \
- find_relative_file(self.scenario_helper.scenario_cfg['ixia_profile'],
- self.scenario_helper.scenario_cfg["task_path"])
- makedirs(self.RESULTS_MOUNT)
+ utils.find_relative_file(
+ self.scenario_helper.scenario_cfg['ixia_profile'],
+ self.scenario_helper.scenario_cfg["task_path"])
+ utils.makedirs(self.RESULTS_MOUNT)
cmd = MOUNT_CMD.format(self.vnfd_helper.mgmt_interface, self)
LOG.debug(cmd)
if not os.path.ismount(self.RESULTS_MOUNT):
- call(cmd, shell=True)
+ subprocess.call(cmd, shell=True)
shutil.rmtree(self.RESULTS_MOUNT, ignore_errors=True)
- makedirs(self.RESULTS_MOUNT)
+ utils.makedirs(self.RESULTS_MOUNT)
shutil.copy(self.resource_file_name, self.RESULTS_MOUNT)
def make_aggregates(self):
@@ -113,7 +116,7 @@ class IxLoadResourceHelper(ClientResourceHelper):
def collect_kpi(self):
if self.data:
self._result.update(self.data)
- LOG.info("Collect {0} KPIs {1}".format(self.RESOURCE_WORD, self._result))
+ LOG.info("Collect %s KPIs %s", self.RESOURCE_WORD, self._result)
return self._result
def log(self):
@@ -121,7 +124,7 @@ class IxLoadResourceHelper(ClientResourceHelper):
LOG.debug(self.result[key])
-class IxLoadTrafficGen(SampleVNFTrafficGen):
+class IxLoadTrafficGen(sample_vnf.SampleVNFTrafficGen):
def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
if resource_helper_type is None:
@@ -131,6 +134,26 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
resource_helper_type)
self._result = {}
+ def update_gateways(self, links):
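+ """Set each link's IP gateway to the dst_ip of the matching
+ external interface; default to 0.0.0.0 when none is found."""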
+ for name in links:
+ try:
+ gateway = next(intf["virtual-interface"]["dst_ip"] for intf in
+ self.setup_helper.vnfd_helper["vdu"][0][
+ "external-interface"] if
+ intf["virtual-interface"]["vld_id"] == name)
+
+ try:
+ links[name]["ip"]["gateway"] = gateway
+ except KeyError:
+ LOG.error("Invalid traffic profile: No IP section defined for %s", name)
+ raise
+
+ except StopIteration:
+ LOG.debug("Cant find gateway for link %s", name)
+ links[name]["ip"]["gateway"] = "0.0.0.0"
+
+ return links
+
def run_traffic(self, traffic_profile):
ports = []
card = None
@@ -142,11 +165,16 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
for csv_file in glob.iglob(self.ssh_helper.join_bin_path('*.csv')):
os.unlink(csv_file)
+ links_param = self.update_gateways(
+ traffic_profile.get_links_param())
+
ixia_config = self.vnfd_helper.mgmt_interface["tg-config"]
ixload_config = IXLOAD_CONFIG_TEMPLATE % (
ixia_config["ixchassis"], ports, card,
self.vnfd_helper.mgmt_interface["ip"], self.ssh_helper.bin_path,
- os.path.basename(self.resource_helper.resource_file_name))
+ os.path.basename(self.resource_helper.resource_file_name),
+ jsonutils.dumps(links_param)
+ )
http_ixload_path = os.path.join(VNF_PATH, "../../traffic_profile")
@@ -156,7 +184,7 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
args="'%s'" % ixload_config)
LOG.debug(cmd)
- call(cmd, shell=True)
+ subprocess.call(cmd, shell=True)
with open(self.ssh_helper.join_bin_path("ixLoad_HTTP_Client.csv")) as csv_file:
lines = csv_file.readlines()[10:]
@@ -170,9 +198,6 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
self.resource_helper.log()
self.resource_helper.data = self.resource_helper.make_aggregates()
- def instantiate(self, scenario_cfg, context_cfg):
- super(IxLoadTrafficGen, self).instantiate(scenario_cfg, context_cfg)
-
def terminate(self):
- call(["pkill", "-9", "http_ixload.py"])
+ subprocess.call(["pkill", "-9", "http_ixload.py"])
super(IxLoadTrafficGen, self).terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_landslide.py b/yardstick/network_services/vnf_generic/vnf/tg_landslide.py
new file mode 100644
index 000000000..285374a92
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_landslide.py
@@ -0,0 +1,1226 @@
+# Copyright (c) 2018-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import logging
+import requests
+import six
+import time
+
+from yardstick.common import exceptions
+from yardstick.common import utils as common_utils
+from yardstick.common import yaml_loader
+from yardstick.network_services import utils as net_serv_utils
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+
+try:
+ from lsapi import LsApi
+except ImportError:
+ LsApi = common_utils.ErrorClass
+
+LOG = logging.getLogger(__name__)
+
+
+class LandslideTrafficGen(sample_vnf.SampleVNFTrafficGen):
+ APP_NAME = 'LandslideTG'
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if resource_helper_type is None:
+ resource_helper_type = LandslideResourceHelper
+ super(LandslideTrafficGen, self).__init__(name, vnfd,
+ setup_env_helper_type,
+ resource_helper_type)
+
+ self.bin_path = net_serv_utils.get_nsb_option('bin_path')
+ self.name = name
+ self.runs_traffic = True
+ self.traffic_finished = False
+ self.session_profile = None
+
+ def listen_traffic(self, traffic_profile):
+ pass
+
+ def terminate(self):
+ self.resource_helper.disconnect()
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ super(LandslideTrafficGen, self).instantiate(scenario_cfg, context_cfg)
+ self.resource_helper.connect()
+
+ # Create test servers
+ test_servers = [x['test_server'] for x in self.vnfd_helper['config']]
+ self.resource_helper.create_test_servers(test_servers)
+
+ # Create SUTs
+ for x in self.vnfd_helper['config']:
+ self.resource_helper.create_suts(x['suts'])
+
+ # Fill in test session based on session profile and test case options
+ self._load_session_profile()
+
+ def run_traffic(self, traffic_profile):
+ self.resource_helper.abort_running_tests()
+ # Update DMF profile with related test case options
+ traffic_profile.update_dmf(self.scenario_helper.all_options)
+ # Create DMF in test user library
+ self.resource_helper.create_dmf(traffic_profile.dmf_config)
+ # Create/update test session in test user library
+ self.resource_helper.create_test_session(self.session_profile)
+ # Start test session
+ self.resource_helper.create_running_tests(self.session_profile['name'])
+
+ def collect_kpi(self):
+ return self.resource_helper.collect_kpi()
+
+ def wait_for_instantiate(self):
+ pass
+
+ @staticmethod
+ def _update_session_suts(suts, testcase):
+ """ Create SUT entry. Update related EPC block in session profile. """
+ for sut in suts:
+ # Update session profile EPC element with SUT info from pod file
+ tc_role = testcase['parameters'].get(sut['role'])
+ if tc_role:
+ _param = {}
+ if tc_role['class'] == 'Sut':
+ _param['name'] = sut['name']
+ elif tc_role['class'] == 'TestNode':
+ _param.update({x: sut[x] for x in {'ip', 'phy', 'nextHop'}
+ if x in sut and sut[x]})
+ testcase['parameters'][sut['role']].update(_param)
+ else:
+ LOG.info('Unexpected SUT role in pod file: "%s".', sut['role'])
+ return testcase
+
+ def _update_session_test_servers(self, test_server, _tsgroup_index):
+ """ Update tsId, reservations, pre-resolved ARP in session profile """
+ # Update test server name
+ test_groups = self.session_profile['tsGroups']
+ test_groups[_tsgroup_index]['tsId'] = test_server['name']
+
+ # Update preResolvedArpAddress
+ arp_key = 'preResolvedArpAddress'
+ _preresolved_arp = test_server.get(arp_key) # list of dicts
+ if _preresolved_arp:
+ test_groups[_tsgroup_index][arp_key] = _preresolved_arp
+
+ # Update reservations
+ if 'phySubnets' in test_server:
+ reservation = {'tsId': test_server['name'],
+ 'tsIndex': _tsgroup_index,
+ 'tsName': test_server['name'],
+ 'phySubnets': test_server['phySubnets']}
+ if 'reservations' in self.session_profile:
+ self.session_profile['reservations'].append(reservation)
+ else:
+ self.session_profile['reservePorts'] = 'true'
+ self.session_profile['reservations'] = [reservation]
+
+ def _update_session_library_name(self, test_session):
+ """Update DMF library name in session profile"""
+ for _ts_group in test_session['tsGroups']:
+ for _tc in _ts_group['testCases']:
+ try:
+ for _mainflow in _tc['parameters']['Dmf']['mainflows']:
+ _mainflow['library'] = \
+ self.vnfd_helper.mgmt_interface['user']
+ except KeyError:
+ pass
+
+ @staticmethod
+ def _update_session_tc_params(tc_options, testcase):
+ for _param_key in tc_options:
+ if _param_key == 'AssociatedPhys':
+ testcase[_param_key] = tc_options[_param_key]
+ continue
+ testcase['parameters'][_param_key] = tc_options[_param_key]
+ return testcase
+
+ def _load_session_profile(self):
+
+ with common_utils.open_relative_file(
+ self.scenario_helper.scenario_cfg['session_profile'],
+ self.scenario_helper.task_path) as stream:
+ self.session_profile = yaml_loader.yaml_load(stream)
+
+ # Raise an exception if the number of entries differs in these files
+ _config_files = ['pod file', 'session_profile file', 'test_case file']
+ # Collect all test cases from all tsGroups of session profile
+ session_test_cases = [xx for x in self.session_profile['tsGroups']
+ for xx in x['testCases']]
+ # Count the list elements in each structure
+ _config_files_blocks_num = [
+ len(x) for x in
+ (self.vnfd_helper['config'], # test_servers and suts info
+ session_test_cases,
+ self.scenario_helper.all_options['test_cases'])] # test case file
+
+ if len(set(_config_files_blocks_num)) != 1:
+ raise RuntimeError('Unequal number of elements. {}'.format(
+ dict(six.moves.zip_longest(_config_files,
+ _config_files_blocks_num))))
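+ # E.g. (illustrative): 2 pod entries, 3 session profile test cases
+ # and 2 test case options would raise RuntimeError with
+ # {'pod file': 2, 'session_profile file': 3, 'test_case file': 2}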
+
+ ts_names = set()
+ _tsgroup_idx = -1
+ _testcase_idx = 0
+
+ # Iterate over data structures to overwrite session profile defaults
+ # _config: single list element holding test servers and SUTs info
+ # _tc_options: single test case parameters
+ for _config, tc_options in zip(
+ self.vnfd_helper['config'], # test servers and SUTS
+ self.scenario_helper.all_options['test_cases']): # testcase
+
+ _ts_config = _config['test_server']
+
+ # Calculate test group/test case indexes based on test server name
+ if _ts_config['name'] in ts_names:
+ _testcase_idx += 1
+ else:
+ _tsgroup_idx += 1
+ _testcase_idx = 0
+
+ _testcase = \
+ self.session_profile['tsGroups'][_tsgroup_idx]['testCases'][
+ _testcase_idx]
+
+ if _testcase['type'] != _ts_config['role']:
+ raise RuntimeError(
+ 'Test type mismatch in TC#{} of test server {}'.format(
+ _testcase_idx, _ts_config['name']))
+
+ # Fill session profile with test servers parameters
+ if _ts_config['name'] not in ts_names:
+ self._update_session_test_servers(_ts_config, _tsgroup_idx)
+ ts_names.add(_ts_config['name'])
+
+ # Fill session profile with suts parameters
+ self.session_profile['tsGroups'][_tsgroup_idx]['testCases'][
+ _testcase_idx].update(
+ self._update_session_suts(_config['suts'], _testcase))
+
+ # Update test case parameters
+ self.session_profile['tsGroups'][_tsgroup_idx]['testCases'][
+ _testcase_idx].update(
+ self._update_session_tc_params(tc_options, _testcase))
+
+ self._update_session_library_name(self.session_profile)
+
+
+class LandslideResourceHelper(sample_vnf.ClientResourceHelper):
+ """Landslide TG helper class"""
+
+ REST_STATUS_CODES = {'OK': 200, 'CREATED': 201, 'NO CHANGE': 409}
+ REST_API_CODES = {'NOT MODIFIED': 500810}
+
+ def __init__(self, setup_helper):
+ super(LandslideResourceHelper, self).__init__(setup_helper)
+ self._result = {}
+ self.vnfd_helper = setup_helper.vnfd_helper
+ self.scenario_helper = setup_helper.scenario_helper
+
+ # TAS Manager config initialization
+ self._url = None
+ self._user_id = None
+ self.session = None
+ self.license_data = {}
+
+ # TCL session initialization
+ self._tcl = LandslideTclClient(LsTclHandler(), self)
+
+ self.session = requests.Session()
+ self.running_tests_uri = 'runningTests'
+ self.test_session_uri = 'testSessions'
+ self.test_serv_uri = 'testServers'
+ self.suts_uri = 'suts'
+ self.users_uri = 'users'
+ self.user_lib_uri = None
+ self.run_id = None
+
+ def abort_running_tests(self, timeout=60, delay=5):
+ """ Abort running test sessions, if any """
+ _start_time = time.time()
+ while time.time() < _start_time + timeout:
+ run_tests_states = {x['id']: x['testStateOrStep']
+ for x in self.get_running_tests()}
+ if not set(run_tests_states.values()).difference(
+ {'COMPLETE', 'COMPLETE_ERROR'}):
+ break
+ else:
+ for _id, _state in run_tests_states.items():
+ if 'COMPLETE' not in _state:
+ self.stop_running_tests(running_test_id=_id,
+ force=True)
+ time.sleep(delay)
+ else:
+ raise RuntimeError(
+ 'Some test runs were not stopped within {} seconds'.format(
+ timeout))
+
+ def _build_url(self, resource, action=None):
+ """ Build URL string
+
+ :param resource: REST API resource name
+ :type resource: str
+ :param action: actions name and value
+ :type action: dict('name': <str>, 'value': <str>)
+ :returns str: full REST API request URL with optional action info
+ """
+ # Action is optional and accepted only in presence of resource param
+ if action and not resource:
+ raise ValueError("Resource name not provided")
+ # Concatenate actions
+ _action = ''.join(['?{}={}'.format(k, v) for k, v in
+ action.items()]) if action else ''
+
+ return ''.join([self._url, resource, _action])
+
+ def get_response_params(self, method, resource, params=None):
+ """ Retrieve params from JSON response of specific resource URL
+
+ :param method: one of supported REST API methods
+ :type method: str
+ :param resource: URI, requested resource name
+ :type resource: str
+ :param params: attributes to be found in JSON response
+ :type params: list(str)
+ """
+ _res = []
+ params = params if params else []
+ response = self.exec_rest_request(method, resource)
+ # Get substring between last slash sign and question mark (if any)
+ url_last_part = resource.rsplit('/', 1)[-1].rsplit('?', 1)[0]
+ _response_json = response.json()
+ # If the top-level dict key doesn't match the last URL part, the
+ # response holds a single dict; wrap it in a list for uniform handling
+ k, v = list(_response_json.items())[0]
+ if k != url_last_part:
+ v = [v] # v: list(dict(str: str))
+ # Extract params, or whole list of dicts (without top level key)
+ for x in v:
+ _res.append({param: x[param] for param in params} if params else x)
+ return _res
+
+ def _create_user(self, auth, level=1):
+ """ Create new user
+
+ :param auth: data to create user account on REST server
+ :type auth: dict
+ :param level: Landslide user permissions level
+ :type level: int
+ :returns int: user id
+ """
+ # Set the expiration date two years after the account creation date
+ _exp_date = time.strftime(
+ '{}/%m/%d %H:%M %Z'.format(time.gmtime().tm_year + 2))
+ _username = auth['user']
+ _fields = {"contactInformation": "", "expiresOn": _exp_date,
+ "fullName": "Test User",
+ "isActive": "true", "level": level,
+ "password": auth['password'],
+ "username": _username}
+ _response = self.exec_rest_request('post', self.users_uri,
+ json_data=_fields, raise_exc=False)
+ _resp_json = _response.json()
+ if _response.status_code == self.REST_STATUS_CODES['CREATED']:
+ # New user created
+ _id = _resp_json['id']
+ LOG.info("New user created: username='%s', id='%s'", _username,
+ _id)
+ elif _resp_json.get('apiCode') == self.REST_API_CODES['NOT MODIFIED']:
+ # User already exists
+ LOG.info("Account '%s' already exists.", _username)
+ # Get user id
+ _id = self._modify_user(_username, {"isActive": "true"})['id']
+ else:
+ raise exceptions.RestApiError(
+ 'Error during new user "{}" creation'.format(_username))
+ return _id
+
+ def _modify_user(self, username, fields):
+ """ Modify information about existing user
+
+ :param username: user name of account to be modified
+ :type username: str
+ :param fields: data to modify user account on REST server
+ :type fields: dict
+ :returns dict: user info
+ """
+ _response = self.exec_rest_request('post', self.users_uri,
+ action={'username': username},
+ json_data=fields, raise_exc=False)
+ if _response.status_code == self.REST_STATUS_CODES['OK']:
+ _response = _response.json()
+ else:
+ raise exceptions.RestApiError(
+ 'Error during user "{}" data update: {}'.format(
+ username,
+ _response.status_code))
+ LOG.info("User account '%s' modified: '%s'", username, _response)
+ return _response
+
+ def _delete_user(self, username):
+ """ Delete user account
+
+ :param username: username field
+ :type username: str
+ """
+ self.exec_rest_request('delete', self.users_uri,
+ action={'username': username})
+
+ def _get_users(self, username=None):
+ """ Get user records from REST server
+
+ :param username: username field
+ :type username: None|str
+ :returns list(dict): empty list, or user record, or list of all users
+ """
+ _response = self.get_response_params('get', self.users_uri)
+ _res = [u for u in _response if
+ u['username'] == username] if username else _response
+ return _res
+
+ def exec_rest_request(self, method, resource, action=None, json_data=None,
+ logs=True, raise_exc=True):
+ """ Execute REST API request, return response object
+
+ :param method: one of supported requests ('post', 'get', 'delete')
+ :type method: str
+ :param resource: URL of resource
+ :type resource: str
+ :param action: data used to provide URI located after question mark
+ :type action: dict
+ :param json_data: mandatory only for 'post' method
+ :type json_data: dict
+ :param logs: debug logs display flag
+ :type logs: bool
+ :param raise_exc: if True, raise exception on REST API call error
+ :type raise_exc: bool
+ :returns requests.Response(): REST API call response object
+ """
+ json_data = json_data if json_data else {}
+ action = action if action else {}
+ _method = method.upper()
+ method = method.lower()
+ if method not in ('post', 'get', 'delete'):
+ raise ValueError("Method '{}' not supported".format(_method))
+
+ if method == 'post' and not action:
+ if not (json_data and isinstance(json_data, collections.Mapping)):
+ raise ValueError(
+ 'JSON data missing in {} request'.format(_method))
+
+ r = getattr(self.session, method)(self._build_url(resource, action),
+ json=json_data)
+ if raise_exc and not r.ok:
+ msg = 'Failed to "{}" resource "{}". Reason: "{}"'.format(
+ method, self._build_url(resource, action), r.reason)
+ raise exceptions.RestApiError(msg)
+
+ if logs:
+ LOG.debug("RC: %s | Request: %s | URL: %s", r.status_code, method,
+ r.request.url)
+ LOG.debug("Response: %s", r.json())
+ return r
+
+ def connect(self):
+ """Connect to RESTful server using test user account"""
+ tas_info = self.vnfd_helper['mgmt-interface']
+ # Supported REST Server ports: HTTP - 8080, HTTPS - 8181
+ _port = '8080' if tas_info['proto'] == 'http' else '8181'
+ tas_info.update({'port': _port})
+ self._url = '{proto}://{ip}:{port}/api/'.format(**tas_info)
+ self.session.headers.update({'Accept': 'application/json',
+ 'Content-type': 'application/json'})
+ # Login with super user to create test user
+ self.session.auth = (
+ tas_info['super-user'], tas_info['super-user-password'])
+ LOG.info("Connect using superuser: server='%s'", self._url)
+ auth = {x: tas_info[x] for x in ('user', 'password')}
+ self._user_id = self._create_user(auth)
+ # Login with test user
+ self.session.auth = auth['user'], auth['password']
+ # Test user validity
+ self.exec_rest_request('get', '')
+
+ self.user_lib_uri = 'libraries/{{}}/{}'.format(self.test_session_uri)
+ LOG.info("Login with test user: server='%s'", self._url)
+ # Read existing license
+ self.license_data['lic_id'] = tas_info['license']
+
+ # Tcl client init
+ self._tcl.connect(tas_info['ip'], *self.session.auth)
+
+ return self.session
+
+ def disconnect(self):
+ self.session = None
+ self._tcl.disconnect()
+
+ def terminate(self):
+ self._terminated.value = 1
+
+ def create_dmf(self, dmf):
+ if isinstance(dmf, dict):
+ dmf = [dmf]
+ for _dmf in dmf:
+ # Update DMF library name in traffic profile
+ _dmf['dmf'].update(
+ {'library': self.vnfd_helper.mgmt_interface['user']})
+ # Create DMF on Landslide server
+ self._tcl.create_dmf(_dmf)
+
+ def delete_dmf(self, dmf):
+ if isinstance(dmf, list):
+ for _dmf in dmf:
+ self._tcl.delete_dmf(_dmf)
+ else:
+ self._tcl.delete_dmf(dmf)
+
+ def create_suts(self, suts):
+ # Keep only supported keys in suts object
+ for _sut in suts:
+ sut_entry = {k: v for k, v in _sut.items()
+ if k not in {'phy', 'nextHop', 'role'}}
+ _response = self.exec_rest_request(
+ 'post', self.suts_uri, json_data=sut_entry,
+ logs=False, raise_exc=False)
+ if _response.status_code != self.REST_STATUS_CODES['CREATED']:
+ LOG.info(_response.reason) # Failed to create
+ _name = sut_entry.pop('name')
+ # Modify existing SUT
+ self.configure_sut(sut_name=_name, json_data=sut_entry)
+ else:
+ LOG.info("SUT created: %s", sut_entry)
+
+ def get_suts(self, suts_id=None):
+ if suts_id:
+ _suts = self.exec_rest_request(
+ 'get', '{}/{}'.format(self.suts_uri, suts_id)).json()
+ else:
+ _suts = self.get_response_params('get', self.suts_uri)
+
+ return _suts
+
+ def configure_sut(self, sut_name, json_data):
+ """ Modify information of specific SUTs
+
+ :param sut_name: name of existing SUT
+ :type sut_name: str
+ :param json_data: SUT settings
+ :type json_data: dict()
+ """
+ LOG.info("Modifying SUT information...")
+ _response = self.exec_rest_request('post',
+ self.suts_uri,
+ action={'name': sut_name},
+ json_data=json_data,
+ raise_exc=False)
+ if _response.status_code not in {self.REST_STATUS_CODES[x] for x in
+ {'OK', 'NO CHANGE'}}:
+ raise exceptions.RestApiError(_response.reason)
+
+ LOG.info("Modified SUT: %s", sut_name)
+
+ def delete_suts(self, suts_ids=None):
+ if not suts_ids:
+ _curr_suts = self.get_response_params('get', self.suts_uri)
+ suts_ids = [x['id'] for x in _curr_suts]
+ LOG.info("Deleting SUTs with following IDs: %s", suts_ids)
+ for _id in suts_ids:
+ self.exec_rest_request('delete',
+ '{}/{}'.format(self.suts_uri, _id))
+ LOG.info("\tDone for SUT id: %s", _id)
+
+ def _check_test_servers_state(self, test_servers_ids=None, delay=10,
+ timeout=300):
+ LOG.info("Waiting for related test servers state change to READY...")
+ # Wait on state change
+ _start_time = time.time()
+ while time.time() - _start_time < timeout:
+ ts_ids_not_ready = {x['id'] for x in
+ self.get_test_servers(test_servers_ids)
+ if x['state'] != 'READY'}
+ if ts_ids_not_ready == set():
+ break
+ time.sleep(delay)
+ else:
+ raise RuntimeError(
+ 'Test servers not in READY state after {} seconds.'.format(
+ timeout))
+
+ def create_test_servers(self, test_servers):
+ """ Create test servers
+
+ :param test_servers: input data for test servers creation
+ mandatory fields: managementIp
+ optional fields: name
+ :type test_servers: list(dict)
+ """
+ _ts_ids = []
+ for _ts in test_servers:
+ _msg = 'Created test server "%(name)s"'
+ _ts_ids.append(self._tcl.create_test_server(_ts))
+ if _ts.get('thread_model'):
+ _msg += ' in mode: "%(thread_model)s"'
+ LOG.info(_msg, _ts)
+
+ self._check_test_servers_state(_ts_ids)
+
+ def get_test_servers(self, test_server_ids=None):
+ if not test_server_ids: # Get all test servers info
+ _test_servers = self.exec_rest_request(
+ 'get', self.test_serv_uri).json()[self.test_serv_uri]
+ LOG.info("Current test servers configuration: %s", _test_servers)
+ return _test_servers
+
+ _test_servers = []
+ for _id in test_server_ids:
+ _test_servers.append(self.exec_rest_request(
+ 'get', '{}/{}'.format(self.test_serv_uri, _id)).json())
+ LOG.info("Current test servers configuration: %s", _test_servers)
+ return _test_servers
+
+ def configure_test_servers(self, action, json_data=None,
+ test_server_ids=None):
+ if not test_server_ids:
+ test_server_ids = [x['id'] for x in self.get_test_servers()]
+ elif isinstance(test_server_ids, int):
+ test_server_ids = [test_server_ids]
+ for _id in test_server_ids:
+ self.exec_rest_request('post',
+ '{}/{}'.format(self.test_serv_uri, _id),
+ action=action, json_data=json_data)
+ LOG.info("Test server (id: %s) configuration done: %s", _id,
+ action)
+ return test_server_ids
+
+ def delete_test_servers(self, test_servers_ids=None):
+ # Delete test servers
+ for _ts in self.get_test_servers(test_servers_ids):
+ self.exec_rest_request('delete', '{}/{}'.format(self.test_serv_uri,
+ _ts['id']))
+ LOG.info("Deleted test server: %s", _ts['name'])
+
+ def create_test_session(self, test_session):
+ # Use tcl client to create session
+ test_session['library'] = self._user_id
+
+ # If no traffic duration set in test case, use predefined default value
+ # in session profile
+ test_session['duration'] = self.scenario_helper.all_options.get(
+ 'traffic_duration',
+ test_session['duration'])
+
+ LOG.debug("Creating session='%s'", test_session['name'])
+ self._tcl.create_test_session(test_session)
+
+ def get_test_session(self, test_session_name=None):
+ if test_session_name:
+ uri = 'libraries/{}/{}/{}'.format(self._user_id,
+ self.test_session_uri,
+ test_session_name)
+ else:
+ uri = self.user_lib_uri.format(self._user_id)
+ _test_sessions = self.exec_rest_request('get', uri).json()
+ return _test_sessions
+
+ def configure_test_session(self, template_name, test_session):
+ # Override specified test session parameters
+ LOG.info('Update test session parameters: %s', test_session['name'])
+ test_session.update({'library': self._user_id})
+ return self.exec_rest_request(
+ method='post',
+ action={'action': 'overrideAndSaveAs'},
+ json_data=test_session,
+ resource='{}/{}'.format(self.user_lib_uri.format(self._user_id),
+ template_name))
+
+ def delete_test_session(self, test_session):
+ return self.exec_rest_request('delete', '{}/{}'.format(
+ self.user_lib_uri.format(self._user_id), test_session))
+
+ def create_running_tests(self, test_session_name):
+ r = self.exec_rest_request('post',
+ self.running_tests_uri,
+ json_data={'library': self._user_id,
+ 'name': test_session_name})
+ if r.status_code != self.REST_STATUS_CODES['CREATED']:
+ raise exceptions.RestApiError('Failed to start test session.')
+ self.run_id = r.json()['id']
+
+ def get_running_tests(self, running_test_id=None):
+ """Get JSON structure of specified running test entity
+
+ :param running_test_id: ID of created running test entity
+ :type running_test_id: int
+ :returns list: running tests entity
+ """
+ if not running_test_id:
+ running_test_id = ''
+ _res_name = '{}/{}'.format(self.running_tests_uri, running_test_id)
+ _res = self.exec_rest_request('get', _res_name, logs=False).json()
+ # If no run_id specified, skip top level key in response dict.
+ # Else return JSON as list
+ return _res.get('runningTests', [_res])
+
+ def delete_running_tests(self, running_test_id=None):
+ if not running_test_id:
+ running_test_id = ''
+ _res_name = '{}/{}'.format(self.running_tests_uri, running_test_id)
+ self.get_response_params('delete', _res_name)
+ LOG.info("Deleted running test with id: %s", running_test_id)
+
+ def _running_tests_action(self, running_test_id, action, json_data=None):
+ if not json_data:
+ json_data = {}
+ # Supported actions:
+ # 'stop', 'abort', 'continue', 'update', 'sendTcCommand', 'sendOdc'
+ _res_name = '{}/{}'.format(self.running_tests_uri, running_test_id)
+ self.exec_rest_request('post', _res_name, {'action': action},
+ json_data)
+ LOG.debug("Executed action: '%s' on running test id: %s", action,
+ running_test_id)
+
+ def stop_running_tests(self, running_test_id, json_data=None, force=False):
+ _action = 'abort' if force else 'stop'
+ self._running_tests_action(running_test_id, _action,
+ json_data=json_data)
+ LOG.info('Performed action: "%s" to test run with id: %s', _action,
+ running_test_id)
+
+ def check_running_test_state(self, run_id):
+ r = self.exec_rest_request('get',
+ '{}/{}'.format(self.running_tests_uri,
+ run_id))
+ return r.json().get("testStateOrStep")
+
+ def get_running_tests_results(self, run_id):
+ _res = self.exec_rest_request(
+ 'get',
+ '{}/{}/{}'.format(self.running_tests_uri,
+ run_id,
+ 'measurements')).json()
+ return _res
+
+ def _write_results(self, results):
+ # Avoid None value at test session start
+ _elapsed_time = results['elapsedTime'] if results['elapsedTime'] else 0
+
+ _res_tabs = results.get('tabs')
+ # Avoid parsing 'tab' dict key initially (missing or empty)
+ if not _res_tabs:
+ return
+
+ # Flatten nested dict holding Landslide KPIs of current test run
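+ # Illustrative example (hypothetical KPI): a tab 'L5-7' with KPI
+ # 'Average Latency' of '1,500 us' becomes
+ # {'L5-7::Average Latency': 1500.0}; values that can't be cast to
+ # float (e.g. datetimes) are skipped.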
+ flat_kpis_dict = {}
+ for _tab, _kpis in six.iteritems(_res_tabs):
+ for _kpi, _value in six.iteritems(_kpis):
+ # Combine table name and KPI name using delimiter "::"
+ _key = '::'.join([_tab, _kpi])
+ try:
+ # Cast value from str to float
+ # Remove comma and/or measure units, e.g. "us"
+ flat_kpis_dict[_key] = float(
+ _value.split(' ')[0].replace(',', ''))
+ except ValueError: # E.g. if KPI represents datetime
+ pass
+ LOG.info("Polling test results of test run id: %s. Elapsed time: %s "
+ "seconds", self.run_id, _elapsed_time)
+ return flat_kpis_dict
+
+ def collect_kpi(self):
+ if 'COMPLETE' in self.check_running_test_state(self.run_id):
+ self._result.update({'done': True})
+ return self._result
+ _res = self.get_running_tests_results(self.run_id)
+ _kpis = self._write_results(_res)
+ if _kpis:
+ _kpis.update({'run_id': int(self.run_id)})
+ _kpis.update({'iteration': _res['iteration']})
+ self._result.update(_kpis)
+ return self._result
+
+
+class LandslideTclClient(object):
+ """Landslide TG TCL client class"""
+
+ DEFAULT_TEST_NODE = {
+ 'ethStatsEnabled': True,
+ 'forcedEthInterface': '',
+ 'innerVlanId': 0,
+ 'ip': '',
+ 'mac': '',
+ 'mtu': 1500,
+ 'nextHop': '',
+ 'numLinksOrNodes': 1,
+ 'numVlan': 1,
+ 'phy': '',
+ 'uniqueVlanAddr': False,
+ 'vlanDynamic': 0,
+ 'vlanId': 0,
+ 'vlanUserPriority': 0,
+ 'vlanTagType': 0
+ }
+
+ TEST_NODE_CMD = \
+ 'ls::create -TestNode-{} -under $p_ -Type "eth"' \
+ ' -Phy "{phy}" -Ip "{ip}" -NumLinksOrNodes {numLinksOrNodes}' \
+ ' -NextHop "{nextHop}" -Mac "{mac}" -MTU {mtu}' \
+ ' -ForcedEthInterface "{forcedEthInterface}"' \
+ ' -EthStatsEnabled {ethStatsEnabled}' \
+ ' -VlanId {vlanId} -VlanUserPriority {vlanUserPriority}' \
+ ' -NumVlan {numVlan} -UniqueVlanAddr {uniqueVlanAddr}' \
+ ';'
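+
+ # Illustrative rendering (hypothetical values): with name='Gn',
+ # phy='eth1', ip='10.0.0.1' and the DEFAULT_TEST_NODE values, the
+ # command begins:
+ # ls::create -TestNode-Gn -under $p_ -Type "eth" -Phy "eth1"
+ # -Ip "10.0.0.1" -NumLinksOrNodes 1 ...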
+
+ def __init__(self, tcl_handler, ts_context):
+ self.tcl_server_ip = None
+ self._user = None
+ self._library_id = None
+ self._basic_library_id = None
+ self._tcl = tcl_handler
+ self._ts_context = ts_context
+ self.ts_ids = set()
+
+ # Test types names expected in session profile, test case and pod files
+ self._tc_types = {"SGW_Nodal", "SGW_Node", "MME_Nodal", "PGW_Node",
+ "PCRF_Node"}
+
+ self._class_param_config_handler = {
+ "Array": self._configure_array_param,
+ "TestNode": self._configure_test_node_param,
+ "Sut": self._configure_sut_param,
+ "Dmf": self._configure_dmf_param
+ }
+
+ def connect(self, tcl_server_ip, username, password):
+ """ Connect to TCL server with username and password
+
+ :param tcl_server_ip: TCL server IP address
+ :type tcl_server_ip: str
+ :param username: existing username on TCL server
+ :type username: str
+ :param password: password related to username on TCL server
+ :type password: str
+ """
+ LOG.info("connect: server='%s' user='%s'", tcl_server_ip, username)
+ res = self._tcl.execute(
+ "ls::login {} {} {}".format(tcl_server_ip, username, password))
+ if 'java0x' not in res: # handle assignment reflects login success
+ raise exceptions.LandslideTclException(
+ "connect: login failed ='{}'.".format(res))
+ self._library_id = self._tcl.execute(
+ "ls::get [ls::query LibraryInfo -userLibraryName {}] -Id".format(
+ username))
+ self._basic_library_id = self._get_library_id('Basic')
+ self.tcl_server_ip = tcl_server_ip
+ self._user = username
+ LOG.debug("connect: user='%s' me='%s' basic='%s'", self._user,
+ self._library_id,
+ self._basic_library_id)
+
+ def disconnect(self):
+ """ Disconnect from TCL server. Drop TCL connection configuration """
+ LOG.info("disconnect: server='%s' user='%s'",
+ self.tcl_server_ip, self._user)
+ self._tcl.execute("ls::logout")
+ self.tcl_server_ip = None
+ self._user = None
+ self._library_id = None
+ self._basic_library_id = None
+
+ def _add_test_server(self, name, ip):
+ try:
+ # Check if a test server with the given name already exists
+ ts_id = int(self.resolve_test_server_name(name))
+ except ValueError:
+ # Such test server does not exist. Attempt to create it
+ ts_id = self._tcl.execute(
+ 'ls::perform AddTs -Name "{}" -Ip "{}"'.format(name, ip))
+ try:
+ int(ts_id)
+ except ValueError:
+ # Failed to create test server, e.g. limit reached
+ raise RuntimeError(
+ 'Failed to create test server: "{}". {}'.format(name,
+ ts_id))
+ return ts_id
+
+ def _update_license(self, name):
+ """ Setup/update test server license
+
+ :param name: test server name
+ :type name: str
+ """
+ # Retrieve current TsInfo configuration, result stored in handle "ts"
+ self._tcl.execute(
+ 'set ts [ls::retrieve TsInfo -Name "{}"]'.format(name))
+
+ # If the license ID differs from the current one, update the test server
+ _curr_lic_id = self._tcl.execute('ls::get $ts -RequestedLicense')
+ if _curr_lic_id != self._ts_context.license_data['lic_id']:
+ self._tcl.execute('ls::config $ts -RequestedLicense {}'.format(
+ self._ts_context.license_data['lic_id']))
+ self._tcl.execute('ls::perform ModifyTs $ts')
+
+ def _set_thread_model(self, name, thread_model):
+ # Retrieve test server configuration, store it in handle "tsc"
+ _cfguser_password = self._ts_context.vnfd_helper['mgmt-interface'][
+ 'cfguser_password']
+ self._tcl.execute(
+ 'set tsc [ls::perform RetrieveTsConfiguration '
+ '-name "{}" {}]'.format(name, _cfguser_password))
+ # Configure ThreadModel, if it differs from current one
+ thread_model_map = {'Legacy': 'V0',
+ 'Max': 'V1',
+ 'Fireball': 'V1_FB3'}
+ _model = thread_model_map[thread_model]
+ _curr_model = self._tcl.execute('ls::get $tsc -ThreadModel')
+ if _curr_model != _model:
+ self._tcl.execute(
+ 'ls::config $tsc -ThreadModel "{}"'.format(_model))
+ self._tcl.execute(
+ 'ls::perform ApplyTsConfiguration $tsc {}'.format(
+ _cfguser_password))
+
+ def create_test_server(self, test_server):
+ _ts_thread_model = test_server.get('thread_model')
+ _ts_name = test_server['name']
+
+ ts_id = self._add_test_server(_ts_name, test_server['ip'])
+
+ self._update_license(_ts_name)
+
+ # Configure the thread model only if it is defined
+ if _ts_thread_model:
+ self._set_thread_model(_ts_name, _ts_thread_model)
+
+ return ts_id
+
+ def create_test_session(self, test_session):
+ """ Create, configure and save Landslide test session object.
+
+ :param test_session: Landslide TestSession object
+ :type test_session: dict
+ """
+ LOG.info("create_test_session: name='%s'", test_session['name'])
+ self._tcl.execute('set test_ [ls::create TestSession]')
+ self._tcl.execute('ls::config $test_ -Library {} -Name "{}"'.format(
+ self._library_id, test_session['name']))
+ self._tcl.execute('ls::config $test_ -Description "{}"'.format(
+ test_session['description']))
+ if 'keywords' in test_session:
+ self._tcl.execute('ls::config $test_ -Keywords "{}"'.format(
+ test_session['keywords']))
+ if 'duration' in test_session:
+ self._tcl.execute('ls::config $test_ -Duration "{}"'.format(
+ test_session['duration']))
+ if 'iterations' in test_session:
+ self._tcl.execute('ls::config $test_ -Iterations "{}"'.format(
+ test_session['iterations']))
+ if test_session.get('reservePorts') == 'true':
+ self._tcl.execute('ls::config $test_ -Reserve Ports')
+
+ if 'reservations' in test_session:
+ for _reservation in test_session['reservations']:
+ self._configure_reservation(_reservation)
+
+ if 'reportOptions' in test_session:
+ self._configure_report_options(test_session['reportOptions'])
+
+ for _index, _group in enumerate(test_session['tsGroups']):
+ self._configure_ts_group(_group, _index)
+
+ self._save_test_session()
+
+ def create_dmf(self, dmf):
+ """ Create, configure and save Landslide Data Message Flow object.
+
+ :param dmf: Landslide Data Message Flow object
+ :type dmf: dict
+ """
+ self._tcl.execute('set dmf_ [ls::create Dmf]')
+ _lib_id = self._get_library_id(dmf['dmf']['library'])
+ self._tcl.execute('ls::config $dmf_ -Library {} -Name "{}"'.format(
+ _lib_id,
+ dmf['dmf']['name']))
+ for _param_key in dmf:
+ if _param_key == 'dmf':
+ continue
+ _param_value = dmf[_param_key]
+ if isinstance(_param_value, dict):
+ # Configure complex parameter
+ _tcl_cmd = 'ls::config $dmf_'
+ for _sub_param_key in _param_value:
+ _sub_param_value = _param_value[_sub_param_key]
+ if isinstance(_sub_param_value, str):
+ _tcl_cmd += ' -{} "{}"'.format(_sub_param_key,
+ _sub_param_value)
+ else:
+ _tcl_cmd += ' -{} {}'.format(_sub_param_key,
+ _sub_param_value)
+
+ self._tcl.execute(_tcl_cmd)
+ else:
+ # Configure simple parameter
+ if isinstance(_param_value, str):
+ self._tcl.execute(
+ 'ls::config $dmf_ -{} "{}"'.format(_param_key,
+ _param_value))
+ else:
+ self._tcl.execute(
+ 'ls::config $dmf_ -{} {}'.format(_param_key,
+ _param_value))
+ self._save_dmf()
+
+ def configure_dmf(self, dmf):
+ # Use create to reconfigure and overwrite existing dmf
+ self.create_dmf(dmf)
+
+ def delete_dmf(self, dmf):
+ raise NotImplementedError
+
+ def _save_dmf(self):
+ # Call 'Validate' to set default values for missing parameters
+ res = self._tcl.execute('ls::perform Validate -Dmf $dmf_')
+ if res == 'Invalid':
+ res = self._tcl.execute('ls::get $dmf_ -ErrorsAndWarnings')
+ LOG.error("_save_dmf: %s", res)
+ raise exceptions.LandslideTclException("_save_dmf: {}".format(res))
+ else:
+ res = self._tcl.execute('ls::save $dmf_ -overwrite')
+ LOG.debug("_save_dmf: result (%s)", res)
+
+ def _configure_report_options(self, options):
+ for _option_key in options:
+ _option_value = options[_option_key]
+ if _option_key == 'format':
+ _format = 0
+ if _option_value == 'CSV':
+ _format = 1
+ self._tcl.execute(
+ 'ls::config $test_.ReportOptions -Format {} '
+ '-Ts -3 -Tc -3'.format(_format))
+ else:
+ self._tcl.execute(
+ 'ls::config $test_.ReportOptions -{} {}'.format(
+ _option_key,
+ _option_value))
+
+ def _configure_ts_group(self, ts_group, ts_group_index):
+ try:
+ _ts_id = int(self.resolve_test_server_name(ts_group['tsId']))
+ except ValueError:
+ raise RuntimeError('Test server name "{}" does not exist.'.format(
+ ts_group['tsId']))
+ if _ts_id not in self.ts_ids:
+ self._tcl.execute(
+ 'set tss_ [ls::create TsGroup -under $test_ -tsId {} ]'.format(
+ _ts_id))
+ self.ts_ids.add(_ts_id)
+ for _case in ts_group.get('testCases', []):
+ self._configure_tc_type(_case, ts_group_index)
+
+ self._configure_preresolved_arp(ts_group.get('preResolvedArpAddress'))
+
+ def _configure_tc_type(self, tc, ts_group_index):
+ if tc['type'] not in self._tc_types:
+ raise RuntimeError('Test type {} not supported.'.format(
+ tc['type']))
+ tc['type'] = tc['type'].replace('_', ' ')
+ res = self._tcl.execute(
+ 'set tc_ [ls::retrieve testcase -libraryId {0} "{1}"]'.format(
+ self._basic_library_id, tc['type']))
+ if 'Invalid' in res:
+ raise RuntimeError('Test type {} not found in "Basic" '
+ 'library.'.format(tc['type']))
+ self._tcl.execute(
+ 'ls::config $test_.TsGroup({}) -children-Tc $tc_'.format(
+ ts_group_index))
+ self._tcl.execute('ls::config $tc_ -Library {0} -Name "{1}"'.format(
+ self._basic_library_id, tc['name']))
+ self._tcl.execute(
+ 'ls::config $tc_ -Description "{}"'.format(tc['type']))
+ self._tcl.execute(
+ 'ls::config $tc_ -Keywords "GTP LTE {}"'.format(tc['type']))
+ if 'linked' in tc:
+ self._tcl.execute(
+ 'ls::config $tc_ -Linked {}'.format(tc['linked']))
+ if 'AssociatedPhys' in tc:
+ self._tcl.execute('ls::config $tc_ -AssociatedPhys "{}"'.format(
+ tc['AssociatedPhys']))
+ if 'parameters' in tc:
+ self._configure_parameters(tc['parameters'])
+
+ def _configure_parameters(self, params):
+ self._tcl.execute('set p_ [ls::get $tc_ -children-Parameters(0)]')
+ for _param_key in sorted(params):
+ _param_value = params[_param_key]
+ if isinstance(_param_value, dict):
+ # Configure complex parameter
+ if _param_value['class'] in self._class_param_config_handler:
+ self._class_param_config_handler[_param_value['class']](
+ _param_key,
+ _param_value)
+ else:
+ # Configure simple parameter
+ self._tcl.execute(
+ 'ls::create {} -under $p_ -Value "{}"'.format(
+ _param_key,
+ _param_value))
+
+ def _configure_array_param(self, name, params):
+ self._tcl.execute('ls::create -Array-{} -under $p_ ;'.format(name))
+ for param in params['array']:
+ self._tcl.execute(
+ 'ls::create ArrayItem -under $p_.{} -Value "{}"'.format(name,
+ param))
+
+ def _configure_test_node_param(self, name, params):
+ # Copy defaults to avoid mutating the shared class-level dict
+ _params = dict(self.DEFAULT_TEST_NODE)
+ _params.update(params)
+
+ # TCL command expects lower case 'true' or 'false'
+ _params['ethStatsEnabled'] = str(_params['ethStatsEnabled']).lower()
+ _params['uniqueVlanAddr'] = str(_params['uniqueVlanAddr']).lower()
+
+ cmd = self.TEST_NODE_CMD.format(name, **_params)
+ self._tcl.execute(cmd)
+
+ def _configure_sut_param(self, name, params):
+ self._tcl.execute(
+ 'ls::create -Sut-{} -under $p_ -Name "{}";'.format(name,
+ params['name']))
+
+ def _configure_dmf_param(self, name, params):
+ self._tcl.execute('ls::create -Dmf-{} -under $p_ ;'.format(name))
+
+ for _flow_index, _flow in enumerate(params['mainflows']):
+ _lib_id = self._get_library_id(_flow['library'])
+ self._tcl.execute(
+ 'ls::perform AddDmfMainflow $p_.Dmf {} "{}"'.format(
+ _lib_id,
+ _flow['name']))
+
+ if not params.get('instanceGroups'):
+ return
+
+ _instance_group = params['instanceGroups'][_flow_index]
+
+ # Traffic Mixer parameters handling
+ for _key in ['mixType', 'rate']:
+ if _key in _instance_group:
+ self._tcl.execute(
+ 'ls::config $p_.Dmf.InstanceGroup({}) -{} {}'.format(
+ _flow_index, _key, _instance_group[_key]))
+
+ # Assignments parameters handling
+ for _row_id, _row in enumerate(_instance_group.get('rows', [])):
+ self._tcl.execute(
+ 'ls::config $p_.Dmf.InstanceGroup({}).Row({}) -Node {} '
+ '-OverridePort {} -ClientPort {} -Context {} -Role {} '
+ '-PreferredTransport {} -RatingGroup {} '
+ '-ServiceID {}'.format(
+ _flow_index, _row_id, _row['node'],
+ _row['overridePort'], _row['clientPort'],
+ _row['context'], _row['role'], _row['transport'],
+ _row['ratingGroup'], _row['serviceId']))
+
+ def _configure_reservation(self, reservation):
+ _ts_id = self.resolve_test_server_name(reservation['tsId'])
+ self._tcl.execute(
+ 'set reservation_ [ls::create Reservation -under $test_]')
+ self._tcl.execute(
+ 'ls::config $reservation_ -TsIndex {} -TsId {} '
+ '-TsName "{}"'.format(reservation['tsIndex'],
+ _ts_id,
+ reservation['tsName']))
+ for _subnet in reservation['phySubnets']:
+ self._tcl.execute(
+ 'set physubnet_ [ls::create PhySubnet -under $reservation_]')
+ self._tcl.execute(
+ 'ls::config $physubnet_ -Name "{}" -Base "{}" -Mask "{}" '
+ '-NumIps {}'.format(_subnet['name'], _subnet['base'],
+ _subnet['mask'], _subnet['numIps']))
+
+ def _configure_preresolved_arp(self, pre_resolved_arp):
+ if not pre_resolved_arp: # Pre-resolved ARP configuration not found
+ return
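+ # Each entry is expected to look like (illustrative values):
+ # {'StartingAddress': '192.168.1.0', 'NumNodes': 20}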
+ for _entry in pre_resolved_arp:
+ # TsGroup handle name must match the one set in _configure_ts_group()
+ self._tcl.execute(
+ 'ls::create PreResolvedArpAddress -under $tss_ '
+ '-StartingAddress "{StartingAddress}" '
+ '-NumNodes {NumNodes}'.format(**_entry))
+
+ def delete_test_session(self, test_session):
+ raise NotImplementedError
+
+ def _save_test_session(self):
+ # Call 'Validate' to set default values for missing parameters
+ res = self._tcl.execute('ls::perform Validate -TestSession $test_')
+ if res == 'Invalid':
+ res = self._tcl.execute('ls::get $test_ -ErrorsAndWarnings')
+ raise exceptions.LandslideTclException(
+ "Test session validation failed. Server response: {}".format(
+ res))
+ else:
+ self._tcl.execute('ls::save $test_ -overwrite')
+ LOG.debug("Test session saved successfully.")
+
+ def _get_library_id(self, library):
+ _library_id = self._tcl.execute(
+ "ls::get [ls::query LibraryInfo -systemLibraryName {}] -Id".format(
+ library))
+ try:
+ int(_library_id)
+ return _library_id
+ except ValueError:
+ pass
+
+ _library_id = self._tcl.execute(
+ "ls::get [ls::query LibraryInfo -userLibraryName {}] -Id".format(
+ library))
+ try:
+ int(_library_id)
+ except ValueError:
+ LOG.error("_get_library_id: library='%s' not found.", library)
+ raise exceptions.LandslideTclException(
+ "_get_library_id: library='{}' not found.".format(
+ library))
+
+ return _library_id
+
+ def resolve_test_server_name(self, ts_name):
+ return self._tcl.execute("ls::query TsId {}".format(ts_name))
+
+
+class LsTclHandler(object):
+ """Landslide TCL Handler class"""
+
+ LS_OK = "ls_ok"
+ JRE_PATH = net_serv_utils.get_nsb_option('jre_path_i386')
+
+ def __init__(self):
+ self.tcl_cmds = {}
+ self._ls = LsApi(jre_path=self.JRE_PATH)
+ self._ls.tcl(
+ "ls::config ApiOptions -NoReturnSuccessResponseString '{}'".format(
+ self.LS_OK))
+
+ def execute(self, command):
+ res = self._ls.tcl(command)
+ self.tcl_cmds[command] = res
+ return res
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ping.py b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
index a989543f5..5c8819119 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_ping.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
new file mode 100644
index 000000000..5da2178af
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2018-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PktgenTrafficGen(vnf_base.GenericTrafficGen):
+ """DPDK Pktgen traffic generator
+
+ Website: http://pktgen-dpdk.readthedocs.io/en/latest/index.html
+ """
+
+ TIMEOUT = 30
+
+ def __init__(self, name, vnfd):
+ vnf_base.GenericTrafficGen.__init__(self, name, vnfd)
+ self._traffic_profile = None
+ self._node_ip = vnfd['mgmt-interface'].get('ip')
+ self._lua_node_port = self._get_lua_node_port(
+ vnfd['mgmt-interface'].get('service_ports', []))
+ self._rate = 1
+
+ def instantiate(self, scenario_cfg, context_cfg): # pragma: no cover
+ pass
+
+ def run_traffic(self, traffic_profile):
+ self._traffic_profile = traffic_profile
+ self._traffic_profile.init(self._node_ip, self._lua_node_port)
+ utils.wait_until_true(self._is_running, timeout=self.TIMEOUT,
+ sleep=2)
+
+ def terminate(self): # pragma: no cover
+ pass
+
+ def collect_kpi(self): # pragma: no cover
+ pass
+
+ def scale(self, flavor=''): # pragma: no cover
+ pass
+
+ def wait_for_instantiate(self): # pragma: no cover
+ pass
+
+ def runner_method_start_iteration(self):  # pragma: no cover
+ LOG.debug('Start method')
+ # NOTE(ralonsoh): 'rate' should be modified between iterations. The
+ # current implementation is just for testing.
+ self._rate += 1
+ self._traffic_profile.start()
+ self._traffic_profile.rate(self._rate)
+ time.sleep(4)
+ self._traffic_profile.stop()
+
+ @staticmethod
+ def _get_lua_node_port(service_ports):
+ for port in (port for port in service_ports if
+ int(port['port']) == constants.LUA_PORT):
+ return int(port['node_port'])
+ # NOTE(ralonsoh): in case LUA port is not present, an exception should
+ # be raised.
+
+ def _is_running(self):
+ try:
+ self._traffic_profile.help()
+ return True
+ except exceptions.PktgenActionError:
+ return False
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
index 151252ce8..65b7bac10 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
import logging
+import copy
from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
@@ -30,23 +29,13 @@ class ProxTrafficGen(SampleVNFTrafficGen):
LUA_PARAMETER_NAME = "gen"
WAIT_TIME = 1
- @staticmethod
- def _sort_vpci(vnfd):
- """
-
- :param vnfd: vnfd.yaml
- :return: trex_cfg.yaml file
- """
-
- def key_func(interface):
- return interface["virtual-interface"]["vpci"], interface["name"]
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ vnfd_cpy = copy.deepcopy(vnfd)
+ super(ProxTrafficGen, self).__init__(name, vnfd_cpy)
- ext_intf = vnfd["vdu"][0]["external-interface"]
- return sorted(ext_intf, key=key_func)
-
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
- # don't call superclass, use custom wrapper of ProxApproxVnf
- self._vnf_wrapper = ProxApproxVnf(name, vnfd, setup_env_helper_type, resource_helper_type)
+ self._vnf_wrapper = ProxApproxVnf(
+ name, vnfd, setup_env_helper_type, resource_helper_type)
self.bin_path = get_nsb_option('bin_path', '')
self.name = self._vnf_wrapper.name
self.ssh_helper = self._vnf_wrapper.ssh_helper
@@ -59,10 +48,6 @@ class ProxTrafficGen(SampleVNFTrafficGen):
self._tg_process = None
self._traffic_process = None
- # used for generating stats
- self.vpci_if_name_ascending = self._sort_vpci(vnfd)
- self.resource_helper.vpci_if_name_ascending = self._sort_vpci(vnfd)
-
def terminate(self):
self._vnf_wrapper.terminate()
super(ProxTrafficGen, self).terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 630c8b9c0..80812876d 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,31 +12,624 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
-import time
-import os
+import ipaddress
import logging
-import sys
+import six
+import collections
-from yardstick.common.utils import ErrorClass
+from six import moves
+from yardstick.common import utils
+from yardstick.common import exceptions
+from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
-from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
+
LOG = logging.getLogger(__name__)
WAIT_AFTER_CFG_LOAD = 10
WAIT_FOR_TRAFFIC = 30
-IXIA_LIB = os.path.dirname(os.path.realpath(__file__))
-IXNET_LIB = os.path.join(IXIA_LIB, "../../libs/ixia_libs/IxNet")
-sys.path.append(IXNET_LIB)
+WAIT_PROTOCOLS_STARTED = 420
+
+
+class IxiaBasicScenario(object):
+ """Ixia Basic scenario for flow from port to port"""
+
+ def __init__(self, client, context_cfg, ixia_cfg):
+
+ self.client = client
+ self.context_cfg = context_cfg
+ self.ixia_cfg = ixia_cfg
+
+ self._uplink_vports = None
+ self._downlink_vports = None
+
+ def apply_config(self):
+ pass
+
+ def run_protocols(self):
+ pass
+
+ def stop_protocols(self):
+ pass
+
+ def create_traffic_model(self, traffic_profile):
+ vports = self.client.get_vports()
+ self._uplink_vports = vports[::2]
+ self._downlink_vports = vports[1::2]
+ self.client.create_traffic_model(self._uplink_vports,
+ self._downlink_vports,
+ traffic_profile)
+
+ def _get_stats(self):
+ return self.client.get_statistics()
+
+ def generate_samples(self, resource_helper, ports, duration):
+ stats = self._get_stats()
+
+ samples = {}
+ # this is not the DPDK port number, but the number we assigned when
+ # we selected ports and programmed the profile
+ for port_num in ports:
+ try:
+ # reverse lookup port name from port_num so the stats dict is descriptive
+ intf = resource_helper.vnfd_helper.find_interface_by_port(port_num)
+ port_name = intf['name']
+ avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num]
+ min_latency = stats['Store-Forward_Min_latency_ns'][port_num]
+ max_latency = stats['Store-Forward_Max_latency_ns'][port_num]
+ samples[port_name] = {
+ 'RxThroughputBps': float(stats['Bytes_Rx'][port_num]) / duration,
+ 'TxThroughputBps': float(stats['Bytes_Tx'][port_num]) / duration,
+ 'InPackets': int(stats['Valid_Frames_Rx'][port_num]),
+ 'OutPackets': int(stats['Frames_Tx'][port_num]),
+ 'InBytes': int(stats['Bytes_Rx'][port_num]),
+ 'OutBytes': int(stats['Bytes_Tx'][port_num]),
+ 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration,
+ 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration,
+ 'LatencyAvg': utils.safe_cast(avg_latency, int, 0),
+ 'LatencyMin': utils.safe_cast(min_latency, int, 0),
+ 'LatencyMax': utils.safe_cast(max_latency, int, 0)
+ }
+ except IndexError:
+ pass
+
+ return samples
+
+ def update_tracking_options(self):
+ pass
+
+ def get_tc_rfc2544_options(self):
+ pass
+
+
+class IxiaL3Scenario(IxiaBasicScenario):
+ """Ixia scenario for L3 flow between static ip's"""
+
+ def _add_static_ips(self):
+ vports = self.client.get_vports()
+ uplink_intf_vport = [(self.client.get_static_interface(vport), vport)
+ for vport in vports[::2]]
+ downlink_intf_vport = [(self.client.get_static_interface(vport), vport)
+ for vport in vports[1::2]]
+
+ for index in range(len(uplink_intf_vport)):
+ intf, vport = uplink_intf_vport[index]
+ try:
+ iprange = self.ixia_cfg['flow'].get('src_ip')[index]
+ start_ip = utils.get_ip_range_start(iprange)
+ count = utils.get_ip_range_count(iprange)
+ self.client.add_static_ipv4(intf, vport, start_ip, count, '32')
+ except IndexError:
+ raise exceptions.IncorrectFlowOption(
+ option="src_ip", link="uplink_{}".format(index))
+
+ intf, vport = downlink_intf_vport[index]
+ try:
+ iprange = self.ixia_cfg['flow'].get('dst_ip')[index]
+ start_ip = utils.get_ip_range_start(iprange)
+ count = utils.get_ip_range_count(iprange)
+ self.client.add_static_ipv4(intf, vport, start_ip, count, '32')
+ except IndexError:
+ raise exceptions.IncorrectFlowOption(
+ option="dst_ip", link="downlink_{}".format(index))
+
+ def _add_interfaces(self):
+ vports = self.client.get_vports()
+ uplink_vports = (vport for vport in vports[::2])
+ downlink_vports = (vport for vport in vports[1::2])
+
+ ix_node = next(node for _, node in self.context_cfg['nodes'].items()
+ if node['role'] == 'IxNet')
+
+ for intf in ix_node['interfaces'].values():
+ ip = intf.get('local_ip')
+ mac = intf.get('local_mac')
+ gateway = None
+ try:
+ gateway = next(route.get('gateway')
+ for route in ix_node.get('routing_table')
+ if route.get('if') == intf.get('ifname'))
+ except StopIteration:
+ LOG.debug("Gateway not provided")
+
+ if 'uplink' in intf.get('vld_id'):
+ self.client.add_interface(next(uplink_vports),
+ ip, mac, gateway)
+ else:
+ self.client.add_interface(next(downlink_vports),
+ ip, mac, gateway)
+
+ def apply_config(self):
+ self._add_interfaces()
+ self._add_static_ips()
+
+ def create_traffic_model(self, traffic_profile):
+ vports = self.client.get_vports()
+ self._uplink_vports = vports[::2]
+ self._downlink_vports = vports[1::2]
+
+ uplink_endpoints = [port + '/protocols/static'
+ for port in self._uplink_vports]
+ downlink_endpoints = [port + '/protocols/static'
+ for port in self._downlink_vports]
+
+ self.client.create_ipv4_traffic_model(uplink_endpoints,
+ downlink_endpoints,
+ traffic_profile)
+
+
+class IxiaPppoeClientScenario(object):
+ def __init__(self, client, context_cfg, ixia_cfg):
+
+ self.client = client
+
+ self._uplink_vports = None
+ self._downlink_vports = None
+
+ self._access_topologies = []
+ self._core_topologies = []
+
+ self._context_cfg = context_cfg
+ self._ixia_cfg = ixia_cfg
+ self.protocols = []
+ self.device_groups = []
+
+ def apply_config(self):
+ vports = self.client.get_vports()
+ self._uplink_vports = vports[::2]
+ self._downlink_vports = vports[1::2]
+ self._fill_ixia_config()
+ self._apply_access_network_config()
+ self._apply_core_network_config()
+
+ def create_traffic_model(self, traffic_profile):
+ endpoints_id_pairs = self._get_endpoints_src_dst_id_pairs(
+ traffic_profile.full_profile)
+ endpoints_obj_pairs = \
+ self._get_endpoints_src_dst_obj_pairs(endpoints_id_pairs)
+ if endpoints_obj_pairs:
+ uplink_endpoints = endpoints_obj_pairs[::2]
+ downlink_endpoints = endpoints_obj_pairs[1::2]
+ else:
+ uplink_endpoints = self._access_topologies
+ downlink_endpoints = self._core_topologies
+ self.client.create_ipv4_traffic_model(uplink_endpoints,
+ downlink_endpoints,
+ traffic_profile)
+
+ def run_protocols(self):
+ LOG.info('PPPoE Scenario - Start Protocols')
+ self.client.start_protocols()
+ utils.wait_until_true(
+ lambda: self.client.is_protocols_running(self.protocols),
+ timeout=WAIT_PROTOCOLS_STARTED, sleep=2)
+
+ def stop_protocols(self):
+ LOG.info('PPPoE Scenario - Stop Protocols')
+ self.client.stop_protocols()
+
+ def _get_intf_addr(self, intf):
+ """Retrieve interface IP address and mask
+
+ :param intf: either a string representing an IP address with
+ mask (e.g. 192.168.10.2/24) or a dictionary with the host
+ name and the port (e.g. {'tg__0': 'xe1'})
+ :return: (tuple) pair of ip address and mask
+ """
+ if isinstance(intf, six.string_types):
+ ip, mask = tuple(intf.split('/'))
+ return ip, int(mask)
+
+ node_name, intf_name = next(iter(intf.items()))
+ node = self._context_cfg["nodes"].get(node_name, {})
+ interface = node.get("interfaces", {})[intf_name]
+ ip = interface["local_ip"]
+ mask = interface["netmask"]
+ ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)),
+ strict=False)
+ return ip, ipaddr.prefixlen
+
+ @staticmethod
+ def _get_endpoints_src_dst_id_pairs(flows_params):
+ """Get list of flows src/dst port pairs
+
+ Create list of flows src/dst port pairs based on traffic profile
+ flows data. Each uplink/downlink pair in traffic profile represents
+ specific flows between the pair of ports.
+
+ Example ('port' key represents port on which flow will be created):
+
+ Input flows data:
+ uplink_0:
+ ipv4:
+ id: 1
+ port: xe0
+ downlink_0:
+ ipv4:
+ id: 2
+ port: xe1
+ uplink_1:
+ ipv4:
+ id: 3
+ port: xe2
+ downlink_1:
+ ipv4:
+ id: 4
+ port: xe3
+
+ Result list: ['xe0', 'xe1', 'xe2', 'xe3']
+
+ Result list means that the following flows pairs will be created:
+ - uplink 0: port xe0 <-> port xe1
+ - downlink 0: port xe1 <-> port xe0
+ - uplink 1: port xe2 <-> port xe3
+ - downlink 1: port xe3 <-> port xe2
+
+ :param flows_params: ordered dict of traffic profile flows params
+ :return: (list) list of flows src/dst ports
+ """
+ if len(flows_params) % 2:
+ raise RuntimeError('Numbers of uplink and downlink flows'
+ ' in traffic profile are not equal')
+ endpoint_pairs = []
+ for flow in flows_params:
+ port = flows_params[flow]['ipv4'].get('port')
+ if port is None:
+ continue
+ endpoint_pairs.append(port)
+ return endpoint_pairs
+
+ def _get_endpoints_src_dst_obj_pairs(self, endpoints_id_pairs):
+ """Create list of uplink/downlink device groups pairs
+
+ Based on traffic profile options, create list of uplink/downlink
+ device groups pairs between which flow groups will be created:
+
+ 1. If the uplink/downlink flows in the traffic profile don't have
+ the 'port' key specified, flows will be created between the
+ topologies on the corresponding access and core ports.
+ E.g.:
+ Access topology on xe0: topology1
+ Core topology on xe1: topology2
+ Flows will be created between:
+ topology1 -> topology2
+ topology2 -> topology1
+
+ 2. If the uplink/downlink flows in the traffic profile do have the
+ 'port' key specified, flows will be created between device
+ groups on that port.
+ E.g., for the following traffic profile
+ uplink_0:
+ port: xe0
+ downlink_0:
+ port: xe1
+ uplink_1:
+ port: xe0
+ downlink_1:
+ port: xe3
+ Flows will be created between:
+ Port xe0 (dg1) -> Port xe1 (dg1)
+ Port xe1 (dg1) -> Port xe0 (dg1)
+ Port xe0 (dg2) -> Port xe3 (dg1)
+ Port xe3 (dg1) -> Port xe0 (dg2)
+
+ :param endpoints_id_pairs: (list) List of uplink/downlink flows ports
+ pairs
+ :return: (list) list of uplink/downlink device groups descriptors pairs
+ """
+ pppoe = self._ixia_cfg['pppoe_client']
+ sessions_per_port = pppoe['sessions_per_port']
+ sessions_per_svlan = pppoe['sessions_per_svlan']
+ svlan_count = int(sessions_per_port / sessions_per_svlan)
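+ # E.g. (hypothetical): sessions_per_port=32, sessions_per_svlan=8
+ # yield svlan_count=4, i.e. four device groups per access port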
+
+ uplink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['src_ip']]
+ downlink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['dst_ip']]
+ uplink_port_topology_map = zip(uplink_ports, self._access_topologies)
+ downlink_port_topology_map = zip(downlink_ports, self._core_topologies)
+
+ port_to_dev_group_mapping = {}
+ for port, topology in uplink_port_topology_map:
+ topology_dgs = self.client.get_topology_device_groups(topology)
+ port_to_dev_group_mapping[port] = topology_dgs
+ for port, topology in downlink_port_topology_map:
+ topology_dgs = self.client.get_topology_device_groups(topology)
+ port_to_dev_group_mapping[port] = topology_dgs
+
+ uplink_endpoints = endpoints_id_pairs[::2]
+ downlink_endpoints = endpoints_id_pairs[1::2]
+
+ uplink_dev_groups = []
+ group_up = [uplink_endpoints[i:i + svlan_count]
+ for i in range(0, len(uplink_endpoints), svlan_count)]
+
+ for group in group_up:
+ for i, port in enumerate(group):
+ uplink_dev_groups.append(port_to_dev_group_mapping[port][i])
+
+ downlink_dev_groups = []
+ for port in downlink_endpoints:
+ downlink_dev_groups.append(port_to_dev_group_mapping[port][0])
+
+ endpoint_obj_pairs = []
+ for up, down in zip(uplink_dev_groups, downlink_dev_groups):
+ endpoint_obj_pairs.extend([up, down])
+
+ return endpoint_obj_pairs
+
+ def _fill_ixia_config(self):
+ pppoe = self._ixia_cfg["pppoe_client"]
+ ipv4 = self._ixia_cfg["ipv4_client"]
+
+ _ip = [self._get_intf_addr(intf)[0] for intf in pppoe["ip"]]
+ self._ixia_cfg["pppoe_client"]["ip"] = _ip
+
+ _ip = [self._get_intf_addr(intf)[0] for intf in ipv4["gateway_ip"]]
+ self._ixia_cfg["ipv4_client"]["gateway_ip"] = _ip
+
+ addrs = [self._get_intf_addr(intf) for intf in ipv4["ip"]]
+ _ip = [addr[0] for addr in addrs]
+ _prefix = [addr[1] for addr in addrs]
+
+ self._ixia_cfg["ipv4_client"]["ip"] = _ip
+ self._ixia_cfg["ipv4_client"]["prefix"] = _prefix
+
+ def _apply_access_network_config(self):
+ pppoe = self._ixia_cfg["pppoe_client"]
+ sessions_per_port = pppoe['sessions_per_port']
+ sessions_per_svlan = pppoe['sessions_per_svlan']
+ svlan_count = int(sessions_per_port / sessions_per_svlan)
+
+ # add topology per uplink port (access network)
+ for access_tp_id, vport in enumerate(self._uplink_vports):
+ name = 'Topology access {}'.format(access_tp_id)
+ tp = self.client.add_topology(name, vport)
+ self._access_topologies.append(tp)
+ # add device group per svlan
+ for dg_id in range(svlan_count):
+ s_vlan_id = int(pppoe['s_vlan']) + dg_id + access_tp_id * svlan_count
+ s_vlan = ixnet_api.Vlan(vlan_id=s_vlan_id)
+ c_vlan = ixnet_api.Vlan(vlan_id=pppoe['c_vlan'], vlan_id_step=1)
+ name = 'SVLAN {}'.format(s_vlan_id)
+ dg = self.client.add_device_group(tp, name, sessions_per_svlan)
+ self.device_groups.append(dg)
+ # add ethernet layer to device group
+ ethernet = self.client.add_ethernet(dg, 'Ethernet')
+ self.protocols.append(ethernet)
+ self.client.add_vlans(ethernet, [s_vlan, c_vlan])
+ # add ppp over ethernet
+ if 'pap_user' in pppoe:
+ ppp = self.client.add_pppox_client(ethernet, 'pap',
+ pppoe['pap_user'],
+ pppoe['pap_password'])
+ else:
+ ppp = self.client.add_pppox_client(ethernet, 'chap',
+ pppoe['chap_user'],
+ pppoe['chap_password'])
+ self.protocols.append(ppp)
+
+ def _apply_core_network_config(self):
+ ipv4 = self._ixia_cfg["ipv4_client"]
+ sessions_per_port = ipv4['sessions_per_port']
+ sessions_per_vlan = ipv4['sessions_per_vlan']
+ vlan_count = int(sessions_per_port / sessions_per_vlan)
+
+ # add topology per downlink port (core network)
+ for core_tp_id, vport in enumerate(self._downlink_vports):
+ name = 'Topology core {}'.format(core_tp_id)
+ tp = self.client.add_topology(name, vport)
+ self._core_topologies.append(tp)
+ # add device group per vlan
+ for dg_id in range(vlan_count):
+ name = 'Core port {}'.format(core_tp_id)
+ dg = self.client.add_device_group(tp, name, sessions_per_vlan)
+ self.device_groups.append(dg)
+ # add ethernet layer to device group
+ ethernet = self.client.add_ethernet(dg, 'Ethernet')
+ self.protocols.append(ethernet)
+ if 'vlan' in ipv4:
+ vlan_id = int(ipv4['vlan']) + dg_id + core_tp_id * vlan_count
+ vlan = ixnet_api.Vlan(vlan_id=vlan_id)
+ self.client.add_vlans(ethernet, [vlan])
+ # add ipv4 layer
+ gw_ip = ipv4['gateway_ip'][core_tp_id]
+ # use gw addr to generate ip addr from the same network
+ ip_addr = ipaddress.IPv4Address(gw_ip) + 1
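+                # e.g. a gateway of 10.0.0.1 yields a first session address
+                # of 10.0.0.2, stepped by 0.0.0.1 for each extra session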
+ ipv4_obj = self.client.add_ipv4(ethernet, name='ipv4',
+ addr=ip_addr,
+ addr_step='0.0.0.1',
+ prefix=ipv4['prefix'][core_tp_id],
+ gateway=gw_ip)
+ self.protocols.append(ipv4_obj)
+ if ipv4.get("bgp"):
+ bgp_peer_obj = self.client.add_bgp(ipv4_obj,
+ dut_ip=ipv4["bgp"]["dut_ip"],
+ local_as=ipv4["bgp"]["as_number"],
+ bgp_type=ipv4["bgp"].get("bgp_type"))
+ self.protocols.append(bgp_peer_obj)
+
+ def update_tracking_options(self):
+ priority_map = {
+ 'raw': 'ipv4Raw0',
+ 'tos': {'precedence': 'ipv4Precedence0'},
+ 'dscp': {'defaultPHB': 'ipv4DefaultPhb0',
+ 'selectorPHB': 'ipv4ClassSelectorPhb0',
+ 'assuredPHB': 'ipv4AssuredForwardingPhb0',
+ 'expeditedPHB': 'ipv4ExpeditedForwardingPhb0'}
+ }
+
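+        # map the scenario 'priority' setting (raw, tos or dscp) to the
+        # matching IxNetwork tracking field; IP precedence is the default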
+ prio_trackby_key = 'ipv4Precedence0'
+
+ try:
+ priority = list(self._ixia_cfg['priority'])[0]
+ if priority == 'raw':
+ prio_trackby_key = priority_map[priority]
+ elif priority in ['tos', 'dscp']:
+ priority_type = list(self._ixia_cfg['priority'][priority])[0]
+ prio_trackby_key = priority_map[priority][priority_type]
+ except KeyError:
+ pass
+
+ tracking_options = ['flowGroup0', 'vlanVlanId0', prio_trackby_key]
+ self.client.set_flow_tracking(tracking_options)
+
+ def get_tc_rfc2544_options(self):
+ return self._ixia_cfg.get('rfc2544')
+
+ def _get_stats(self):
+ return self.client.get_pppoe_scenario_statistics()
+
+ @staticmethod
+ def get_flow_id_data(stats, flow_id, key):
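+        """Average the value of 'key' over all stats rows of one flow id."""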
+ result = [float(flow.get(key)) for flow in stats if flow['id'] == flow_id]
+ return sum(result) / len(result)
+
+ def get_priority_flows_stats(self, samples, duration):
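+        """Aggregate Tx/Rx counters and latency per IP priority value."""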
+ results = {}
+ priorities = set([flow['IP_Priority'] for flow in samples])
+ for priority in priorities:
+ tx_frames = sum(
+ [int(flow['Tx_Frames']) for flow in samples
+ if flow['IP_Priority'] == priority])
+ rx_frames = sum(
+ [int(flow['Rx_Frames']) for flow in samples
+ if flow['IP_Priority'] == priority])
+ prio_flows_num = len([flow for flow in samples
+ if flow['IP_Priority'] == priority])
+ avg_latency_ns = sum(
+ [int(flow['Store-Forward_Avg_latency_ns']) for flow in samples
+ if flow['IP_Priority'] == priority]) / prio_flows_num
+ min_latency_ns = min(
+ [int(flow['Store-Forward_Min_latency_ns']) for flow in samples
+ if flow['IP_Priority'] == priority])
+ max_latency_ns = max(
+ [int(flow['Store-Forward_Max_latency_ns']) for flow in samples
+ if flow['IP_Priority'] == priority])
+ tx_throughput = float(tx_frames) / duration
+ rx_throughput = float(rx_frames) / duration
+ results[priority] = {
+ 'InPackets': rx_frames,
+ 'OutPackets': tx_frames,
+ 'RxThroughput': round(rx_throughput, 3),
+ 'TxThroughput': round(tx_throughput, 3),
+ 'LatencyAvg': utils.safe_cast(avg_latency_ns, int, 0),
+ 'LatencyMin': utils.safe_cast(min_latency_ns, int, 0),
+ 'LatencyMax': utils.safe_cast(max_latency_ns, int, 0)
+ }
+ return results
+
+ def generate_samples(self, resource_helper, ports, duration):
+
+ stats = self._get_stats()
+ samples = {}
+ ports_stats = stats['port_statistics']
+ flows_stats = stats['flow_statistic']
+ pppoe_subs_per_port = stats['pppox_client_per_port']
+
+ # Get sorted list of ixia ports names
+ ixia_port_names = sorted([data['port_name'] for data in ports_stats])
+
+ # Set 'port_id' key for ports stats items
+ for item in ports_stats:
+ port_id = item.pop('port_name').split('-')[-1].strip()
+ item['port_id'] = int(port_id)
+
+ # Set 'id' key for flows stats items
+ for item in flows_stats:
+ flow_id = item.pop('Flow_Group').split('-')[1].strip()
+ item['id'] = int(flow_id)
+
+ # Set 'port_id' key for pppoe subs per port stats
+ for item in pppoe_subs_per_port:
+ port_id = item.pop('subs_port').split('-')[-1].strip()
+ item['port_id'] = int(port_id)
+
+ # Map traffic flows to ports
+ port_flow_map = collections.defaultdict(set)
+ for item in flows_stats:
+ tx_port = item.pop('Tx_Port')
+ tx_port_index = ixia_port_names.index(tx_port)
+ port_flow_map[tx_port_index].update([item['id']])
+
+ # Sort ports stats
+ ports_stats = sorted(ports_stats, key=lambda k: k['port_id'])
+
+ # Get priority flows stats
+ prio_flows_stats = self.get_priority_flows_stats(flows_stats, duration)
+ samples['priority_stats'] = prio_flows_stats
+
+        # this is not the DPDK port number, but the number we assigned
+        # when we selected the ports and programmed the profile
+ for port_num in ports:
+ try:
+ # reverse lookup port name from port_num so the stats dict is descriptive
+ intf = resource_helper.vnfd_helper.find_interface_by_port(port_num)
+ port_name = intf['name']
+ port_id = ports_stats[port_num]['port_id']
+ port_subs_stats = \
+ [port_data for port_data in pppoe_subs_per_port
+ if port_data.get('port_id') == port_id]
+
+ avg_latency = \
+ sum([float(self.get_flow_id_data(
+ flows_stats, flow, 'Store-Forward_Avg_latency_ns'))
+ for flow in port_flow_map[port_num]]) / len(port_flow_map[port_num])
+ min_latency = \
+ min([float(self.get_flow_id_data(
+ flows_stats, flow, 'Store-Forward_Min_latency_ns'))
+ for flow in port_flow_map[port_num]])
+ max_latency = \
+ max([float(self.get_flow_id_data(
+ flows_stats, flow, 'Store-Forward_Max_latency_ns'))
+ for flow in port_flow_map[port_num]])
+
+ samples[port_name] = {
+ 'RxThroughputBps': float(ports_stats[port_num]['Bytes_Rx']) / duration,
+ 'TxThroughputBps': float(ports_stats[port_num]['Bytes_Tx']) / duration,
+ 'InPackets': int(ports_stats[port_num]['Valid_Frames_Rx']),
+ 'OutPackets': int(ports_stats[port_num]['Frames_Tx']),
+ 'InBytes': int(ports_stats[port_num]['Bytes_Rx']),
+ 'OutBytes': int(ports_stats[port_num]['Bytes_Tx']),
+ 'RxThroughput': float(ports_stats[port_num]['Valid_Frames_Rx']) / duration,
+ 'TxThroughput': float(ports_stats[port_num]['Frames_Tx']) / duration,
+ 'LatencyAvg': utils.safe_cast(avg_latency, int, 0),
+ 'LatencyMin': utils.safe_cast(min_latency, int, 0),
+ 'LatencyMax': utils.safe_cast(max_latency, int, 0)
+ }
+
+ if port_subs_stats:
+ samples[port_name].update(
+ {'SessionsUp': int(port_subs_stats[0]['Sessions_Up']),
+ 'SessionsDown': int(port_subs_stats[0]['Sessions_Down']),
+ 'SessionsNotStarted': int(port_subs_stats[0]['Sessions_Not_Started']),
+ 'SessionsTotal': int(port_subs_stats[0]['Sessions_Total'])}
+ )
+
+ except IndexError:
+ pass
-try:
- from IxNet import IxNextgen
-except ImportError:
- IxNextgen = ErrorClass
+ return samples
class IxiaRfc2544Helper(Rfc2544ResourceHelper):
@@ -53,7 +646,13 @@ class IxiaResourceHelper(ClientResourceHelper):
super(IxiaResourceHelper, self).__init__(setup_helper)
self.scenario_helper = setup_helper.scenario_helper
- self.client = IxNextgen()
+ self._ixia_scenarios = {
+ "IxiaBasic": IxiaBasicScenario,
+ "IxiaL3": IxiaL3Scenario,
+ "IxiaPppoeClient": IxiaPppoeClientScenario,
+ }
+
+ self.client = ixnet_api.IxNextgen()
if rfc_helper_type is None:
rfc_helper_type = IxiaRfc2544Helper
@@ -61,54 +660,45 @@ class IxiaResourceHelper(ClientResourceHelper):
self.rfc_helper = rfc_helper_type(self.scenario_helper)
self.uplink_ports = None
self.downlink_ports = None
+ self.context_cfg = None
+ self._ix_scenario = None
self._connect()
def _connect(self, client=None):
- self.client._connect(self.vnfd_helper)
+ self.client.connect(self.vnfd_helper)
- def get_stats(self, *args, **kwargs):
- return self.client.ix_get_statistics()
+ def setup(self):
+ super(IxiaResourceHelper, self).setup()
+ self._init_ix_scenario()
def stop_collect(self):
+ self._ix_scenario.stop_protocols()
self._terminated.value = 1
- if self.client:
- self.client.ix_stop_traffic()
- def generate_samples(self, ports, key=None, default=None):
- stats = self.get_stats()
- last_result = stats[1]
- latency = stats[0]
+ def generate_samples(self, ports, duration):
+ return self._ix_scenario.generate_samples(self, ports, duration)
- samples = {}
- # this is not DPDK port num, but this is whatever number we gave
- # when we selected ports and programmed the profile
- for port_num in ports:
- try:
- # reverse lookup port name from port_num so the stats dict is descriptive
- intf = self.vnfd_helper.find_interface_by_port(port_num)
- port_name = intf["name"]
- samples[port_name] = {
- "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][port_num]),
- "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][port_num]),
- "rx_throughput_mbps": float(last_result["Rx_Rate_Mbps"][port_num]),
- "tx_throughput_mbps": float(last_result["Tx_Rate_Mbps"][port_num]),
- "in_packets": int(last_result["Valid_Frames_Rx"][port_num]),
- "out_packets": int(last_result["Frames_Tx"][port_num]),
- "RxThroughput": int(last_result["Valid_Frames_Rx"][port_num]) / 30,
- "TxThroughput": int(last_result["Frames_Tx"][port_num]) / 30,
- }
- if key:
- avg_latency = latency["Store-Forward_Avg_latency_ns"][port_num]
- min_latency = latency["Store-Forward_Min_latency_ns"][port_num]
- max_latency = latency["Store-Forward_Max_latency_ns"][port_num]
- samples[port_name][key] = \
- {"Store-Forward_Avg_latency_ns": avg_latency,
- "Store-Forward_Min_latency_ns": min_latency,
- "Store-Forward_Max_latency_ns": max_latency}
- except IndexError:
- pass
+ def _init_ix_scenario(self):
+ ixia_config = self.scenario_helper.scenario_cfg.get('ixia_config', 'IxiaBasic')
- return samples
+ if ixia_config in self._ixia_scenarios:
+ scenario_type = self._ixia_scenarios[ixia_config]
+
+ self._ix_scenario = scenario_type(self.client, self.context_cfg,
+ self.scenario_helper.scenario_cfg['options'])
+ else:
+ raise RuntimeError(
+ "IXIA config type '{}' not supported".format(ixia_config))
+
+ def _initialize_client(self, traffic_profile):
+ """Initialize the IXIA IxNetwork client and configure the server"""
+ self.client.clear_config()
+ self.client.assign_ports()
+ self._ix_scenario.apply_config()
+ self._ix_scenario.create_traffic_model(traffic_profile)
+
+ def update_tracking_options(self):
+ self._ix_scenario.update_tracking_options()
def run_traffic(self, traffic_profile):
if self._terminated.value:
@@ -116,18 +706,13 @@ class IxiaResourceHelper(ClientResourceHelper):
min_tol = self.rfc_helper.tolerance_low
max_tol = self.rfc_helper.tolerance_high
+ precision = self.rfc_helper.tolerance_precision
+ resolution = self.rfc_helper.resolution
default = "00:00:00:00:00:00"
self._build_ports()
-
- # we don't know client_file_name until runtime as instantiate
- client_file_name = \
- find_relative_file(self.scenario_helper.scenario_cfg['ixia_profile'],
- self.scenario_helper.scenario_cfg["task_path"])
- self.client.ix_load_config(client_file_name)
- time.sleep(WAIT_AFTER_CFG_LOAD)
-
- self.client.ix_assign_ports()
+ traffic_profile.update_traffic_profile(self)
+ self._initialize_client(traffic_profile)
mac = {}
for port_name in self.vnfd_helper.port_pairs.all_ports:
@@ -139,49 +724,106 @@ class IxiaResourceHelper(ClientResourceHelper):
mac["src_mac_{}".format(port_num)] = virt_intf.get("local_mac", default)
mac["dst_mac_{}".format(port_num)] = virt_intf.get("dst_mac", default)
- samples = {}
- # Generate ixia traffic config...
+ self._ix_scenario.run_protocols()
+
try:
while not self._terminated.value:
- traffic_profile.execute_traffic(self, self.client, mac)
+ first_run = traffic_profile.execute_traffic(self, self.client,
+ mac)
self.client_started.value = 1
- time.sleep(WAIT_FOR_TRAFFIC)
- self.client.ix_stop_traffic()
- samples = self.generate_samples(traffic_profile.ports)
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped(),
+ timeout=traffic_profile.config.duration * 2)
+ rfc2544_opts = self._ix_scenario.get_tc_rfc2544_options()
+ samples = self.generate_samples(traffic_profile.ports,
+ traffic_profile.config.duration)
+
+ completed, samples = traffic_profile.get_drop_percentage(
+ samples, min_tol, max_tol, precision, resolution,
+ first_run=first_run, tc_rfc2544_opts=rfc2544_opts)
self._queue.put(samples)
- status, samples = traffic_profile.get_drop_percentage(self, samples, min_tol,
- max_tol, self.client, mac)
- current = samples['CurrentDropPercentage']
- if min_tol <= current <= max_tol or status == 'Completed':
+ if completed:
self._terminated.value = 1
- self.client.ix_stop_traffic()
- self._queue.put(samples)
+ except Exception: # pylint: disable=broad-except
+ LOG.exception('Run Traffic terminated')
+
+ self._ix_scenario.stop_protocols()
+ self.client_started.value = 0
+ self._terminated.value = 1
+
+ def run_test(self, traffic_profile, tasks_queue, results_queue, *args): # pragma: no cover
+ LOG.info("Ixia resource_helper run_test")
+ if self._terminated.value:
+ return
+
+ min_tol = self.rfc_helper.tolerance_low
+ max_tol = self.rfc_helper.tolerance_high
+ precision = self.rfc_helper.tolerance_precision
+ resolution = self.rfc_helper.resolution
+ default = "00:00:00:00:00:00"
- if not self.rfc_helper.is_done():
- self._terminated.value = 1
- return
+ self._build_ports()
+ traffic_profile.update_traffic_profile(self)
+ self._initialize_client(traffic_profile)
- traffic_profile.execute_traffic(self, self.client, mac)
- for _ in range(5):
- time.sleep(self.LATENCY_TIME_SLEEP)
- self.client.ix_stop_traffic()
- samples = self.generate_samples(traffic_profile.ports, 'latency', {})
+ mac = {}
+ for port_name in self.vnfd_helper.port_pairs.all_ports:
+ intf = self.vnfd_helper.find_interface(name=port_name)
+ virt_intf = intf["virtual-interface"]
+            # the static traffic id is only known from reading the json;
+            # it is used by _get_ixia_traffic_profile
+ port_num = self.vnfd_helper.port_num(intf)
+ mac["src_mac_{}".format(port_num)] = virt_intf.get("local_mac", default)
+ mac["dst_mac_{}".format(port_num)] = virt_intf.get("dst_mac", default)
+
+ self._ix_scenario.run_protocols()
+
+ try:
+ completed = False
+ self.rfc_helper.iteration.value = 0
+ self.client_started.value = 1
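+            # consume 'RUN_TRAFFIC' tasks from the tasks queue; after each
+            # iteration report COMPLETE or CONTINUE on the results queue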
+ while completed is False and not self._terminated.value:
+ LOG.info("Wait for task ...")
+
+ try:
+ task = tasks_queue.get(True, 5)
+ except moves.queue.Empty:
+ continue
+ else:
+ if task != 'RUN_TRAFFIC':
+ continue
+
+ self.rfc_helper.iteration.value += 1
+ LOG.info("Got %s task, start iteration %d", task,
+ self.rfc_helper.iteration.value)
+ first_run = traffic_profile.execute_traffic(self, self.client,
+ mac)
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped(),
+ timeout=traffic_profile.config.duration * 2)
+ samples = self.generate_samples(traffic_profile.ports,
+ traffic_profile.config.duration)
+
+ completed, samples = traffic_profile.get_drop_percentage(
+ samples, min_tol, max_tol, precision, resolution,
+ first_run=first_run)
self._queue.put(samples)
- traffic_profile.start_ixia_latency(self, self.client, mac)
- if self._terminated.value:
- break
- self.client.ix_stop_traffic()
- except Exception: # pylint: disable=broad-except
- LOG.exception("Run Traffic terminated")
+ if completed:
+ LOG.debug("IxiaResourceHelper::run_test - test completed")
+ results_queue.put('COMPLETE')
+ else:
+ results_queue.put('CONTINUE')
+ tasks_queue.task_done()
- self._terminated.value = 1
+ except Exception: # pylint: disable=broad-except
+ LOG.exception('Run Traffic terminated')
- def collect_kpi(self):
- self.rfc_helper.iteration.value += 1
- return super(IxiaResourceHelper, self).collect_kpi()
+ self._ix_scenario.stop_protocols()
+ self.client_started.value = 0
+ LOG.debug("IxiaResourceHelper::run_test done")
class IxiaTrafficGen(SampleVNFTrafficGen):
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index 4e9f4bdc1..a9c0222ac 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,74 +11,49 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex traffic generation definitions which implements rfc2544 """
-from __future__ import absolute_import
-from __future__ import print_function
-import time
import logging
-from collections import Mapping
-
-from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexTrafficGen
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
-from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexResourceHelper
-
-LOGGING = logging.getLogger(__name__)
+import time
+from six import moves
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_trex
+from trex_stl_lib.trex_stl_exceptions import STLError
-class TrexRfc2544ResourceHelper(Rfc2544ResourceHelper):
- def is_done(self):
- return self.latency and self.iteration.value > 10
+LOG = logging.getLogger(__name__)
-class TrexRfcResourceHelper(TrexResourceHelper):
+class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
- LATENCY_TIME_SLEEP = 120
- RUN_DURATION = 30
- WAIT_TIME = 3
+ SAMPLING_PERIOD = 2
+ TRANSIENT_PERIOD = 10
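+    # seconds: samples are collected every SAMPLING_PERIOD once the first
+    # TRANSIENT_PERIOD of traffic has been skipped to avoid start-up noise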
- def __init__(self, setup_helper, rfc_helper_type=None):
+ def __init__(self, setup_helper):
super(TrexRfcResourceHelper, self).__init__(setup_helper)
-
- if rfc_helper_type is None:
- rfc_helper_type = TrexRfc2544ResourceHelper
-
- self.rfc2544_helper = rfc_helper_type(self.scenario_helper)
+ self.rfc2544_helper = sample_vnf.Rfc2544ResourceHelper(
+ self.scenario_helper)
def _run_traffic_once(self, traffic_profile):
- if self._terminated.value:
- return
-
- traffic_profile.execute_traffic(self)
self.client_started.value = 1
- time.sleep(self.RUN_DURATION)
- self.client.stop(traffic_profile.ports)
- time.sleep(self.WAIT_TIME)
- samples = traffic_profile.get_drop_percentage(self)
- self._queue.put(samples)
-
- if not self.rfc2544_helper.is_done():
- return
-
- self.client.stop(traffic_profile.ports)
- self.client.reset(ports=traffic_profile.ports)
- self.client.remove_all_streams(traffic_profile.ports)
- traffic_profile.execute_traffic_latency(samples=samples)
- multiplier = traffic_profile.calculate_pps(samples)[1]
- for _ in range(5):
- time.sleep(self.LATENCY_TIME_SLEEP)
- self.client.stop(traffic_profile.ports)
- time.sleep(self.WAIT_TIME)
- last_res = self.client.get_stats(traffic_profile.ports)
- if not isinstance(last_res, Mapping):
- self._terminated.value = 1
- continue
- self.generate_samples(traffic_profile.ports, 'latency', {})
- self._queue.put(samples)
- self.client.start(mult=str(multiplier),
- ports=traffic_profile.ports,
- duration=120, force=True)
+ ports, port_pg_id = traffic_profile.execute_traffic(self)
+
+ samples = []
+ timeout = int(traffic_profile.config.duration) - self.TRANSIENT_PERIOD
+ time.sleep(self.TRANSIENT_PERIOD)
+ for _ in utils.Timer(timeout=timeout):
+ samples.append(self._get_samples(ports, port_pg_id=port_pg_id))
+ time.sleep(self.SAMPLING_PERIOD)
+
+ traffic_profile.stop_traffic(self)
+ completed, output = traffic_profile.get_drop_percentage(
+ samples, self.rfc2544_helper.tolerance_low,
+ self.rfc2544_helper.tolerance_high,
+ self.rfc2544_helper.correlated_traffic,
+ self.rfc2544_helper.resolution)
+ self._queue.put(output)
+ return completed
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
@@ -86,12 +61,58 @@ class TrexRfcResourceHelper(TrexResourceHelper):
def clear_client_stats(self, ports):
self.client.clear_stats(ports=ports)
- def collect_kpi(self):
- self.rfc2544_helper.iteration.value += 1
- return super(TrexRfcResourceHelper, self).collect_kpi()
-
-
-class TrexTrafficGenRFC(TrexTrafficGen):
+ def run_test(self, traffic_profile, tasks_queue, results_queue, *args): # pragma: no cover
+ LOG.debug("Trex resource_helper run_test")
+ if self._terminated.value:
+ return
+ # if we don't do this we can hang waiting for the queue to drain
+ # have to do this in the subprocess
+ self._queue.cancel_join_thread()
+ try:
+ self._build_ports()
+ self.client = self._connect()
+ self.client.reset(ports=self.all_ports)
+ self.client.remove_all_streams(self.all_ports) # remove all streams
+ traffic_profile.register_generator(self)
+
+ completed = False
+ self.rfc2544_helper.iteration.value = 0
+ self.client_started.value = 1
+ while completed is False and not self._terminated.value:
+ LOG.debug("Wait for task ...")
+ try:
+ task = tasks_queue.get(True, 5)
+ except moves.queue.Empty:
+ LOG.debug("Wait for task timeout, continue waiting...")
+ continue
+ else:
+ if task != 'RUN_TRAFFIC':
+ continue
+ self.rfc2544_helper.iteration.value += 1
+ LOG.info("Got %s task, start iteration %d", task,
+ self.rfc2544_helper.iteration.value)
+ completed = self._run_traffic_once(traffic_profile)
+ if completed:
+ LOG.debug("%s::run_test - test completed",
+ self.__class__.__name__)
+ results_queue.put('COMPLETE')
+ else:
+ results_queue.put('CONTINUE')
+ tasks_queue.task_done()
+
+ self.client.stop(self.all_ports)
+ self.client.disconnect()
+ self._terminated.value = 0
+ except STLError:
+ if self._terminated.value:
+ LOG.debug("traffic generator is stopped")
+ return # return if trex/tg server is stopped.
+ raise
+
+ self.client_started.value = 0
+ LOG.debug("%s::run_test done", self.__class__.__name__)
+
+
+class TrexTrafficGenRFC(tg_trex.TrexTrafficGen):
"""
This class handles mapping traffic profile and generating
traffic for rfc2544 testcase.
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
index 0084a124c..0cb66a714 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,9 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex acts as traffic generation and vnf definitions based on IETS Spec """
-from __future__ import absolute_import
+import datetime
import logging
import os
@@ -25,6 +24,7 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTraff
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+
LOG = logging.getLogger(__name__)
@@ -165,6 +165,34 @@ class TrexResourceHelper(ClientResourceHelper):
cmd = "sudo fuser -n tcp %s %s -k > /dev/null 2>&1"
self.ssh_helper.execute(cmd % (self.SYNC_PORT, self.ASYNC_PORT))
+ def _get_samples(self, ports, port_pg_id=None):
+ stats = self.get_stats(ports)
+ timestamp = datetime.datetime.now()
+ samples = {}
+ for pname in (intf['name'] for intf in self.vnfd_helper.interfaces):
+ port_num = self.vnfd_helper.port_num(pname)
+ port_stats = stats.get(port_num, {})
+ samples[pname] = {
+ 'rx_throughput_fps': float(port_stats.get('rx_pps', 0.0)),
+ 'tx_throughput_fps': float(port_stats.get('tx_pps', 0.0)),
+ 'rx_throughput_bps': float(port_stats.get('rx_bps', 0.0)),
+ 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)),
+ 'in_packets': int(port_stats.get('ipackets', 0)),
+ 'out_packets': int(port_stats.get('opackets', 0)),
+ 'in_bytes': int(port_stats.get('ibytes', 0)),
+ 'out_bytes': int(port_stats.get('obytes', 0)),
+ 'timestamp': timestamp
+ }
+
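+            # latency stats live in the global 'latency' section keyed by
+            # packet-group id (pg_id) rather than under each port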
+ pg_id_list = port_pg_id.get_pg_ids(port_num)
+ samples[pname]['latency'] = {}
+ for pg_id in pg_id_list:
+ latency_global = stats.get('latency', {})
+ pg_latency = latency_global.get(pg_id, {}).get('latency')
+ samples[pname]['latency'][pg_id] = pg_latency
+
+ return samples
+
class TrexTrafficGen(SampleVNFTrafficGen):
"""
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py b/yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py
new file mode 100644
index 000000000..846304880
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py
@@ -0,0 +1,178 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from trex_stl_lib.trex_stl_exceptions import STLError
+
+from yardstick.common.utils import safe_cast
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import \
+ Rfc2544ResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import \
+ SampleVNFTrafficGen
+from yardstick.network_services.vnf_generic.vnf.tg_trex import \
+ TrexDpdkVnfSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.tg_trex import \
+ TrexResourceHelper
+
+LOGGING = logging.getLogger(__name__)
+
+
+class TrexVppResourceHelper(TrexResourceHelper):
+
+ def __init__(self, setup_helper, rfc_helper_type=None):
+ super(TrexVppResourceHelper, self).__init__(setup_helper)
+
+ if rfc_helper_type is None:
+ rfc_helper_type = Rfc2544ResourceHelper
+
+ self.rfc2544_helper = rfc_helper_type(self.scenario_helper)
+
+ self.loss = None
+ self.sent = None
+ self.latency = None
+
+ def generate_samples(self, stats=None, ports=None, port_pg_id=None,
+ latency=False):
+ samples = {}
+ if stats is None:
+ stats = self.get_stats(ports)
+ for pname in (intf['name'] for intf in self.vnfd_helper.interfaces):
+ port_num = self.vnfd_helper.port_num(pname)
+ port_stats = stats.get(port_num, {})
+ samples[pname] = {
+ 'rx_throughput_fps': float(port_stats.get('rx_pps', 0.0)),
+ 'tx_throughput_fps': float(port_stats.get('tx_pps', 0.0)),
+ 'rx_throughput_bps': float(port_stats.get('rx_bps', 0.0)),
+ 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)),
+ 'in_packets': int(port_stats.get('ipackets', 0)),
+ 'out_packets': int(port_stats.get('opackets', 0)),
+ }
+
+ if latency:
+ pg_id_list = port_pg_id.get_pg_ids(port_num)
+ samples[pname]['latency'] = {}
+ for pg_id in pg_id_list:
+ latency_global = stats.get('latency', {})
+ pg_latency = latency_global.get(pg_id, {}).get('latency')
+
+ t_min = safe_cast(pg_latency.get("total_min", 0.0), float,
+ -1.0)
+ t_avg = safe_cast(pg_latency.get("average", 0.0), float,
+ -1.0)
+ t_max = safe_cast(pg_latency.get("total_max", 0.0), float,
+ -1.0)
+
+ latency = {
+ "min_latency": t_min,
+ "max_latency": t_max,
+ "avg_latency": t_avg,
+ }
+ samples[pname]['latency'][pg_id] = latency
+
+ return samples
+
+ def _run_traffic_once(self, traffic_profile):
+ self.client_started.value = 1
+ traffic_profile.execute_traffic(self)
+ return True
+
+ def run_traffic(self, traffic_profile):
+ self._queue.cancel_join_thread()
+ traffic_profile.init_queue(self._queue)
+ super(TrexVppResourceHelper, self).run_traffic(traffic_profile)
+
+ @staticmethod
+ def fmt_latency(lat_min, lat_avg, lat_max):
+ t_min = int(round(safe_cast(lat_min, float, -1.0)))
+ t_avg = int(round(safe_cast(lat_avg, float, -1.0)))
+ t_max = int(round(safe_cast(lat_max, float, -1.0)))
+
+ return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max))
+
+ def send_traffic_on_tg(self, ports, port_pg_id, duration, rate,
+ latency=False):
+ try:
+ # Choose rate and start traffic:
+ self.client.start(ports=ports, mult=rate, duration=duration)
+ # Block until done:
+ try:
+ self.client.wait_on_traffic(ports=ports, timeout=duration + 20)
+ except STLError as err:
+ self.client.stop(ports)
+ LOGGING.error("TRex stateless timeout error: %s", err)
+
+ if self.client.get_warnings():
+ for warning in self.client.get_warnings():
+ LOGGING.warning(warning)
+
+ # Read the stats after the test
+ stats = self.client.get_stats()
+
+ packets_in = []
+ packets_out = []
+ for port in ports:
+ packets_in.append(stats[port]["ipackets"])
+ packets_out.append(stats[port]["opackets"])
+
+ if latency:
+ self.latency = []
+ pg_id_list = port_pg_id.get_pg_ids(port)
+ for pg_id in pg_id_list:
+ latency_global = stats.get('latency', {})
+ pg_latency = latency_global.get(pg_id, {}).get(
+ 'latency')
+ lat = self.fmt_latency(
+ str(pg_latency.get("total_min")),
+ str(pg_latency.get("average")),
+ str(pg_latency.get("total_max")))
+ LOGGING.info(
+ "latencyStream%s(usec)=%s", pg_id, lat)
+ self.latency.append(lat)
+
+ self.sent = sum(packets_out)
+ total_rcvd = sum(packets_in)
+ self.loss = self.sent - total_rcvd
+ LOGGING.info("rate=%s, totalReceived=%s, totalSent=%s,"
+ " frameLoss=%s", rate, total_rcvd, self.sent,
+ self.loss)
+ return stats
+ except STLError as err:
+ LOGGING.error("TRex stateless runtime error: %s", err)
+ raise RuntimeError('TRex stateless runtime error')
+
+
+class TrexTrafficGenVpp(SampleVNFTrafficGen):
+ APP_NAME = 'TRex'
+ WAIT_TIME = 20
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if setup_env_helper_type is None:
+ setup_env_helper_type = TrexDpdkVnfSetupEnvHelper
+ if resource_helper_type is None:
+ resource_helper_type = TrexVppResourceHelper
+
+ super(TrexTrafficGenVpp, self).__init__(
+ name, vnfd, setup_env_helper_type, resource_helper_type)
+
+ def _check_status(self):
+ return self.resource_helper.check_status()
+
+ def _start_server(self):
+ super(TrexTrafficGenVpp, self)._start_server()
+ self.resource_helper.start()
+
+ def wait_for_instantiate(self):
+ return self._wait_for_process()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py
new file mode 100755
index 000000000..c6df9d04c
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+import socket
+import yaml
+import os
+
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.common import exceptions
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PktgenHelper(object):
+
+ RETRY_SECONDS = 0.5
+ RETRY_COUNT = 20
+ CONNECT_TIMEOUT = 5
+
+ def __init__(self, host, port=23000):
+ self.host = host
+ self.port = port
+ self.connected = False
+
+ def _connect(self):
+ self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ ret = True
+ try:
+ self._sock.settimeout(self.CONNECT_TIMEOUT)
+ self._sock.connect((self.host, self.port))
+ except (socket.gaierror, socket.error, socket.timeout):
+ self._sock.close()
+ ret = False
+
+ return ret
+
+ def connect(self):
+ if self.connected:
+ return True
+ LOG.info("Connecting to pktgen instance at %s...", self.host)
+ for idx in range(self.RETRY_COUNT):
+ self.connected = self._connect()
+ if self.connected:
+ return True
+ LOG.debug("Connection attempt %d: Unable to connect to %s, " \
+ "retrying in %d seconds",
+ idx, self.host, self.RETRY_SECONDS)
+ time.sleep(self.RETRY_SECONDS)
+
+ LOG.error("Unable to connect to pktgen instance on %s !",
+ self.host)
+        return False
+
+    def send_command(self, command):
+ if not self.connected:
+ LOG.error("Pktgen socket is not connected")
+ return False
+
+ try:
+ self._sock.sendall((command + "\n").encode())
+ time.sleep(1)
+ except (socket.timeout, socket.error):
+ LOG.error("Error sending command '%s'", command)
+ return False
+
+ return True
+
+
+class VcmtsPktgenSetupEnvHelper(sample_vnf.SetupEnvHelper):
+
+ BASE_PARAMETERS = "export LUA_PATH=/vcmts/Pktgen.lua;"\
+ + "export CMK_PROC_FS=/host/proc;"
+
+ PORTS_COUNT = 8
+
+ def generate_pcap_filename(self, port_cfg):
+ return port_cfg['traffic_type'] + "_" + port_cfg['num_subs'] \
+ + "cms_" + port_cfg['num_ofdm'] + "ofdm.pcap"
+
+ def find_port_cfg(self, ports_cfg, port_name):
+ for port_cfg in ports_cfg:
+ if port_name in port_cfg:
+ return port_cfg
+ return None
+
+ def build_pktgen_parameters(self, pod_cfg):
+ ports_cfg = pod_cfg['ports']
+ port_cfg = list()
+
+ for i in range(self.PORTS_COUNT):
+ port_cfg.append(self.find_port_cfg(ports_cfg, 'port_' + str(i)))
+
+ pktgen_parameters = self.BASE_PARAMETERS + " " \
+ + " /pktgen-config/setup.sh " + pod_cfg['pktgen_id'] \
+ + " " + pod_cfg['num_ports']
+
+ for i in range(self.PORTS_COUNT):
+ pktgen_parameters += " " + port_cfg[i]['net_pktgen']
+
+ for i in range(self.PORTS_COUNT):
+ pktgen_parameters += " " + self.generate_pcap_filename(port_cfg[i])
+
+ return pktgen_parameters
+
+ def start_pktgen(self, pod_cfg):
+ self.ssh_helper.drop_connection()
+ cmd = self.build_pktgen_parameters(pod_cfg)
+ LOG.debug("Executing: '%s'", cmd)
+ self.ssh_helper.send_command(cmd)
+ LOG.info("Pktgen executed")
+
+ def setup_vnf_environment(self):
+ pass
+
+
+class VcmtsPktgen(sample_vnf.SampleVNFTrafficGen):
+
+ TG_NAME = 'VcmtsPktgen'
+ APP_NAME = 'VcmtsPktgen'
+ RUN_WAIT = 4
+ DEFAULT_RATE = 8.0
+
+ PKTGEN_BASE_PORT = 23000
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VcmtsPktgenSetupEnvHelper
+ super(VcmtsPktgen, self).__init__(
+ name, vnfd, setup_env_helper_type, resource_helper_type)
+
+ self.pktgen_address = vnfd['mgmt-interface']['ip']
+ LOG.info("Pktgen container '%s', IP: %s", name, self.pktgen_address)
+
+ def extract_pod_cfg(self, pktgen_pods_cfg, pktgen_id):
+ for pod_cfg in pktgen_pods_cfg:
+ if pod_cfg['pktgen_id'] == pktgen_id:
+ return pod_cfg
+ return None
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ super(VcmtsPktgen, self).instantiate(scenario_cfg, context_cfg)
+ self._start_server()
+ options = scenario_cfg.get('options', {})
+ self.pktgen_rate = options.get('pktgen_rate', self.DEFAULT_RATE)
+
+ try:
+ pktgen_values_filepath = options['pktgen_values']
+ except KeyError:
+ raise KeyError("Missing pktgen_values key in scenario options" \
+ "section of the task definition file")
+
+ if not os.path.isfile(pktgen_values_filepath):
+ raise RuntimeError("The pktgen_values file path provided " \
+ "does not exists")
+
+        # The yaml_loader.py (SafeLoader) implicit resolvers mis-read PCI
+        # addresses (they match YAML's base-60 number syntax and are
+        # processed as doubles), so the BaseLoader is used here.
+ with open(pktgen_values_filepath) as stream:
+ pktgen_values = yaml.load(stream, Loader=yaml.BaseLoader)
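+        # (with SafeLoader, a PCI address such as "0000:18:00.0" would
+        # load as the number 1080.0 instead of a string)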
+
+        if pktgen_values is None:
+            raise RuntimeError("Error reading pktgen_values file provided ("
+                               + pktgen_values_filepath + ")")
+
+ self.pktgen_id = int(options[self.name]['pktgen_id'])
+ self.resource_helper.pktgen_id = self.pktgen_id
+
+ self.pktgen_helper = PktgenHelper(self.pktgen_address,
+ self.PKTGEN_BASE_PORT + self.pktgen_id)
+
+ pktgen_pods_cfg = pktgen_values['topology']['pktgen_pods']
+
+ self.pod_cfg = self.extract_pod_cfg(pktgen_pods_cfg,
+ str(self.pktgen_id))
+
+        if self.pod_cfg is None:
+            raise KeyError("Pktgen with id " + str(self.pktgen_id) +
+                           " was not found")
+
+ self.setup_helper.start_pktgen(self.pod_cfg)
+
+ def run_traffic(self, traffic_profile):
+ if not self.pktgen_helper.connect():
+ raise exceptions.PktgenActionError(command="connect")
+ LOG.info("Connected to pktgen instance at %s", self.pktgen_address)
+
+ commands = []
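+        # set the configured rate on every port, then start them all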
+ for i in range(self.setup_helper.PORTS_COUNT):
+ commands.append('pktgen.set("' + str(i) + '", "rate", ' +
+ "%0.1f" % self.pktgen_rate + ');')
+
+ commands.append('pktgen.start("all");')
+
+ for command in commands:
+ if self.pktgen_helper.send_command(command):
+ LOG.debug("Command '%s' sent to pktgen", command)
+ LOG.info("Traffic started on %s...", self.name)
+ return True
diff --git a/yardstick/network_services/vnf_generic/vnf/udp_replay.py b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
index a57f53bc7..a3b0b9fd9 100644
--- a/yardstick/network_services/vnf_generic/vnf/udp_replay.py
+++ b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ from yardstick.common.process import check_if_process_failed
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
-
+from yardstick.benchmark.contexts import base as ctx_base
LOG = logging.getLogger(__name__)
@@ -79,9 +79,11 @@ class UdpReplayApproxVnf(SampleVNF):
ports_mask_hex = hex(sum(2 ** num for num in port_nums))
# one core extra for master
cpu_mask_hex = hex(2 ** (number_of_ports + 1) - 1)
+ nfvi_context = ctx_base.Context.get_context_from_server(
+ self.scenario_helper.nodes[self.name])
hw_csum = ""
if (not self.scenario_helper.options.get('hw_csum', False) or
- self.nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES):
+ nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES):
hw_csum = '--no-hw-csum'
# tuples of (FLD_PORT, FLD_QUEUE, FLD_LCORE)
@@ -107,7 +109,7 @@ class UdpReplayApproxVnf(SampleVNF):
def collect_kpi(self):
def get_sum(offset):
- return sum(int(i) for i in split_stats[offset::5])
+ return sum(int(i) for i in split_stats[offset::6])
# we can't get KPIs if the VNF is down
check_if_process_failed(self._vnf_process)
@@ -115,8 +117,13 @@ class UdpReplayApproxVnf(SampleVNF):
stats = self.get_stats()
stats_words = stats.split()
- split_stats = stats_words[stats_words.index('0'):][:number_of_ports * 5]
+ split_stats = stats_words[stats_words.index('arp_pkts') + 1:][:number_of_ports * 6]
+
+ physical_node = ctx_base.Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
result = {
+ "physical_node": physical_node,
"packets_in": get_sum(1),
"packets_fwd": get_sum(2),
"packets_dropped": get_sum(3) + get_sum(4),
diff --git a/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py
new file mode 100755
index 000000000..0b48ef4e9
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py
@@ -0,0 +1,273 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import yaml
+
+from influxdb import InfluxDBClient
+
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+from yardstick.network_services.utils import get_nsb_option
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InfluxDBHelper(object):
+
+ INITIAL_VALUE = 'now() - 1m'
+
+ def __init__(self, vcmts_influxdb_ip, vcmts_influxdb_port):
+ self._vcmts_influxdb_ip = vcmts_influxdb_ip
+ self._vcmts_influxdb_port = vcmts_influxdb_port
+ self._last_upstream_rx = self.INITIAL_VALUE
+ self._last_values_time = dict()
+
+ def start(self):
+ self._read_client = InfluxDBClient(host=self._vcmts_influxdb_ip,
+ port=self._vcmts_influxdb_port,
+ database='collectd')
+ self._write_client = InfluxDBClient(host=constants.INFLUXDB_IP,
+ port=constants.INFLUXDB_PORT,
+ database='collectd')
+
+ def _get_last_value_time(self, measurement):
+ if measurement in self._last_values_time:
+ return self._last_values_time[measurement]
+ return self.INITIAL_VALUE
+
+ def _set_last_value_time(self, measurement, time):
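+        # store the timestamp quoted so it can be interpolated directly
+        # into the WHERE clause of the next query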
+ self._last_values_time[measurement] = "'" + time + "'"
+
+ def _query_measurement(self, measurement):
+ # There is a delay before influxdb flushes the data
+ query = "SELECT * FROM " + measurement + " WHERE time > " \
+ + self._get_last_value_time(measurement) \
+ + " ORDER BY time ASC;"
+ query_result = self._read_client.query(query)
+ if len(query_result.keys()) == 0:
+ return None
+ return query_result.get_points(measurement)
+
+    def _rw_measurement(self, measurement, columns):
+        query_result = self._query_measurement(measurement)
+        if query_result is None:
+ return
+
+ points_to_write = list()
+ for entry in query_result:
+ point = {
+ "measurement": measurement,
+ "tags": {
+ "type": entry['type'],
+ "host": entry['host']
+ },
+ "time": entry['time'],
+ "fields": {}
+ }
+
+ for column in columns:
+ if column == 'value':
+ point["fields"][column] = float(entry[column])
+ else:
+ point["fields"][column] = entry[column]
+
+ points_to_write.append(point)
+ self._set_last_value_time(measurement, entry['time'])
+
+ # Write the points to yardstick database
+ if self._write_client.write_points(points_to_write):
+ LOG.debug("%d new points written to '%s' measurement",
+ len(points_to_write), measurement)
+
+ def copy_kpi(self):
+ self._rw_measurment("cpu_value", ["instance", "type_instance", "value"])
+ self._rw_measurment("cpufreq_value", ["type_instance", "value"])
+ self._rw_measurment("downstream_rx", ["value"])
+ self._rw_measurment("downstream_tx", ["value"])
+ self._rw_measurment("downstream_value", ["value"])
+ self._rw_measurment("ds_per_cm_value", ["instance", "value"])
+ self._rw_measurment("intel_rdt_value", ["instance", "type_instance", "value"])
+ self._rw_measurment("turbostat_value", ["instance", "type_instance", "value"])
+ self._rw_measurment("upstream_rx", ["value"])
+ self._rw_measurment("upstream_tx", ["value"])
+ self._rw_measurment("upstream_value", ["value"])
+
+
+class VcmtsdSetupEnvHelper(SetupEnvHelper):
+
+ BASE_PARAMETERS = "export LD_LIBRARY_PATH=/opt/collectd/lib:;"\
+ + "export CMK_PROC_FS=/host/proc;"
+
+ def build_us_parameters(self, pod_cfg):
+ return self.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + pod_cfg['cpu_socket_id'] \
+ + " --pool=shared" \
+ + " /vcmts-config/run_upstream.sh " + pod_cfg['sg_id'] \
+ + " " + pod_cfg['ds_core_type'] \
+ + " " + pod_cfg['num_ofdm'] + "ofdm" \
+ + " " + pod_cfg['num_subs'] + "cm" \
+ + " " + pod_cfg['cm_crypto'] \
+ + " " + pod_cfg['qat'] \
+ + " " + pod_cfg['net_us'] \
+ + " " + pod_cfg['power_mgmt']
+
+ def build_ds_parameters(self, pod_cfg):
+ return self.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + pod_cfg['cpu_socket_id'] \
+ + " --pool=" + pod_cfg['ds_core_type'] \
+ + " /vcmts-config/run_downstream.sh " + pod_cfg['sg_id'] \
+ + " " + pod_cfg['ds_core_type'] \
+ + " " + pod_cfg['ds_core_pool_index'] \
+ + " " + pod_cfg['num_ofdm'] + "ofdm" \
+ + " " + pod_cfg['num_subs'] + "cm" \
+ + " " + pod_cfg['cm_crypto'] \
+ + " " + pod_cfg['qat'] \
+ + " " + pod_cfg['net_ds'] \
+ + " " + pod_cfg['power_mgmt']
+
+ def build_cmd(self, stream_dir, pod_cfg):
+ if stream_dir == 'ds':
+ return self.build_ds_parameters(pod_cfg)
+ else:
+ return self.build_us_parameters(pod_cfg)
+
+ def run_vcmtsd(self, stream_dir, pod_cfg):
+ cmd = self.build_cmd(stream_dir, pod_cfg)
+ LOG.debug("Executing %s", cmd)
+ self.ssh_helper.send_command(cmd)
+
+ def setup_vnf_environment(self):
+ pass
+
+
+class VcmtsVNF(GenericVNF):
+
+ RUN_WAIT = 4
+
+ def __init__(self, name, vnfd):
+ super(VcmtsVNF, self).__init__(name, vnfd)
+ self.name = name
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.scenario_helper = ScenarioHelper(self.name)
+ self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path)
+
+ self.setup_helper = VcmtsdSetupEnvHelper(self.vnfd_helper,
+ self.ssh_helper,
+ self.scenario_helper)
+
+ def extract_pod_cfg(self, vcmts_pods_cfg, sg_id):
+ for pod_cfg in vcmts_pods_cfg:
+ if pod_cfg['sg_id'] == sg_id:
+ return pod_cfg
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
+ self.scenario_helper.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ options = scenario_cfg.get('options', {})
+
+ try:
+ self.vcmts_influxdb_ip = options['vcmts_influxdb_ip']
+ self.vcmts_influxdb_port = options['vcmts_influxdb_port']
+ except KeyError:
+ raise KeyError("Missing destination InfluxDB details in scenario" \
+ " section of the task definition file")
+
+ try:
+ vcmtsd_values_filepath = options['vcmtsd_values']
+ except KeyError:
+ raise KeyError("Missing vcmtsd_values key in scenario options" \
+ "section of the task definition file")
+
+ if not os.path.isfile(vcmtsd_values_filepath):
+ raise RuntimeError("The vcmtsd_values file path provided " \
+ "does not exists")
+
+        # The yaml_loader.py (SafeLoader) implicit resolvers mis-read PCI
+        # addresses (they are processed as doubles), so the BaseLoader is
+        # used here.
+ with open(vcmtsd_values_filepath) as stream:
+ vcmtsd_values = yaml.load(stream, Loader=yaml.BaseLoader)
+
+        if vcmtsd_values is None:
+            raise RuntimeError("Error reading vcmtsd_values file provided ("
+                               + vcmtsd_values_filepath + ")")
+
+ vnf_options = options.get(self.name, {})
+ sg_id = str(vnf_options['sg_id'])
+ stream_dir = vnf_options['stream_dir']
+
+ try:
+ vcmts_pods_cfg = vcmtsd_values['topology']['vcmts_pods']
+ except KeyError:
+ raise KeyError("Missing vcmts_pods key in the " \
+ "vcmtsd_values file provided")
+
+ pod_cfg = self.extract_pod_cfg(vcmts_pods_cfg, sg_id)
+        if pod_cfg is None:
+            raise exceptions.IncorrectConfig(
+                error_msg="Service group " + sg_id + " not found")
+
+ self.setup_helper.run_vcmtsd(stream_dir, pod_cfg)
+
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
+ def wait_for_instantiate(self):
+ pass
+
+ def terminate(self):
+ pass
+
+ def scale(self, flavor=""):
+ pass
+
+ def collect_kpi(self):
+ self.influxdb_helper.copy_kpi()
+ return {"n/a": "n/a"}
+
+ def start_collect(self):
+ self.influxdb_helper = InfluxDBHelper(self.vcmts_influxdb_ip,
+ self.vcmts_influxdb_port)
+ self.influxdb_helper.start()
+
+ def stop_collect(self):
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
index 6c95648ce..743f2d4bb 100644
--- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,23 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import logging
-from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
-from yardstick.network_services.yang_model import YangModel
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper
LOG = logging.getLogger(__name__)
# vFW should work the same on all systems, we can provide the binary
-FW_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
+FW_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}"
FW_COLLECT_KPI = (r"""VFW TOTAL:[^p]+pkts_received"?:\s(\d+),[^p]+pkts_fw_forwarded"?:\s(\d+),"""
r"""[^p]+pkts_drop_fw"?:\s(\d+),\s""")
-class FWApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
+class FWApproxSetupEnvHelper(AclApproxSetupEnvSetupEnvHelper):
APP_NAME = "vFW"
CFG_CONFIG = "/tmp/vfw_config"
@@ -38,6 +36,8 @@ class FWApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
SW_DEFAULT_CORE = 5
HW_DEFAULT_CORE = 2
VNF_TYPE = "VFW"
+ RULE_CMD = "vfw"
+ DEFAULT_FWD_ACTIONS = ["accept", "count", "conntrack"]
class FWApproxVnf(SampleVNF):
@@ -57,11 +57,7 @@ class FWApproxVnf(SampleVNF):
setup_env_helper_type = FWApproxSetupEnvHelper
super(FWApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
- self.vfw_rules = None
- def _start_vnf(self):
- yang_model_path = find_relative_file(self.scenario_helper.options['rules'],
- self.scenario_helper.task_path)
- yang_model = YangModel(yang_model_path)
- self.vfw_rules = yang_model.get_rules()
- super(FWApproxVnf, self)._start_vnf()
+ def wait_for_instantiate(self):
+ """Wait for VNF to initialize"""
+ self.wait_for_initialize()
diff --git a/yardstick/network_services/vnf_generic/vnf/vims_vnf.py b/yardstick/network_services/vnf_generic/vnf/vims_vnf.py
new file mode 100644
index 000000000..0e339b171
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vims_vnf.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+
+LOG = logging.getLogger(__name__)
+
+
+class VimsSetupEnvHelper(sample_vnf.SetupEnvHelper):
+
+ def setup_vnf_environment(self):
+        LOG.debug('VimsSetupEnvHelper: no VNF environment setup required')
+
+
+class VimsResourceHelper(sample_vnf.ClientResourceHelper):
+ pass
+
+
+class VimsPcscfVnf(sample_vnf.SampleVNF):
+
+ APP_NAME = "VimsPcscf"
+ APP_WORD = "VimsPcscf"
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if resource_helper_type is None:
+ resource_helper_type = VimsResourceHelper
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VimsSetupEnvHelper
+ super(VimsPcscfVnf, self).__init__(name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+
+ def wait_for_instantiate(self):
+ pass
+
+ def _run(self):
+ pass
+
+ def start_collect(self):
+ # TODO
+ pass
+
+ def collect_kpi(self):
+ # TODO
+ pass
+
+
+class VimsHssVnf(sample_vnf.SampleVNF):
+
+ APP_NAME = "VimsHss"
+ APP_WORD = "VimsHss"
+ CMD = "sudo /media/generate_user.sh {} {} >> /dev/null 2>&1"
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if resource_helper_type is None:
+ resource_helper_type = VimsResourceHelper
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VimsSetupEnvHelper
+ super(VimsHssVnf, self).__init__(name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+ self.start_user = 1
+ self.end_user = 10000
+ self.WAIT_TIME = 600
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ LOG.debug("scenario_cfg=%s\n", scenario_cfg)
+ self.start_user = scenario_cfg.get("options", {}).get("start_user", self.start_user)
+ self.end_user = scenario_cfg.get("options", {}).get("end_user", self.end_user)
+ # TODO
+ # Need to check HSS services are ready before generating user accounts
+ # Now, adding time sleep that manually configured by user
+ # to wait for HSS services.
+ # Note: for heat, waiting time is too long (~ 600s)
+ self.WAIT_TIME = scenario_cfg.get("options", {}).get("wait_time", self.WAIT_TIME)
+ time.sleep(self.WAIT_TIME)
+ LOG.debug("Generate user accounts from %d to %d\n",
+ self.start_user, self.end_user)
+ cmd = self.CMD.format(self.start_user, self.end_user)
+ self.ssh_helper.execute(cmd, None, 3600, False)
+
+ def wait_for_instantiate(self):
+ pass
+
+ def start_collect(self):
+ # TODO
+ pass
+
+ def collect_kpi(self):
+ # TODO
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py b/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
new file mode 100644
index 000000000..6c5c6c833
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+
+from six.moves import StringIO
+
+from yardstick.network_services import constants
+from yardstick.ssh import AutoConnectSSH
+
+LOG = logging.getLogger(__name__)
+
+
+class VnfSshHelper(AutoConnectSSH):
+
+ def __init__(self, node, bin_path, wait=None):
+ self.node = node
+ kwargs = self.args_from_node(self.node)
+ if wait:
+ # if wait is defined here we want to override
+ kwargs['wait'] = wait
+
+ super(VnfSshHelper, self).__init__(**kwargs)
+ self.bin_path = bin_path
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return VnfSshHelper
+
+ def copy(self):
+ # this copy constructor is different from SSH classes, since it uses node
+ return self.get_class()(self.node, self.bin_path)
+
+ def upload_config_file(self, prefix, content):
+ cfg_file = os.path.join(constants.REMOTE_TMP, prefix)
+ LOG.debug('Config file name: %s', cfg_file)
+ LOG.debug(content)
+ file_obj = StringIO(content)
+ self.put_file_obj(file_obj, cfg_file)
+ return cfg_file
+
+ def join_bin_path(self, *args):
+ return os.path.join(self.bin_path, *args)
+
+ def provision_tool(self, tool_path=None, tool_file=None):
+ if tool_path is None:
+ tool_path = self.bin_path
+ return super(VnfSshHelper, self).provision_tool(tool_path, tool_file)
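+
+# Minimal usage sketch (node layout assumed from args_from_node(); values
+# are hypothetical): the helper connects lazily on first use and can be
+# duplicated via copy() because it keeps the original node dict around:
+#
+#   node = {'ip': '10.0.0.5', 'user': 'root', 'password': 'r00t'}
+#   ssh = VnfSshHelper(node, bin_path='/opt/nsb_bin')
+#   cfg_path = ssh.upload_config_file('vpe_config', '... rendered cfg ...')
+#   tool = ssh.provision_tool(tool_file='vPE_vnf')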
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index c02c0eb27..322ecd016 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,40 +17,30 @@ from __future__ import absolute_import
from __future__ import print_function
-import os
import logging
import re
import posixpath
-from six.moves import configparser, zip
-
+from yardstick.common import utils
from yardstick.common.process import check_if_process_failed
from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.pipeline import PipelineRules
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
+from yardstick.benchmark.contexts import base as ctx_base
LOG = logging.getLogger(__name__)
-VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
+VPE_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}"
VPE_COLLECT_KPI = """\
-Pkts in:\s(\d+)\r\n\
-\tPkts dropped by AH:\s(\d+)\r\n\
-\tPkts dropped by other:\s(\d+)\
+Pkts in:\\s(\\d+)\r\n\
+\tPkts dropped by AH:\\s(\\d+)\r\n\
+\tPkts dropped by other:\\s(\\d+)\
"""
class ConfigCreate(object):
- @staticmethod
- def vpe_tmq(config, index):
- tm_q = 'TM{0}'.format(index)
- config.add_section(tm_q)
- config.set(tm_q, 'burst_read', '24')
- config.set(tm_q, 'burst_write', '32')
- config.set(tm_q, 'cfg', '/tmp/full_tm_profile_10G.cfg')
- return config
-
def __init__(self, vnfd_helper, socket):
super(ConfigCreate, self).__init__()
self.sw_q = -1
@@ -61,122 +51,8 @@ class ConfigCreate(object):
self.downlink_ports = self.vnfd_helper.port_pairs.downlink_ports
self.pipeline_per_port = 9
self.socket = socket
+ self._dpdk_port_to_link_id_map = None
- def vpe_initialize(self, config):
- config.add_section('EAL')
- config.set('EAL', 'log_level', '0')
-
- config.add_section('PIPELINE0')
- config.set('PIPELINE0', 'type', 'MASTER')
- config.set('PIPELINE0', 'core', 's%sC0' % self.socket)
-
- config.add_section('MEMPOOL0')
- config.set('MEMPOOL0', 'pool_size', '256K')
-
- config.add_section('MEMPOOL1')
- config.set('MEMPOOL1', 'pool_size', '2M')
- return config
-
- def vpe_rxq(self, config):
- for port in self.downlink_ports:
- new_section = 'RXQ{0}.0'.format(self.vnfd_helper.port_num(port))
- config.add_section(new_section)
- config.set(new_section, 'mempool', 'MEMPOOL1')
-
- return config
-
- def get_sink_swq(self, parser, pipeline, k, index):
- sink = ""
- pktq = parser.get(pipeline, k)
- if "SINK" in pktq:
- self.sink_q += 1
- sink = " SINK{0}".format(self.sink_q)
- if "TM" in pktq:
- sink = " TM{0}".format(index)
- pktq = "SWQ{0}{1}".format(self.sw_q, sink)
- return pktq
-
- def vpe_upstream(self, vnf_cfg, index=0):
- parser = configparser.ConfigParser()
- parser.read(os.path.join(vnf_cfg, 'vpe_upstream'))
-
- for pipeline in parser.sections():
- for k, v in parser.items(pipeline):
- if k == "pktq_in":
- if "RXQ" in v:
- port = self.vnfd_helper.port_num(self.uplink_ports[index])
- value = "RXQ{0}.0".format(port)
- else:
- value = self.get_sink_swq(parser, pipeline, k, index)
-
- parser.set(pipeline, k, value)
-
- elif k == "pktq_out":
- if "TXQ" in v:
- port = self.vnfd_helper.port_num(self.downlink_ports[index])
- value = "TXQ{0}.0".format(port)
- else:
- self.sw_q += 1
- value = self.get_sink_swq(parser, pipeline, k, index)
-
- parser.set(pipeline, k, value)
-
- new_pipeline = 'PIPELINE{0}'.format(self.n_pipeline)
- if new_pipeline != pipeline:
- parser._sections[new_pipeline] = parser._sections[pipeline]
- parser._sections.pop(pipeline)
- self.n_pipeline += 1
- return parser
-
- def vpe_downstream(self, vnf_cfg, index):
- parser = configparser.ConfigParser()
- parser.read(os.path.join(vnf_cfg, 'vpe_downstream'))
- for pipeline in parser.sections():
- for k, v in parser.items(pipeline):
-
- if k == "pktq_in":
- port = self.vnfd_helper.port_num(self.downlink_ports[index])
- if "RXQ" not in v:
- value = self.get_sink_swq(parser, pipeline, k, index)
- elif "TM" in v:
- value = "RXQ{0}.0 TM{1}".format(port, index)
- else:
- value = "RXQ{0}.0".format(port)
-
- parser.set(pipeline, k, value)
-
- if k == "pktq_out":
- port = self.vnfd_helper.port_num(self.uplink_ports[index])
- if "TXQ" not in v:
- self.sw_q += 1
- value = self.get_sink_swq(parser, pipeline, k, index)
- elif "TM" in v:
- value = "TXQ{0}.0 TM{1}".format(port, index)
- else:
- value = "TXQ{0}.0".format(port)
-
- parser.set(pipeline, k, value)
-
- new_pipeline = 'PIPELINE{0}'.format(self.n_pipeline)
- if new_pipeline != pipeline:
- parser._sections[new_pipeline] = parser._sections[pipeline]
- parser._sections.pop(pipeline)
- self.n_pipeline += 1
- return parser
-
- def create_vpe_config(self, vnf_cfg):
- config = configparser.ConfigParser()
- vpe_cfg = os.path.join("/tmp/vpe_config")
- with open(vpe_cfg, 'w') as cfg_file:
- config = self.vpe_initialize(config)
- config = self.vpe_rxq(config)
- config.write(cfg_file)
- for index in range(0, len(self.uplink_ports)):
- config = self.vpe_upstream(vnf_cfg, index)
- config.write(cfg_file)
- config = self.vpe_downstream(vnf_cfg, index)
- config = self.vpe_tmq(config, index)
- config.write(cfg_file)
def generate_vpe_script(self, interfaces):
rules = PipelineRules(pipeline_id=1)
@@ -209,16 +85,10 @@ class ConfigCreate(object):
return rules.get_string()
- def generate_tm_cfg(self, vnf_cfg, index=0):
- vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg")
- if os.path.exists(vnf_cfg):
- return open(vnf_cfg).read()
-
class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
APP_NAME = 'vPE_vnf'
- CFG_CONFIG = "/tmp/vpe_config"
CFG_SCRIPT = "/tmp/vpe_script"
TM_CONFIG = "/tmp/full_tm_profile_10G.cfg"
CORES = ['0', '1', '2', '3', '4', '5']
@@ -231,33 +101,52 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
self.all_ports = self._port_pairs.all_ports
def build_config(self):
+ vnf_cfg = self.scenario_helper.vnf_cfg
+ task_path = self.scenario_helper.task_path
+ action_bulk_file = vnf_cfg.get('action_bulk_file', '/tmp/action_bulk_512.txt')
+ full_tm_profile_file = vnf_cfg.get('full_tm_profile_file', '/tmp/full_tm_profile_10G.cfg')
+ config_file = vnf_cfg.get('file', '/tmp/vpe_config')
+ script_file = vnf_cfg.get('script_file', None)
vpe_vars = {
"bin_path": self.ssh_helper.bin_path,
"socket": self.socket,
}
-
self._build_vnf_ports()
vpe_conf = ConfigCreate(self.vnfd_helper, self.socket)
- vpe_conf.create_vpe_config(self.scenario_helper.vnf_cfg)
- config_basename = posixpath.basename(self.CFG_CONFIG)
- script_basename = posixpath.basename(self.CFG_SCRIPT)
- tm_basename = posixpath.basename(self.TM_CONFIG)
- with open(self.CFG_CONFIG) as handle:
+ if script_file is None:
+ # autogenerate vpe_script if not given
+ vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
+ script_file = self.CFG_SCRIPT
+ else:
+ with utils.open_relative_file(script_file, task_path) as handle:
+ vpe_script = handle.read()
+
+ config_basename = posixpath.basename(config_file)
+ script_basename = posixpath.basename(script_file)
+
+ with utils.open_relative_file(action_bulk_file, task_path) as handle:
+ action_bulk = handle.read()
+
+ with utils.open_relative_file(full_tm_profile_file, task_path) as handle:
+ full_tm_profile = handle.read()
+
+ with utils.open_relative_file(config_file, task_path) as handle:
vpe_config = handle.read()
+ # upload the 4 config files to the target server
self.ssh_helper.upload_config_file(config_basename, vpe_config.format(**vpe_vars))
-
- vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
self.ssh_helper.upload_config_file(script_basename, vpe_script.format(**vpe_vars))
-
- tm_config = vpe_conf.generate_tm_cfg(self.scenario_helper.vnf_cfg)
- self.ssh_helper.upload_config_file(tm_basename, tm_config)
+ self.ssh_helper.upload_config_file(posixpath.basename(action_bulk_file),
+ action_bulk.format(**vpe_vars))
+ self.ssh_helper.upload_config_file(posixpath.basename(full_tm_profile_file),
+ full_tm_profile.format(**vpe_vars))
LOG.info("Provision and start the %s", self.APP_NAME)
- LOG.info(self.CFG_CONFIG)
+ LOG.info(config_file)
LOG.info(self.CFG_SCRIPT)
- self._build_pipeline_kwargs()
+ self._build_pipeline_kwargs(cfg_file='/tmp/' + config_basename,
+ script='/tmp/' + script_basename)
return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
@@ -281,7 +170,11 @@ class VpeApproxVnf(SampleVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
check_if_process_failed(self._vnf_process)
+ physical_node = ctx_base.Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
result = {
+ "physical_node": physical_node,
'pkt_in_up_stream': 0,
'pkt_drop_up_stream': 0,
'pkt_in_down_stream': 0,
diff --git a/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py b/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py
new file mode 100644
index 000000000..fe8e7b2ba
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py
@@ -0,0 +1,751 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import binascii
+import ipaddress
+import json
+import logging
+import os
+import re
+import tempfile
+import time
+from collections import OrderedDict
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.helpers.cpu import CpuSysCores
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import \
+ DpdkVnfSetupEnvHelper
+
+LOG = logging.getLogger(__name__)
+
+
+class VppConfigGenerator(object):
+ VPP_LOG_FILE = '/tmp/vpe.log'
+
+ def __init__(self):
+ self._nodeconfig = {}
+ self._vpp_config = ''
+
+ def add_config_item(self, config, value, path):
+ if len(path) == 1:
+ config[path[0]] = value
+ return
+ if path[0] not in config:
+ config[path[0]] = {}
+ elif isinstance(config[path[0]], str):
+ config[path[0]] = {} if config[path[0]] == '' \
+ else {config[path[0]]: ''}
+ self.add_config_item(config[path[0]], value, path[1:])
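+
+    # Worked example (illustrative): add_config_item() builds a nested dict,
+    # e.g. path ['dpdk', 'dev default', 'num-rx-queues'] with value 2 gives
+    # {'dpdk': {'dev default': {'num-rx-queues': 2}}}, which dump_config()
+    # renders (schematically) as: dpdk { dev default { num-rx-queues 2 } }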
+
+ def add_unix_log(self, value=None):
+ path = ['unix', 'log']
+ if value is None:
+ value = self.VPP_LOG_FILE
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_unix_cli_listen(self, value='/run/vpp/cli.sock'):
+ path = ['unix', 'cli-listen']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_unix_nodaemon(self):
+ path = ['unix', 'nodaemon']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_unix_coredump(self):
+ path = ['unix', 'full-coredump']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_dev(self, *devices):
+ for device in devices:
+ if VppConfigGenerator.pci_dev_check(device):
+ path = ['dpdk', 'dev {0}'.format(device)]
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_cryptodev(self, count, cryptodev):
+ for i in range(count):
+            cryptodev_config = 'dev {0}'.format(
+                re.sub(r'\d\.\d$', '1.' + str(i), cryptodev))
+ path = ['dpdk', cryptodev_config]
+ self.add_config_item(self._nodeconfig, '', path)
+ self.add_dpdk_uio_driver('igb_uio')
+
+ def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count):
+ for _ in range(count):
+ cryptodev_config = 'vdev cryptodev_{0}_pmd,socket_id={1}'. \
+ format(sw_pmd_type, str(socket_id))
+ path = ['dpdk', cryptodev_config]
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_dev_default_rxq(self, value):
+ path = ['dpdk', 'dev default', 'num-rx-queues']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_dev_default_rxd(self, value):
+ path = ['dpdk', 'dev default', 'num-rx-desc']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_dev_default_txd(self, value):
+ path = ['dpdk', 'dev default', 'num-tx-desc']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_log_level(self, value):
+ path = ['dpdk', 'log-level']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_socketmem(self, value):
+ path = ['dpdk', 'socket-mem']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_num_mbufs(self, value):
+ path = ['dpdk', 'num-mbufs']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_uio_driver(self, value=None):
+ path = ['dpdk', 'uio-driver']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_cpu_main_core(self, value):
+ path = ['cpu', 'main-core']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_cpu_corelist_workers(self, value):
+ path = ['cpu', 'corelist-workers']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_heapsize(self, value):
+ path = ['heapsize']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ip6_hash_buckets(self, value):
+ path = ['ip6', 'hash-buckets']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ip6_heap_size(self, value):
+ path = ['ip6', 'heap-size']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ip_heap_size(self, value):
+ path = ['ip', 'heap-size']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_statseg_size(self, value):
+ path = ['statseg', 'size']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_plugin(self, state, *plugins):
+ for plugin in plugins:
+ path = ['plugins', 'plugin {0}'.format(plugin), state]
+ self.add_config_item(self._nodeconfig, ' ', path)
+
+ def add_dpdk_no_multi_seg(self):
+ path = ['dpdk', 'no-multi-seg']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_no_tx_checksum_offload(self):
+ path = ['dpdk', 'no-tx-checksum-offload']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def dump_config(self, obj=None, level=-1):
+ if obj is None:
+ obj = self._nodeconfig
+ obj = OrderedDict(sorted(obj.items()))
+
+ indent = ' '
+ if level >= 0:
+ self._vpp_config += '{}{{\n'.format(level * indent)
+ if isinstance(obj, dict):
+ for key, val in obj.items():
+ if hasattr(val, '__iter__') and not isinstance(val, str):
+ self._vpp_config += '{}{}\n'.format((level + 1) * indent,
+ key)
+ self.dump_config(val, level + 1)
+ else:
+ self._vpp_config += '{}{} {}\n'.format(
+ (level + 1) * indent,
+ key, val)
+ if level >= 0:
+ self._vpp_config += '{}}}\n'.format(level * indent)
+
+ return self._vpp_config
+
+ @staticmethod
+ def pci_dev_check(pci_dev):
+ pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:"
+ "[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$")
+ if not pattern.match(pci_dev):
+ raise ValueError('PCI address {addr} is not in valid format '
+ 'xxxx:xx:xx.x'.format(addr=pci_dev))
+ return True
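+
+# Minimal usage sketch (assumed core/PCI values) for the generator above;
+# dump_config() returns the startup.conf text that
+# VppSetupEnvHelper.apply_config() writes to CFG_CONFIG:
+#
+#   cfg = VppConfigGenerator()
+#   cfg.add_unix_log()
+#   cfg.add_unix_cli_listen()
+#   cfg.add_unix_nodaemon()
+#   cfg.add_cpu_main_core('1')
+#   cfg.add_cpu_corelist_workers('2-3')
+#   cfg.add_dpdk_dev('0000:00:04.0', '0000:00:05.0')
+#   startup_conf = cfg.dump_config()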
+
+
+class VppSetupEnvHelper(DpdkVnfSetupEnvHelper):
+ APP_NAME = "vpp"
+ CFG_CONFIG = "/etc/vpp/startup.conf"
+ CFG_SCRIPT = ""
+ PIPELINE_COMMAND = ""
+ QAT_DRIVER = "qat_dh895xcc"
+ VNF_TYPE = "IPSEC"
+ VAT_BIN_NAME = 'vpp_api_test'
+
+ def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
+ super(VppSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper,
+ scenario_helper)
+ self.sys_cores = CpuSysCores(self.ssh_helper)
+
+ def kill_vnf(self):
+ ret_code, _, _ = \
+ self.ssh_helper.execute(
+ 'service {name} stop'.format(name=self.APP_NAME))
+ if int(ret_code):
+ raise RuntimeError(
+ 'Failed to stop service {name}'.format(name=self.APP_NAME))
+
+ def tear_down(self):
+ pass
+
+ def start_vpp_service(self):
+ ret_code, _, _ = \
+ self.ssh_helper.execute(
+ 'service {name} restart'.format(name=self.APP_NAME))
+ if int(ret_code):
+ raise RuntimeError(
+ 'Failed to start service {name}'.format(name=self.APP_NAME))
+
+ def _update_vnfd_helper(self, additional_data, iface_key=None):
+ for k, v in additional_data.items():
+ if iface_key is None:
+ if isinstance(v, dict) and k in self.vnfd_helper:
+ self.vnfd_helper[k].update(v)
+ else:
+ self.vnfd_helper[k] = v
+ else:
+                iface = self.vnfd_helper.find_virtual_interface(
+                    ifname=iface_key)
+                if isinstance(v, dict) and k in iface:
+                    iface[k].update(v)
+                else:
+                    iface[k] = v
+
+ def get_value_by_interface_key(self, interface, key):
+ try:
+ return self.vnfd_helper.find_virtual_interface(
+ ifname=interface).get(key)
+ except (KeyError, ValueError):
+ return None
+
+ def crypto_device_init(self, pci_addr, numvfs):
+ # QAT device must be re-bound to kernel driver before initialization.
+ self.dpdk_bind_helper.load_dpdk_driver(self.QAT_DRIVER)
+
+ # Stop VPP to prevent deadlock.
+ self.kill_vnf()
+
+ current_driver = self.get_pci_dev_driver(pci_addr.replace(':', r'\:'))
+ if current_driver is not None:
+ self.pci_driver_unbind(pci_addr)
+
+ # Bind to kernel driver.
+ self.dpdk_bind_helper.bind(pci_addr, self.QAT_DRIVER.replace('qat_', ''))
+
+ # Initialize QAT VFs.
+ if numvfs > 0:
+ self.set_sriov_numvfs(pci_addr, numvfs)
+
+ def get_sriov_numvfs(self, pf_pci_addr):
+ command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'. \
+ format(pci=pf_pci_addr.replace(':', r'\:'))
+ _, stdout, _ = self.ssh_helper.execute(command)
+ try:
+ return int(stdout)
+ except ValueError:
+ LOG.debug('Reading sriov_numvfs info failed')
+ return 0
+
+ def set_sriov_numvfs(self, pf_pci_addr, numvfs=0):
+ command = "sh -c 'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'". \
+ format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
+ self.ssh_helper.execute(command)
+
+ def pci_driver_unbind(self, pci_addr):
+ command = "sh -c 'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'". \
+ format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
+ self.ssh_helper.execute(command)
+
+ def get_pci_dev_driver(self, pci_addr):
+ cmd = 'lspci -vmmks {0}'.format(pci_addr)
+ ret_code, stdout, _ = self.ssh_helper.execute(cmd)
+ if int(ret_code):
+ raise RuntimeError("'{0}' failed".format(cmd))
+ for line in stdout.splitlines():
+ if not line:
+ continue
+            try:
+                name, value = line.split("\t", 1)
+            except ValueError:
+                # a bare "Driver:" field means the device has no driver
+                if line == "Driver:":
+                    return None
+                continue
+            if name == 'Driver:':
+                return value
+ return None
+
+ def vpp_create_ipsec_tunnels(self, if1_ip_addr, if2_ip_addr, if_name,
+ n_tunnels, n_connections, crypto_alg,
+ crypto_key, integ_alg, integ_key, addrs_ip,
+ spi_1=10000, spi_2=20000):
+ mask_length = 32
+ if n_connections <= n_tunnels:
+ count = 1
+ else:
+            count = n_connections // n_tunnels
+ addr_ip_i = int(ipaddress.ip_address(str(addrs_ip)))
+ dst_start_ip = addr_ip_i
+
+ tmp_fd, tmp_path = tempfile.mkstemp()
+
+ vpp_ifname = self.get_value_by_interface_key(if_name, 'vpp_name')
+ ckey = binascii.hexlify(crypto_key.encode())
+ ikey = binascii.hexlify(integ_key.encode())
+
+ integ = ''
+ if crypto_alg.alg_name != 'aes-gcm-128':
+ integ = 'integ_alg {integ_alg} ' \
+ 'local_integ_key {local_integ_key} ' \
+ 'remote_integ_key {remote_integ_key} ' \
+ .format(integ_alg=integ_alg.alg_name,
+ local_integ_key=ikey,
+ remote_integ_key=ikey)
+ create_tunnels_cmds = 'ipsec_tunnel_if_add_del ' \
+ 'local_spi {local_spi} ' \
+ 'remote_spi {remote_spi} ' \
+ 'crypto_alg {crypto_alg} ' \
+ 'local_crypto_key {local_crypto_key} ' \
+ 'remote_crypto_key {remote_crypto_key} ' \
+ '{integ} ' \
+ 'local_ip {local_ip} ' \
+ 'remote_ip {remote_ip}\n'
+ start_tunnels_cmds = 'ip_add_del_route {raddr}/{mask} via {addr} ipsec{i}\n' \
+ 'exec set interface unnumbered ipsec{i} use {uifc}\n' \
+ 'sw_interface_set_flags ipsec{i} admin-up\n'
+
+ with os.fdopen(tmp_fd, 'w') as tmp_file:
+ for i in range(0, n_tunnels):
+ create_tunnel = create_tunnels_cmds.format(local_spi=spi_1 + i,
+ remote_spi=spi_2 + i,
+ crypto_alg=crypto_alg.alg_name,
+ local_crypto_key=ckey,
+ remote_crypto_key=ckey,
+ integ=integ,
+ local_ip=if1_ip_addr,
+ remote_ip=if2_ip_addr)
+ tmp_file.write(create_tunnel)
+ self.execute_script(tmp_path, json_out=False, copy_on_execute=True)
+ os.remove(tmp_path)
+
+ tmp_fd, tmp_path = tempfile.mkstemp()
+
+ with os.fdopen(tmp_fd, 'w') as tmp_file:
+ for i in range(0, n_tunnels):
+ if count > 1:
+ dst_start_ip = addr_ip_i + i * count
+ dst_end_ip = ipaddress.ip_address(dst_start_ip + count - 1)
+ ips = [ipaddress.ip_address(ip) for ip in
+ [str(ipaddress.ip_address(dst_start_ip)),
+ str(dst_end_ip)]]
+ lowest_ip, highest_ip = min(ips), max(ips)
+ mask_length = self.get_prefix_length(int(lowest_ip),
+ int(highest_ip),
+ lowest_ip.max_prefixlen)
+ # TODO check duplicate route for some IPs
+ elif count == 1:
+ dst_start_ip = addr_ip_i + i
+ start_tunnel = start_tunnels_cmds.format(
+ raddr=str(ipaddress.ip_address(dst_start_ip)),
+ mask=mask_length,
+ addr=if2_ip_addr,
+ i=i, count=count,
+ uifc=vpp_ifname)
+ tmp_file.write(start_tunnel)
+ # TODO add route for remain IPs
+
+ self.execute_script(tmp_path, json_out=False, copy_on_execute=True)
+ os.remove(tmp_path)
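+
+    # Illustrative rendering (hypothetical addresses, keys and algorithm
+    # names) of one create_tunnels_cmds line written to the VAT script:
+    #   ipsec_tunnel_if_add_del local_spi 10000 remote_spi 20000
+    #   crypto_alg aes-cbc-128 local_crypto_key <ckey> remote_crypto_key
+    #   <ckey> integ_alg sha1-96 ... local_ip 1.1.1.1 remote_ip 2.2.2.2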
+
+ def apply_config(self, vpp_cfg, restart_vpp=True):
+ vpp_config = vpp_cfg.dump_config()
+ ret, _, _ = \
+ self.ssh_helper.execute('echo "{config}" | sudo tee {filename}'.
+ format(config=vpp_config,
+ filename=self.CFG_CONFIG))
+ if ret != 0:
+ raise RuntimeError('Writing config file failed')
+ if restart_vpp:
+ self.start_vpp_service()
+
+ def vpp_route_add(self, network, prefix_len, gateway=None, interface=None,
+ use_sw_index=True, resolve_attempts=10,
+ count=1, vrf=None, lookup_vrf=None, multipath=False,
+ weight=None, local=False):
+ if interface:
+ if use_sw_index:
+ int_cmd = ('sw_if_index {}'.format(
+ self.get_value_by_interface_key(interface,
+ 'vpp_sw_index')))
+ else:
+ int_cmd = interface
+ else:
+ int_cmd = ''
+
+ rap = 'resolve-attempts {}'.format(resolve_attempts) \
+ if resolve_attempts else ''
+
+ via = 'via {}'.format(gateway) if gateway else ''
+
+ cnt = 'count {}'.format(count) \
+ if count else ''
+
+ vrf = 'vrf {}'.format(vrf) if vrf else ''
+
+ lookup_vrf = 'lookup-in-vrf {}'.format(
+ lookup_vrf) if lookup_vrf else ''
+
+ multipath = 'multipath' if multipath else ''
+
+ weight = 'weight {}'.format(weight) if weight else ''
+
+ local = 'local' if local else ''
+
+ with VatTerminal(self.ssh_helper, json_param=False) as vat:
+ vat.vat_terminal_exec_cmd_from_template('add_route.vat',
+ network=network,
+ prefix_length=prefix_len,
+ via=via,
+ vrf=vrf,
+ interface=int_cmd,
+ resolve_attempts=rap,
+ count=cnt,
+ lookup_vrf=lookup_vrf,
+ multipath=multipath,
+ weight=weight,
+ local=local)
+
+ def add_arp_on_dut(self, iface_key, ip_address, mac_address):
+ with VatTerminal(self.ssh_helper) as vat:
+ return vat.vat_terminal_exec_cmd_from_template(
+ 'add_ip_neighbor.vat',
+ sw_if_index=self.get_value_by_interface_key(iface_key,
+ 'vpp_sw_index'),
+ ip_address=ip_address, mac_address=mac_address)
+
+ def set_ip(self, interface, address, prefix_length):
+ with VatTerminal(self.ssh_helper) as vat:
+ return vat.vat_terminal_exec_cmd_from_template(
+ 'add_ip_address.vat',
+ sw_if_index=self.get_value_by_interface_key(interface,
+ 'vpp_sw_index'),
+ address=address, prefix_length=prefix_length)
+
+ def set_interface_state(self, interface, state):
+ sw_if_index = self.get_value_by_interface_key(interface,
+ 'vpp_sw_index')
+
+ if state == 'up':
+ state = 'admin-up link-up'
+ elif state == 'down':
+ state = 'admin-down link-down'
+ else:
+ raise ValueError('Unexpected interface state: {}'.format(state))
+ with VatTerminal(self.ssh_helper) as vat:
+ return vat.vat_terminal_exec_cmd_from_template(
+ 'set_if_state.vat', sw_if_index=sw_if_index, state=state)
+
+ def vpp_set_interface_mtu(self, interface, mtu=9200):
+ sw_if_index = self.get_value_by_interface_key(interface,
+ 'vpp_sw_index')
+ if sw_if_index:
+ with VatTerminal(self.ssh_helper, json_param=False) as vat:
+ vat.vat_terminal_exec_cmd_from_template(
+ "hw_interface_set_mtu.vat", sw_if_index=sw_if_index,
+ mtu=mtu)
+
+ def vpp_interfaces_ready_wait(self, timeout=30):
+ if_ready = False
+ not_ready = []
+ start = time.time()
+ while not if_ready:
+ out = self.vpp_get_interface_data()
+ if time.time() - start > timeout:
+ for interface in out:
+ if interface.get('admin_up_down') == 1:
+ if interface.get('link_up_down') != 1:
+ LOG.debug('%s link-down',
+ interface.get('interface_name'))
+                raise RuntimeError(
+                    'Timeout: interfaces not up: {0}'.format(not_ready))
+ not_ready = []
+ for interface in out:
+ if interface.get('admin_up_down') == 1:
+ if interface.get('link_up_down') != 1:
+ not_ready.append(interface.get('interface_name'))
+ if not not_ready:
+ if_ready = True
+ else:
+ LOG.debug('Interfaces still in link-down state: %s, '
+ 'waiting...', not_ready)
+ time.sleep(1)
+
+ def vpp_get_interface_data(self, interface=None):
+ with VatTerminal(self.ssh_helper) as vat:
+ response = vat.vat_terminal_exec_cmd_from_template(
+ "interface_dump.vat")
+ data = response[0]
+ if interface is not None:
+ if isinstance(interface, str):
+ param = "interface_name"
+ elif isinstance(interface, int):
+ param = "sw_if_index"
+ else:
+ raise TypeError
+ for data_if in data:
+ if data_if[param] == interface:
+ return data_if
+ return dict()
+ return data
+
+ def update_vpp_interface_data(self):
+ data = {}
+ interface_dump_json = self.execute_script_json_out(
+ "dump_interfaces.vat")
+ interface_list = json.loads(interface_dump_json)
+ for interface in self.vnfd_helper.interfaces:
+ if_mac = interface['virtual-interface']['local_mac']
+ interface_dict = VppSetupEnvHelper.get_vpp_interface_by_mac(
+ interface_list, if_mac)
+ if not interface_dict:
+ LOG.debug('Interface %s not found by MAC %s', interface,
+ if_mac)
+ continue
+ data[interface['virtual-interface']['ifname']] = {
+ 'vpp_name': interface_dict["interface_name"],
+ 'vpp_sw_index': interface_dict["sw_if_index"]
+ }
+ for iface_key, updated_vnfd in data.items():
+ self._update_vnfd_helper(updated_vnfd, iface_key)
+
+ def iface_update_numa(self):
+ iface_numa = {}
+ for interface in self.vnfd_helper.interfaces:
+ cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(
+ interface["virtual-interface"]["vpci"])
+ ret, out, _ = self.ssh_helper.execute(cmd)
+ if ret == 0:
+ try:
+ numa_node = int(out)
+                    if numa_node < 0:
+                        # sysfs reports -1 when the NUMA node is unknown; on
+                        # a single-socket host default to node 0
+                        if self.vnfd_helper["cpuinfo"][-1][3] + 1 == 1:
+ iface_numa[
+ interface['virtual-interface']['ifname']] = {
+ 'numa_node': 0
+ }
+ else:
+ raise ValueError
+ else:
+ iface_numa[
+ interface['virtual-interface']['ifname']] = {
+ 'numa_node': numa_node
+ }
+ except ValueError:
+ LOG.debug(
+ 'Reading numa location failed for: %s',
+ interface["virtual-interface"]["vpci"])
+ for iface_key, updated_vnfd in iface_numa.items():
+ self._update_vnfd_helper(updated_vnfd, iface_key)
+
+ def execute_script(self, vat_name, json_out=True, copy_on_execute=False):
+ if copy_on_execute:
+ self.ssh_helper.put_file(vat_name, vat_name)
+ remote_file_path = vat_name
+ else:
+ vat_path = self.ssh_helper.join_bin_path("vpp", "templates")
+ remote_file_path = '{0}/{1}'.format(vat_path, vat_name)
+
+ cmd = "{vat_bin} {json} in {vat_path} script".format(
+ vat_bin=self.VAT_BIN_NAME,
+ json="json" if json_out is True else "",
+ vat_path=remote_file_path)
+
+ try:
+ return self.ssh_helper.execute(cmd=cmd)
+ except Exception:
+ raise RuntimeError("VAT script execution failed: {0}".format(cmd))
+
+ def execute_script_json_out(self, vat_name):
+ vat_path = self.ssh_helper.join_bin_path("vpp", "templates")
+ remote_file_path = '{0}/{1}'.format(vat_path, vat_name)
+
+ _, stdout, _ = self.execute_script(vat_name, json_out=True)
+ return self.cleanup_vat_json_output(stdout, vat_file=remote_file_path)
+
+ @staticmethod
+ def cleanup_vat_json_output(json_output, vat_file=None):
+ retval = json_output
+ clutter = ['vat#', 'dump_interface_table error: Misc',
+                   'dump_interface_table:6019: JSON output supported only '
+ 'for VPE API calls and dump_stats_table']
+ if vat_file:
+ clutter.append("{0}(2):".format(vat_file))
+ for garbage in clutter:
+ retval = retval.replace(garbage, '')
+ return retval.strip()
+
+ @staticmethod
+ def _convert_mac_to_number_list(mac_address):
+ list_mac = []
+ for num in mac_address.split(":"):
+ list_mac.append(int(num, 16))
+ return list_mac
+
+ @staticmethod
+ def get_vpp_interface_by_mac(interfaces_list, mac_address):
+ interface_dict = {}
+ list_mac_address = VppSetupEnvHelper._convert_mac_to_number_list(
+ mac_address)
+ LOG.debug("MAC address %s converted to list %s.", mac_address,
+ list_mac_address)
+ for interface in interfaces_list:
+ # TODO: create vat json integrity checking and move there
+ if "l2_address" not in interface:
+                raise KeyError(
+                    "key l2_address not found in interface dict. "
+                    "Probably input list is not parsed from correct VAT "
+                    "JSON output.")
+ if "l2_address_length" not in interface:
+ raise KeyError(
+ "key l2_address_length not found in interface "
+ "dict. Probably input list is not parsed from correct "
+                    "VAT JSON output.")
+ mac_from_json = interface["l2_address"][:6]
+ if mac_from_json == list_mac_address:
+ if interface["l2_address_length"] != 6:
+ raise ValueError("l2_address_length value is not 6.")
+ interface_dict = interface
+ break
+ return interface_dict
+
+ @staticmethod
+ def get_prefix_length(number1, number2, bits):
+ for i in range(bits):
+ if number1 >> i == number2 >> i:
+ return bits - i
+ return 0
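+
+    # Worked example (illustrative): for 10.0.0.0 and 10.0.0.255 over 32
+    # bits the values agree once the low 8 bits are shifted away, so
+    # get_prefix_length() returns 32 - 8 = 24, i.e. a /24 covers both.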
+
+
+class VatTerminal(object):
+
+ __VAT_PROMPT = ("vat# ",)
+ __LINUX_PROMPT = (":~# ", ":~$ ", "~]$ ", "~]# ")
+
+ def __init__(self, ssh_helper, json_param=True):
+ json_text = ' json' if json_param else ''
+ self.json = json_param
+ self.ssh_helper = ssh_helper
+ EXEC_RETRY = 3
+
+ try:
+ self._tty = self.ssh_helper.interactive_terminal_open()
+ except Exception:
+ raise RuntimeError("Cannot open interactive terminal")
+
+ for _ in range(EXEC_RETRY):
+ try:
+ self.ssh_helper.interactive_terminal_exec_command(
+ self._tty,
+ 'sudo -S {0}{1}'.format(VppSetupEnvHelper.VAT_BIN_NAME,
+ json_text),
+ self.__VAT_PROMPT)
+ except exceptions.SSHTimeout:
+ continue
+ else:
+ break
+
+ self._exec_failure = False
+ self.vat_stdout = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.vat_terminal_close()
+
+ def vat_terminal_exec_cmd(self, cmd):
+ try:
+ out = self.ssh_helper.interactive_terminal_exec_command(self._tty,
+ cmd,
+ self.__VAT_PROMPT)
+ self.vat_stdout = out
+ except exceptions.SSHTimeout:
+ self._exec_failure = True
+ raise RuntimeError(
+ "VPP is not running on node. VAT command {0} execution failed".
+ format(cmd))
+ if self.json:
+ obj_start = out.find('{')
+ obj_end = out.rfind('}')
+ array_start = out.find('[')
+ array_end = out.rfind(']')
+
+ if obj_start == -1 and array_start == -1:
+ raise RuntimeError(
+ "VAT command {0}: no JSON data.".format(cmd))
+
+            if obj_start != -1 and (array_start == -1 or
+                                    obj_start < array_start):
+ start = obj_start
+ end = obj_end + 1
+ else:
+ start = array_start
+ end = array_end + 1
+ out = out[start:end]
+ json_out = json.loads(out)
+ return json_out
+ else:
+ return None
+
+ def vat_terminal_close(self):
+ if not self._exec_failure:
+ try:
+ self.ssh_helper.interactive_terminal_exec_command(self._tty,
+ 'quit',
+ self.__LINUX_PROMPT)
+ except exceptions.SSHTimeout:
+ raise RuntimeError("Failed to close VAT console")
+ try:
+ self.ssh_helper.interactive_terminal_close(self._tty)
+ except Exception:
+ raise RuntimeError("Cannot close interactive terminal")
+
+ def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args):
+ file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
+ 'yardstick/resources/templates/',
+ vat_template_file)
+ with open(file_path, 'r') as template_file:
+ cmd_template = template_file.readlines()
+ ret = []
+ for line_tmpl in cmd_template:
+ vat_cmd = line_tmpl.format(**args)
+ ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace('\n', '')))
+ return ret
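+
+# Minimal usage sketch (template name taken from this module; helpers are
+# assumed to be already constructed): each line of the .vat template is
+# formatted with the keyword args and executed in the VAT console:
+#
+#   with VatTerminal(ssh_helper, json_param=True) as vat:
+#       dump = vat.vat_terminal_exec_cmd_from_template('interface_dump.vat')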
diff --git a/yardstick/network_services/yang_model.py b/yardstick/network_services/yang_model.py
deleted file mode 100644
index ec00c4513..000000000
--- a/yardstick/network_services/yang_model.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import print_function
-import logging
-import ipaddress
-import six
-
-from yardstick.common.yaml_loader import yaml_load
-
-LOG = logging.getLogger(__name__)
-
-
-class YangModel(object):
-
- RULE_TEMPLATE = "p acl add 1 {0} {1} {2} {3} {4} {5} {6} {7} 0 0 {8}"
-
- def __init__(self, config_file):
- super(YangModel, self).__init__()
- self._config_file = config_file
- self._options = {}
- self._rules = ''
-
- @property
- def config_file(self):
- return self._config_file
-
- @config_file.setter
- def config_file(self, value):
- self._config_file = value
- self._options = {}
- self._rules = ''
-
- def _read_config(self):
- # TODO: add some error handling in case of empty or non-existing file
- try:
- with open(self._config_file) as f:
- self._options = yaml_load(f)
- except Exception as e:
- LOG.exception("Failed to load the yaml %s", e)
- raise
-
- def _get_entries(self):
- if not self._options:
- return ''
-
- rule_list = []
- for ace in self._options['access-list1']['acl']['access-list-entries']:
- # TODO: resolve ports using topology file and nodes'
- # ids: public or private.
- matches = ace['ace']['matches']
- dst_ipv4_net = matches['destination-ipv4-network']
- dst_ipv4_net_ip = ipaddress.ip_interface(six.text_type(dst_ipv4_net))
- port0_local_network = dst_ipv4_net_ip.network.network_address.exploded
- port0_prefix = dst_ipv4_net_ip.network.prefixlen
-
- src_ipv4_net = matches['source-ipv4-network']
- src_ipv4_net_ip = ipaddress.ip_interface(six.text_type(src_ipv4_net))
- port1_local_network = src_ipv4_net_ip.network.network_address.exploded
- port1_prefix = src_ipv4_net_ip.network.prefixlen
-
- lower_dport = matches['destination-port-range']['lower-port']
- upper_dport = matches['destination-port-range']['upper-port']
-
- lower_sport = matches['source-port-range']['lower-port']
- upper_sport = matches['source-port-range']['upper-port']
-
- # TODO: proto should be read from file also.
- # Now all rules in sample ACL file are TCP.
- rule_list.append('') # get an extra new line
- rule_list.append(self.RULE_TEMPLATE.format(port0_local_network,
- port0_prefix,
- port1_local_network,
- port1_prefix,
- lower_dport,
- upper_dport,
- lower_sport,
- upper_sport,
- 0))
- rule_list.append(self.RULE_TEMPLATE.format(port1_local_network,
- port1_prefix,
- port0_local_network,
- port0_prefix,
- lower_sport,
- upper_sport,
- lower_dport,
- upper_dport,
- 1))
-
- self._rules = '\n'.join(rule_list)
-
- def get_rules(self):
- if not self._rules:
- self._read_config()
- self._get_entries()
- return self._rules