Diffstat (limited to 'tools')
-rw-r--r--  tools/functions.py                                    43
-rw-r--r--  tools/hugepages.py                                     3
-rw-r--r--  tools/namespace.py                                   178
-rw-r--r--  tools/networkcard.py                                 266
-rw-r--r--  tools/pkt_fwd/testpmd.py                               2
-rw-r--r--  tools/pkt_gen/moongen/__init__.py                     13
-rw-r--r--  tools/pkt_gen/moongen/moongen.py                     753
-rw-r--r--  tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py  570
-rw-r--r--  tools/pkt_gen/testcenter/testcenter.py               305
-rw-r--r--  tools/pkt_gen/trafficgen/trafficgenhelper.py           2
-rw-r--r--  tools/pkt_gen/xena/XenaDriver.py                    1129
-rw-r--r--  tools/pkt_gen/xena/__init__.py                        13
-rw-r--r--  tools/pkt_gen/xena/profiles/baseconfig.x2544         373
-rwxr-xr-x  tools/pkt_gen/xena/xena.py                           660
-rw-r--r--  tools/pkt_gen/xena/xena_json.py                      625
-rw-r--r--  tools/systeminfo.py                                   21
-rw-r--r--  tools/tasks.py                                        84
-rw-r--r--  tools/veth.py                                        118
18 files changed, 5046 insertions, 112 deletions
diff --git a/tools/functions.py b/tools/functions.py
new file mode 100644
index 00000000..5079a9f0
--- /dev/null
+++ b/tools/functions.py
@@ -0,0 +1,43 @@
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Various helper functions
+"""
+
+from conf import settings
+
+#
+# Support functions
+#
+
+def settings_update_paths():
+ """ Configure paths to OVS and DPDK based on VSWITCH and VNF values
+ """
+    # set dpdk and ovs paths according to VNF and VSWITCH
+ if settings.getValue('VSWITCH').endswith('Vanilla'):
+        # set paths for Vanilla
+ settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_VANILLA')))
+ elif settings.getValue('VSWITCH').endswith('Vhost'):
+ if settings.getValue('VNF').endswith('Cuse'):
+            # set paths for Cuse
+ settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_CUSE')))
+ settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_CUSE')))
+ else:
+            # set paths for VhostUser
+ settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER')))
+ settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER')))
+ else:
+        # default to VhostUser paths; this can be changed by future enhancements
+ settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER')))
+ settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER')))
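
To illustrate how the new helper is used, a minimal sketch follows (not part of this patch); it assumes the vsperf configuration has already been loaded, and the 'OvsVanilla' value is a placeholder selection, not something introduced by this commit:

    # minimal sketch, illustrative only; assumes conf files are already loaded
    from conf import settings
    from tools import functions

    settings.setValue('VSWITCH', 'OvsVanilla')   # placeholder vswitch selection
    functions.settings_update_paths()            # OVS_DIR now resolves to OVS_DIR_VANILLA
    print(settings.getValue('OVS_DIR'))
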
diff --git a/tools/hugepages.py b/tools/hugepages.py
index 71535922..3a434d6e 100644
--- a/tools/hugepages.py
+++ b/tools/hugepages.py
@@ -78,7 +78,8 @@ def mount_hugepages():
return
if not os.path.exists(settings.getValue('HUGEPAGE_DIR')):
- os.makedirs(settings.getValue('HUGEPAGE_DIR'))
+ tasks.run_task(['sudo', 'mkdir', settings.getValue('HUGEPAGE_DIR')], _LOGGER,
+ 'Creating directory ' + settings.getValue('HUGEPAGE_DIR'), True)
try:
tasks.run_task(['sudo', 'mount', '-t', 'hugetlbfs', 'nodev',
settings.getValue('HUGEPAGE_DIR')],
diff --git a/tools/namespace.py b/tools/namespace.py
new file mode 100644
index 00000000..e6bcd819
--- /dev/null
+++ b/tools/namespace.py
@@ -0,0 +1,178 @@
+# Copyright 2016 Red Hat Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Network namespace emulation
+"""
+
+import logging
+import os
+
+from tools import tasks
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def add_ip_to_namespace_eth(port, name, ip_addr, cidr):
+ """
+ Assign port ip address in namespace
+ :param port: port to assign ip to
+ :param name: namespace where port resides
+ :param ip_addr: ip address in dot notation format
+ :param cidr: cidr as string
+ :return:
+ """
+ ip_string = '{}/{}'.format(ip_addr, cidr)
+ tasks.run_task(['sudo', 'ip', 'netns', 'exec', name,
+ 'ip', 'addr', 'add', ip_string, 'dev', port],
+ _LOGGER, 'Assigning ip to port {}...'.format(port), False)
+
+
+def assign_port_to_namespace(port, name, port_up=False):
+ """
+ Assign NIC port to namespace
+ :param port: port name as string
+ :param name: namespace name as string
+ :param port_up: Boolean if the port should be brought up on assignment
+ :return: None
+ """
+ tasks.run_task(['sudo', 'ip', 'link', 'set',
+ 'netns', name, 'dev', port],
+ _LOGGER, 'Assigning port {} to namespace {}...'.format(
+ port, name), False)
+ if port_up:
+ tasks.run_task(['sudo', 'ip', 'netns', 'exec', name,
+ 'ip', 'link', 'set', port, 'up'],
+ _LOGGER, 'Bringing up port {}...'.format(port), False)
+
+
+def create_namespace(name):
+ """
+ Create a linux namespace. Raises RuntimeError if namespace already exists
+ in the system.
+ :param name: name of the namespace to be created as string
+ :return: None
+ """
+ if name in get_system_namespace_list():
+ raise RuntimeError('Namespace already exists in system')
+
+ # touch some files in a tmp area so we can track them separately from
+ # the OS's internal namespace tracking. This allows us to track VSPerf
+ # created namespaces so they can be cleaned up if needed.
+ if not os.path.isdir('/tmp/namespaces'):
+ try:
+ os.mkdir('/tmp/namespaces')
+ except os.error:
+ # OK don't crash, but cleanup may be an issue...
+ _LOGGER.error('Unable to create namespace temp folder.')
+ _LOGGER.error(
+ 'Namespaces will not be removed on test case completion')
+ if os.path.isdir('/tmp/namespaces'):
+ with open('/tmp/namespaces/{}'.format(name), 'a'):
+ os.utime('/tmp/namespaces/{}'.format(name), None)
+
+ tasks.run_task(['sudo', 'ip', 'netns', 'add', name], _LOGGER,
+ 'Creating namespace {}...'.format(name), False)
+ tasks.run_task(['sudo', 'ip', 'netns', 'exec', name,
+ 'ip', 'link', 'set', 'lo', 'up'], _LOGGER,
+ 'Enabling loopback interface...', False)
+
+
+def delete_namespace(name):
+ """
+ Delete linux network namespace
+ :param name: namespace to delete
+ :return: None
+ """
+ # delete the file if it exists in the temp area
+ if os.path.exists('/tmp/namespaces/{}'.format(name)):
+ os.remove('/tmp/namespaces/{}'.format(name))
+ tasks.run_task(['sudo', 'ip', 'netns', 'delete', name], _LOGGER,
+ 'Deleting namespace {}...'.format(name), False)
+
+
+def get_system_namespace_list():
+ """
+ Return tuple of strings for namespaces on the system
+ :return: tuple of namespaces as string
+ """
+ return tuple(os.listdir('/var/run/netns'))
+
+
+def get_vsperf_namespace_list():
+ """
+ Return a tuple of strings for namespaces created by vsperf testcase
+ :return: tuple of namespaces as string
+ """
+ if os.path.isdir('/tmp/namespaces'):
+ return tuple(os.listdir('/tmp/namespaces'))
+ else:
+        return tuple()
+
+
+def reset_port_to_root(port, name):
+ """
+ Return the assigned port to the root namespace
+ :param port: port to return as string
+ :param name: namespace the port currently resides
+ :return: None
+ """
+ tasks.run_task(['sudo', 'ip', 'netns', 'exec', name,
+ 'ip', 'link', 'set', port, 'netns', '1'],
+                   _LOGGER, 'Returning port {} to root namespace from {}...'.format(
+                       port, name), False)
+
+
+# pylint: disable=unused-argument
+# pylint: disable=invalid-name
+def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
+ """
+ Validation function for integration testcases
+ """
+ ip_string = '{}/{}'.format(ip_addr, cidr)
+ return ip_string in ''.join(tasks.run_task(
+ ['sudo', 'ip', 'netns', 'exec', name, 'ip', 'addr', 'show', port],
+ _LOGGER, 'Validating ip address in namespace...', False))
+
+
+def validate_assign_port_to_namespace(result, port, name, port_up=False):
+ """
+ Validation function for integration testcases
+ """
+    # this could be improved... it's not 100% accurate
+ return port in ''.join(tasks.run_task(
+ ['sudo', 'ip', 'netns', 'exec', name, 'ip', 'addr'],
+ _LOGGER, 'Validating port in namespace...'))
+
+
+def validate_create_namespace(result, name):
+ """
+ Validation function for integration testcases
+ """
+ return name in get_system_namespace_list()
+
+
+def validate_delete_namespace(result, name):
+ """
+ Validation function for integration testcases
+ """
+ return name not in get_system_namespace_list()
+
+
+def validate_reset_port_to_root(result, port, name):
+ """
+ Validation function for integration testcases
+ """
+ return not validate_assign_port_to_namespace(result, port, name)
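
To show how the namespace helpers above fit together, here is a minimal usage sketch (not part of this patch); it assumes passwordless sudo and an existing interface, with 'eth1' used only as a placeholder name:

    from tools import namespace

    namespace.create_namespace('vsperf_ns0')
    namespace.assign_port_to_namespace('eth1', 'vsperf_ns0', port_up=True)   # 'eth1' is a placeholder
    namespace.add_ip_to_namespace_eth('eth1', 'vsperf_ns0', '10.0.0.1', 24)
    # ... run the test ...
    namespace.reset_port_to_root('eth1', 'vsperf_ns0')
    namespace.delete_namespace('vsperf_ns0')
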
diff --git a/tools/networkcard.py b/tools/networkcard.py
new file mode 100644
index 00000000..c31be691
--- /dev/null
+++ b/tools/networkcard.py
@@ -0,0 +1,266 @@
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for network card manipulation
+"""
+
+import os
+import subprocess
+import logging
+import glob
+from conf import settings
+
+_LOGGER = logging.getLogger('tools.networkcard')
+
+_PCI_DIR = '/sys/bus/pci/devices/{}/'
+_SRIOV_NUMVFS = os.path.join(_PCI_DIR, 'sriov_numvfs')
+_SRIOV_TOTALVFS = os.path.join(_PCI_DIR, 'sriov_totalvfs')
+_SRIOV_VF_PREFIX = 'virtfn'
+_SRIOV_PF = 'physfn'
+_PCI_NET = 'net'
+_PCI_DRIVER = 'driver'
+
+
+def check_pci(pci_handle):
+ """ Checks if given extended PCI handle has correct length and fixes
+ it if possible.
+
+ :param pci_handle: PCI slot identifier. It can contain vsperf specific
+ suffix after '|' with VF indication. e.g. '0000:05:00.0|vf1'
+
+ :returns: PCI handle
+ """
+ pci = pci_handle.split('|')
+ pci_len = len(pci[0])
+ if pci_len == 12:
+ return pci_handle
+ elif pci_len == 7:
+ pci[0] = '0000:' + pci[0][-7:]
+ _LOGGER.debug('Adding domain part to PCI slot %s', pci[0])
+ return '|'.join(pci)
+ elif pci_len > 12:
+ pci[0] = pci[0][-12:]
+ _LOGGER.warning('PCI slot is too long, it will be shortened to %s', pci[0])
+ return '|'.join(pci)
+ else:
+ # pci_handle has a strange length, but let us try to use it
+ _LOGGER.error('Unknown format of PCI slot %s', pci_handle)
+ return pci_handle
+
+def is_sriov_supported(pci_handle):
+ """ Checks if sriov is supported by given NIC
+
+ :param pci_handle: PCI slot identifier with domain part.
+
+ :returns: True on success, False otherwise
+ """
+ return os.path.isfile(_SRIOV_TOTALVFS.format(pci_handle))
+
+def is_sriov_nic(pci_handle):
+ """ Checks if given extended PCI ID refers to the VF
+
+ :param pci_handle: PCI slot identifier with domain part. It can contain
+ vsperf specific suffix after '|' with VF indication.
+ e.g. '0000:05:00.0|vf1'
+
+ :returns: True on success, False otherwise
+ """
+ for item in pci_handle.split('|'):
+ if item.lower().startswith('vf'):
+ return True
+ return False
+
+def set_sriov_numvfs(pci_handle, numvfs):
+ """ Checks if sriov is supported and configures given number of VFs
+
+ :param pci_handle: PCI slot identifier with domain part.
+ :param numvfs: Number of VFs to be configured at given NIC.
+
+ :returns: True on success, False otherwise
+ """
+ if not is_sriov_supported(pci_handle):
+ return False
+
+ if get_sriov_numvfs(pci_handle) == numvfs:
+ return True
+
+ if numvfs and get_sriov_numvfs(pci_handle) != 0:
+ if not set_sriov_numvfs(pci_handle, 0):
+ return False
+
+ try:
+ subprocess.call('sudo bash -c "echo {} > {}"'.format(numvfs, _SRIOV_NUMVFS.format(pci_handle)), shell=True)
+ return get_sriov_numvfs(pci_handle) == numvfs
+ except OSError:
+        _LOGGER.debug('Number of VFs cannot be changed to %s for PF %s', numvfs, pci_handle)
+ return False
+
+def get_sriov_numvfs(pci_handle):
+ """ Returns the number of configured VFs
+
+ :param pci_handle: PCI slot identifier with domain part
+ :returns: the number of configured VFs
+ """
+ if is_sriov_supported(pci_handle):
+ with open(_SRIOV_NUMVFS.format(pci_handle), 'r') as numvfs:
+ return int(numvfs.readline().rstrip('\n'))
+
+ return None
+
+def get_sriov_totalvfs(pci_handle):
+ """ Checks if sriov is supported and returns max number of supported VFs
+
+ :param pci_handle: PCI slot identifier with domain part
+ :returns: the max number of supported VFs by given NIC
+ """
+ if is_sriov_supported(pci_handle):
+ with open(_SRIOV_TOTALVFS.format(pci_handle), 'r') as total:
+ return int(total.readline().rstrip('\n'))
+
+ return None
+
+def get_sriov_vfs_list(pf_pci_handle):
+ """ Returns list of PCI handles of VFs configured at given NIC/PF
+
+ :param pf_pci_handle: PCI slot identifier of PF with domain part.
+    :returns: list of PCI handles of configured VFs
+ """
+ vfs = []
+ if is_sriov_supported(pf_pci_handle):
+ for vf_name in glob.glob(os.path.join(_PCI_DIR, _SRIOV_VF_PREFIX + '*').format(pf_pci_handle)):
+ vfs.append(os.path.basename(os.path.realpath(vf_name)))
+
+ return vfs
+
+def get_sriov_pf(vf_pci_handle):
+ """ Get PCI handle of PF which belongs to given VF
+
+ :param vf_pci_handle: PCI slot identifier of VF with domain part.
+ :returns: PCI handle of parent PF
+ """
+ pf_path = os.path.join(_PCI_DIR, _SRIOV_PF).format(vf_pci_handle)
+ if os.path.isdir(pf_path):
+ return os.path.basename(os.path.realpath(pf_path))
+
+ return None
+
+def get_driver(pci_handle):
+ """ Returns name of kernel driver assigned to given NIC
+
+ :param pci_handle: PCI slot identifier with domain part.
+ :returns: string with assigned kernel driver, None otherwise
+ """
+ driver_path = os.path.join(_PCI_DIR, _PCI_DRIVER).format(pci_handle)
+ if os.path.isdir(driver_path):
+ return os.path.basename(os.path.realpath(driver_path))
+
+ return None
+
+def get_device_name(pci_handle):
+ """ Returns name of network card device name
+
+ :param pci_handle: PCI slot identifier with domain part.
+ :returns: string with assigned NIC device name, None otherwise
+ """
+ net_path = os.path.join(_PCI_DIR, _PCI_NET).format(pci_handle)
+ try:
+ return os.listdir(net_path)[0]
+    except (FileNotFoundError, IndexError):
+        return None
+
+def get_mac(pci_handle):
+ """ Returns MAC address of given NIC
+
+ :param pci_handle: PCI slot identifier with domain part.
+ :returns: string with assigned MAC address, None otherwise
+ """
+ mac_path = glob.glob(os.path.join(_PCI_DIR, _PCI_NET, '*', 'address').format(pci_handle))
+ # kernel driver is loaded and MAC can be read
+ if len(mac_path) and os.path.isfile(mac_path[0]):
+ with open(mac_path[0], 'r') as _file:
+ return _file.readline().rstrip('\n')
+
+ # MAC address is unknown, e.g. NIC is assigned to DPDK
+ return None
+
+def get_nic_info(full_pci_handle):
+ """ Parse given pci handle with additional info and returns
+ requested NIC info.
+
+ :param full_pci_handle: A string with extended network card PCI ID.
+ extended PCI ID syntax: PCI_ID[|vfx][|(mac|dev)]
+ examples:
+ 0000:06:00.0 - returns the same value
+ 0000:06:00.0|vf0 - returns PCI ID of 1st virtual function of given NIC
+ 0000:06:00.0|mac - returns MAC address of given NIC
+ 0000:06:00.0|vf0|mac - returns MAC address of 1st virtual function of given NIC
+
+ :returns: A string with requested NIC data or None if data cannot be read.
+ """
+ parsed_handle = full_pci_handle.split('|')
+ if len(parsed_handle) not in (1, 2, 3):
+ _LOGGER.error("Invalid PCI device name: '%s'", full_pci_handle)
+ return None
+
+ pci_handle = parsed_handle[0]
+
+ for action in parsed_handle[1:]:
+ # in case of SRIOV get PCI handle of given virtual function
+ if action.lower().startswith('vf'):
+ try:
+ vf_num = int(action[2:])
+ pci_handle = get_sriov_vfs_list(pci_handle)[vf_num]
+ except ValueError:
+                _LOGGER.error("Invalid VF index '%s' for PCI device '%s'", action[2:], pci_handle)
+ return None
+ except IndexError:
+                _LOGGER.error("PCI device '%s' does not have VF with index '%s'", pci_handle, vf_num)
+ return None
+ continue
+
+ # return requested info for given PCI handle
+ if action.lower() == 'mac':
+ return get_mac(pci_handle)
+ elif action.lower() == 'dev':
+ return get_device_name(pci_handle)
+ else:
+ _LOGGER.error("Invalid item '%s' in PCI handle '%s'", action, full_pci_handle)
+ return None
+
+ return pci_handle
+
+def reinit_vfs(pf_pci_handle):
+ """ Reinitializates all VFs, which belong to given PF
+
+ :param pf_pci_handle: PCI slot identifier of PF with domain part.
+ """
+ rte_pci_tool = os.path.join(settings.getValue('RTE_SDK'), 'tools', 'dpdk_nic_bind.py')
+
+ for vf_nic in get_sriov_vfs_list(pf_pci_handle):
+ nic_driver = get_driver(vf_nic)
+ if nic_driver:
+ try:
+ subprocess.call(['sudo', rte_pci_tool, '--unbind', vf_nic],
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ subprocess.call(['sudo', rte_pci_tool, '--bind=' + nic_driver, vf_nic],
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ except subprocess.CalledProcessError:
+ _LOGGER.warning('Error during reinitialization of VF %s', vf_nic)
+ else:
+ _LOGGER.warning("Can't detect driver for VF %s", vf_nic)
+
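
A short sketch of how the extended PCI handle syntax resolved by get_nic_info() works in practice (illustrative only; '05:00.0' is an example address, and the SR-IOV calls assume a capable NIC with its kernel driver loaded):

    from tools import networkcard

    pci = networkcard.check_pci('05:00.0')             # -> '0000:05:00.0', domain part added
    if networkcard.is_sriov_supported(pci):
        networkcard.set_sriov_numvfs(pci, 2)           # configure two VFs on the PF
        print(networkcard.get_sriov_vfs_list(pci))     # PCI handles of virtfn0/virtfn1
    print(networkcard.get_nic_info(pci + '|mac'))      # MAC address of the PF
    print(networkcard.get_nic_info(pci + '|vf0|dev'))  # device name of the first VF
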
diff --git a/tools/pkt_fwd/testpmd.py b/tools/pkt_fwd/testpmd.py
index d8ed8905..e1b987bc 100644
--- a/tools/pkt_fwd/testpmd.py
+++ b/tools/pkt_fwd/testpmd.py
@@ -42,7 +42,7 @@ class TestPMD(IPktFwd):
vswitchd_args += _VSWITCHD_CONST_ARGS
vswitchd_args += settings.getValue('TESTPMD_ARGS')
- self._nports = len(settings.getValue('WHITELIST_NICS'))
+ self._nports = len(settings.getValue('NICS'))
self._fwdmode = settings.getValue('TESTPMD_FWD_MODE')
self._csum_layer = settings.getValue('TESTPMD_CSUM_LAYER')
self._csum_calc = settings.getValue('TESTPMD_CSUM_CALC')
diff --git a/tools/pkt_gen/moongen/__init__.py b/tools/pkt_gen/moongen/__init__.py
new file mode 100644
index 00000000..562eb088
--- /dev/null
+++ b/tools/pkt_gen/moongen/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/pkt_gen/moongen/moongen.py b/tools/pkt_gen/moongen/moongen.py
new file mode 100644
index 00000000..d6c09e5d
--- /dev/null
+++ b/tools/pkt_gen/moongen/moongen.py
@@ -0,0 +1,753 @@
+# Copyright 2016 Red Hat Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Bill Michalowski, Red Hat Inc.
+# Andrew Theurer, Red Hat Inc.
+"""
+Moongen Traffic Generator Model
+"""
+
+# python imports
+import logging
+from collections import OrderedDict
+import subprocess
+import re
+
+# VSPerf imports
+from conf import settings
+from core.results.results_constants import ResultsConstants
+from tools.pkt_gen.trafficgen.trafficgenhelper import (
+ TRAFFIC_DEFAULTS,
+ merge_spec)
+from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator
+
+class Moongen(ITrafficGenerator):
+ """Moongen Traffic generator wrapper."""
+ _traffic_defaults = TRAFFIC_DEFAULTS.copy()
+ _logger = logging.getLogger(__name__)
+
+ def __init__(self):
+ """Moongen class constructor."""
+ self._logger.info("In moongen __init__ method")
+ self._params = {}
+ self._moongen_host_ip_addr = (
+ settings.getValue('TRAFFICGEN_MOONGEN_HOST_IP_ADDR'))
+ self._moongen_base_dir = (
+ settings.getValue('TRAFFICGEN_MOONGEN_BASE_DIR'))
+ self._moongen_user = settings.getValue('TRAFFICGEN_MOONGEN_USER')
+ self._moongen_ports = settings.getValue('TRAFFICGEN_MOONGEN_PORTS')
+
+ @property
+ def traffic_defaults(self):
+ """Default traffic values.
+
+ These can be expected to be constant across traffic generators,
+ so no setter is provided. Changes to the structure or contents
+ will likely break traffic generator implementations or tests
+ respectively.
+ """
+ self._logger.info("In moongen traffic_defaults method")
+ return self._traffic_defaults
+
+ def create_moongen_cfg_file(self, traffic, duration=60,
+ acceptable_loss_pct=1, one_shot=0):
+ """Create the MoonGen configuration file from VSPERF's traffic profile
+ :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
+ :param duration: The length of time to generate packet throughput
+        :param acceptable_loss_pct: Maximum acceptable packet loss in percent
+ :param one_shot: No RFC 2544 binary search,
+ just packet flow at traffic specifics
+ """
+ logging.debug("traffic['frame_rate'] = " + \
+ str(traffic['frame_rate']))
+
+ logging.debug("traffic['multistream'] = " + \
+ str(traffic['multistream']))
+
+ logging.debug("traffic['stream_type'] = " + \
+ str(traffic['stream_type']))
+
+ logging.debug("traffic['l2']['srcmac'] = " + \
+ str(traffic['l2']['srcmac']))
+
+ logging.debug("traffic['l2']['dstmac'] = " + \
+ str(traffic['l2']['dstmac']))
+
+ logging.debug("traffic['l3']['proto'] = " + \
+ str(traffic['l3']['proto']))
+
+ logging.debug("traffic['l3']['srcip'] = " + \
+ str(traffic['l3']['srcip']))
+
+ logging.debug("traffic['l3']['dstip'] = " + \
+ str(traffic['l3']['dstip']))
+
+ logging.debug("traffic['l4']['srcport'] = " + \
+ str(traffic['l4']['srcport']))
+
+ logging.debug("traffic['l4']['dstport'] = " + \
+ str(traffic['l4']['dstport']))
+
+ logging.debug("traffic['vlan']['enabled'] = " + \
+ str(traffic['vlan']['enabled']))
+
+ logging.debug("traffic['vlan']['id'] = " + \
+ str(traffic['vlan']['id']))
+
+ logging.debug("traffic['vlan']['priority'] = " + \
+ str(traffic['vlan']['priority']))
+
+ logging.debug("traffic['vlan']['cfi'] = " + \
+ str(traffic['vlan']['cfi']))
+
+ logging.debug(traffic['l2']['framesize'])
+
+ out_file = open("opnfv-vsperf-cfg.lua", "wt")
+
+ out_file.write("VSPERF {\n")
+
+ out_file.write("testType = \"throughput\",\n")
+
+ out_file.write("runBidirec = " + \
+ traffic['bidir'].lower() + ",\n")
+
+ out_file.write("frameSize = " + \
+ str(traffic['l2']['framesize']) + ",\n")
+
+ out_file.write("srcMac = \"" + \
+ str(traffic['l2']['srcmac']) + "\",\n")
+
+ out_file.write("dstMac = \"" + \
+ str(traffic['l2']['dstmac']) + "\",\n")
+
+ out_file.write("srcIp = \"" + \
+ str(traffic['l3']['srcip']) + "\",\n")
+
+ out_file.write("dstIp = \"" + \
+ str(traffic['l3']['dstip']) + "\",\n")
+
+ out_file.write("vlanId = " + \
+ str(traffic['vlan']['id']) + ",\n")
+
+ out_file.write("searchRunTime = " + \
+ str(duration) + ",\n")
+
+ out_file.write("validationRunTime = " + \
+ str(duration) + ",\n")
+
+ out_file.write("acceptableLossPct = " + \
+ str(acceptable_loss_pct) + ",\n")
+
+ out_file.write("ports = " +\
+ str(self._moongen_ports) + ",\n")
+
+ if one_shot:
+ out_file.write("oneShot = true,\n")
+
+ # Assume 10G line rates at the moment. Need to convert VSPERF
+ # frame_rate (percentage of line rate) to Mpps for MoonGen
+
+ out_file.write("startRate = " + str((traffic['frame_rate'] / 100) * 14.88) + "\n")
+ out_file.write("}" + "\n")
+ out_file.close()
+
+ copy_moongen_cfg = "scp opnfv-vsperf-cfg.lua " + \
+ self._moongen_user + "@" + \
+ self._moongen_host_ip_addr + ":" + \
+ self._moongen_base_dir + \
+ "/. && rm opnfv-vsperf-cfg.lua"
+
+ find_moongen = subprocess.Popen(copy_moongen_cfg,
+ shell=True,
+ stderr=subprocess.PIPE)
+
+ output, error = find_moongen.communicate()
+
+ if error:
+ logging.error(output)
+ logging.error(error)
+ raise RuntimeError('MOONGEN: Error copying configuration file')
+
+ def connect(self):
+ """Connect to MoonGen traffic generator
+
+ Verify that MoonGen is on the system indicated by
+ the configuration file
+ """
+ self._logger.info("MOONGEN: In MoonGen connect method...")
+
+ if self._moongen_host_ip_addr:
+ cmd_ping = "ping -c1 " + self._moongen_host_ip_addr
+ else:
+ raise RuntimeError('MOONGEN: MoonGen host not defined')
+
+ ping = subprocess.Popen(cmd_ping, shell=True, stderr=subprocess.PIPE)
+ output, error = ping.communicate()
+
+ if ping.returncode:
+ self._logger.error(error)
+ self._logger.error(output)
+ raise RuntimeError('MOONGEN: Cannot ping MoonGen host at ' + \
+ self._moongen_host_ip_addr)
+
+ connect_moongen = "ssh " + self._moongen_user + \
+ "@" + self._moongen_host_ip_addr
+
+ cmd_find_moongen = connect_moongen + " ls " + \
+ self._moongen_base_dir + "/examples/opnfv-vsperf.lua"
+
+ find_moongen = subprocess.Popen(cmd_find_moongen,
+ shell=True,
+ stderr=subprocess.PIPE)
+
+ output, error = find_moongen.communicate()
+
+ if find_moongen.returncode:
+ self._logger.error(error)
+ self._logger.error(output)
+ raise RuntimeError(
+ 'MOONGEN: Cannot locate MoonGen program at %s within %s' \
+ % (self._moongen_host_ip_addr, self._moongen_base_dir))
+
+ self._logger.info("MOONGEN: MoonGen host successfully found...")
+
+ def disconnect(self):
+ """Disconnect from the traffic generator.
+
+ As with :func:`connect`, this function is optional.
+
+ Where implemented, this function should raise an exception on
+ failure.
+
+ :returns: None
+ """
+ self._logger.info("MOONGEN: In moongen disconnect method")
+
+ def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ """Send a burst of traffic.
+
+        Send ``numpkts`` packets of traffic, using ``traffic``
+        configuration, with a timeout of ``duration``.
+
+ :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
+ :param numpkts: Number of packets to send
+ :param duration: Time to wait to receive packets
+
+ :returns: dictionary of strings with following data:
+ - List of Tx Frames,
+ - List of Rx Frames,
+ - List of Tx Bytes,
+ - List of List of Rx Bytes,
+ - Payload Errors and Sequence Errors.
+ """
+ self._logger.info("In moongen send_burst_traffic method")
+        raise NotImplementedError('Moongen Burst traffic not implemented')
+
+ def send_cont_traffic(self, traffic=None, duration=20):
+ """Send a continuous flow of traffic
+
+ Send packets at ``frame rate``, using ``traffic`` configuration,
+        until timeout ``duration`` occurs.
+
+ :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
+ :param duration: Time to wait to receive packets (secs)
+ :returns: dictionary of strings with following data:
+ - Tx Throughput (fps),
+ - Rx Throughput (fps),
+ - Tx Throughput (mbps),
+ - Rx Throughput (mbps),
+ - Tx Throughput (% linerate),
+ - Rx Throughput (% linerate),
+ - Min Latency (ns),
+ - Max Latency (ns),
+ - Avg Latency (ns)
+ """
+ self._logger.info("In moongen send_cont_traffic method")
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ Moongen.create_moongen_cfg_file(self,
+ traffic,
+ duration=duration,
+ acceptable_loss_pct=100.0,
+ one_shot=1)
+
+ collected_results = Moongen.run_moongen_and_collect_results(self,
+ test_run=1)
+
+ total_throughput_rx_fps = (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS]))
+
+ total_throughput_rx_mbps = (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS]))
+
+ total_throughput_rx_pct = (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT]))
+
+ total_throughput_tx_fps = (
+ float(collected_results[ResultsConstants.TX_RATE_FPS]))
+
+ total_throughput_tx_mbps = (
+ float(collected_results[ResultsConstants.TX_RATE_MBPS]))
+
+ total_throughput_tx_pct = (
+ float(collected_results[ResultsConstants.TX_RATE_PERCENT]))
+
+ total_min_latency_ns = 0
+ total_max_latency_ns = 0
+ total_avg_latency_ns = 0
+
+ results = OrderedDict()
+ results[ResultsConstants.THROUGHPUT_RX_FPS] = (
+ '{:,.6f}'.format(total_throughput_rx_fps))
+
+ results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
+ '{:,.3f}'.format(total_throughput_rx_mbps))
+
+ results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
+ '{:,.3f}'.format(total_throughput_rx_pct))
+
+ results[ResultsConstants.TX_RATE_FPS] = (
+ '{:,.6f}'.format(total_throughput_tx_fps))
+
+ results[ResultsConstants.TX_RATE_MBPS] = (
+ '{:,.3f}'.format(total_throughput_tx_mbps))
+
+ results[ResultsConstants.TX_RATE_PERCENT] = (
+ '{:,.3f}'.format(total_throughput_tx_pct))
+
+ results[ResultsConstants.MIN_LATENCY_NS] = (
+ '{:,.3f}'.format(total_min_latency_ns))
+
+ results[ResultsConstants.MAX_LATENCY_NS] = (
+ '{:,.3f}'.format(total_max_latency_ns))
+
+ results[ResultsConstants.AVG_LATENCY_NS] = (
+ '{:,.3f}'.format(total_avg_latency_ns))
+
+ return results
+
+ def start_cont_traffic(self, traffic=None, duration=20):
+ """ Non-blocking version of 'send_cont_traffic'.
+
+ Start transmission and immediately return. Do not wait for
+ results.
+ :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
+ :param duration: Time to wait to receive packets (secs)
+ """
+ self._logger.info("In moongen start_cont_traffic method")
+        raise NotImplementedError('Moongen continuous traffic not implemented')
+
+ def stop_cont_traffic(self):
+        """Stop continuous transmission and return results."""
+ self._logger.info("In moongen stop_cont_traffic method")
+
+ def run_moongen_and_collect_results(self, test_run=1):
+ """Execute MoonGen and transform results into VSPERF format
+        :param test_run: Index of this test run; used to name the result log directory
+ """
+ # Start MoonGen and create logfile of the run
+ connect_moongen = "ssh " + self._moongen_user + "@" + \
+ self._moongen_host_ip_addr
+
+ cmd_moongen = " 'cd " + self._moongen_base_dir + \
+ "; ./build/MoonGen examples/opnfv-vsperf.lua | tee moongen_log.txt'"
+
+ cmd_start_moongen = connect_moongen + cmd_moongen
+
+ start_moongen = subprocess.Popen(cmd_start_moongen,
+ shell=True, stderr=subprocess.PIPE)
+
+ output, error = start_moongen.communicate()
+
+ if start_moongen.returncode:
+ logging.debug(error)
+ logging.debug(output)
+ raise RuntimeError(
+ 'MOONGEN: Error starting MoonGen program at %s within %s' \
+ % (self._moongen_host_ip_addr, self._moongen_base_dir))
+
+ cmd_moongen = "mkdir -p /tmp/moongen/" + str(test_run)
+
+ moongen_create_log_dir = subprocess.Popen(cmd_moongen,
+ shell=True,
+ stderr=subprocess.PIPE)
+
+ output, error = moongen_create_log_dir.communicate()
+
+ if moongen_create_log_dir.returncode:
+ logging.debug(error)
+ logging.debug(output)
+ raise RuntimeError(
+ 'MOONGEN: Error obtaining MoonGen log from %s within %s' \
+ % (self._moongen_host_ip_addr, self._moongen_base_dir))
+
+ cmd_moongen = " scp " + self._moongen_user + "@" + \
+ self._moongen_host_ip_addr + ":" + \
+ self._moongen_base_dir + "/moongen_log.txt /tmp/moongen/" + \
+ str(test_run) + "/moongen-run.log"
+
+ copy_moongen_log = subprocess.Popen(cmd_moongen,
+ shell=True,
+ stderr=subprocess.PIPE)
+
+ output, error = copy_moongen_log.communicate()
+
+ if copy_moongen_log.returncode:
+ logging.debug(error)
+ logging.debug(output)
+ raise RuntimeError(
+ 'MOONGEN: Error obtaining MoonGen log from %s within %s' \
+ % (self._moongen_host_ip_addr, self._moongen_base_dir))
+
+ log_file = "/tmp/moongen/" + str(test_run) + "/moongen-run.log"
+
+ with open(log_file, 'r') as logfile_handle:
+ mytext = logfile_handle.read()
+
+ # REPORT results line
+ # match.group(1) = Tx frames
+ # match.group(2) = Rx frames
+ # match.group(3) = Frame loss (count)
+ # match.group(4) = Frame loss (percentage)
+ # match.group(5) = Tx Mpps
+ # match.group(6) = Rx Mpps
+ search_pattern = re.compile(
+ r'\[REPORT\]\s+total\:\s+'
+ r'Tx\s+frames\:\s+(\d+)\s+'
+ r'Rx\s+Frames\:\s+(\d+)\s+'
+ r'frame\s+loss\:\s+(\d+)\,'
+ r'\s+(\d+\.\d+|\d+)%\s+'
+ r'Tx\s+Mpps\:\s+(\d+.\d+|\d+)\s+'
+ r'Rx\s+Mpps\:\s+(\d+\.\d+|\d+)',
+ re.IGNORECASE)
+
+ results_match = search_pattern.search(mytext)
+
+ if not results_match:
+ logging.error('There was a problem parsing ' +\
+ 'MoonGen REPORT section of MoonGen log file')
+
+ moongen_results = OrderedDict()
+ moongen_results[ResultsConstants.THROUGHPUT_RX_FPS] = 0
+ moongen_results[ResultsConstants.THROUGHPUT_RX_MBPS] = 0
+ moongen_results[ResultsConstants.THROUGHPUT_RX_PERCENT] = 0
+ moongen_results[ResultsConstants.TX_RATE_FPS] = 0
+ moongen_results[ResultsConstants.TX_RATE_MBPS] = 0
+ moongen_results[ResultsConstants.TX_RATE_PERCENT] = 0
+ moongen_results[ResultsConstants.B2B_TX_COUNT] = 0
+ moongen_results[ResultsConstants.B2B_FRAMES] = 0
+ moongen_results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = 0
+ moongen_results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = 0
+
+ # find PARAMETERS line
+ # parameters_match.group(1) = Frame size
+
+ search_pattern = re.compile(
+ r'\[PARAMETERS\]\s+.*frameSize\:\s+(\d+)',
+ flags=re.IGNORECASE)
+ parameters_match = search_pattern.search(mytext)
+
+ if parameters_match:
+ frame_size = int(parameters_match.group(1))
+ else:
+ logging.error('There was a problem parsing MoonGen ' +\
+ 'PARAMETERS section of MoonGen log file')
+ frame_size = 0
+
+ if results_match and parameters_match:
+ # Assume for now 10G link speed
+ max_theoretical_mfps = (
+ (10000000000 / 8) / (frame_size + 20))
+
+ moongen_results[ResultsConstants.THROUGHPUT_RX_FPS] = (
+ float(results_match.group(6)) * 1000000)
+
+ moongen_results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
+                float(results_match.group(6)) * (frame_size + 20) * 8)
+
+ moongen_results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
+ float(results_match.group(6)) * \
+ 1000000 / max_theoretical_mfps * 100)
+
+ moongen_results[ResultsConstants.TX_RATE_FPS] = (
+ float(results_match.group(5)) * 1000000)
+
+ moongen_results[ResultsConstants.TX_RATE_MBPS] = (
+ float(results_match.group(5)) * (frame_size + 20) * 8)
+
+ moongen_results[ResultsConstants.TX_RATE_PERCENT] = (
+ float(results_match.group(5)) *
+ 1000000 / max_theoretical_mfps * 100)
+
+ moongen_results[ResultsConstants.B2B_TX_COUNT] = (
+ float(results_match.group(1)))
+
+ moongen_results[ResultsConstants.B2B_FRAMES] = (
+ float(results_match.group(2)))
+
+ moongen_results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = (
+ float(results_match.group(3)))
+
+ moongen_results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = (
+ float(results_match.group(4)))
+
+ return moongen_results
+
+ def send_rfc2544_throughput(self, traffic=None, duration=20,
+ lossrate=0.0, trials=1):
+        """Send traffic per RFC2544 throughput test specifications.
+
+        Send packets at a variable rate, using ``traffic``
+        configuration, until the minimum rate at which no packet loss is
+        detected is found.
+
+        :param traffic: Detailed "traffic" spec, see design docs for details
+        :param trials: Number of trials to execute
+        :param duration: Per iteration duration
+        :param lossrate: Acceptable lossrate percentage
+        :returns: dictionary of strings with following data:
+            - Tx Throughput (fps),
+            - Rx Throughput (fps),
+            - Tx Throughput (mbps),
+            - Rx Throughput (mbps),
+            - Tx Throughput (% linerate),
+            - Rx Throughput (% linerate),
+            - Min Latency (ns),
+            - Max Latency (ns),
+            - Avg Latency (ns)
+        """
+ self._logger.info("In moongen send_rfc2544_throughput method")
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+ Moongen.create_moongen_cfg_file(self,
+ traffic,
+ duration=duration,
+ acceptable_loss_pct=lossrate)
+
+ total_throughput_rx_fps = 0
+ total_throughput_rx_mbps = 0
+ total_throughput_rx_pct = 0
+ total_throughput_tx_fps = 0
+ total_throughput_tx_mbps = 0
+ total_throughput_tx_pct = 0
+ total_min_latency_ns = 0
+ total_max_latency_ns = 0
+ total_avg_latency_ns = 0
+
+ for test_run in range(1, trials+1):
+ collected_results = (
+ Moongen.run_moongen_and_collect_results(self, test_run=test_run))
+
+ total_throughput_rx_fps += (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS]))
+
+ total_throughput_rx_mbps += (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS]))
+
+ total_throughput_rx_pct += (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT]))
+
+ total_throughput_tx_fps += (
+ float(collected_results[ResultsConstants.TX_RATE_FPS]))
+
+ total_throughput_tx_mbps += (
+ float(collected_results[ResultsConstants.TX_RATE_MBPS]))
+
+ total_throughput_tx_pct += (
+ float(collected_results[ResultsConstants.TX_RATE_PERCENT]))
+
+ # Latency not supported now, leaving as placeholder
+ total_min_latency_ns = 0
+ total_max_latency_ns = 0
+ total_avg_latency_ns = 0
+
+ results = OrderedDict()
+ results[ResultsConstants.THROUGHPUT_RX_FPS] = (
+ '{:,.6f}'.format(total_throughput_rx_fps / trials))
+
+ results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
+ '{:,.3f}'.format(total_throughput_rx_mbps / trials))
+
+ results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
+ '{:,.3f}'.format(total_throughput_rx_pct / trials))
+
+ results[ResultsConstants.TX_RATE_FPS] = (
+ '{:,.6f}'.format(total_throughput_tx_fps / trials))
+
+ results[ResultsConstants.TX_RATE_MBPS] = (
+ '{:,.3f}'.format(total_throughput_tx_mbps / trials))
+
+ results[ResultsConstants.TX_RATE_PERCENT] = (
+ '{:,.3f}'.format(total_throughput_tx_pct / trials))
+
+ results[ResultsConstants.MIN_LATENCY_NS] = (
+ '{:,.3f}'.format(total_min_latency_ns / trials))
+
+ results[ResultsConstants.MAX_LATENCY_NS] = (
+ '{:,.3f}'.format(total_max_latency_ns / trials))
+
+ results[ResultsConstants.AVG_LATENCY_NS] = (
+ '{:,.3f}'.format(total_avg_latency_ns / trials))
+
+ return results
+
+ def start_rfc2544_throughput(self, traffic=None, trials=3, duration=20,
+ lossrate=0.0):
+ """Non-blocking version of 'send_rfc2544_throughput'.
+
+ Start transmission and immediately return. Do not wait for
+ results.
+ """
+ self._logger.info(
+ "MOONGEN: In moongen start_rfc2544_throughput method")
+
+ def wait_rfc2544_throughput(self):
+ """Wait for and return results of RFC2544 test.
+ """
+ self._logger.info('In moongen wait_rfc2544_throughput')
+
+ def send_rfc2544_back2back(self, traffic=None, duration=60,
+ lossrate=0.0, trials=1):
+ """Send traffic per RFC2544 back2back test specifications.
+
+ Send packets at a fixed rate, using ``traffic``
+ configuration, for duration seconds.
+
+ :param traffic: Detailed "traffic" spec, see design docs for details
+ :param trials: Number of trials to execute
+ :param duration: Per iteration duration
+ :param lossrate: Acceptable loss percentage
+
+ :returns: Named tuple of Rx Throughput (fps), Rx Throughput (mbps),
+ Tx Rate (% linerate), Rx Rate (% linerate), Tx Count (frames),
+ Back to Back Count (frames), Frame Loss (frames), Frame Loss (%)
+ :rtype: :class:`Back2BackResult`
+ """
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ Moongen.create_moongen_cfg_file(self,
+ traffic,
+ duration=duration,
+ acceptable_loss_pct=lossrate)
+
+ results = OrderedDict()
+ results[ResultsConstants.B2B_RX_FPS] = 0
+ results[ResultsConstants.B2B_TX_FPS] = 0
+ results[ResultsConstants.B2B_RX_PERCENT] = 0
+ results[ResultsConstants.B2B_TX_PERCENT] = 0
+ results[ResultsConstants.B2B_TX_COUNT] = 0
+ results[ResultsConstants.B2B_FRAMES] = 0
+ results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = 0
+ results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = 0
+ results[ResultsConstants.SCAL_STREAM_COUNT] = 0
+ results[ResultsConstants.SCAL_STREAM_TYPE] = 0
+ results[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = 0
+
+ for test_run in range(1, trials+1):
+ collected_results = (
+ Moongen.run_moongen_and_collect_results(self, test_run=test_run))
+
+ results[ResultsConstants.B2B_RX_FPS] += (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS]))
+
+ results[ResultsConstants.B2B_RX_PERCENT] += (
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT]))
+
+ results[ResultsConstants.B2B_TX_FPS] += (
+ float(collected_results[ResultsConstants.TX_RATE_FPS]))
+
+ results[ResultsConstants.B2B_TX_PERCENT] += (
+ float(collected_results[ResultsConstants.TX_RATE_PERCENT]))
+
+ results[ResultsConstants.B2B_TX_COUNT] += (
+ int(collected_results[ResultsConstants.B2B_TX_COUNT]))
+
+ results[ResultsConstants.B2B_FRAMES] += (
+ int(collected_results[ResultsConstants.B2B_FRAMES]))
+
+ results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] += (
+ int(collected_results[ResultsConstants.B2B_FRAME_LOSS_FRAMES]))
+
+ results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] += (
+ int(collected_results[ResultsConstants.B2B_FRAME_LOSS_PERCENT]))
+
+ # Calculate average results
+ results[ResultsConstants.B2B_RX_FPS] = (
+ results[ResultsConstants.B2B_RX_FPS] / trials)
+
+ results[ResultsConstants.B2B_RX_PERCENT] = (
+ results[ResultsConstants.B2B_RX_PERCENT] / trials)
+
+ results[ResultsConstants.B2B_TX_FPS] = (
+ results[ResultsConstants.B2B_TX_FPS] / trials)
+
+ results[ResultsConstants.B2B_TX_PERCENT] = (
+ results[ResultsConstants.B2B_TX_PERCENT] / trials)
+
+ results[ResultsConstants.B2B_TX_COUNT] = (
+ results[ResultsConstants.B2B_TX_COUNT] / trials)
+
+ results[ResultsConstants.B2B_FRAMES] = (
+ results[ResultsConstants.B2B_FRAMES] / trials)
+
+ results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = (
+ results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] / trials)
+
+ results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = (
+ results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] / trials)
+
+ results[ResultsConstants.SCAL_STREAM_COUNT] = 0
+ results[ResultsConstants.SCAL_STREAM_TYPE] = 0
+ results[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = 0
+
+ return results
+
+ def start_rfc2544_back2back(self, traffic=None, trials=1, duration=20,
+ lossrate=0.0):
+        """Non-blocking version of 'send_rfc2544_back2back'.
+
+        Start transmission and immediately return. Do not wait for results.
+        """
+        self._logger.info("In moongen start_rfc2544_back2back method")
+        raise NotImplementedError(
+            'Moongen start back2back traffic not implemented')
+
+ def wait_rfc2544_back2back(self):
+ self._logger.info("In moongen wait_rfc2544_back2back method")
+ #
+ # Wait and set results of RFC2544 test.
+ #
+ return NotImplementedError(
+ 'Moongen wait back2back traffic not implemented')
+
+if __name__ == "__main__":
+ pass
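
For orientation, a hedged sketch of driving the Moongen wrapper directly through the ITrafficGenerator interface (in a real run vsperf's loader instantiates the generator; the TRAFFICGEN_MOONGEN_* settings are assumed to be configured already, and the frame_rate tweak below is only an example):

    from core.results.results_constants import ResultsConstants
    from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
    from tools.pkt_gen.moongen.moongen import Moongen

    gen = Moongen()
    gen.connect()                        # verifies the MoonGen host and opnfv-vsperf.lua are reachable
    traffic = TRAFFIC_DEFAULTS.copy()    # start from the common defaults
    traffic['frame_rate'] = 100          # percent of (assumed 10G) line rate
    results = gen.send_rfc2544_throughput(traffic=traffic, duration=60,
                                          lossrate=0.0, trials=1)
    print(results[ResultsConstants.THROUGHPUT_RX_FPS])
    gen.disconnect()
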
diff --git a/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
new file mode 100644
index 00000000..91f7e27f
--- /dev/null
+++ b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
@@ -0,0 +1,570 @@
+# Copyright 2016 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+@author Spirent Communications
+
+This test automates the RFC2544 tests using the Spirent
+TestCenter REST APIs. This test supports Python 3.4
+
+'''
+import argparse
+import logging
+import os
+
+
+logger = logging.getLogger(__name__)
+
+
+def create_dir(path):
+ """Create the directory as specified in path """
+ if not os.path.exists(path):
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ logger.error("Failed to create directory %s: %s", path, str(e))
+ raise
+
+
+def write_query_results_to_csv(results_path, csv_results_file_prefix,
+ query_results):
+ """ Write the results of the query to the CSV """
+ create_dir(results_path)
+ filec = os.path.join(results_path, csv_results_file_prefix + ".csv")
+    with open(filec, "w") as f:
+ f.write(query_results["Columns"].replace(" ", ",") + "\n")
+ for row in (query_results["Output"].replace("} {", ",").
+ replace("{", "").replace("}", "").split(",")):
+ f.write(row.replace(" ", ",") + "\n")
+
+
+def positive_int(value):
+ """ Positive Integer type for Arguments """
+ ivalue = int(value)
+ if ivalue <= 0:
+ raise argparse.ArgumentTypeError(
+ "%s is an invalid positive int value" % value)
+ return ivalue
+
+
+def percent_float(value):
+ """ Floating type for Arguments """
+ pvalue = float(value)
+ if pvalue < 0.0 or pvalue > 100.0:
+ raise argparse.ArgumentTypeError(
+ "%s not in range [0.0, 100.0]" % pvalue)
+ return pvalue
+
+
+def main():
+ """ Read the arguments, Invoke Test and Return the results"""
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ required_named = parser.add_argument_group("required named arguments")
+ required_named.add_argument("--lab_server_addr",
+ required=True,
+                                help=("The IP address of the "
+ "Spirent Lab Server"),
+ dest="lab_server_addr")
+ required_named.add_argument("--license_server_addr",
+ required=True,
+                                help=("The IP address of the Spirent "
+ "License Server"),
+ dest="license_server_addr")
+ required_named.add_argument("--east_chassis_addr",
+ required=True,
+                                help=("The TestCenter chassis IP address to "
+ "use for the east test port"),
+ dest="east_chassis_addr")
+ required_named.add_argument("--east_slot_num",
+ type=positive_int,
+ required=True,
+                                help=("The TestCenter slot number to "
+ "use for the east test port"),
+ dest="east_slot_num")
+ required_named.add_argument("--east_port_num",
+ type=positive_int,
+ required=True,
+                                help=("The TestCenter port number to use "
+ "for the east test port"),
+ dest="east_port_num")
+ required_named.add_argument("--west_chassis_addr",
+ required=True,
+                                help=("The TestCenter chassis IP address "
+ "to use for the west test port"),
+ dest="west_chassis_addr")
+ required_named.add_argument("--west_slot_num",
+ type=positive_int,
+ required=True,
+                                help=("The TestCenter slot number to use "
+ "for the west test port"),
+ dest="west_slot_num")
+ required_named.add_argument("--west_port_num",
+ type=positive_int,
+ required=True,
+                                help=("The TestCenter port number to "
+ "use for the west test port"),
+ dest="west_port_num")
+ # Optional parameters
+ optional_named = parser.add_argument_group("optional named arguments")
+ optional_named.add_argument("--metric",
+ required=False,
+                                help=("One among - throughput, latency, "
+                                      "backtoback and frameloss"),
+ choices=["throughput", "latency",
+ "backtoback", "frameloss"],
+ default="throughput",
+ dest="metric")
+ optional_named.add_argument("--test_session_name",
+ required=False,
+ default="RFC2544 East-West Throughput",
+                                help=("The friendly name to identify "
+ "the Spirent Lab Server test session"),
+ dest="test_session_name")
+
+ optional_named.add_argument("--test_user_name",
+ required=False,
+ default="RFC2544 East-West User",
+                                help=("The friendly name to identify the "
+ "Spirent Lab Server test user"),
+ dest="test_user_name")
+ optional_named.add_argument("--results_dir",
+ required=False,
+ default="./Results",
+ help="The directory to copy results to",
+ dest="results_dir")
+ optional_named.add_argument("--csv_results_file_prefix",
+ required=False,
+ default="Rfc2544Tput",
+ help="The prefix for the CSV results files",
+ dest="csv_results_file_prefix")
+ optional_named.add_argument("--num_trials",
+ type=positive_int,
+ required=False,
+ default=1,
+                                help=("The number of trials to execute during "
+ "the test"),
+ dest="num_trials")
+ optional_named.add_argument("--trial_duration_sec",
+ type=positive_int,
+ required=False,
+ default=60,
+                                help=("The duration of each trial executed "
+ "during the test"),
+ dest="trial_duration_sec")
+ optional_named.add_argument("--traffic_pattern",
+ required=False,
+ choices=["BACKBONE", "MESH", "PAIR"],
+ default="PAIR",
+ help="The traffic pattern between endpoints",
+ dest="traffic_pattern")
+ optional_named.add_argument("--traffic_custom",
+ required=False,
+ default=None,
+                                help="Custom traffic type; e.g. 'cont' for continuous traffic",
+ dest="traffic_custom")
+ optional_named.add_argument("--search_mode",
+ required=False,
+ choices=["COMBO", "STEP", "BINARY"],
+ default="BINARY",
+                                help=("The search mode used to find the "
+ "throughput rate"),
+ dest="search_mode")
+ optional_named.add_argument("--learning_mode",
+ required=False,
+ choices=["AUTO", "L2_LEARNING",
+ "L3_LEARNING", "NONE"],
+ default="AUTO",
+                                help=("The learning mode used during the test, "
+                                      "default is 'AUTO'"),
+ dest="learning_mode")
+ optional_named.add_argument("--rate_lower_limit_pct",
+ type=percent_float,
+ required=False,
+ default=1.0,
+                                help=("The minimum percent line rate that "
+ "will be used during the test"),
+ dest="rate_lower_limit_pct")
+ optional_named.add_argument("--rate_upper_limit_pct",
+ type=percent_float,
+ required=False,
+ default=99.0,
+                                help=("The maximum percent line rate that "
+ "will be used during the test"),
+ dest="rate_upper_limit_pct")
+ optional_named.add_argument("--rate_initial_pct",
+ type=percent_float,
+ required=False,
+ default=99.0,
+                                help=("If Search Mode is BINARY, the percent "
+                                      "line rate that will be used at the "
+ "start of the test"),
+ dest="rate_initial_pct")
+ optional_named.add_argument("--rate_step_pct",
+ type=percent_float,
+ required=False,
+ default=10.0,
+                                help=("If SearchMode is STEP, the percent "
+ "load increase per step"),
+ dest="rate_step_pct")
+ optional_named.add_argument("--resolution_pct",
+ type=percent_float,
+ required=False,
+ default=1.0,
+                                help=("The minimum percentage of load "
+ "adjustment between iterations"),
+ dest="resolution_pct")
+ optional_named.add_argument("--frame_size_list",
+ type=lambda s: [int(item)
+ for item in s.split(',')],
+ required=False,
+ default=[256],
+ help="A comma-delimited list of frame sizes",
+ dest="frame_size_list")
+ optional_named.add_argument("--acceptable_frame_loss_pct",
+ type=percent_float,
+ required=False,
+ default=0.0,
+                                help=("The maximum acceptable frame loss "
+ "percent in any iteration"),
+ dest="acceptable_frame_loss_pct")
+ optional_named.add_argument("--east_intf_addr",
+ required=False,
+ default="192.85.1.3",
+                                help=("The address to assign to the first "
+                                      "emulated device interface on the first "
+ "east port"),
+ dest="east_intf_addr")
+ optional_named.add_argument("--east_intf_gateway_addr",
+ required=False,
+ default="192.85.1.53",
+                                help=("The gateway address to assign to the "
+                                      "first emulated device interface on the "
+ "first east port"),
+ dest="east_intf_gateway_addr")
+ optional_named.add_argument("--west_intf_addr",
+ required=False,
+ default="192.85.1.53",
+                                help=("The address to assign to the first "
+                                      "emulated device interface on the "
+ "first west port"),
+ dest="west_intf_addr")
+ optional_named.add_argument("--west_intf_gateway_addr",
+ required=False,
+ default="192.85.1.53",
+                                help=("The gateway address to assign to "
+                                      "the first emulated device interface "
+ "on the first west port"),
+ dest="west_intf_gateway_addr")
+ parser.add_argument("-v",
+ "--verbose",
+ required=False,
+ default=True,
+ help="More output during operation when present",
+ action="store_true",
+ dest="verbose")
+ args = parser.parse_args()
+
+ if args.verbose:
+ logger.debug("Creating results directory")
+ create_dir(args.results_dir)
+
+ session_name = args.test_session_name
+ user_name = args.test_user_name
+
+ try:
+ # Load Spirent REST Library
+ from stcrestclient import stchttp
+
+ stc = stchttp.StcHttp(args.lab_server_addr)
+ session_id = stc.new_session(user_name, session_name)
+ stc.join_session(session_id)
+ except RuntimeError as e:
+ logger.error(e)
+ raise
+
+ # Get STC system info.
+ tx_port_loc = "//%s/%s/%s" % (args.east_chassis_addr,
+ args.east_slot_num,
+ args.east_port_num)
+ rx_port_loc = "//%s/%s/%s" % (args.west_chassis_addr,
+ args.west_slot_num,
+ args.west_port_num)
+
+ # Retrieve and display the server information
+ if args.verbose:
+ logger.debug("SpirentTestCenter system version: %s",
+ stc.get("system1", "version"))
+
+ try:
+ device_list = []
+ port_list = []
+ if args.verbose:
+ logger.debug("Bring up license server")
+ license_mgr = stc.get("system1", "children-licenseservermanager")
+ if args.verbose:
+ logger.debug("license_mgr = %s", license_mgr)
+ stc.create("LicenseServer", under=license_mgr, attributes={
+ "server": args.license_server_addr})
+
+ # Create the root project object
+ if args.verbose:
+ logger.debug("Creating project ...")
+ project = stc.get("System1", "children-Project")
+
+ # Configure any custom traffic parameters
+ if args.traffic_custom == "cont":
+ if args.verbose:
+ logger.debug("Configure Continuous Traffic")
+ stc.create("ContinuousTestConfig", under=project)
+
+ # Create ports
+ if args.verbose:
+ logger.debug("Creating ports ...")
+ east_chassis_port = stc.create('port', project)
+ if args.verbose:
+ logger.debug("Configuring TX port ...")
+ stc.config(east_chassis_port, {'location': tx_port_loc})
+ port_list.append(east_chassis_port)
+
+ west_chassis_port = stc.create('port', project)
+ if args.verbose:
+ logger.debug("Configuring RX port ...")
+ stc.config(west_chassis_port, {'location': rx_port_loc})
+ port_list.append(west_chassis_port)
+
+ # Create emulated genparam for east port
+ east_device_gen_params = stc.create("EmulatedDeviceGenParams",
+ under=project,
+ attributes={"Port":
+ east_chassis_port})
+ # Create the DeviceGenEthIIIfParams object
+ stc.create("DeviceGenEthIIIfParams",
+ under=east_device_gen_params)
+ # Configuring Ipv4 interfaces
+ stc.create("DeviceGenIpv4IfParams",
+ under=east_device_gen_params,
+ attributes={"Addr": args.east_intf_addr,
+ "Gateway": args.east_intf_gateway_addr})
+ # Create Devices using the Device Wizard
+ device_gen_config = stc.perform("DeviceGenConfigExpand",
+ params={"DeleteExisting": "No",
+ "GenParams":
+ east_device_gen_params})
+ # Append to the device list
+ device_list.append(device_gen_config['ReturnList'])
+
+ # Create emulated genparam for west port
+ west_device_gen_params = stc.create("EmulatedDeviceGenParams",
+ under=project,
+ attributes={"Port":
+ west_chassis_port})
+ # Create the DeviceGenEthIIIfParams object
+ stc.create("DeviceGenEthIIIfParams",
+ under=west_device_gen_params)
+ # Configuring Ipv4 interfaces
+ stc.create("DeviceGenIpv4IfParams",
+ under=west_device_gen_params,
+ attributes={"Addr": args.west_intf_addr,
+ "Gateway": args.west_intf_gateway_addr})
+ # Create Devices using the Device Wizard
+ device_gen_config = stc.perform("DeviceGenConfigExpand",
+ params={"DeleteExisting": "No",
+ "GenParams":
+ west_device_gen_params})
+ # Append to the device list
+ device_list.append(device_gen_config['ReturnList'])
+ if args.verbose:
+ logger.debug(device_list)
+
+        # Create the RFC 2544 test for the selected metric
+ if args.metric == "throughput":
+ if args.verbose:
+ logger.debug("Set up the RFC2544 throughput test...")
+ stc.perform("Rfc2544SetupThroughputTestCommand",
+ params={"AcceptableFrameLoss":
+ args.acceptable_frame_loss_pct,
+ "Duration": args.trial_duration_sec,
+ "FrameSizeList": args.frame_size_list,
+ "LearningMode": args.learning_mode,
+ "NumOfTrials": args.num_trials,
+ "RateInitial": args.rate_initial_pct,
+ "RateLowerLimit": args.rate_lower_limit_pct,
+ "RateStep": args.rate_step_pct,
+ "RateUpperLimit": args.rate_upper_limit_pct,
+ "Resolution": args.resolution_pct,
+ "SearchMode": args.search_mode,
+ "TrafficPattern": args.traffic_pattern})
+ elif args.metric == "backtoback":
+ stc.perform("Rfc2544SetupBackToBackTestCommand",
+ params={"AcceptableFrameLoss":
+ args.acceptable_frame_loss_pct,
+ "Duration": args.trial_duration_sec,
+ "FrameSizeList": args.frame_size_list,
+ "LearningMode": args.learning_mode,
+ "LatencyType": args.latency_type,
+ "NumOfTrials": args.num_trials,
+ "RateInitial": args.rate_initial_pct,
+ "RateLowerLimit": args.rate_lower_limit_pct,
+ "RateStep": args.rate_step_pct,
+ "RateUpperLimit": args.rate_upper_limit_pct,
+ "Resolution": args.resolution_pct,
+ "SearchMode": args.search_mode,
+ "TrafficPattern": args.traffic_pattern})
+ elif args.metric == "frameloss":
+ stc.perform("Rfc2544SetupFrameLossTestCommand",
+ params={"AcceptableFrameLoss":
+ args.acceptable_frame_loss_pct,
+ "Duration": args.trial_duration_sec,
+ "FrameSizeList": args.frame_size_list,
+ "LearningMode": args.learning_mode,
+ "LatencyType": args.latency_type,
+ "NumOfTrials": args.num_trials,
+ "RateInitial": args.rate_initial_pct,
+ "RateLowerLimit": args.rate_lower_limit_pct,
+ "RateStep": args.rate_step_pct,
+ "RateUpperLimit": args.rate_upper_limit_pct,
+ "Resolution": args.resolution_pct,
+ "SearchMode": args.search_mode,
+ "TrafficPattern": args.traffic_pattern})
+ elif args.metric == "latency":
+ stc.perform("Rfc2544SetupLatencyTestCommand",
+ params={"AcceptableFrameLoss":
+ args.acceptable_frame_loss_pct,
+ "Duration": args.trial_duration_sec,
+ "FrameSizeList": args.frame_size_list,
+ "LearningMode": args.learning_mode,
+ "LatencyType": args.latency_type,
+ "NumOfTrials": args.num_trials,
+ "RateInitial": args.rate_initial_pct,
+ "RateLowerLimit": args.rate_lower_limit_pct,
+ "RateStep": args.rate_step_pct,
+ "RateUpperLimit": args.rate_upper_limit_pct,
+ "Resolution": args.resolution_pct,
+ "SearchMode": args.search_mode,
+ "TrafficPattern": args.traffic_pattern})
+
+ # Save the configuration
+ stc.perform("SaveToTcc", params={"Filename": "2544.tcc"})
+ # Connect to the hardware...
+ stc.perform("AttachPorts", params={"portList": stc.get(
+ "system1.project", "children-port"), "autoConnect": "TRUE"})
+ # Apply configuration.
+ if args.verbose:
+ logger.debug("Apply configuration...")
+ stc.apply()
+
+ if args.verbose:
+ logger.debug("Starting the sequencer...")
+ stc.perform("SequencerStart")
+
+ # Wait for sequencer to finish
+ logger.info(
+ "Starting test... Please wait for the test to complete...")
+ stc.wait_until_complete()
+ logger.info("The test has completed... Saving results...")
+
+ # Determine what the results database filename is...
+ lab_server_resultsdb = stc.get(
+ "system1.project.TestResultSetting", "CurrentResultFileName")
+
+ if args.verbose:
+ logger.debug("The lab server results database is %s",
+ lab_server_resultsdb)
+
+ stc.perform("CSSynchronizeFiles",
+ params={"DefaultDownloadDir": args.results_dir})
+
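+ # The lab server reports an absolute path under its own ".../Results"
+ # tree; keep only the part after "/Results" and re-root it under the
+ # local results directory.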
+ resultsdb = args.results_dir + \
+ lab_server_resultsdb.split("/Results")[1]
+
+ logger.info(
+ "The local summary DB file has been saved to %s", resultsdb)
+
+ # This returns the "RFC2544ThroughputTestResultDetailedSummaryView"
+ # table view from the results database.
+ # There are other views available.
+
+ if args.metric == "throughput":
+ resultsdict = (
+ stc.perform("QueryResult",
+ params={
+ "DatabaseConnectionString":
+ resultsdb,
+ "ResultPath":
+ ("RFC2544ThroughputTestResultDetailed"
+ "SummaryView")}))
+
+ # This returns the "RFC2544Back2BackTestResultDetailedSummaryView"
+ # table view from the results database.
+ # There are other views available.
+ elif args.metric == "backtoback":
+ resultsdict = (
+ stc.perform("QueryResult",
+ params={
+ "DatabaseConnectionString":
+ resultsdb,
+ "ResultPath":
+ ("RFC2544Back2BackTestResultDetailed"
+ "SummaryView")}))
+
+ # This returns the "RFC2544LatencyTestResultDetailedSummaryView"
+ # table view from the results database.
+ # There are other views available.
+ elif args.metric == "latency":
+ resultsdict = (
+ stc.perform("QueryResult",
+ params={
+ "DatabaseConnectionString":
+ resultsdb,
+ "ResultPath":
+ ("RFC2544LatencyTestResultDetailed"
+ "SummaryView")}))
+
+ # This returns the "RFC2544FrameLossTestResultDetailedSummaryView"
+ # table view from the results database.
+ # There are other views available.
+ elif args.metric == "frameloss":
+ resultsdict = (
+ stc.perform("QueryResult",
+ params={
+ "DatabaseConnectionString":
+ resultsdb,
+ "ResultPath":
+ ("RFC2544FrameLossTestResultDetailed"
+ "SummaryView")}))
+ if args.verbose:
+ logger.debug("resultsdict[\"Columns\"]: %s",
+ resultsdict["Columns"])
+ logger.debug("resultsdict[\"Output\"]: %s", resultsdict["Output"])
+ logger.debug("Result paths: %s",
+ stc.perform("GetTestResultSettingPaths"))
+
+ # Write results to csv
+ logger.debug("Writing CSV file to results directory %s",
+ args.results_dir)
+ write_query_results_to_csv(
+ args.results_dir, args.csv_results_file_prefix, resultsdict)
+
+ except RuntimeError as e:
+ logger.error(e)
+
+ if args.verbose:
+ logger.debug("Destroy session on lab server")
+ stc.end_session()
+
+ logger.info("Test complete!")
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py
index f670612c..a1f38d8b 100644
--- a/tools/pkt_gen/testcenter/testcenter.py
+++ b/tools/pkt_gen/testcenter/testcenter.py
@@ -1,4 +1,4 @@
-# Copyright 2015 Spirent Communications.
+# Copyright 2016 Spirent Communications.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,20 +19,108 @@ Provides a model for Spirent TestCenter as a test tool for implementing
various performance tests of a virtual switch.
"""
-from __future__ import print_function
-
-from tools.pkt_gen import trafficgen
-from core.results.results_constants import ResultsConstants
-import subprocess
-import os
import csv
+import logging
+import os
+import subprocess
+
from conf import settings
+from core.results.results_constants import ResultsConstants
+from tools.pkt_gen import trafficgen
+
+
+def get_stc_common_settings():
+ """
+ Return the common settings.
+ These settings apply to almost all tests.
+ """
+ args = ["--lab_server_addr",
+ settings.getValue("TRAFFICGEN_STC_LAB_SERVER_ADDR"),
+ "--license_server_addr",
+ settings.getValue("TRAFFICGEN_STC_LICENSE_SERVER_ADDR"),
+ "--east_chassis_addr",
+ settings.getValue("TRAFFICGEN_STC_EAST_CHASSIS_ADDR"),
+ "--east_slot_num",
+ settings.getValue("TRAFFICGEN_STC_EAST_SLOT_NUM"),
+ "--east_port_num",
+ settings.getValue("TRAFFICGEN_STC_EAST_PORT_NUM"),
+ "--west_chassis_addr",
+ settings.getValue("TRAFFICGEN_STC_WEST_CHASSIS_ADDR"),
+ "--west_slot_num",
+ settings.getValue("TRAFFICGEN_STC_WEST_SLOT_NUM"),
+ "--west_port_num",
+ settings.getValue("TRAFFICGEN_STC_WEST_PORT_NUM"),
+ "--test_session_name",
+ settings.getValue("TRAFFICGEN_STC_TEST_SESSION_NAME"),
+ "--results_dir",
+ settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
+ "--csv_results_file_prefix",
+ settings.getValue("TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX")]
+ return args
+
+
+def get_rfc2544_common_settings():
+ """
+ Return generic RFC 2544 settings.
+ These settings apply to all RFC 2544 tests.
+ """
+ args = [settings.getValue("TRAFFICGEN_STC_PYTHON2_PATH"),
+ os.path.join(
+ settings.getValue("TRAFFICGEN_STC_TESTCENTER_PATH"),
+ settings.getValue(
+ "TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME")),
+ "--metric",
+ settings.getValue("TRAFFICGEN_STC_RFC2544_METRIC"),
+ "--search_mode",
+ settings.getValue("TRAFFICGEN_STC_SEARCH_MODE"),
+ "--learning_mode",
+ settings.getValue("TRAFFICGEN_STC_LEARNING_MODE"),
+ "--rate_lower_limit_pct",
+ settings.getValue("TRAFFICGEN_STC_RATE_LOWER_LIMIT_PCT"),
+ "--rate_upper_limit_pct",
+ settings.getValue("TRAFFICGEN_STC_RATE_UPPER_LIMIT_PCT"),
+ "--rate_initial_pct",
+ settings.getValue("TRAFFICGEN_STC_RATE_INITIAL_PCT"),
+ "--rate_step_pct",
+ settings.getValue("TRAFFICGEN_STC_RATE_STEP_PCT"),
+ "--resolution_pct",
+ settings.getValue("TRAFFICGEN_STC_RESOLUTION_PCT"),
+ "--acceptable_frame_loss_pct",
+ settings.getValue("TRAFFICGEN_STC_ACCEPTABLE_FRAME_LOSS_PCT"),
+ "--east_intf_addr",
+ settings.getValue("TRAFFICGEN_STC_EAST_INTF_ADDR"),
+ "--east_intf_gateway_addr",
+ settings.getValue("TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR"),
+ "--west_intf_addr",
+ settings.getValue("TRAFFICGEN_STC_WEST_INTF_ADDR"),
+ "--west_intf_gateway_addr",
+ settings.getValue("TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR"),
+ "--num_trials",
+ settings.getValue("TRAFFICGEN_STC_NUMBER_OF_TRIALS"),
+ "--trial_duration_sec",
+ settings.getValue("TRAFFICGEN_STC_TRIAL_DURATION_SEC"),
+ "--traffic_pattern",
+ settings.getValue("TRAFFICGEN_STC_TRAFFIC_PATTERN")]
+ return args
+
+
+def get_rfc2544_custom_settings(framesize, custom_tr):
+ """
+ Return RFC2544 Custom Settings
+ """
+ args = ["--frame_size_list",
+ str(framesize),
+ "--traffic_custom",
+ str(custom_tr)]
+ return args
class TestCenter(trafficgen.ITrafficGenerator):
"""
Spirent TestCenter
"""
+ _logger = logging.getLogger(__name__)
+
def connect(self):
"""
Do nothing.
@@ -45,111 +133,146 @@ class TestCenter(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20, framerate=100):
+ def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
"""
Do nothing.
"""
return None
- def send_cont_traffic(self, traffic=None, duration=30, framerate=0,
- multistream=False):
+ def get_rfc2544_results(self, filename):
"""
- Do nothing.
+ Read the CSV file and return the results
"""
- return None
+ result = {}
+ with open(filename, "r") as csvfile:
+ csvreader = csv.DictReader(csvfile)
+ for row in csvreader:
+ self._logger.info("Row: %s", row)
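+ # Derive frame rates (fps) and bit rates (Mbps) from the frame
+ # counts, the configured frame size and the trial duration
+ # reported in the CSV row.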
+ tx_fps = ((float(row["TxFrameCount"])) /
+ (float(row["Duration(sec)"])))
+ rx_fps = ((float(row["RxFrameCount"])) /
+ (float(row["Duration(sec)"])))
+ tx_mbps = ((float(row["TxFrameCount"]) *
+ float(row["ConfiguredFrameSize"])) /
+ (float(row["Duration(sec)"]) * 1000000.0))
+ rx_mbps = ((float(row["RxFrameCount"]) *
+ float(row["ConfiguredFrameSize"])) /
+ (float(row["Duration(sec)"]) * 1000000.0))
+ result[ResultsConstants.TX_RATE_FPS] = tx_fps
+ result[ResultsConstants.THROUGHPUT_RX_FPS] = rx_fps
+ result[ResultsConstants.TX_RATE_MBPS] = tx_mbps
+ result[ResultsConstants.THROUGHPUT_RX_MBPS] = rx_mbps
+ result[ResultsConstants.TX_RATE_PERCENT] = float(
+ row["OfferedLoad(%)"])
+ result[ResultsConstants.THROUGHPUT_RX_PERCENT] = float(
+ row["Throughput(%)"])
+ result[ResultsConstants.MIN_LATENCY_NS] = float(
+ row["MinimumLatency(us)"]) * 1000
+ result[ResultsConstants.MAX_LATENCY_NS] = float(
+ row["MaximumLatency(us)"]) * 1000
+ result[ResultsConstants.AVG_LATENCY_NS] = float(
+ row["AverageLatency(us)"]) * 1000
+ result[ResultsConstants.FRAME_LOSS_PERCENT] = float(
+ row["PercentLoss"])
+ return result
+
+ def send_cont_traffic(self, traffic=None, duration=30):
+ """
+ Send custom continuous traffic.
+ Reuses the RFC2544 throughput test specification together with
+ the 'cont' custom traffic configuration.
+ """
+ verbose = False
+ custom = "cont"
+ framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE")
+ if traffic and 'l2' in traffic:
+ if 'framesize' in traffic['l2']:
+ framesize = traffic['l2']['framesize']
+
+ stc_common_args = get_stc_common_settings()
+ rfc2544_common_args = get_rfc2544_common_settings()
+ rfc2544_custom_args = get_rfc2544_custom_settings(framesize,
+ custom)
+ args = stc_common_args + rfc2544_common_args + rfc2544_custom_args
+
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
+ args.append("--verbose")
+ verbose = True
+ self._logger.debug("Arguments used to call test: %s", args)
+ subprocess.check_call(args)
+
+ filec = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
+ settings.getValue(
+ "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") +
+ ".csv")
+
+ if verbose:
+ self._logger.info("file: %s", filec)
+
+ return self.get_rfc2544_results(filec)
def send_rfc2544_throughput(self, traffic=None, trials=3, duration=20,
- lossrate=0.0, multistream=False):
+ lossrate=0.0):
"""
Send traffic per RFC2544 throughput test specifications.
"""
verbose = False
+ framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE")
+ if traffic and 'l2' in traffic:
+ if 'framesize' in traffic['l2']:
+ framesize = traffic['l2']['framesize']
+
+ stc_common_args = get_stc_common_settings()
+ rfc2544_common_args = get_rfc2544_common_settings()
+ rfc2544_custom_args = get_rfc2544_custom_settings(framesize, '')
+ args = stc_common_args + rfc2544_common_args + rfc2544_custom_args
- args = [settings.getValue("TRAFFICGEN_STC_PYTHON2_PATH"),
- os.path.join(settings.getValue("TRAFFICGEN_STC_TESTCENTER_PATH"),
- settings.getValue("TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME")),
- "--lab_server_addr",
- settings.getValue("TRAFFICGEN_STC_LAB_SERVER_ADDR"),
- "--license_server_addr",
- settings.getValue("TRAFFICGEN_STC_LICENSE_SERVER_ADDR"),
- "--east_chassis_addr",
- settings.getValue("TRAFFICGEN_STC_EAST_CHASSIS_ADDR"),
- "--east_slot_num",
- settings.getValue("TRAFFICGEN_STC_EAST_SLOT_NUM"),
- "--east_port_num",
- settings.getValue("TRAFFICGEN_STC_EAST_PORT_NUM"),
- "--west_chassis_addr",
- settings.getValue("TRAFFICGEN_STC_WEST_CHASSIS_ADDR"),
- "--west_slot_num",
- settings.getValue("TRAFFICGEN_STC_WEST_SLOT_NUM"),
- "--west_port_num",
- settings.getValue("TRAFFICGEN_STC_WEST_PORT_NUM"),
- "--test_session_name",
- settings.getValue("TRAFFICGEN_STC_TEST_SESSION_NAME"),
- "--results_dir",
- settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
- "--csv_results_file_prefix",
- settings.getValue("TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX"),
- "--num_trials",
- settings.getValue("TRAFFICGEN_STC_NUMBER_OF_TRIALS"),
- "--trial_duration_sec",
- settings.getValue("TRAFFICGEN_STC_TRIAL_DURATION_SEC"),
- "--traffic_pattern",
- settings.getValue("TRAFFICGEN_STC_TRAFFIC_PATTERN"),
- "--search_mode",
- settings.getValue("TRAFFICGEN_STC_SEARCH_MODE"),
- "--learning_mode",
- settings.getValue("TRAFFICGEN_STC_LEARNING_MODE"),
- "--rate_lower_limit_pct",
- settings.getValue("TRAFFICGEN_STC_RATE_LOWER_LIMIT_PCT"),
- "--rate_upper_limit_pct",
- settings.getValue("TRAFFICGEN_STC_RATE_UPPER_LIMIT_PCT"),
- "--rate_initial_pct",
- settings.getValue("TRAFFICGEN_STC_RATE_INITIAL_PCT"),
- "--rate_step_pct",
- settings.getValue("TRAFFICGEN_STC_RATE_STEP_PCT"),
- "--resolution_pct",
- settings.getValue("TRAFFICGEN_STC_RESOLUTION_PCT"),
- "--frame_size_list",
- settings.getValue("TRAFFICGEN_STC_FRAME_SIZE"),
- "--acceptable_frame_loss_pct",
- settings.getValue("TRAFFICGEN_STC_ACCEPTABLE_FRAME_LOSS_PCT"),
- "--east_intf_addr",
- settings.getValue("TRAFFICGEN_STC_EAST_INTF_ADDR"),
- "--east_intf_gateway_addr",
- settings.getValue("TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR"),
- "--west_intf_addr",
- settings.getValue("TRAFFICGEN_STC_WEST_INTF_ADDR"),
- "--west_intf_gateway_addr",
- settings.getValue("TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR")]
if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
args.append("--verbose")
verbose = True
- print("Arguments used to call test: %s" % args)
+ self._logger.debug("Arguments used to call test: %s", args)
+ subprocess.check_call(args)
- subprocess.check_call(map(os.path.expanduser, args))
+ filec = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
+ settings.getValue(
+ "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") +
+ ".csv")
- file = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
- settings.getValue("TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") + ".csv")
if verbose:
- print("file: %s" % file)
+ self._logger.info("file: %s", filec)
- result = {}
+ return self.get_rfc2544_results(filec)
- with open(file, "r") as csvfile:
- csvreader = csv.DictReader(csvfile)
- for row in csvreader:
- print("Row: %s" % row)
- result[ResultsConstants.TX_RATE_FPS] = 0.0
- result[ResultsConstants.THROUGHPUT_RX_FPS] = 0.0
- result[ResultsConstants.TX_RATE_MBPS] = 0.0
- result[ResultsConstants.THROUGHPUT_RX_MBPS] = 0.0
- result[ResultsConstants.TX_RATE_PERCENT] = float(row["OfferedLoad(%)"])
- result[ResultsConstants.THROUGHPUT_RX_PERCENT] = float(row["Throughput(%)"])
- result[ResultsConstants.MIN_LATENCY_NS] = float(row["MinimumLatency(us)"]) * 1000
- result[ResultsConstants.MAX_LATENCY_NS] = float(row["MaximumLatency(us)"]) * 1000
- result[ResultsConstants.AVG_LATENCY_NS] = float(row["AverageLatency(us)"]) * 1000
- return result
+ def send_rfc2544_back2back(self, traffic=None, trials=1, duration=20,
+ lossrate=0.0):
+ """
+ Send traffic per RFC2544 BacktoBack test specifications.
+ """
+ verbose = False
+ framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE")
+ if traffic and 'l2' in traffic:
+ if 'framesize' in traffic['l2']:
+ framesize = traffic['l2']['framesize']
+
+ stc_common_args = get_stc_common_settings()
+ rfc2544_common_args = get_rfc2544_common_settings()
+ rfc2544_custom_args = get_rfc2544_custom_settings(framesize, '')
+ args = stc_common_args + rfc2544_common_args + rfc2544_custom_args
+
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
+ args.append("--verbose")
+ verbose = True
+ self._logger.info("Arguments used to call test: %s", args)
+ subprocess.check_call(args)
+
+ filecs = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
+ settings.getValue(
+ "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") +
+ ".csv")
+ if verbose:
+ self._logger.debug("file: %s", filecs)
+
+ return self.get_rfc2544_results(filecs)
if __name__ == '__main__':
TRAFFIC = {
@@ -159,6 +282,6 @@ if __name__ == '__main__':
'dstip': '90.90.90.90',
},
}
-
with TestCenter() as dev:
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
+ print(dev.send_rfc2544_back2back(traffic=TRAFFIC))
diff --git a/tools/pkt_gen/trafficgen/trafficgenhelper.py b/tools/pkt_gen/trafficgen/trafficgenhelper.py
index 0a240579..90c77b09 100644
--- a/tools/pkt_gen/trafficgen/trafficgenhelper.py
+++ b/tools/pkt_gen/trafficgen/trafficgenhelper.py
@@ -23,7 +23,7 @@ CMD_PREFIX = 'gencmd : '
TRAFFIC_DEFAULTS = {
'traffic_type' : 'rfc2544',
'frame_rate' : 100,
- 'bidir' : False,
+ 'bidir' : 'False', # will be passed as string in title format to tgen
'multistream' : 0,
'stream_type' : 'L4',
'pre_installed_flows' : 'No', # used by vswitch implementation
diff --git a/tools/pkt_gen/xena/XenaDriver.py b/tools/pkt_gen/xena/XenaDriver.py
new file mode 100644
index 00000000..aa8443c9
--- /dev/null
+++ b/tools/pkt_gen/xena/XenaDriver.py
@@ -0,0 +1,1129 @@
+# Copyright 2016 Red Hat Inc & Xena Networks.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a port of code by Xena and Flavio Leitner to Python 3.
+# Credit to Xena and Flavio for providing most of the logic of this code.
+# The code has been updated for PEP 8 and Python 3, with Stat classes added
+# for better scaling to future requirements and calculation functions added
+# for line rate to align with the VSPerf project.
+# Flavio's Xena libraries are available at https://github.com/fleitner/XenaPythonLib
+
+# Contributors:
+# Flavio Leitner, Red Hat Inc.
+# Dan Amzulescu, Xena Networks
+# Christian Trautman, Red Hat Inc.
+
+"""
+Xena Socket API Driver module for communicating directly with Xena system
+through socket commands and returning different statistics.
+"""
+import locale
+import logging
+import socket
+import struct
+import sys
+import threading
+import time
+
+# Xena Socket Commands
+CMD_CLEAR_RX_STATS = 'pr_clear'
+CMD_CLEAR_TX_STATS = 'pt_clear'
+CMD_COMMENT = ';'
+CMD_CREATE_STREAM = 'ps_create'
+CMD_DELETE_STREAM = 'ps_delete'
+CMD_GET_PORT_SPEED = 'p_speed ?'
+CMD_GET_PORT_SPEED_REDUCTION = 'p_speedreduction ?'
+CMD_GET_RX_STATS_PER_TID = 'pr_tpldtraffic'
+CMD_GET_STREAM_DATA = 'pt_stream'
+CMD_GET_STREAMS_PER_PORT = 'ps_indices'
+CMD_GET_TID_PER_STREAM = 'ps_tpldid'
+CMD_GET_TX_STATS_PER_STREAM = 'pt_stream'
+CMD_GET_RX_STATS = 'pr_all ?'
+CMD_GET_TX_STATS = 'pt_all ?'
+CMD_INTERFRAME_GAP = 'p_interframegap'
+CMD_LOGIN = 'c_logon'
+CMD_LOGOFF = 'c_logoff'
+CMD_OWNER = 'c_owner'
+CMD_PORT = ';Port:'
+CMD_PORT_IP = 'p_ipaddress'
+CMD_RESERVE = 'p_reservation reserve'
+CMD_RELEASE = 'p_reservation release'
+CMD_RELINQUISH = 'p_reservation relinquish'
+CMD_RESET = 'p_reset'
+CMD_SET_PORT_TIME_LIMIT = 'p_txtimelimit'
+CMD_SET_STREAM_HEADER_PROTOCOL = 'ps_headerprotocol'
+CMD_SET_STREAM_ON_OFF = 'ps_enable'
+CMD_SET_STREAM_PACKET_HEADER = 'ps_packetheader'
+CMD_SET_STREAM_PACKET_LENGTH = 'ps_packetlength'
+CMD_SET_STREAM_PACKET_LIMIT = 'ps_packetlimit'
+CMD_SET_STREAM_PACKET_PAYLOAD = 'ps_payload'
+CMD_SET_STREAM_RATE_FRACTION = 'ps_ratefraction'
+CMD_SET_STREAM_TEST_PAYLOAD_ID = 'ps_tpldid'
+CMD_SET_TPLD_MODE = 'p_tpldmode'
+CMD_START_TRAFFIC = 'p_traffic on'
+CMD_STOP_TRAFFIC = 'p_traffic off'
+CMD_STREAM_MODIFIER = 'ps_modifier'
+CMD_STREAM_MODIFIER_COUNT = 'ps_modifiercount'
+CMD_STREAM_MODIFIER_RANGE = 'ps_modifierrange'
+CMD_VERSION = 'c_versionno ?'
+
+_LOCALE = locale.getlocale()[1]
+_LOGGER = logging.getLogger(__name__)
+
+
+class SimpleSocket(object):
+ """
+ Socket class
+ """
+ def __init__(self, hostname, port=5025, timeout=1):
+ """Constructor
+ :param hostname: hostname or ip as string
+ :param port: port number to use for socket as int
+ :param timeout: socket timeout as int
+ :return: SimpleSocket object
+ """
+ self.hostname = hostname
+ try:
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.settimeout(timeout)
+ self.sock.connect((hostname, port))
+ except socket.error as msg:
+ _LOGGER.error(
+ "Cannot connect to Xena Socket at %s", hostname)
+ _LOGGER.error("Exception : %s", msg)
+ sys.exit(1)
+
+ def __del__(self):
+ """Deconstructor
+ :return:
+ """
+ self.sock.close()
+
+ def ask(self, cmd):
+ """ Send the command over the socket
+ :param cmd: cmd as string
+ :return: UTF-8 encoded bytes returned from the socket
+ """
+ cmd += '\n'
+ try:
+ self.sock.send(cmd.encode('utf-8'))
+ return self.sock.recv(1024)
+ except OSError:
+ return b''  # empty bytes so callers can still decode the reply
+
+ def read_reply(self):
+ """ Get the response from the socket
+ :return: Return the reply
+ """
+ reply = self.sock.recv(1024)
+ if reply.find("---^".encode('utf-8')) != -1:
+ # read again the syntax error msg
+ reply = self.sock.recv(1024)
+ return reply
+
+ def send_command(self, cmd):
+ """ Send the command specified over the socket
+ :param cmd: Command to send as string
+ :return: None
+ """
+ cmd += '\n'
+ self.sock.send(cmd.encode('utf-8'))
+
+ def set_keep_alive(self):
+ """ Set the keep alive for the socket
+ :return: None
+ """
+ self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+
+
+class KeepAliveThread(threading.Thread):
+ """
+ Keep alive socket class
+ """
+ message = ''
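+ # Sending an (empty) message at a regular interval keeps the
+ # management session to the chassis from timing out.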
+
+ def __init__(self, connection, interval=10):
+ """ Constructor
+ :param connection: Socket for keep alive
+ :param interval: interval in seconds to send keep alive
+ :return: KeepAliveThread object
+ """
+ threading.Thread.__init__(self)
+ self.connection = connection
+ self.interval = interval
+ self.finished = threading.Event()
+ self.setDaemon(True)
+ _LOGGER.debug(
+ 'Xena Socket keep alive thread initiated, interval ' +
+ '{} seconds'.format(self.interval))
+
+ def stop(self):
+ """ Thread stop. See python thread docs for more info
+ :return: None
+ """
+ self.finished.set()
+ self.join()
+
+ def run(self):
+ """ Thread start. See python thread docs for more info
+ :return: None
+ """
+ while not self.finished.isSet():
+ self.finished.wait(self.interval)
+ self.connection.ask(self.message)
+
+
+class XenaSocketDriver(SimpleSocket):
+ """
+ Xena socket class
+ """
+ reply_ok = '<OK>'
+
+ def __init__(self, hostname, port=22611):
+ """ Constructor
+ :param hostname: Hostname or ip as string
+ :param port: port to use as int
+ :return: XenaSocketDriver object
+ """
+ SimpleSocket.__init__(self, hostname=hostname, port=port)
+ SimpleSocket.set_keep_alive(self)
+ self.access_semaphor = threading.Semaphore(1)
+
+ def ask(self, cmd):
+ """ Send the command over the socket in a thread safe manner
+ :param cmd: Command to send
+ :return: reply from socket
+ """
+ self.access_semaphor.acquire()
+ reply = SimpleSocket.ask(self, cmd)
+ self.access_semaphor.release()
+ return reply
+
+ def ask_verify(self, cmd):
+ """ Send the command over the socket in a thread safe manner and
+ verify the response is good.
+ :param cmd: Command to send
+ :return: Boolean True if command response is good, False otherwise
+ """
+ resp = self.ask(cmd).decode(_LOCALE).strip('\n')
+ _LOGGER.info('[ask_verify] %s', resp)
+ if resp == self.reply_ok:
+ return True
+ return False
+
+ def disconnect(self):
+ """
+ Close the socket connection
+ :return: None
+ """
+ self.sock.close()
+
+ def send_command(self, cmd):
+ """ Send the command over the socket with no return
+ :param cmd: Command to send
+ :return: None
+ """
+ self.access_semaphor.acquire()
+ SimpleSocket.send_command(self, cmd)
+ self.access_semaphor.release()
+
+ def send_query_replies(self, cmd):
+ """ Send the command over the socket and wait for all replies and return
+ the lines as a list
+ :param cmd: Command to send
+ :return: Response from command as list
+ """
+ # send the command followed by cmd SYNC to find out
+ # when the last reply arrives.
+ self.send_command(cmd.strip('\n'))
+ self.send_command('SYNC')
+ replies = []
+ self.access_semaphor.acquire()
+ msg = SimpleSocket.read_reply(self).decode(_LOCALE)
+ msgleft = ''
+ while True:
+ if '\n' in msg:
+ (reply, msgleft) = msg.split('\n', 1)
+ # check for syntax problems
+ if reply.rfind('Syntax') != -1:
+ self.access_semaphor.release()
+ return []
+
+ if reply.rfind('<SYNC>') == 0:
+
+ self.access_semaphor.release()
+ return replies
+
+ replies.append(reply + '\n')
+ msg = msgleft
+ else:
+ # more bytes to come
+ msgnew = SimpleSocket.read_reply(self).decode(_LOCALE)
+ msg = msgleft + msgnew
+
+
+class XenaManager(object):
+ """
+ Manager class for port and socket functions
+ """
+ def __init__(self, socketDriver, user='', password='xena'):
+ """Constructor
+
+ Establish a connection to Xena using a ``driver`` with the ``password``
+ supplied.
+
+ Attributes:
+ :param socketDriver: XenaSocketDriver connection object
+ :param user: Username set as the owner of the reserved ports
+ :param password: Password to the Xena traffic generator
+ :returns: XenaManager object
+ """
+ self.driver = socketDriver
+ self.ports = list()
+ self.keep_alive_thread = KeepAliveThread(self.driver)
+
+ if self.logon(password):
+ _LOGGER.info('Connected to Xena at %s', self.driver.hostname)
+ else:
+ _LOGGER.error('Failed to logon to Xena at %s', self.driver.hostname)
+ return
+
+ self.set_owner(user)
+
+ def disconnect(self):
+ """ Release ports and disconnect from chassis.
+ """
+ for module_port in self.ports:
+ module_port.release_port()
+ self.ports = []
+ self.logoff()
+ self.keep_alive_thread.stop()
+
+ def add_module_port(self, module, port):
+ """Factory for Xena Ports
+
+ :param module: String or int of module
+ :param port: String or int of port
+ :return: XenaPort object if success, None if port already added
+ """
+ xenaport = XenaPort(self, module, port)
+ if xenaport in self.ports:
+ return None
+ else:
+ self.ports.append(xenaport)
+ return xenaport
+
+ def get_module_port(self, module, port):
+ """Return the Xena Port object if available
+ :param module: module number as int or str
+ :param port: port number as int or str
+ :return: XenaPort object or None if not found
+ """
+ for por in self.ports:
+ if por.port == str(port) and por.module == str(module):
+ return por
+ return None
+
+ def get_version(self):
+ """
+ Get the version from the chassis
+ :return: versions of server and driver as string
+ """
+ res = self.driver.ask(make_manager_command(
+ CMD_VERSION, '')).decode(_LOCALE)
+ res = res.rstrip('\n').split()
+ return "Server: {} Driver: {}".format(res[1], res[2])
+
+ def logoff(self):
+ """
+ Logoff from the Xena chassis
+ :return: Boolean True if response OK, False if error.
+ """
+ return self.driver.ask_verify(make_manager_command(CMD_LOGOFF))
+
+ def logon(self, password):
+ """Login to the Xena traffic generator using the ``password`` supplied.
+
+ :param password: string of password
+ :return: Boolean True if response OK, False if error.
+ """
+ self.keep_alive_thread.start()
+ return self.driver.ask_verify(make_manager_command(CMD_LOGIN, password))
+
+ def set_owner(self, username):
+ """Set the ports owner.
+ :return: Boolean True if response OK, False if error.
+ """
+ return self.driver.ask_verify(make_manager_command(CMD_OWNER, username))
+
+
+class XenaPort(object):
+ """
+ Xena Port emulator class
+ """
+ def __init__(self, manager, module, port):
+ """Constructor
+
+ :param manager: XenaManager object
+ :param module: Module as string or int of module to use
+ :param port: Port as string or int of port to use
+ :return: XenaPort object
+ """
+ self._manager = manager
+ self._module = str(module)
+ self._port = str(port)
+ self._streams = list()
+
+ @property
+ def manager(self):
+ """Property for manager attribute
+ :return: manager object
+ """
+ return self._manager
+
+ @property
+ def module(self):
+ """Property for module attribute
+ :return: module value as string
+ """
+ return self._module
+
+ @property
+ def port(self):
+ """Property for port attribute
+ :return: port value as string
+ """
+ return self._port
+
+ def port_string(self):
+ """String builder with attributes
+ :return: String of module port for command sequence
+ """
+ stringify = "{}/{}".format(self._module, self._port)
+ return stringify
+
+ def add_stream(self):
+ """Add a stream to the port.
+ :return: XenaStream object, None if failure
+ """
+ identifier = len(self._streams)
+ stream = XenaStream(self, identifier)
+ if self._manager.driver.ask_verify(make_stream_command(
+ CMD_CREATE_STREAM, '', stream)):
+ self._streams.append(stream)
+ return stream
+ else:
+ _LOGGER.error("Error during stream creation")
+ return None
+
+ def clear_stats(self, rx_clear=True, tx_clear=True):
+ """Clear the port stats
+
+ :param rx_clear: Boolean if rx stats are to be cleared
+ :param tx_clear: Boolean if tx stats are to be cleared
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command(CMD_CLEAR_RX_STATS, self)
+ res1 = self._manager.driver.ask_verify(command) if rx_clear else True
+ command = make_port_command(CMD_CLEAR_TX_STATS, self)
+ res2 = self._manager.driver.ask_verify(command) if tx_clear else True
+ return all([res1, res2])
+
+ def get_effective_speed(self):
+ """
+ Get the effective speed on the port
+ :return: effective speed as float
+ """
+ port_speed = self.get_port_speed()
+ reduction = self.get_port_speed_reduction()
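+ # the speed reduction is expressed in parts per million,
+ # hence the division by 1,000,000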
+ effective_speed = port_speed * (1.0 - reduction / 1000000.0)
+ return effective_speed
+
+ def get_inter_frame_gap(self):
+ """
+ Get the inter-frame gap and return it as an integer
+ :return: integer of inter-frame gap
+ """
+ command = make_port_command(CMD_INTERFRAME_GAP + '?', self)
+ res = self._manager.driver.ask(command).decode(_LOCALE)
+ res = int(res.rstrip('\n').split(' ')[-1])
+ return res
+
+ def get_port_speed(self):
+ """
+ Get the port speed in bits per second and return it as an int.
+ :return: int of port speed in bits per second
+ """
+ command = make_port_command(CMD_GET_PORT_SPEED, self)
+ res = self._manager.driver.ask(command).decode(_LOCALE)
+ port_speed = res.split(' ')[-1].rstrip('\n')
+ return int(port_speed) * 1000000
+
+ def get_port_speed_reduction(self):
+ """
+ Get the port speed reduction value as int
+ :return: Integer of port speed reduction value
+ """
+ command = make_port_command(CMD_GET_PORT_SPEED_REDUCTION, self)
+ res = self._manager.driver.ask(command).decode(_LOCALE)
+ res = int(res.rstrip('\n').split(' ')[-1])
+ return res
+
+ def get_rx_stats(self):
+ """Get the rx stats and return the data as a dict.
+ :return: Receive stats as dictionary
+ """
+ command = make_port_command(CMD_GET_RX_STATS, self)
+ rx_data = self._manager.driver.send_query_replies(command)
+ data = XenaRXStats(rx_data, time.time())
+ return data
+
+ def get_tx_stats(self):
+ """Get the tx stats and return the data as a dict.
+ :return: Receive stats as dictionary
+ """
+ command = make_port_command(CMD_GET_TX_STATS, self)
+ tx_data = self._manager.driver.send_query_replies(command)
+ data = XenaTXStats(tx_data, time.time())
+ return data
+
+ def micro_tpld_disable(self):
+ """Disable micro TPLD and return to standard payload size
+ :return: Boolean if response OK, False if error
+ """
+ command = make_port_command(CMD_SET_TPLD_MODE + ' normal', self)
+ return self._manager.driver.ask_verify(command)
+
+ def micro_tpld_enable(self):
+ """Enable micro TPLD 6 byte payloads.
+ :return: Boolean True if response OK, False if error
+ """
+ command = make_port_command(CMD_SET_TPLD_MODE + ' micro', self)
+ return self._manager.driver.ask_verify(command)
+
+ def release_port(self):
+ """Release the port
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command(CMD_RELEASE, self)
+ return self._manager.driver.ask_verify(command)
+
+ def reserve_port(self):
+ """Reserve the port
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command(CMD_RESERVE, self)
+ return self._manager.driver.ask_verify(command)
+
+ def reset_port(self):
+ """Reset the port
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command(CMD_RESET, self)
+ return self._manager.driver.ask_verify(command)
+
+ def set_port_ip(self, ip_addr, cidr, gateway, wild='255'):
+ """
+ Set the port ip address of the specific port
+ :param ip_addr: IP address to set to port
+ :param cidr: cidr number for the subnet
+ :param gateway: Gateway ip for port
+ :param wild: wildcard used for ARP and PING replies
+ :return: Boolean True if response OK, False if error
+ """
+ # convert the cidr to a dot notation subnet address
+ subnet = socket.inet_ntoa(
+ struct.pack(">I", (0xffffffff << (32 - cidr)) & 0xffffffff))
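+ # e.g. a cidr of 24 yields a subnet mask of 255.255.255.0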
+
+ command = make_port_command('{} {} {} {} 0.0.0.{}'.format(
+ CMD_PORT_IP, ip_addr, subnet, gateway, wild), self)
+ return self._manager.driver.ask_verify(command)
+
+ def set_port_time_limit(self, micro_seconds):
+ """Set the port time limit in ms
+ :param micro_seconds: ms for port time limit
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command('{} {}'.format(
+ CMD_SET_PORT_TIME_LIMIT, micro_seconds), self)
+ return self._manager.driver.ask_verify(command)
+
+ def traffic_off(self):
+ """Start traffic
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command(CMD_STOP_TRAFFIC, self)
+ return self._manager.driver.ask_verify(command)
+
+ def traffic_on(self):
+ """Stop traffic
+ :return: Boolean True if response OK, False if error.
+ """
+ command = make_port_command(CMD_START_TRAFFIC, self)
+ return self._manager.driver.ask_verify(command)
+
+
+class XenaStream(object):
+ """
+ Xena stream emulator class
+ """
+ def __init__(self, xenaPort, streamID):
+ """Constructor
+
+ :param xenaPort: XenaPort object
+ :param streamID: Stream ID as int or string
+ :return: XenaStream object
+ """
+ self._xena_port = xenaPort
+ self._stream_id = str(streamID)
+ self._manager = self._xena_port.manager
+ self._header_protocol = None
+
+ @property
+ def xena_port(self):
+ """Property for port attribute
+ :return: XenaPort object
+ """
+ return self._xena_port
+
+ @property
+ def stream_id(self):
+ """Property for streamID attribute
+ :return: streamID value as string
+ """
+ return self._stream_id
+
+ def enable_multistream(self, flows, layer):
+ """
+ Basic implementation of multi stream. Enable multi stream by setting
+ modifiers on the stream
+ :param flows: Numbers of flows or end range
+ :param layer: layer to enable multi stream as str. Acceptable values
+ are L2, L3, or L4
+ :return: True if success False otherwise
+ """
+ if not self._header_protocol:
+ raise RuntimeError(
+ "Please set a protocol header before calling this method.")
+
+ # byte offsets for setting the modifier
+ offsets = {
+ 'L2': [0, 6],
+ 'L3': [32, 36] if 'VLAN' in self._header_protocol else [28, 32],
+ 'L4': [38, 40] if 'VLAN' in self._header_protocol else [34, 36]
+ }
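+ # Two 16-bit incrementing modifiers are attached to the stream, one at
+ # each offset above, so both address/port fields at the chosen layer
+ # are varied across the requested number of flows.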
+
+ responses = list()
+ if layer in offsets.keys() and flows > 0:
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_COUNT + ' [{}]'.format(self._stream_id) +
+ ' 2', self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER + ' [{},0] {} 0xFFFF0000 INC 1'.format(
+ self._stream_id, offsets[layer][0]), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_RANGE + ' [{},0] 0 1 {}'.format(
+ self._stream_id, flows), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER + ' [{},1] {} 0xFFFF0000 INC 1'.format(
+ self._stream_id, offsets[layer][1]), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_RANGE + ' [{},1] 0 1 {}'.format(
+ self._stream_id, flows), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ return all(responses) # return True if they all worked
+ elif flows < 1:
+ _LOGGER.warning(
+ 'No flows specified in enable multistream. Bypassing...')
+ return False
+ else:
+ raise NotImplementedError(
+ "Non-implemented stream layer in method enable multistream ",
+ "layer=", layer)
+
+ def get_stream_data(self):
+ """
+ Get the response for stream data
+ :return: String of response for stream data info
+ """
+ command = make_stream_command(CMD_GET_STREAM_DATA, '?', self)
+ res = self._manager.driver.ask(command).decode(_LOCALE)
+ return res
+
+ def set_header_protocol(self, protocol_header):
+ """Set the header info for the packet header hex.
+ If the packet header contains just Ethernet and IP info then call this
+ method with ETHERNET IP as the protocol header.
+
+ :param protocol_header: protocol header argument
+ :return: Boolean True if success, False if error
+ """
+ command = make_stream_command(
+ CMD_SET_STREAM_HEADER_PROTOCOL,
+ protocol_header, self)
+ if self._manager.driver.ask_verify(command):
+ self._header_protocol = protocol_header
+ return True
+ else:
+ return False
+
+ def set_off(self):
+ """Set the stream to off
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_ON_OFF, 'off', self))
+
+ def set_on(self):
+ """Set the stream to on
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_ON_OFF, 'on', self))
+
+ def set_packet_header(self, header):
+ """Set the stream packet header
+
+ :param header: packet header as hex bytes
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_PACKET_HEADER, header, self))
+
+ def set_packet_length(self, pattern_type, minimum, maximum):
+ """Set the pattern length with min and max values based on the pattern
+ type supplied
+
+ :param pattern_type: String of pattern type, valid entries [ fixed,
+ butterfly, random, mix, incrementing ]
+ :param minimum: integer of minimum byte value
+ :param maximum: integer of maximum byte value
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_PACKET_LENGTH, '{} {} {}'.format(
+ pattern_type, minimum, maximum), self))
+
+ def set_packet_limit(self, limit):
+ """Set the packet limit
+
+ :param limit: number of packets that will be sent, use -1 to disable
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_PACKET_LIMIT, limit, self))
+
+ def set_packet_payload(self, payload_type, hex_value):
+ """Set the payload to the hex value based on the payload type
+
+ :param payload_type: string of the payload type, valid entries [ pattern,
+ incrementing, prbs ]
+ :param hex_value: hex string of valid hex
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_PACKET_PAYLOAD, '{} {}'.format(
+ payload_type, hex_value), self))
+
+ def set_rate_fraction(self, fraction):
+ """Set the rate fraction
+
+ :param fraction: fraction for the stream
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_RATE_FRACTION, fraction, self))
+
+ def set_payload_id(self, identifier):
+ """ Set the test payload ID
+ :param identifier: ID as int or string
+ :return: Boolean True if success, False if error
+ """
+ return self._manager.driver.ask_verify(make_stream_command(
+ CMD_SET_STREAM_TEST_PAYLOAD_ID, identifier, self))
+
+
+class XenaRXStats(object):
+ """
+ Receive stat class
+ """
+ def __init__(self, stats, epoc):
+ """ Constructor
+ :param stats: Stats from pr all command as list
+ :param epoc: Current epoch time (from time.time())
+ :return: XenaRXStats object
+ """
+ self._stats = stats
+ self._time = epoc
+ self.data = self.parse_stats()
+ self.preamble = 8
+
+ @staticmethod
+ def _pack_stats(param, start, fields=None):
+ """ Pack up the list of stats in a dictionary
+ :param param: The list of params to process
+ :param start: What element to start at
+ :param fields: The field names to pack as keys
+ :return: Dictionary of data where fields match up to the params
+ """
+ if not fields:
+ fields = ['bps', 'pps', 'bytes', 'packets']
+ data = {}
+ i = 0
+ for column in fields:
+ data[column] = int(param[start + i])
+ i += 1
+
+ return data
+
+ @staticmethod
+ def _pack_tplds_stats(param, start):
+ """ Pack up the tplds stats
+ :param param: List of params to pack
+ :param start: What element to start at
+ :return: Dictionary of stats
+ """
+ data = {}
+ i = 0
+ for val in range(start, len(param) - start):
+ data[i] = int(param[val])
+ i += 1
+ return data
+
+ def _pack_rxextra_stats(self, param, start):
+ """ Pack up the extra stats
+ :param param: List of params to pack
+ :param start: What element to start at
+ :return: Dictionary of stats
+ """
+ fields = ['fcserrors', 'pauseframes', 'arprequests', 'arpreplies',
+ 'pingrequests', 'pingreplies', 'gapcount', 'gapduration']
+ return self._pack_stats(param, start, fields)
+
+ def _pack_tplderrors_stats(self, param, start):
+ """ Pack up tlpd errors
+ :param param: List of params to pack
+ :param start: What element to start at
+ :return: Dictionary of stats
+ """
+ fields = ['dummy', 'seq', 'mis', 'pld']
+ return self._pack_stats(param, start, fields)
+
+ def _pack_tpldlatency_stats(self, param, start):
+ """ Pack up the tpld latency stats
+ :param param: List of params to pack
+ :param start: What element to start at
+ :return: Dictionary of stats
+ """
+ fields = ['min', 'avg', 'max', '1sec']
+ return self._pack_stats(param, start, fields)
+
+ def _pack_tpldjitter_stats(self, param, start):
+ """ Pack up the tpld jitter stats
+ :param param: List of params to pack
+ :param start: What element to start at
+ :return: Dictionary of stats
+ """
+ fields = ['min', 'avg', 'max', '1sec']
+ return self._pack_stats(param, start, fields)
+
+ @property
+ def time(self):
+ """
+ :return: Epoch time at which the stats were collected
+ """
+ return self._time
+
+ def parse_stats(self):
+ """ Parse the stats from pr all command
+ :return: Dictionary of all stats
+ """
+ statdict = {}
+ for line in self._stats:
+ param = line.split()
+ if param[1] == 'PR_TOTAL':
+ statdict['pr_total'] = self._pack_stats(param, 2)
+ elif param[1] == 'PR_NOTPLD':
+ statdict['pr_notpld'] = self._pack_stats(param, 2,)
+ elif param[1] == 'PR_EXTRA':
+ statdict['pr_extra'] = self._pack_rxextra_stats(param, 2)
+ elif param[1] == 'PT_STREAM':
+ entry_id = "pt_stream_%s" % param[2].strip('[]')
+ statdict[entry_id] = self._pack_stats(param, 3)
+ elif param[1] == 'PR_TPLDS':
+ tid_list = self._pack_tplds_stats(param, 2)
+ if len(tid_list):
+ statdict['pr_tplds'] = tid_list
+ elif param[1] == 'PR_TPLDTRAFFIC':
+ if 'pr_tpldstraffic' in statdict:
+ data = statdict['pr_tpldstraffic']
+ else:
+ data = {}
+ entry_id = param[2].strip('[]')
+ data[entry_id] = self._pack_stats(param, 3)
+ statdict['pr_tpldstraffic'] = data
+ elif param[1] == 'PR_TPLDERRORS':
+ if 'pr_tplderrors' in statdict:
+ data = statdict['pr_tplderrors']
+ else:
+ data = {}
+ entry_id = param[2].strip('[]')
+ data[entry_id] = self._pack_tplderrors_stats(param, 3)
+ statdict['pr_tplderrors'] = data
+ elif param[1] == 'PR_TPLDLATENCY':
+ if 'pr_tpldlatency' in statdict:
+ data = statdict['pr_tpldlatency']
+ else:
+ data = {}
+ entry_id = param[2].strip('[]')
+ data[entry_id] = self._pack_tpldlatency_stats(param, 3)
+ statdict['pr_tpldlatency'] = data
+ elif param[1] == 'PR_TPLDJITTER':
+ if 'pr_tpldjitter' in statdict:
+ data = statdict['pr_tpldjitter']
+ else:
+ data = {}
+ entry_id = param[2].strip('[]')
+ data[entry_id] = self._pack_tpldjitter_stats(param, 3)
+ statdict['pr_tpldjitter'] = data
+ elif param[1] == 'PR_FILTER':
+ if 'pr_filter' in statdict:
+ data = statdict['pr_filter']
+ else:
+ data = {}
+ entry_id = param[2].strip('[]')
+ data[entry_id] = self._pack_stats(param, 3)
+ statdict['pr_filter'] = data
+ elif param[1] == 'P_RECEIVESYNC':
+ if param[2] == 'IN_SYNC':
+ statdict['p_receivesync'] = {'IN SYNC': 'True'}
+ else:
+ statdict['p_receivesync'] = {'IN SYNC': 'False'}
+ else:
+ logging.warning("XenaPort: unknown stats: %s", param[1])
+
+ mydict = statdict
+ return mydict
+
+
+class XenaTXStats(object):
+ """
+ Xena transmit stat class
+ """
+ def __init__(self, stats, epoc):
+ """ Constructor
+ :param stats: Stats from pt all command as list
+ :param epoc: Current epoch time (from time.time())
+ :return: XenaTXStats object
+ """
+ self._stats = stats
+ self._time = epoc
+ self._ptstreamkeys = list()
+ self.data = self.parse_stats()
+ self.preamble = 8
+
+ @staticmethod
+ def _pack_stats(params, start, fields=None):
+ """ Pack up the list of stats in a dictionary
+ :param params: The list of params to process
+ :param start: What element to start at
+ :param fields: The field names to pack as keys
+ :return: Dictionary of data where fields match up to the params
+ """
+ if not fields:
+ fields = ['bps', 'pps', 'bytes', 'packets']
+ data = {}
+ i = 0
+ for column in fields:
+ data[column] = int(params[start + i])
+ i += 1
+
+ return data
+
+ def _pack_txextra_stats(self, params, start):
+ """ Pack up the tx extra stats
+ :param params: List of params to pack
+ :param start: What element to start at
+ :return: Dictionary of stats
+ """
+ fields = ['arprequests', 'arpreplies', 'pingrequests', 'pingreplies',
+ 'injectedfcs', 'injectedseq', 'injectedmis', 'injectedint',
+ 'injectedtid', 'training']
+ return self._pack_stats(params, start, fields)
+
+ @property
+ def pt_stream_keys(self):
+ """
+ :return: Return a list of pt_stream_x stream key ids
+ """
+ return self._ptstreamkeys
+
+ @property
+ def time(self):
+ """
+ :return: Epoch time at which the stats were collected
+ """
+ return self._time
+
+ def parse_stats(self):
+ """ Parse the stats from pr all command
+ :return: Dictionary of all stats
+ """
+ statdict = {}
+ for line in self._stats:
+ param = line.split()
+ if param[1] == 'PT_TOTAL':
+ statdict['pt_total'] = self._pack_stats(param, 2)
+ elif param[1] == 'PT_NOTPLD':
+ statdict['pt_notpld'] = self._pack_stats(param, 2,)
+ elif param[1] == 'PT_EXTRA':
+ statdict['pt_extra'] = self._pack_txextra_stats(param, 2)
+ elif param[1] == 'PT_STREAM':
+ entry_id = "pt_stream_%s" % param[2].strip('[]')
+ self._ptstreamkeys.append(entry_id)
+ statdict[entry_id] = self._pack_stats(param, 3)
+ else:
+ logging.warning("XenaPort: unknown stats: %s", param[1])
+ mydict = statdict
+ return mydict
+
+
+def aggregate_stats(stat1, stat2):
+ """
+ Recursive function to aggregate two sets of statistics. This is used when
+ bi-directional traffic is run and statistics need to be calculated from
+ two sets of statistics.
+ :param stat1: One set of dictionary stats from RX or TX stats
+ :param stat2: Second set of dictionary stats from RX or TX stats
+ :return: stats for data entry in RX or TX Stats instance
+ """
+ newstat = dict()
+ for (keys1, keys2) in zip(stat1.keys(), stat2.keys()):
+ if isinstance(stat1[keys1], dict):
+ newstat[keys1] = aggregate_stats(stat1[keys1], stat2[keys2])
+ else:
+ if not isinstance(stat1[keys1], int) and not isinstance(
+ stat1[keys1], float):
+ # it's some value we don't need to aggregate
+ return stat1[keys1]
+ # for latency stats do the appropriate calculation
+ if keys1 == 'max':
+ newstat[keys1] = max(stat1[keys1], stat2[keys2])
+ elif keys1 == 'min':
+ newstat[keys1] = min(stat1[keys1], stat2[keys2])
+ elif keys1 == 'avg':
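+ # averaging the two averages assumes both sets cover a
+ # similar number of samples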
+ newstat[keys1] = (stat1[keys1] + stat2[keys2]) / 2
+ else:
+ newstat[keys1] = (stat1[keys1] + stat2[keys2])
+ return newstat
+
+
+def line_percentage(port, stats, time_active, packet_size):
+ """
+ Calculate the line percentage rate from the duration, port object and stat
+ object.
+ :param port: XenaPort object
+ :param stats: Xena RXStat or TXStat object
+ :param time_active: time the stream was active in secs as int
+ :param packet_size: packet size as int
+ :return: line percentage as float
+ """
+ # this is ugly, but it's prettier than calling the get method 3 times...
+ try:
+ packets = stats.data['pr_total']['packets']
+ except KeyError:
+ try:
+ packets = stats.data['pt_total']['packets']
+ except KeyError:
+ _LOGGER.error(
+ 'Could not calculate line rate because packet stat not found.')
+ return 0
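+ # Build up from packets per second to the L2 and then L1 bit rate
+ # (adding preamble and inter-frame gap overhead) and express it as a
+ # percentage of the port's effective speed.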
+ ifg = port.get_inter_frame_gap()
+ pps = packets_per_second(packets, time_active)
+ l2br = l2_bit_rate(packet_size, stats.preamble, pps)
+ l1br = l1_bit_rate(l2br, pps, ifg, stats.preamble)
+ return 100.0 * l1br / port.get_effective_speed()
+
+
+def l2_bit_rate(packet_size, preamble, pps):
+ """
+ Return the l2 bit rate
+ :param packet_size: packet size on the line in bytes
+ :param preamble: preamble size of the packet header in bytes
+ :param pps: packets per second
+ :return: l2 bit rate as float
+ """
+ return (packet_size * preamble) * pps
+
+
+def l1_bit_rate(l2br, pps, ifg, preamble):
+ """
+ Return the l1 bit rate
+ :param l2br: l2 bit rate in bits per second
+ :param pps: packets per second
+ :param ifg: the inter frame gap
+ :param preamble: preamble size of the packet header in bytes
+ :return: l1 bit rate as float
+ """
+ return l2br + (pps * ifg * preamble)
+
+
+def make_manager_command(cmd, argument=None):
+ """ String builder for Xena socket commands
+
+ :param cmd: Command to send
+ :param argument: Arguments for command to send
+ :return: String of command
+ """
+ command = '{} "{}"'.format(cmd, argument) if argument else cmd
+ _LOGGER.info("[Command Sent] : %s", command)
+ return command
+
+
+def make_port_command(cmd, xena_port):
+ """ String builder for Xena port commands
+
+ :param cmd: Command to send
+ :param xena_port: XenaPort object
+ :return: String of command
+ """
+ command = "{} {}".format(xena_port.port_string(), cmd)
+ _LOGGER.info("[Command Sent] : %s", command)
+ return command
+
+
+def make_stream_command(cmd, args, xena_stream):
+ """ String builder for Xena port commands
+
+ :param cmd: Command to send
+ :param args: Arguments for the command
+ :param xena_stream: XenaStream object
+ :return: String of command
+ """
+ command = "{} {} [{}] {}".format(xena_stream.xena_port.port_string(), cmd,
+ xena_stream.stream_id, args)
+ _LOGGER.info("[Command Sent] : %s", command)
+ return command
+
+
+def packets_per_second(packets, time_in_sec):
+ """
+ Return the pps as float
+ :param packets: total packets
+ :param time_in_sec: time in seconds
+ :return: float of pps
+ """
+ return packets / time_in_sec
diff --git a/tools/pkt_gen/xena/__init__.py b/tools/pkt_gen/xena/__init__.py
new file mode 100644
index 00000000..8081be42
--- /dev/null
+++ b/tools/pkt_gen/xena/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015-2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/pkt_gen/xena/profiles/baseconfig.x2544 b/tools/pkt_gen/xena/profiles/baseconfig.x2544
new file mode 100644
index 00000000..0612b329
--- /dev/null
+++ b/tools/pkt_gen/xena/profiles/baseconfig.x2544
@@ -0,0 +1,373 @@
+{
+ "copyright": [
+ "# Copyright 2015-2016 Xena Networks.",
+ "#",
+ "# Licensed under the Apache License, Version 2.0 (the 'License');",
+ "# you may not use this file except in compliance with the License.",
+ "# You may obtain a copy of the License at\n",
+ "#",
+ "# http://www.apache.org/licenses/LICENSE-2.0",
+ "#",
+ "# Unless required by applicable law or agreed to in writing, software",
+ "# distributed under the License is distributed on an 'AS IS' BASIS,",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ],
+ "PortHandler": {
+ "EntityList": [
+ {
+ "PortRef": {
+ "ChassisId": "4605b3c9-70cc-42d9-9d8c-16c34989a4c1",
+ "ModuleIndex": 3,
+ "PortIndex": 0
+ },
+ "PortGroup": "UNDEFINED",
+ "PairPeerRef": null,
+ "PairPeerId": "",
+ "MulticastRole": "Undefined",
+ "PortSpeed": "AUTO",
+ "InterFrameGap": 20,
+ "PauseModeOn": false,
+ "AutoNegEnabled": true,
+ "AdjustPpm": 0,
+ "LatencyOffset": 0,
+ "MdiMdixMode": "AUTO",
+ "EnableFec": true,
+ "ReplyArpRequests": true,
+ "ReplyPingRequests": true,
+ "IpV4Address": "192.168.199.10",
+ "IpV4RoutingPrefix": 24,
+ "IpV4Gateway": "192.168.199.1",
+ "IpV6Address": "::",
+ "IpV6RoutingPrefix": 64,
+ "IpV6Gateway": "::",
+ "IpGatewayMacAddress": "AAAAAAAA",
+ "PublicIpAddress": "",
+ "PublicIpRoutingPrefix": 24,
+ "PublicIpAddressV6": "",
+ "PublicIpRoutingPrefixV6": 64,
+ "RemoteLoopIpAddress": "",
+ "RemoteLoopIpAddressV6": "",
+ "RemoteLoopMacAddress": "AAAAAAAA",
+ "EnablePortRateCap": false,
+ "PortRateCapValue": 1000.0,
+ "PortRateCapProfile": "Physical Port Rate",
+ "PortRateCapUnit": "Mbps",
+ "MultiStreamMap": null,
+ "ItemID": "4faf0f0c-2fc6-44a7-87ea-5f47b02d4c1a",
+ "ParentID": "",
+ "Label": ""
+ },
+ {
+ "PortRef": {
+ "ChassisId": "4605b3c9-70cc-42d9-9d8c-16c34989a4c1",
+ "ModuleIndex": 3,
+ "PortIndex": 1
+ },
+ "PortGroup": "UNDEFINED",
+ "PairPeerRef": null,
+ "PairPeerId": "",
+ "MulticastRole": "Undefined",
+ "PortSpeed": "AUTO",
+ "InterFrameGap": 20,
+ "PauseModeOn": false,
+ "AutoNegEnabled": true,
+ "AdjustPpm": 0,
+ "LatencyOffset": 0,
+ "MdiMdixMode": "AUTO",
+ "EnableFec": true,
+ "ReplyArpRequests": true,
+ "ReplyPingRequests": true,
+ "IpV4Address": "192.168.199.11",
+ "IpV4RoutingPrefix": 24,
+ "IpV4Gateway": "192.168.199.1",
+ "IpV6Address": "::",
+ "IpV6RoutingPrefix": 64,
+ "IpV6Gateway": "::",
+ "IpGatewayMacAddress": "AAAAAAAA",
+ "PublicIpAddress": "",
+ "PublicIpRoutingPrefix": 24,
+ "PublicIpAddressV6": "",
+ "PublicIpRoutingPrefixV6": 64,
+ "RemoteLoopIpAddress": "",
+ "RemoteLoopIpAddressV6": "",
+ "RemoteLoopMacAddress": "AAAAAAAA",
+ "EnablePortRateCap": false,
+ "PortRateCapValue": 1000.0,
+ "PortRateCapProfile": "Physical Port Rate",
+ "PortRateCapUnit": "Mbps",
+ "MultiStreamMap": null,
+ "ItemID": "1b88dc59-1b1a-43f5-a314-673219f47545",
+ "ParentID": "",
+ "Label": ""
+ }
+ ]
+ },
+ "StreamHandler": {
+ "StreamConnectionList": [
+ {
+ "ConnectionId": 0,
+ "Port1Id": "4faf0f0c-2fc6-44a7-87ea-5f47b02d4c1a",
+ "Port2Id": "1b88dc59-1b1a-43f5-a314-673219f47545",
+ "AddressOffset1": 2,
+ "AddressOffset2": 3,
+ "ItemID": "244b9295-9a5a-4405-8404-a62074152783",
+ "ParentID": "",
+ "Label": ""
+ }
+ ]
+ },
+ "StreamProfileHandler": {
+ "ProfileAssignmentMap": {
+ "guid_1b88dc59-1b1a-43f5-a314-673219f47545": "033f23c9-3986-40c9-b7e4-9ac1176f3c0b",
+ "guid_4faf0f0c-2fc6-44a7-87ea-5f47b02d4c1a": "106a3aa6-ea43-4dd7-84b5-51424a52ac87"
+ },
+ "EntityList": [
+ {
+ "StreamConfig": {
+ "SwModifier": null,
+ "HwModifiers": [],
+ "FieldValueRanges": [],
+ "StreamDescrPrefix": "Stream",
+ "ResourceIndex": -1,
+ "TpldId": -1,
+ "EnableState": "OFF",
+ "RateType": "Fraction",
+ "PacketLimit": 0,
+ "RateFraction": 100.0,
+ "RatePps": 0.0,
+ "RateL2Mbps": 0.0,
+ "UseBurstValues": false,
+ "BurstSize": 0,
+ "BurstDensity": 100,
+ "HeaderSegments": [],
+ "PacketLengthType": "FIXED",
+ "PacketMinSize": 64,
+ "PacketMaxSize": 64,
+ "PayloadDefinition": {
+ "PayloadType": "Incrementing",
+ "PayloadPattern": "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
+ },
+ "ResourceUsed": false,
+ "ChildResourceUsed": false
+ },
+ "ItemID": "106a3aa6-ea43-4dd7-84b5-51424a52ac87",
+ "ParentID": "",
+ "Label": ""
+ },
+ {
+ "StreamConfig": {
+ "SwModifier": null,
+ "HwModifiers": [],
+ "FieldValueRanges": [],
+ "StreamDescrPrefix": "Stream",
+ "ResourceIndex": -1,
+ "TpldId": -1,
+ "EnableState": "OFF",
+ "RateType": "Fraction",
+ "PacketLimit": 0,
+ "RateFraction": 100.0,
+ "RatePps": 0.0,
+ "RateL2Mbps": 0.0,
+ "UseBurstValues": false,
+ "BurstSize": 0,
+ "BurstDensity": 100,
+ "HeaderSegments": [],
+ "PacketLengthType": "FIXED",
+ "PacketMinSize": 64,
+ "PacketMaxSize": 64,
+ "PayloadDefinition": {
+ "PayloadType": "Incrementing",
+ "PayloadPattern": "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
+ },
+ "ResourceUsed": false,
+ "ChildResourceUsed": false
+ },
+ "ItemID": "033f23c9-3986-40c9-b7e4-9ac1176f3c0b",
+ "ParentID": "",
+ "Label": ""
+ }
+ ]
+ },
+ "TestOptions": {
+ "TestTypeOptionMap": {
+ "Throughput": {
+ "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.ThroughputTestOptions, Xena2544",
+ "RateIterationOptions": {
+ "SearchType": "BinarySearch",
+ "AcceptableLoss": 0.0,
+ "ResultScope": "CommonResult",
+ "FastBinarySearch": false,
+ "InitialValue": 10.0,
+ "MinimumValue": 0.1,
+ "MaximumValue": 100.0,
+ "ValueResolution": 0.5,
+ "UsePassThreshold": false,
+ "PassThreshold": 0.0
+ },
+ "ReportPropertyOptions": [
+ "LatencyCounters"
+ ],
+ "TestType": "Throughput",
+ "Enabled": false,
+ "DurationType": "Seconds",
+ "Duration": 1.0,
+ "DurationFrames": 1,
+ "DurationFrameUnit": "Mframes",
+ "Iterations": 3,
+ "ItemID": "5ba8b4d4-9a52-4697-860a-4af1b97d2a5c",
+ "ParentID": "",
+ "Label": ""
+ },
+ "Latency": {
+ "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.LatencyTestOptions, Xena2544",
+ "RateSweepOptions": {
+ "StartValue": 50.0,
+ "EndValue": 100.0,
+ "StepValue": 50.0
+ },
+ "LatencyMode": "Last_To_Last",
+ "RateRelativeTputMaxRate": true,
+ "TestType": "Latency",
+ "Enabled": false,
+ "DurationType": "Seconds",
+ "Duration": 1.0,
+ "DurationFrames": 1,
+ "DurationFrameUnit": "Mframes",
+ "Iterations": 1,
+ "ItemID": "c63c0362-96a6-434b-9c67-6be518492a49",
+ "ParentID": "",
+ "Label": ""
+ },
+ "Loss": {
+ "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.LossTestOptions, Xena2544",
+ "RateSweepOptions": {
+ "StartValue": 50.0,
+ "EndValue": 100.0,
+ "StepValue": 50.0
+ },
+ "UsePassFailCriteria": false,
+ "AcceptableLoss": 0.0,
+ "AcceptableLossType": "Percent",
+ "TestType": "Loss",
+ "Enabled": false,
+ "DurationType": "Seconds",
+ "Duration": 1.0,
+ "DurationFrames": 1,
+ "DurationFrameUnit": "Mframes",
+ "Iterations": 1,
+ "ItemID": "f5cf336e-c983-4c48-a8cb-88447b3e2adb",
+ "ParentID": "",
+ "Label": ""
+ },
+ "Back2Back": {
+ "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.Back2BackTestOptions, Xena2544",
+ "RateSweepOptions": {
+ "StartValue": 100.0,
+ "EndValue": 100.0,
+ "StepValue": 50.0
+ },
+ "ResultScope": "CommonResult",
+ "BurstResolution": 100.0,
+ "TestType": "Back2Back",
+ "Enabled": false,
+ "DurationType": "Seconds",
+ "Duration": 1.0,
+ "DurationFrames": 1,
+ "DurationFrameUnit": "Mframes",
+ "Iterations": 1,
+ "ItemID": "2c494ee2-16f1-4a40-b28b-aff6ad7464e3",
+ "ParentID": "",
+ "Label": ""
+ }
+ },
+ "PacketSizes": {
+ "PacketSizeType": "CustomSizes",
+ "CustomPacketSizes": [
+ 512.0
+ ],
+ "SwPacketStartSize": 100,
+ "SwPacketEndSize": 1500,
+ "SwPacketStepSize": 100,
+ "HwPacketMinSize": 64,
+ "HwPacketMaxSize": 1500,
+ "MixedSizesWeights": []
+ },
+ "TopologyConfig": {
+ "Topology": "MESH",
+ "Direction": "BIDIR"
+ },
+ "FlowCreationOptions": {
+ "FlowCreationType": "StreamBased",
+ "MacBaseAddress": "4,244,188",
+ "UseGatewayMacAsDmac": true,
+ "EnableMultiStream": false,
+ "PerPortStreamCount": 1,
+ "MultiStreamAddressOffset": 2,
+ "MultiStreamAddressIncrement": 1,
+ "MultiStreamMacBaseAddress": "4,244,188",
+ "UseMicroTpldOnDemand": false
+ },
+ "LearningOptions": {
+ "MacLearningMode": "EveryTrial",
+ "MacLearningRetries": 1,
+ "ArpRefreshEnabled": true,
+ "ArpRefreshPeriod": 4000.0,
+ "UseFlowBasedLearningPreamble": false,
+ "FlowBasedLearningFrameCount": 1,
+ "FlowBasedLearningDelay": 500,
+ "LearningRatePercent": 1.0,
+ "LearningDuration": 5000.0
+ },
+ "ToggleSyncState": true,
+ "SyncOffDuration": 1,
+ "SyncOnDuration": 1,
+ "PayloadDefinition": {
+ "PayloadType": "Incrementing",
+ "PayloadPattern": "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"
+ },
+ "EnableSpeedReductSweep": false,
+ "UsePortSyncStart": false,
+ "PortStaggerSteps": 0,
+ "ShouldStopOnLos": true,
+ "PortResetDelay": 5
+ },
+ "CreationDate": "2016-02-24 13:33:50Z",
+ "ChassisManager": {
+ "ChassisList": [
+ {
+ "ChassisID": "4605b3c9-70cc-42d9-9d8c-16c34989a4c1",
+ "HostName": "10.19.15.19",
+ "PortNumber": 22606,
+ "Password": "xena",
+ "ConnectionType": "Native",
+ "UsedModuleList": [],
+ "ResourceIndex": 0,
+ "ResourceUsed": false,
+ "ChildResourceUsed": false
+ }
+ ]
+ },
+ "ReportConfig": {
+ "CustomerName": "Xena Networks",
+ "CustomerServiceID": "",
+ "CustomerAccessID": "",
+ "Comments": "",
+ "RateUnitTerminology": "FPS",
+ "IncludeTestPairInfo": true,
+ "IncludePerStreamInfo": false,
+ "IncludeGraphs": true,
+ "PlotThroughputUnit": "Pps",
+ "GeneratePdf": false,
+ "GenerateHtml": false,
+ "GenerateXml": true,
+ "GenerateCsv": false,
+ "SaveIntermediateResults": false,
+ "ReportFilename": "xena2544-report",
+ "AppendTimestamp": false
+ },
+ "TidAllocationScope": "ConfigScope",
+ "FormatVersion": 10,
+ "ApplicationVersion": "2.39.5876.25884"
+} \ No newline at end of file
diff --git a/tools/pkt_gen/xena/xena.py b/tools/pkt_gen/xena/xena.py
new file mode 100755
index 00000000..7dd4b90b
--- /dev/null
+++ b/tools/pkt_gen/xena/xena.py
@@ -0,0 +1,660 @@
+# Copyright 2016 Red Hat Inc & Xena Networks.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Rick Alongi, Red Hat Inc.
+# Amit Supugade, Red Hat Inc.
+# Dan Amzulescu, Xena Networks
+# Christian Trautman, Red Hat Inc.
+
+"""
+Xena Traffic Generator Model
+"""
+
+# python imports
+import binascii
+import logging
+import subprocess
+import sys
+from time import sleep
+import xml.etree.ElementTree as ET
+from collections import OrderedDict
+# scapy imports
+import scapy.layers.inet as inet
+
+# VSPerf imports
+from conf import settings
+from core.results.results_constants import ResultsConstants
+from tools.pkt_gen.trafficgen.trafficgenhelper import (
+ TRAFFIC_DEFAULTS,
+ merge_spec)
+from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator
+
+# Xena module imports
+from tools.pkt_gen.xena.xena_json import XenaJSON
+from tools.pkt_gen.xena.XenaDriver import (
+ aggregate_stats,
+ line_percentage,
+ XenaSocketDriver,
+ XenaManager,
+ )
+
+class Xena(ITrafficGenerator):
+ """
+ Xena Traffic generator wrapper class
+ """
+ _traffic_defaults = TRAFFIC_DEFAULTS.copy()
+ _logger = logging.getLogger(__name__)
+
+ def __init__(self):
+ self.mono_pipe = None
+ self.xmanager = None
+ self._params = {}
+ self._xsocket = None
+ self._duration = None
+ self.tx_stats = None
+ self.rx_stats = None
+
+ @property
+ def traffic_defaults(self):
+ """Default traffic values.
+
+ These can be expected to be constant across traffic generators,
+ so no setter is provided. Changes to the structure or contents
+ will likely break traffic generator implementations or tests
+ respectively.
+ """
+ return self._traffic_defaults
+
+ @staticmethod
+ def _create_throughput_result(root):
+ """
+        Create the results based on the output xml file from the Xena2544.exe
+        execution
+ :param root: root dictionary from xml import
+ :return: Results Ordered dictionary based off ResultsConstants
+ """
+ # get the test type from the report file
+ test_type = root[0][1].get('TestType')
+ # set the version from the report file
+ settings.setValue('XENA_VERSION', root[0][0][1].get('GeneratedBy'))
+
+ if test_type == 'Throughput':
+ results = OrderedDict()
+ results[ResultsConstants.THROUGHPUT_RX_FPS] = float(
+ root[0][1][0][0].get('PortRxPps')) + float(
+ root[0][1][0][1].get('PortRxPps'))
+ results[ResultsConstants.THROUGHPUT_RX_MBPS] = (float(
+ root[0][1][0][0].get('PortRxBpsL1')) + float(
+ root[0][1][0][1].get('PortRxBpsL1')))/ 1000000
+ results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
+ 100 - int(root[0][1][0].get('TotalLossRatioPcnt'))) * float(
+ root[0][1][0].get('TotalTxRatePcnt'))/100
+ results[ResultsConstants.TX_RATE_FPS] = root[0][1][0].get(
+ 'TotalTxRateFps')
+ results[ResultsConstants.TX_RATE_MBPS] = float(
+ root[0][1][0].get('TotalTxRateBpsL1')) / 1000000
+ results[ResultsConstants.TX_RATE_PERCENT] = root[0][1][0].get(
+ 'TotalTxRatePcnt')
+ try:
+ results[ResultsConstants.MIN_LATENCY_NS] = float(
+ root[0][1][0][0].get('MinLatency')) * 1000
+ except ValueError:
+ # Stats for latency returned as N/A so just post them
+ results[ResultsConstants.MIN_LATENCY_NS] = root[0][1][0][0].get(
+ 'MinLatency')
+ try:
+ results[ResultsConstants.MAX_LATENCY_NS] = float(
+ root[0][1][0][0].get('MaxLatency')) * 1000
+ except ValueError:
+ # Stats for latency returned as N/A so just post them
+ results[ResultsConstants.MAX_LATENCY_NS] = root[0][1][0][0].get(
+ 'MaxLatency')
+ try:
+ results[ResultsConstants.AVG_LATENCY_NS] = float(
+ root[0][1][0][0].get('AvgLatency')) * 1000
+ except ValueError:
+ # Stats for latency returned as N/A so just post them
+ results[ResultsConstants.AVG_LATENCY_NS] = root[0][1][0][0].get(
+ 'AvgLatency')
+ elif test_type == 'Back2Back':
+ results = OrderedDict()
+
+            # Just mimic what Ixia does and only return the b2b frame count.
+            # This may change later once it is decided what the common result
+            # stats to be returned should be.
+ results[ResultsConstants.B2B_FRAMES] = root[0][1][0][0].get(
+ 'TotalTxBurstFrames')
+ else:
+ raise NotImplementedError('Unknown test type in report file.')
+
+ return results
+
+ def _build_packet_header(self, reverse=False):
+ """
+ Build a packet header based on traffic profile using scapy external
+ libraries.
+ :param reverse: Swap source and destination info when building header
+ :return: packet header in hex
+ """
+ srcmac = self._params['traffic']['l2'][
+ 'srcmac'] if not reverse else self._params['traffic']['l2'][
+ 'dstmac']
+ dstmac = self._params['traffic']['l2'][
+ 'dstmac'] if not reverse else self._params['traffic']['l2'][
+ 'srcmac']
+ srcip = self._params['traffic']['l3'][
+ 'srcip'] if not reverse else self._params['traffic']['l3']['dstip']
+ dstip = self._params['traffic']['l3'][
+ 'dstip'] if not reverse else self._params['traffic']['l3']['srcip']
+ layer2 = inet.Ether(src=srcmac, dst=dstmac)
+ layer3 = inet.IP(src=srcip, dst=dstip,
+ proto=self._params['traffic']['l3']['proto'])
+ layer4 = inet.UDP(sport=self._params['traffic']['l4']['srcport'],
+ dport=self._params['traffic']['l4']['dstport'])
+ if self._params['traffic']['vlan']['enabled']:
+ vlan = inet.Dot1Q(vlan=self._params['traffic']['vlan']['id'],
+ prio=self._params['traffic']['vlan']['priority'],
+ id=self._params['traffic']['vlan']['cfi'])
+ else:
+ vlan = None
+ packet = layer2/vlan/layer3/layer4 if vlan else layer2/layer3/layer4
+ packet_bytes = bytes(packet)
+ packet_hex = '0x' + binascii.hexlify(packet_bytes).decode('utf-8')
+ return packet_hex
+
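+    # A minimal sketch of the header construction above, using placeholder
+    # addresses rather than values taken from a VSPerf traffic profile:
+    #
+    #   import binascii
+    #   import scapy.layers.inet as inet
+    #   pkt = (inet.Ether(src='ca:fe:00:00:00:01', dst='ca:fe:00:00:00:02') /
+    #          inet.IP(src='1.1.1.1', dst='90.90.90.90', proto='udp') /
+    #          inet.UDP(sport=3000, dport=3001))
+    #   hdr = '0x' + binascii.hexlify(bytes(pkt)).decode('utf-8')
+    #
+    # The resulting hex string is what stream.set_packet_header() is given in
+    # _start_traffic_api() below.
+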
+ def _create_api_result(self):
+ """
+ Create result dictionary per trafficgen specifications from socket API
+ stats. If stats are not available return values of 0.
+ :return: ResultsConstants as dictionary
+ """
+ # Handle each case of statistics based on if the data is available.
+ # This prevents uncaught exceptions when the stats aren't available.
+ result_dict = OrderedDict()
+ if self.tx_stats.data.get(self.tx_stats.pt_stream_keys[0]):
+ result_dict[ResultsConstants.TX_FRAMES] = self.tx_stats.data[
+ self.tx_stats.pt_stream_keys[0]]['packets']
+ result_dict[ResultsConstants.TX_RATE_FPS] = self.tx_stats.data[
+ self.tx_stats.pt_stream_keys[0]]['pps']
+ result_dict[ResultsConstants.TX_RATE_MBPS] = self.tx_stats.data[
+ self.tx_stats.pt_stream_keys[0]]['bps'] / 1000000
+ result_dict[ResultsConstants.TX_BYTES] = self.tx_stats.data[
+ self.tx_stats.pt_stream_keys[0]]['bytes']
+            # tx rate percent may need to be halved if bidirectional
+ result_dict[ResultsConstants.TX_RATE_PERCENT] = line_percentage(
+ self.xmanager.ports[0], self.tx_stats, self._duration,
+ self._params['traffic']['l2']['framesize']) if \
+ self._params['traffic']['bidir'] == 'False' else\
+ line_percentage(
+ self.xmanager.ports[0], self.tx_stats, self._duration,
+ self._params['traffic']['l2']['framesize']) / 2
+ else:
+ self._logger.error('Transmit stats not available.')
+ result_dict[ResultsConstants.TX_FRAMES] = 0
+ result_dict[ResultsConstants.TX_RATE_FPS] = 0
+ result_dict[ResultsConstants.TX_RATE_MBPS] = 0
+ result_dict[ResultsConstants.TX_BYTES] = 0
+ result_dict[ResultsConstants.TX_RATE_PERCENT] = 0
+
+ if self.rx_stats.data.get('pr_tpldstraffic'):
+ result_dict[ResultsConstants.RX_FRAMES] = self.rx_stats.data[
+ 'pr_tpldstraffic']['0']['packets']
+ result_dict[
+ ResultsConstants.THROUGHPUT_RX_FPS] = self.rx_stats.data[
+ 'pr_tpldstraffic']['0']['pps']
+ result_dict[
+ ResultsConstants.THROUGHPUT_RX_MBPS] = self.rx_stats.data[
+ 'pr_tpldstraffic']['0']['bps'] / 1000000
+ result_dict[ResultsConstants.RX_BYTES] = self.rx_stats.data[
+ 'pr_tpldstraffic']['0']['bytes']
+            # throughput percent may need to be halved if bidirectional
+ result_dict[
+ ResultsConstants.THROUGHPUT_RX_PERCENT] = line_percentage(
+ self.xmanager.ports[1], self.rx_stats, self._duration,
+ self._params['traffic']['l2']['framesize']) if \
+ self._params['traffic']['bidir'] == 'False' else \
+ line_percentage(
+ self.xmanager.ports[1], self.rx_stats, self._duration,
+ self._params['traffic']['l2']['framesize']) / 2
+
+ else:
+ self._logger.error('Receive stats not available.')
+ result_dict[ResultsConstants.RX_FRAMES] = 0
+ result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = 0
+ result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = 0
+ result_dict[ResultsConstants.RX_BYTES] = 0
+ result_dict[ResultsConstants.THROUGHPUT_RX_PERCENT] = 0
+
+ if self.rx_stats.data.get('pr_tplderrors'):
+ result_dict[ResultsConstants.PAYLOAD_ERR] = self.rx_stats.data[
+ 'pr_tplderrors']['0']['pld']
+ result_dict[ResultsConstants.SEQ_ERR] = self.rx_stats.data[
+ 'pr_tplderrors']['0']['seq']
+ else:
+ result_dict[ResultsConstants.PAYLOAD_ERR] = 0
+ result_dict[ResultsConstants.SEQ_ERR] = 0
+
+ if self.rx_stats.data.get('pr_tpldlatency'):
+ result_dict[ResultsConstants.MIN_LATENCY_NS] = self.rx_stats.data[
+ 'pr_tpldlatency']['0']['min']
+ result_dict[ResultsConstants.MAX_LATENCY_NS] = self.rx_stats.data[
+ 'pr_tpldlatency']['0']['max']
+ result_dict[ResultsConstants.AVG_LATENCY_NS] = self.rx_stats.data[
+ 'pr_tpldlatency']['0']['avg']
+ else:
+ result_dict[ResultsConstants.MIN_LATENCY_NS] = 0
+ result_dict[ResultsConstants.MAX_LATENCY_NS] = 0
+ result_dict[ResultsConstants.AVG_LATENCY_NS] = 0
+
+ return result_dict
+
+ def _setup_json_config(self, trials, loss_rate, testtype=None):
+ """
+        Create the 2bUsed.x2544 JSON file used for the Xena2544.exe execution.
+ :param trials: Number of trials
+ :param loss_rate: The acceptable loss rate as float
+ :param testtype: Either '2544_b2b' or '2544_throughput' as string
+ :return: None
+ """
+ try:
+ j_file = XenaJSON('./tools/pkt_gen/xena/profiles/baseconfig.x2544')
+ j_file.set_chassis_info(
+ settings.getValue('TRAFFICGEN_XENA_IP'),
+ settings.getValue('TRAFFICGEN_XENA_PASSWORD')
+ )
+ j_file.set_port(0, settings.getValue('TRAFFICGEN_XENA_MODULE1'),
+ settings.getValue('TRAFFICGEN_XENA_PORT1'))
+ j_file.set_port(1, settings.getValue('TRAFFICGEN_XENA_MODULE2'),
+ settings.getValue('TRAFFICGEN_XENA_PORT2'))
+ j_file.set_port_ip_v4(
+ 0, settings.getValue("TRAFFICGEN_XENA_PORT0_IP"),
+ settings.getValue("TRAFFICGEN_XENA_PORT0_CIDR"),
+ settings.getValue("TRAFFICGEN_XENA_PORT0_GATEWAY"))
+ j_file.set_port_ip_v4(
+ 1, settings.getValue("TRAFFICGEN_XENA_PORT1_IP"),
+ settings.getValue("TRAFFICGEN_XENA_PORT1_CIDR"),
+ settings.getValue("TRAFFICGEN_XENA_PORT1_GATEWAY"))
+
+ if testtype == '2544_throughput':
+ j_file.set_test_options_tput(
+ packet_sizes=self._params['traffic']['l2']['framesize'],
+ iterations=trials, loss_rate=loss_rate,
+ duration=self._duration, micro_tpld=True if self._params[
+ 'traffic']['l2']['framesize'] == 64 else False)
+ j_file.enable_throughput_test()
+
+ elif testtype == '2544_b2b':
+ j_file.set_test_options_back2back(
+ packet_sizes=self._params['traffic']['l2']['framesize'],
+ iterations=trials, duration=self._duration,
+ startvalue=self._params['traffic']['frame_rate'],
+ endvalue=self._params['traffic']['frame_rate'],
+ micro_tpld=True if self._params[
+ 'traffic']['l2']['framesize'] == 64 else False)
+ j_file.enable_back2back_test()
+
+ j_file.set_header_layer2(
+ dst_mac=self._params['traffic']['l2']['dstmac'],
+ src_mac=self._params['traffic']['l2']['srcmac'])
+ j_file.set_header_layer3(
+ src_ip=self._params['traffic']['l3']['srcip'],
+ dst_ip=self._params['traffic']['l3']['dstip'],
+ protocol=self._params['traffic']['l3']['proto'])
+ j_file.set_header_layer4_udp(
+ source_port=self._params['traffic']['l4']['srcport'],
+ destination_port=self._params['traffic']['l4']['dstport'])
+ if self._params['traffic']['vlan']['enabled']:
+ j_file.set_header_vlan(
+ vlan_id=self._params['traffic']['vlan']['id'],
+ id=self._params['traffic']['vlan']['cfi'],
+ prio=self._params['traffic']['vlan']['priority'])
+ j_file.add_header_segments(
+ flows=self._params['traffic']['multistream'],
+ multistream_layer=self._params['traffic']['stream_type'])
+ # set duplex mode
+ if self._params['traffic']['bidir'] == "True":
+ j_file.set_topology_mesh()
+ else:
+ j_file.set_topology_blocks()
+
+ j_file.write_config('./tools/pkt_gen/xena/profiles/2bUsed.x2544')
+ except Exception as exc:
+ self._logger.exception("Error during Xena JSON setup: %s", exc)
+ raise
+
+ def _start_traffic_api(self, packet_limit):
+ """
+ Start the Xena traffic using the socket API driver
+ :param packet_limit: packet limit for stream, set to -1 for no limit
+ :return: None
+ """
+ if not self.xmanager:
+ self._xsocket = XenaSocketDriver(
+ settings.getValue('TRAFFICGEN_XENA_IP'))
+ self.xmanager = XenaManager(
+ self._xsocket, settings.getValue('TRAFFICGEN_XENA_USER'),
+ settings.getValue('TRAFFICGEN_XENA_PASSWORD'))
+
+ # for the report file version info ask the chassis directly for its
+ # software versions
+ settings.setValue('XENA_VERSION', 'XENA Socket API - {}'.format(
+ self.xmanager.get_version()))
+
+ if not len(self.xmanager.ports):
+ self.xmanager.ports[0] = self.xmanager.add_module_port(
+ settings.getValue('TRAFFICGEN_XENA_MODULE1'),
+ settings.getValue('TRAFFICGEN_XENA_PORT1'))
+ if not self.xmanager.ports[0].reserve_port():
+ self._logger.error(
+ 'Unable to reserve port 0. Please release Xena Port')
+
+ if len(self.xmanager.ports) < 2:
+ self.xmanager.ports[1] = self.xmanager.add_module_port(
+ settings.getValue('TRAFFICGEN_XENA_MODULE2'),
+ settings.getValue('TRAFFICGEN_XENA_PORT2'))
+ if not self.xmanager.ports[1].reserve_port():
+ self._logger.error(
+ 'Unable to reserve port 1. Please release Xena Port')
+
+ # Clear port configuration for a clean start
+ self.xmanager.ports[0].reset_port()
+ self.xmanager.ports[1].reset_port()
+ self.xmanager.ports[0].clear_stats()
+ self.xmanager.ports[1].clear_stats()
+
+ # set the port IP from the conf file
+ self.xmanager.ports[0].set_port_ip(
+ settings.getValue('TRAFFICGEN_XENA_PORT0_IP'),
+ settings.getValue('TRAFFICGEN_XENA_PORT0_CIDR'),
+ settings.getValue('TRAFFICGEN_XENA_PORT0_GATEWAY'))
+ self.xmanager.ports[1].set_port_ip(
+ settings.getValue('TRAFFICGEN_XENA_PORT1_IP'),
+ settings.getValue('TRAFFICGEN_XENA_PORT1_CIDR'),
+ settings.getValue('TRAFFICGEN_XENA_PORT1_GATEWAY'))
+
+ def setup_stream(stream, port, payload_id, flip_addr=False):
+ """
+ Helper function to configure streams.
+ :param stream: Stream object from XenaDriver module
+ :param port: Port object from XenaDriver module
+ :param payload_id: payload ID as int
+ :param flip_addr: Boolean if the source and destination addresses
+ should be flipped.
+ :return: None
+ """
+ stream.set_on()
+ stream.set_packet_limit(packet_limit)
+
+ stream.set_rate_fraction(
+ 10000 * self._params['traffic']['frame_rate'])
+ stream.set_packet_header(self._build_packet_header(
+ reverse=flip_addr))
+ stream.set_header_protocol(
+ 'ETHERNET VLAN IP UDP' if self._params['traffic']['vlan'][
+ 'enabled'] else 'ETHERNET IP UDP')
+ stream.set_packet_length(
+ 'fixed', self._params['traffic']['l2']['framesize'], 16383)
+ stream.set_packet_payload('incrementing', '0x00')
+ stream.set_payload_id(payload_id)
+ port.set_port_time_limit(self._duration * 1000000)
+
+ if self._params['traffic']['l2']['framesize'] == 64:
+ # set micro tpld
+ port.micro_tpld_enable()
+
+ if self._params['traffic']['multistream']:
+ stream.enable_multistream(
+ flows=self._params['traffic']['multistream'],
+ layer=self._params['traffic']['stream_type'])
+
+ s1_p0 = self.xmanager.ports[0].add_stream()
+ setup_stream(s1_p0, self.xmanager.ports[0], 0)
+
+ if self._params['traffic']['bidir'] == 'True':
+ s1_p1 = self.xmanager.ports[1].add_stream()
+ setup_stream(s1_p1, self.xmanager.ports[1], 1, flip_addr=True)
+
+ if not self.xmanager.ports[0].traffic_on():
+ self._logger.error(
+ "Failure to start port 0. Check settings and retry.")
+ if self._params['traffic']['bidir'] == 'True':
+ if not self.xmanager.ports[1].traffic_on():
+ self._logger.error(
+ "Failure to start port 1. Check settings and retry.")
+ sleep(self._duration)
+ # getting results
+ if self._params['traffic']['bidir'] == 'True':
+            # need to aggregate both ports' stats and assign that data
+ self.rx_stats = self.xmanager.ports[1].get_rx_stats()
+ self.tx_stats = self.xmanager.ports[0].get_tx_stats()
+ self.tx_stats.data = aggregate_stats(
+ self.tx_stats.data,
+ self.xmanager.ports[1].get_tx_stats().data)
+ self.rx_stats.data = aggregate_stats(
+ self.rx_stats.data,
+ self.xmanager.ports[0].get_rx_stats().data)
+ else:
+ # no need to aggregate, just grab the appropriate port stats
+ self.tx_stats = self.xmanager.ports[0].get_tx_stats()
+ self.rx_stats = self.xmanager.ports[1].get_rx_stats()
+ sleep(1)
+
+ def _stop_api_traffic(self):
+ """
+ Stop traffic through the socket API
+ :return: Return results from _create_api_result method
+ """
+ self.xmanager.ports[0].traffic_off()
+ if self._params['traffic']['bidir'] == 'True':
+ self.xmanager.ports[1].traffic_off()
+ sleep(5)
+
+ stat = self._create_api_result()
+ self.disconnect()
+ return stat
+
+ def connect(self):
+ self._logger.debug('Connect')
+ return self
+
+ def disconnect(self):
+ """Disconnect from the traffic generator.
+
+ As with :func:`connect`, this function is optional.
+
+ Where implemented, this function should raise an exception on
+ failure.
+
+ :returns: None
+ """
+ self._logger.debug('disconnect')
+ if self.xmanager:
+ self.xmanager.disconnect()
+ self.xmanager = None
+
+ if self._xsocket:
+ self._xsocket.disconnect()
+ self._xsocket = None
+
+ def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ """Send a burst of traffic.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._start_traffic_api(numpkts)
+ return self._stop_api_traffic()
+
+ def send_cont_traffic(self, traffic=None, duration=20):
+ """Send a continuous flow of traffic.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._start_traffic_api(-1)
+ return self._stop_api_traffic()
+
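+    # A minimal usage sketch of the socket API path, assuming the
+    # TRAFFICGEN_XENA_* settings have already been loaded into the VSPerf
+    # configuration:
+    #
+    #   gen = Xena()
+    #   gen.connect()
+    #   results = gen.send_cont_traffic(duration=20)
+    #
+    # send_cont_traffic() starts the streams through the socket driver,
+    # sleeps for the duration and returns the dictionary built by
+    # _create_api_result().
+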
+ def start_cont_traffic(self, traffic=None, duration=20):
+ """Non-blocking version of 'send_cont_traffic'.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._start_traffic_api(-1)
+
+ def stop_cont_traffic(self):
+ """Stop continuous transmission and return results.
+ """
+ return self._stop_api_traffic()
+
+ def send_rfc2544_throughput(self, traffic=None, trials=3, duration=20,
+ lossrate=0.0):
+ """Send traffic per RFC2544 throughput test specifications.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._setup_json_config(trials, lossrate, '2544_throughput')
+
+ args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c",
+ "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r",
+ "./tools/pkt_gen/xena", "-u",
+ settings.getValue('TRAFFICGEN_XENA_USER')]
+ self.mono_pipe = subprocess.Popen(args, stdout=sys.stdout)
+ self.mono_pipe.communicate()
+ root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot()
+ return Xena._create_throughput_result(root)
+
+ def start_rfc2544_throughput(self, traffic=None, trials=3, duration=20,
+ lossrate=0.0):
+ """Non-blocking version of 'send_rfc2544_throughput'.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._setup_json_config(trials, lossrate, '2544_throughput')
+
+ args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c",
+ "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r",
+ "./tools/pkt_gen/xena", "-u",
+ settings.getValue('TRAFFICGEN_XENA_USER')]
+ self.mono_pipe = subprocess.Popen(args, stdout=sys.stdout)
+
+ def wait_rfc2544_throughput(self):
+ """Wait for and return results of RFC2544 test.
+
+ See ITrafficGenerator for description
+ """
+ self.mono_pipe.communicate()
+ sleep(2)
+ root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot()
+ return Xena._create_throughput_result(root)
+
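+    # The non-blocking variant pairs a start call with a wait call; a sketch
+    # of the expected call order with assumed default arguments:
+    #
+    #   gen = Xena()
+    #   gen.start_rfc2544_throughput(trials=3, duration=20, lossrate=0.0)
+    #   # ... other work while Xena2544.exe runs under mono ...
+    #   results = gen.wait_rfc2544_throughput()
+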
+ def send_rfc2544_back2back(self, traffic=None, trials=1, duration=20,
+ lossrate=0.0):
+ """Send traffic per RFC2544 back2back test specifications.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._setup_json_config(trials, lossrate, '2544_b2b')
+
+ args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c",
+ "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r",
+ "./tools/pkt_gen/xena", "-u",
+ settings.getValue('TRAFFICGEN_XENA_USER')]
+ self.mono_pipe = subprocess.Popen(
+ args, stdout=sys.stdout)
+ self.mono_pipe.communicate()
+ root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot()
+ return Xena._create_throughput_result(root)
+
+ def start_rfc2544_back2back(self, traffic=None, trials=1, duration=20,
+ lossrate=0.0):
+ """Non-blocking version of 'send_rfc2544_back2back'.
+
+ See ITrafficGenerator for description
+ """
+ self._duration = duration
+
+ self._params.clear()
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(self._params['traffic'],
+ traffic)
+
+ self._setup_json_config(trials, lossrate, '2544_b2b')
+
+ args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c",
+ "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r",
+ "./tools/pkt_gen/xena", "-u",
+ settings.getValue('TRAFFICGEN_XENA_USER')]
+ self.mono_pipe = subprocess.Popen(
+ args, stdout=sys.stdout)
+
+ def wait_rfc2544_back2back(self):
+ """Wait and set results of RFC2544 test.
+ """
+ self.mono_pipe.communicate()
+ sleep(2)
+ root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot()
+ return Xena._create_throughput_result(root)
+
+
+if __name__ == "__main__":
+ pass
+
diff --git a/tools/pkt_gen/xena/xena_json.py b/tools/pkt_gen/xena/xena_json.py
new file mode 100644
index 00000000..2a15a932
--- /dev/null
+++ b/tools/pkt_gen/xena/xena_json.py
@@ -0,0 +1,625 @@
+# Copyright 2016 Red Hat Inc & Xena Networks.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Contributors:
+# Dan Amzulescu, Xena Networks
+# Christian Trautman, Red Hat Inc.
+#
+# Usage can be seen below in the unit test. This implementation is designed for
+# one-module, two-port Xena chassis runs only.
+
+"""
+Xena JSON module
+"""
+
+import base64
+from collections import OrderedDict
+import json
+import locale
+import logging
+import uuid
+
+import scapy.layers.inet as inet
+
+_LOGGER = logging.getLogger(__name__)
+_LOCALE = locale.getlocale()[1]
+
+
+class XenaJSON(object):
+ """
+ Class to modify and read Xena JSON configuration files.
+ """
+ def __init__(self, json_path='./profiles/baseconfig.x2544'):
+ """
+ Constructor
+ :param json_path: path to JSON file to read. Expected files must have
+ two module ports with each port having its own stream config profile.
+ :return: XenaJSON object
+ """
+ self.json_data = read_json_file(json_path)
+
+ self.packet_data = OrderedDict()
+ self.packet_data['layer2'] = None
+ self.packet_data['vlan'] = None
+ self.packet_data['layer3'] = None
+ self.packet_data['layer4'] = None
+
+ def _add_multistream_layer(self, entity, seg_uuid, stop_value, layer):
+ """
+        Add the multistream layers to the JSON file based on the given layer
+ :param entity: Entity to append the segment to in entity list
+ :param seg_uuid: The UUID to attach the multistream layer to
+ :param stop_value: The number of flows to configure
+ :param layer: the layer that the multistream will be attached to
+ :return: None
+ """
+ field_name = {
+ 2: ('Dst MAC addr', 'Src MAC addr'),
+ 3: ('Dest IP Addr', 'Src IP Addr'),
+ 4: ('Dest Port', 'Src Port')
+ }
+ segments = [
+ {
+ "Offset": 0,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value,
+ "StepValue": 1,
+ "RepeatCount": 1,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][0]
+ },
+ {
+ "Offset": 0,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value,
+ "StepValue": 1,
+ "RepeatCount": 1,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][1]
+ }
+ ]
+
+ self.json_data['StreamProfileHandler']['EntityList'][entity][
+ 'StreamConfig']['HwModifiers'] = (segments)
+
+ def _create_packet_header(self):
+ """
+ Create the scapy packet header based on what has been built in this
+ instance using the set header methods. Return tuple of the two byte
+ arrays, one for each port.
+ :return: Scapy packet headers as bytearrays
+ """
+ if not self.packet_data['layer2']:
+ _LOGGER.warning('Using dummy info for layer 2 in Xena JSON file')
+ self.set_header_layer2()
+ packet1, packet2 = (self.packet_data['layer2'][0],
+ self.packet_data['layer2'][1])
+ for packet_header in list(self.packet_data.copy().values())[1:]:
+ if packet_header:
+ packet1 /= packet_header[0]
+ packet2 /= packet_header[1]
+ ret = (bytes(packet1), bytes(packet2))
+ return ret
+
+ def add_header_segments(self, flows=0, multistream_layer=None):
+ """
+ Build the header segments to write to the JSON file.
+ :param flows: Number of flows to configure for multistream if enabled
+ :param multistream_layer: layer to set multistream flows as string.
+ Acceptable values are L2, L3 or L4
+ :return: None
+ """
+ packet = self._create_packet_header()
+ segment1 = list()
+ segment2 = list()
+ header_pos = 0
+ if self.packet_data['layer2']:
+ # slice out the layer 2 bytes from the packet header byte array
+ layer2 = packet[0][header_pos: len(self.packet_data['layer2'][0])]
+ seg = create_segment(
+ "ETHERNET", encode_byte_array(layer2).decode(_LOCALE))
+ if multistream_layer == 'L2' and flows > 0:
+ self._add_multistream_layer(entity=0, seg_uuid=seg['ItemID'],
+ stop_value=flows, layer=2)
+ segment1.append(seg)
+ # now do the other port data with reversed src, dst info
+ layer2 = packet[1][header_pos: len(self.packet_data['layer2'][1])]
+ seg = create_segment(
+ "ETHERNET", encode_byte_array(layer2).decode(_LOCALE))
+ segment2.append(seg)
+ if multistream_layer == 'L2' and flows > 0:
+ self._add_multistream_layer(entity=1, seg_uuid=seg['ItemID'],
+ stop_value=flows, layer=2)
+ header_pos = len(layer2)
+ if self.packet_data['vlan']:
+ # slice out the vlan bytes from the packet header byte array
+ vlan = packet[0][header_pos: len(
+ self.packet_data['vlan'][0]) + header_pos]
+ segment1.append(create_segment(
+ "VLAN", encode_byte_array(vlan).decode(_LOCALE)))
+ segment2.append(create_segment(
+ "VLAN", encode_byte_array(vlan).decode(_LOCALE)))
+ header_pos += len(vlan)
+ if self.packet_data['layer3']:
+ # slice out the layer 3 bytes from the packet header byte array
+ layer3 = packet[0][header_pos: len(
+ self.packet_data['layer3'][0]) + header_pos]
+ seg = create_segment(
+ "IP", encode_byte_array(layer3).decode(_LOCALE))
+ segment1.append(seg)
+ if multistream_layer == 'L3' and flows > 0:
+ self._add_multistream_layer(entity=0, seg_uuid=seg['ItemID'],
+ stop_value=flows, layer=3)
+ # now do the other port data with reversed src, dst info
+ layer3 = packet[1][header_pos: len(
+ self.packet_data['layer3'][1]) + header_pos]
+ seg = create_segment(
+ "IP", encode_byte_array(layer3).decode(_LOCALE))
+ segment2.append(seg)
+ if multistream_layer == 'L3' and flows > 0:
+ self._add_multistream_layer(entity=1, seg_uuid=seg['ItemID'],
+ stop_value=flows, layer=3)
+ header_pos += len(layer3)
+ if self.packet_data['layer4']:
+ # slice out the layer 4 bytes from the packet header byte array
+ layer4 = packet[0][header_pos: len(
+ self.packet_data['layer4'][0]) + header_pos]
+ seg = create_segment(
+ "UDP", encode_byte_array(layer4).decode(_LOCALE))
+ segment1.append(seg)
+ if multistream_layer == 'L4' and flows > 0:
+ self._add_multistream_layer(entity=0, seg_uuid=seg['ItemID'],
+ stop_value=flows, layer=4)
+ # now do the other port data with reversed src, dst info
+ layer4 = packet[1][header_pos: len(
+ self.packet_data['layer4'][1]) + header_pos]
+ seg = create_segment(
+ "UDP", encode_byte_array(layer4).decode(_LOCALE))
+ segment2.append(seg)
+ if multistream_layer == 'L4' and flows > 0:
+ self._add_multistream_layer(entity=1, seg_uuid=seg['ItemID'],
+ stop_value=flows, layer=4)
+ header_pos += len(layer4)
+
+ self.json_data['StreamProfileHandler']['EntityList'][0][
+ 'StreamConfig']['HeaderSegments'] = segment1
+ self.json_data['StreamProfileHandler']['EntityList'][1][
+ 'StreamConfig']['HeaderSegments'] = segment2
+
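+    # A worked sketch of the slicing above for a plain Ethernet/IP/UDP header
+    # with no VLAN: bytes 0-13 are the Ethernet segment, 14-33 the IPv4
+    # segment and 34-41 the UDP segment. Each slice is base64 encoded and
+    # wrapped by create_segment(), for example:
+    #
+    #   eth = packet[0][0:14]
+    #   seg = create_segment(
+    #       "ETHERNET", encode_byte_array(eth).decode(_LOCALE))
+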
+ def disable_back2back_test(self):
+ """
+ Disable the rfc2544 back to back test
+ :return: None
+ """
+ self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][
+ 'Enabled'] = 'false'
+
+ def disable_throughput_test(self):
+ """
+ Disable the rfc2544 throughput test
+ :return: None
+ """
+ self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
+ 'Enabled'] = 'false'
+
+ def enable_back2back_test(self):
+ """
+ Enable the rfc2544 back to back test
+ :return: None
+ """
+ self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][
+ 'Enabled'] = 'true'
+
+ def enable_throughput_test(self):
+ """
+ Enable the rfc2544 throughput test
+ :return: None
+ """
+ self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
+ 'Enabled'] = 'true'
+
+ def set_chassis_info(self, hostname, pwd):
+ """
+ Set the chassis info
+        :param hostname: hostname or IP address of the chassis as string
+ :param pwd: password to chassis as string
+ :return: None
+ """
+ self.json_data['ChassisManager']['ChassisList'][0][
+ 'HostName'] = hostname
+ self.json_data['ChassisManager']['ChassisList'][0][
+ 'Password'] = pwd
+
+ def set_header_layer2(self, dst_mac='cc:cc:cc:cc:cc:cc',
+ src_mac='bb:bb:bb:bb:bb:bb', **kwargs):
+ """
+        Build scapy Ethernet L2 objects inside the instance packet_data structure
+ :param dst_mac: destination mac as string. Example "aa:aa:aa:aa:aa:aa"
+ :param src_mac: source mac as string. Example "bb:bb:bb:bb:bb:bb"
+ :param kwargs: Extra params per scapy usage.
+ :return: None
+ """
+ self.packet_data['layer2'] = [
+ inet.Ether(dst=dst_mac, src=src_mac, **kwargs),
+ inet.Ether(dst=src_mac, src=dst_mac, **kwargs)]
+
+ def set_header_layer3(self, src_ip='192.168.0.2', dst_ip='192.168.0.3',
+ protocol='UDP', **kwargs):
+ """
+        Build scapy IPv4 L3 objects inside the instance packet_data structure
+ :param src_ip: source IP as string in dot notation format
+ :param dst_ip: destination IP as string in dot notation format
+ :param protocol: protocol for l4
+ :param kwargs: Extra params per scapy usage
+ :return: None
+ """
+ self.packet_data['layer3'] = [
+ inet.IP(src=src_ip, dst=dst_ip, proto=protocol.lower(), **kwargs),
+ inet.IP(src=dst_ip, dst=src_ip, proto=protocol.lower(), **kwargs)]
+
+ def set_header_layer4_udp(self, source_port, destination_port, **kwargs):
+ """
+        Build scapy UDP L4 objects inside the instance packet_data structure
+ :param source_port: Source port as int
+ :param destination_port: Destination port as int
+ :param kwargs: Extra params per scapy usage
+ :return: None
+ """
+ self.packet_data['layer4'] = [
+ inet.UDP(sport=source_port, dport=destination_port, **kwargs),
+ inet.UDP(sport=source_port, dport=destination_port, **kwargs)]
+
+ def set_header_vlan(self, vlan_id=1, **kwargs):
+ """
+        Build Dot1Q scapy objects inside the instance packet_data structure
+ :param vlan_id: The VLAN ID
+ :param kwargs: Extra params per scapy usage
+ :return: None
+ """
+ self.packet_data['vlan'] = [
+ inet.Dot1Q(vlan=vlan_id, **kwargs),
+ inet.Dot1Q(vlan=vlan_id, **kwargs)]
+
+ def set_port(self, index, module, port):
+ """
+        Set the module and port for the port at the given index
+        :param index: Index of port to set, 0 = port1, 1 = port2, etc.
+ :param module: module location as int
+ :param port: port location in module as int
+ :return: None
+ """
+ self.json_data['PortHandler']['EntityList'][index]['PortRef'][
+ 'ModuleIndex'] = module
+ self.json_data['PortHandler']['EntityList'][index]['PortRef'][
+ 'PortIndex'] = port
+
+ def set_port_ip_v4(self, port, ip_addr, netmask, gateway):
+ """
+ Set the port IP info
+ :param port: port number as int of port to set ip info
+ :param ip_addr: ip address in dot notation format as string
+        :param netmask: CIDR prefix length (e.g. 24/16/8) as int
+ :param gateway: gateway address in dot notation format
+ :return: None
+ """
+ available_ports = range(len(
+ self.json_data['PortHandler']['EntityList']))
+ if port not in available_ports:
+ raise ValueError("{}{}{}".format(
+ 'Port assignment must be an available port ',
+ 'number in baseconfig file. Port=', port))
+ self.json_data['PortHandler']['EntityList'][
+ port]["IpV4Address"] = ip_addr
+ self.json_data['PortHandler']['EntityList'][
+ port]["IpV4Gateway"] = gateway
+ self.json_data['PortHandler']['EntityList'][
+ port]["IpV4RoutingPrefix"] = int(netmask)
+
+ def set_port_ip_v6(self, port, ip_addr, netmask, gateway):
+ """
+ Set the port IP info
+ :param port: port number as int of port to set ip info
+        :param ip_addr: IP address as 8 groups of 4 hexadecimal digits
+        separated by colons.
+        :param netmask: CIDR prefix length (e.g. 64/128) as int
+        :param gateway: gateway address as string of 8 groups of 4 hexadecimal
+        digits separated by colons.
+ :return: None
+ """
+ available_ports = range(len(
+ self.json_data['PortHandler']['EntityList']))
+ if port not in available_ports:
+ raise ValueError("{}{}{}".format(
+ 'Port assignment must be an available port ',
+ 'number in baseconfig file. Port=', port))
+ self.json_data['PortHandler']['EntityList'][
+ port]["IpV6Address"] = ip_addr
+ self.json_data['PortHandler']['EntityList'][
+ port]["IpV6Gateway"] = gateway
+ self.json_data['PortHandler']['EntityList'][
+ port]["IpV6RoutingPrefix"] = int(netmask)
+
+ def set_test_options_tput(self, packet_sizes, duration, iterations,
+ loss_rate, micro_tpld=False):
+ """
+        Set the throughput test options
+ :param packet_sizes: List of packet sizes to test, single int entry is
+ acceptable for one packet size testing
+ :param duration: time for each test in seconds as int
+ :param iterations: number of iterations of testing as int
+ :param loss_rate: acceptable loss rate as float
+ :param micro_tpld: boolean if micro_tpld should be enabled or disabled
+ :return: None
+ """
+ if isinstance(packet_sizes, int):
+ packet_sizes = [packet_sizes]
+ self.json_data['TestOptions']['PacketSizes'][
+ 'CustomPacketSizes'] = packet_sizes
+ self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
+ 'Duration'] = duration
+ self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
+ 'RateIterationOptions']['AcceptableLoss'] = loss_rate
+ self.json_data['TestOptions']['FlowCreationOptions'][
+ 'UseMicroTpldOnDemand'] = 'true' if micro_tpld else 'false'
+ self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
+ 'Iterations'] = iterations
+
+ def set_test_options_back2back(self, packet_sizes, duration,
+ iterations, startvalue, endvalue,
+ micro_tpld=False):
+ """
+ Set the back2back test options
+ :param packet_sizes: List of packet sizes to test, single int entry is
+ acceptable for one packet size testing
+ :param duration: time for each test in seconds as int
+ :param iterations: number of iterations of testing as int
+        :param startvalue: rate sweep start value
+        :param endvalue: rate sweep end value
+        :param micro_tpld: boolean if micro_tpld should be enabled or disabled
+ :return: None
+ """
+ if isinstance(packet_sizes, int):
+ packet_sizes = [packet_sizes]
+ self.json_data['TestOptions']['PacketSizes'][
+ 'CustomPacketSizes'] = packet_sizes
+ self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][
+ 'Duration'] = duration
+ self.json_data['TestOptions']['FlowCreationOptions'][
+ 'UseMicroTpldOnDemand'] = 'true' if micro_tpld else 'false'
+ self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][
+ 'Iterations'] = iterations
+ self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][
+ 'RateSweepOptions']['StartValue'] = startvalue
+ self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][
+ 'RateSweepOptions']['EndValue'] = endvalue
+
+ def set_topology_blocks(self):
+ """
+ Set the test topology to a West to East config for half duplex flow with
+ port 0 as the sender and port 1 as the receiver.
+ :return: None
+ """
+ self.json_data['TestOptions']['TopologyConfig']['Topology'] = 'BLOCKS'
+ self.json_data['TestOptions']['TopologyConfig'][
+ 'Direction'] = 'WEST_EAST'
+ self.json_data['PortHandler']['EntityList'][0][
+ 'PortGroup'] = "WEST"
+ self.json_data['PortHandler']['EntityList'][1][
+ 'PortGroup'] = "EAST"
+
+ def set_topology_mesh(self):
+ """
+        Set the test topology to Mesh for bidirectional full-duplex flow
+ :return: None
+ """
+ self.json_data['TestOptions']['TopologyConfig']['Topology'] = 'MESH'
+ self.json_data['TestOptions']['TopologyConfig']['Direction'] = 'BIDIR'
+ self.json_data['PortHandler']['EntityList'][0][
+ 'PortGroup'] = "UNDEFINED"
+ self.json_data['PortHandler']['EntityList'][1][
+ 'PortGroup'] = "UNDEFINED"
+
+ def write_config(self, path='./2bUsed.x2544'):
+ """
+        Write the configuration out to a file
+ :param path: Output file to export the json data to
+ :return: None
+ """
+ if not write_json_file(self.json_data, path):
+ raise RuntimeError("Could not write out file, please check config")
+
+
+def create_segment(header_type, encode_64_string):
+ """
+ Create segment for JSON file
+ :param header_type: Type of header as string
+    :param encode_64_string: base64-encoded string value of the header bytes
+ :return: segment as dictionary
+ """
+ return {
+ "SegmentType": header_type.upper(),
+ "SegmentValue": encode_64_string,
+ "ItemID": str(uuid.uuid4()),
+ "ParentID": "",
+ "Label": ""}
+
+
+def decode_byte_array(enc_str):
+ """ Decodes the base64-encoded string to a byte array
+ :param enc_str: The base64-encoded string representing a byte array
+ :return: The decoded byte array
+ """
+ dec_string = base64.b64decode(enc_str)
+ barray = bytearray()
+ barray.extend(dec_string)
+ return barray
+
+
+def encode_byte_array(byte_arr):
+ """ Encodes the byte array as a base64-encoded string
+ :param byte_arr: A bytearray containing the bytes to convert
+ :return: A base64 encoded string
+ """
+ enc_string = base64.b64encode(bytes(byte_arr))
+ return enc_string
+
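+# A quick round-trip sketch of the two helpers above; the "//8=" mask used in
+# _add_multistream_layer decodes to two 0xFF bytes, i.e. a 255/255 mask:
+#
+#   decode_byte_array('//8=')        # bytearray(b'\xff\xff')
+#   encode_byte_array(b'\xff\xff')   # b'//8='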
+
+def print_json_report(json_data):
+ """
+ Print out info from the json data for testing purposes only.
+ :param json_data: json loaded data from json.loads
+ :return: None
+ """
+ print("<<Xena JSON Config Report>>\n")
+ try:
+ print("### Chassis Info ###")
+ print("Chassis IP: {}".format(json_data['ChassisManager'][
+ 'ChassisList'][0]['HostName']))
+ print("Chassis Password: {}".format(json_data['ChassisManager'][
+ 'ChassisList'][0]['Password']))
+ print("### Port Configuration ###")
+ print("Port 1 IPv4:{}/{} gateway:{}".format(
+ json_data['PortHandler']['EntityList'][0]["IpV4Address"],
+ json_data['PortHandler']['EntityList'][0]["IpV4RoutingPrefix"],
+ json_data['PortHandler']['EntityList'][0]["IpV4Gateway"]))
+ print("Port 1 IPv6:{}/{} gateway:{}".format(
+ json_data['PortHandler']['EntityList'][0]["IpV6Address"],
+ json_data['PortHandler']['EntityList'][0]["IpV6RoutingPrefix"],
+ json_data['PortHandler']['EntityList'][0]["IpV6Gateway"]))
+ print("Port 2 IPv4:{}/{} gateway:{}".format(
+ json_data['PortHandler']['EntityList'][1]["IpV4Address"],
+ json_data['PortHandler']['EntityList'][1]["IpV4RoutingPrefix"],
+ json_data['PortHandler']['EntityList'][1]["IpV4Gateway"]))
+ print("Port 2 IPv6:{}/{} gateway:{}".format(
+ json_data['PortHandler']['EntityList'][1]["IpV6Address"],
+ json_data['PortHandler']['EntityList'][1]["IpV6RoutingPrefix"],
+ json_data['PortHandler']['EntityList'][1]["IpV6Gateway"]))
+ print("Port 1: {}/{} group: {}".format(
+ json_data['PortHandler']['EntityList'][0]['PortRef']['ModuleIndex'],
+ json_data['PortHandler']['EntityList'][0]['PortRef']['PortIndex'],
+ json_data['PortHandler']['EntityList'][0]['PortGroup']))
+ print("Port 2: {}/{} group: {}".format(
+ json_data['PortHandler']['EntityList'][1]['PortRef']['ModuleIndex'],
+ json_data['PortHandler']['EntityList'][1]['PortRef']['PortIndex'],
+ json_data['PortHandler']['EntityList'][1]['PortGroup']))
+ print("### Tests Enabled ###")
+ print("Back2Back Enabled: {}".format(json_data['TestOptions'][
+ 'TestTypeOptionMap']['Back2Back']['Enabled']))
+ print("Throughput Enabled: {}".format(json_data['TestOptions'][
+ 'TestTypeOptionMap']['Throughput']['Enabled']))
+ print("### Test Options ###")
+ print("Test topology: {}/{}".format(
+ json_data['TestOptions']['TopologyConfig']['Topology'],
+ json_data['TestOptions']['TopologyConfig']['Direction']))
+ print("Packet Sizes: {}".format(json_data['TestOptions'][
+ 'PacketSizes']['CustomPacketSizes']))
+ print("Test duration: {}".format(json_data['TestOptions'][
+ 'TestTypeOptionMap']['Throughput']['Duration']))
+ print("Acceptable loss rate: {}".format(json_data['TestOptions'][
+ 'TestTypeOptionMap']['Throughput']['RateIterationOptions'][
+ 'AcceptableLoss']))
+ print("Micro TPLD enabled: {}".format(json_data['TestOptions'][
+ 'FlowCreationOptions']['UseMicroTpldOnDemand']))
+ print("Test iterations: {}".format(json_data['TestOptions'][
+ 'TestTypeOptionMap']['Throughput']['Iterations']))
+ if 'StreamConfig' in json_data['StreamProfileHandler']['EntityList'][0]:
+ print("### Header segments ###")
+ for seg in json_data['StreamProfileHandler']['EntityList']:
+ for header in seg['StreamConfig']['HeaderSegments']:
+ print("Type: {}".format(
+ header['SegmentType']))
+ print("Value: {}".format(decode_byte_array(
+ header['SegmentValue'])))
+ print("### Multi Stream config ###")
+ for seg in json_data['StreamProfileHandler']['EntityList']:
+ for header in seg['StreamConfig']['HwModifiers']:
+ print(header)
+ except KeyError as exc:
+ print("Error setting not found in JSON data: {}".format(exc))
+
+
+def read_json_file(json_file):
+ """
+ Read the json file path and return a dictionary of the data
+ :param json_file: path to json file
+ :return: dictionary of json data
+ """
+ try:
+ with open(json_file, 'r', encoding=_LOCALE) as data_file:
+ file_data = json.loads(data_file.read())
+ except ValueError as exc:
+ # general json exception, Python 3.5 adds new exception type
+ _LOGGER.exception("Exception with json read: %s", exc)
+ raise
+ except IOError as exc:
+ _LOGGER.exception(
+ 'Exception during file open: %s file=%s', exc, json_file)
+ raise
+ return file_data
+
+
+def write_json_file(json_data, output_path):
+ """
+ Write out the dictionary of data to a json file
+ :param json_data: dictionary of json data
+ :param output_path: file path to write output
+ :return: Boolean if success
+ """
+ try:
+ with open(output_path, 'w', encoding=_LOCALE) as fileh:
+ json.dump(json_data, fileh, indent=2, sort_keys=True,
+ ensure_ascii=True)
+ return True
+ except ValueError as exc:
+ # general json exception, Python 3.5 adds new exception type
+ _LOGGER.exception(
+ "Exception with json write: %s", exc)
+ return False
+ except IOError as exc:
+ _LOGGER.exception(
+ 'Exception during file write: %s file=%s', exc, output_path)
+ return False
+
+
+if __name__ == "__main__":
+ print("Running UnitTest for XenaJSON")
+ JSON = XenaJSON()
+ print_json_report(JSON.json_data)
+ JSON.set_chassis_info('192.168.0.5', 'vsperf')
+ JSON.set_port(0, 1, 0)
+ JSON.set_port(1, 1, 1)
+ JSON.set_port_ip_v4(0, '192.168.240.10', 32, '192.168.240.1')
+ JSON.set_port_ip_v4(1, '192.168.240.11', 32, '192.168.240.1')
+ JSON.set_port_ip_v6(0, 'a1a1:a2a2:a3a3:a4a4:a5a5:a6a6:a7a7:a8a8', 128,
+ 'a1a1:a2a2:a3a3:a4a4:a5a5:a6a6:a7a7:1111')
+ JSON.set_port_ip_v6(1, 'b1b1:b2b2:b3b3:b4b4:b5b5:b6b6:b7b7:b8b8', 128,
+ 'b1b1:b2b2:b3b3:b4b4:b5b5:b6b6:b7b7:1111')
+ JSON.set_header_layer2(dst_mac='dd:dd:dd:dd:dd:dd',
+ src_mac='ee:ee:ee:ee:ee:ee')
+ JSON.set_header_vlan(vlan_id=5)
+ JSON.set_header_layer3(src_ip='192.168.100.2', dst_ip='192.168.100.3',
+ protocol='udp')
+ JSON.set_header_layer4_udp(source_port=3000, destination_port=3001)
+ JSON.set_test_options_tput(packet_sizes=[64], duration=10, iterations=1,
+ loss_rate=0.0, micro_tpld=True)
+ JSON.add_header_segments(flows=4000, multistream_layer='L4')
+ JSON.set_topology_blocks()
+ write_json_file(JSON.json_data, './testthis.x2544')
+ JSON = XenaJSON('./testthis.x2544')
+ print_json_report(JSON.json_data)
+
diff --git a/tools/systeminfo.py b/tools/systeminfo.py
index 62db852b..9d8eb5cb 100644
--- a/tools/systeminfo.py
+++ b/tools/systeminfo.py
@@ -71,8 +71,9 @@ def get_nic():
output = subprocess.check_output('lspci', shell=True)
output = output.decode(locale.getdefaultlocale()[1])
for line in output.split('\n'):
- for nic_pciid in S.getValue('WHITELIST_NICS'):
- if line.startswith(nic_pciid):
+ for nic in S.getValue('NICS'):
+ # lspci shows PCI addresses without domain part, i.e. last 7 chars
+ if line.startswith(nic['pci'][-7:]):
nics.append(''.join(line.split(':')[2:]).strip())
return nics
@@ -167,6 +168,14 @@ def get_pid(proc_name_str):
"""
return get_pids([proc_name_str])
+def pid_isalive(pid):
+ """ Checks if given PID is alive
+
+ :param pid: PID of the process
+ :returns: True if given process is running, False otherwise
+ """
+ return os.path.isdir('/proc/' + str(pid))
+
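+# For example, pid_isalive(os.getpid()) returns True for the running process,
+# while a PID that has already exited returns False.
+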
# This function uses long switch per purpose, so let us suppress pylint warning too-many-branches
# pylint: disable=R0912
def get_version(app_name):
@@ -181,7 +190,7 @@ def get_version(app_name):
'dpdk' : os.path.join(S.getValue('RTE_SDK'), 'lib/librte_eal/common/include/rte_version.h'),
'qemu' : os.path.join(S.getValue('QEMU_DIR'), 'VERSION'),
'l2fwd' : os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/l2fwd.c'),
- 'ixnet' : os.path.join(S.getValue('TRAFFICGEN_IXNET_LIB_PATH'), 'pkgIndex.tcl')
+ 'ixnet' : os.path.join(S.getValue('TRAFFICGEN_IXNET_LIB_PATH'), 'pkgIndex.tcl'),
}
@@ -239,6 +248,12 @@ def get_version(app_name):
app_version = match_line(app_version_file['ixnet'], 'package provide IxTclNetwork')
if app_version:
app_version = app_version.split(' ')[3]
+ elif app_name.lower() == 'xena':
+ try:
+ app_version = S.getValue('XENA_VERSION')
+ except AttributeError:
+ # setting was not available after execution
+ app_version = 'N/A'
elif app_name.lower() == 'dummy':
# get git tag of file with Dummy implementation
app_git_tag = get_git_tag(os.path.join(S.getValue('ROOT_DIR'), 'tools/pkt_gen/dummy/dummy.py'))
diff --git a/tools/tasks.py b/tools/tasks.py
index 90b7e553..9816a336 100644
--- a/tools/tasks.py
+++ b/tools/tasks.py
@@ -26,6 +26,7 @@ import locale
import time
from conf import settings
+from tools import systeminfo
CMD_PREFIX = 'cmd : '
@@ -85,17 +86,24 @@ def run_task(cmd, logger, msg=None, check_error=False):
for file_d in ret[0]:
if file_d == proc.stdout.fileno():
- line = proc.stdout.readline()
- if settings.getValue('VERBOSITY') == 'debug':
- sys.stdout.write(line.decode(my_encoding))
- stdout.append(line)
+ while True:
+ line = proc.stdout.readline()
+ if not line:
+ break
+ if settings.getValue('VERBOSITY') == 'debug':
+ sys.stdout.write(line.decode(my_encoding))
+ stdout.append(line)
if file_d == proc.stderr.fileno():
- line = proc.stderr.readline()
- sys.stderr.write(line.decode(my_encoding))
- stderr.append(line)
+ while True:
+ line = proc.stderr.readline()
+ if not line:
+ break
+ sys.stderr.write(line.decode(my_encoding))
+ stderr.append(line)
if proc.poll() is not None:
break
+
except OSError as ex:
handle_error(ex)
else:
@@ -150,6 +158,55 @@ def run_interactive_task(cmd, logger, msg):
return child
+def terminate_task_subtree(pid, signal='-15', sleep=10, logger=None):
+ """Terminate given process and all its children
+
+    Function will send the given signal to the process. In case the
+    process does not terminate within the given sleep interval and the
+    signal was not SIGKILL, then the process will be killed by SIGKILL.
+    After that the function will check if all children of the process
+    are terminated and if not, the same terminating procedure is applied
+    to any living child (only one level of children is considered).
+
+ :param pid: Process ID to terminate
+ :param signal: Signal to be sent to the process
+ :param sleep: Maximum delay in seconds after signal is sent
+ :param logger: Logger to write details to
+ """
+ try:
+ output = subprocess.check_output("pgrep -P " + str(pid), shell=True).decode().rstrip('\n')
+ except subprocess.CalledProcessError:
+ output = ""
+
+ terminate_task(pid, signal, sleep, logger)
+
+    # just in case children were kept alive
+    children = output.split('\n')
+    for child in children:
+        if child:
+            terminate_task(child, signal, sleep, logger)
+
+def terminate_task(pid, signal='-15', sleep=10, logger=None):
+ """Terminate process with given pid
+
+    Function will send the given signal to the process. In case the
+    process does not terminate within the given sleep interval and the
+    signal was not SIGKILL, then the process will be killed by SIGKILL.
+
+ :param pid: Process ID to terminate
+ :param signal: Signal to be sent to the process
+ :param sleep: Maximum delay in seconds after signal is sent
+ :param logger: Logger to write details to
+ """
+ if systeminfo.pid_isalive(pid):
+ run_task(['sudo', 'kill', signal, str(pid)], logger)
+ logger.debug('Wait for process %s to terminate after signal %s', pid, signal)
+ for dummy in range(sleep):
+ time.sleep(1)
+ if not systeminfo.pid_isalive(pid):
+ break
+
+ if signal.lstrip('-').upper() not in ('9', 'KILL', 'SIGKILL') and systeminfo.pid_isalive(pid):
+ terminate_task(pid, '-9', sleep, logger)
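+
+# A minimal sketch of tearing down a whole process tree, assuming 'pid' holds
+# the parent PID and '_logger' is the caller's logger:
+#
+#   terminate_task_subtree(pid, signal='-15', sleep=10, logger=_logger)
+#
+# The subtree variant sends SIGTERM to the parent, escalates to SIGKILL after
+# the sleep window if needed, and then repeats the procedure for first-level
+# children.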
class Process(object):
"""Control an instance of a long-running process.
@@ -242,17 +299,14 @@ class Process(object):
self.kill()
raise exc
- def kill(self, signal='-15', sleep=2):
+ def kill(self, signal='-15', sleep=10):
"""Kill process instance if it is alive.
:param signal: signal to be sent to the process
:param sleep: delay in seconds after signal is sent
"""
- if self._child and self._child.isalive():
- run_task(['sudo', 'kill', signal, str(self._child.pid)],
- self._logger)
- self._logger.debug('Wait for process to terminate')
- time.sleep(sleep)
+ if self.is_running():
+ terminate_task_subtree(self._child.pid, signal, sleep, self._logger)
if self.is_relinquished():
self._relinquish_thread.join()
@@ -275,7 +329,7 @@ class Process(object):
:returns: True if process is running, else False
"""
- return self._child is not None
+ return self._child and self._child.isalive()
def _affinitize_pid(self, core, pid):
"""Affinitize a process with ``pid`` to ``core``.
@@ -298,7 +352,7 @@ class Process(object):
"""
self._logger.info('Affinitizing process')
- if self._child and self._child.isalive():
+ if self.is_running():
self._affinitize_pid(core, self._child.pid)
class ContinueReadPrintLoop(threading.Thread):
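
For reference, a minimal sketch of how the new termination helpers might be driven from a test script. It assumes it is executed inside the vsperf tree with the configuration already loaded (run_task() reads VERBOSITY from conf.settings) and with passwordless sudo available; the shell command, logger name and grace period are illustrative, not taken from the patch.

    import logging
    import subprocess

    from tools import tasks

    logging.basicConfig(level=logging.DEBUG)
    _LOGGER = logging.getLogger('terminate-demo')

    # spawn a shell that leaves a first-level child (the sleep) behind
    proc = subprocess.Popen('sleep 120 & wait', shell=True)

    # SIGTERM the shell and any of its first-level children; anything that
    # survives the 10 second grace period is escalated to SIGKILL
    tasks.terminate_task_subtree(proc.pid, signal='-15', sleep=10, logger=_LOGGER)
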
diff --git a/tools/veth.py b/tools/veth.py
new file mode 100644
index 00000000..6418d11a
--- /dev/null
+++ b/tools/veth.py
@@ -0,0 +1,118 @@
+# Copyright 2016 Red Hat Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+veth port emulation
+"""
+
+import logging
+import os
+
+from tools import tasks
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def add_veth_port(port, peer_port):
+ """
+ Add a veth port
+ :param port: port name for the first port
+ :param peer_port: port name for the peer port
+ :return: None
+ """
+ # touch a tracking file in a tmp area so that VSPerf created veth ports
+ # can be identified and cleaned up on testcase completion if needed.
+ if not os.path.isdir('/tmp/veth'):
+ try:
+ os.mkdir('/tmp/veth')
+ except os.error:
+ # OK, don't crash, but cleanup may be an issue
+ _LOGGER.error('Unable to create veth temp folder.')
+ _LOGGER.error(
+ 'Veth ports may not be removed on testcase completion')
+ if os.path.isdir('/tmp/veth'):
+ with open('/tmp/veth/{}-{}'.format(port, peer_port), 'a'):
+ os.utime('/tmp/veth/{}-{}'.format(port, peer_port), None)
+ tasks.run_task(['sudo', 'ip', 'link', 'add',
+ port, 'type', 'veth', 'peer', 'name', peer_port],
+ _LOGGER, 'Adding veth port {} with peer port {}...'.format(
+ port, peer_port), False)
+
+
+def bring_up_eth_port(eth_port, namespace=None):
+ """
+ Bring up an eth port
+ :param eth_port: name of the eth port to bring up
+ :param namespace: namespace the eth port is located in, if applicable
+ :return: None
+ """
+ if namespace:
+ tasks.run_task(['sudo', 'ip', 'netns', 'exec', namespace,
+ 'ip', 'link', 'set', eth_port, 'up'],
+ _LOGGER,
+ 'Bringing up port {} in namespace {}...'.format(
+ eth_port, namespace), False)
+ else:
+ tasks.run_task(['sudo', 'ip', 'link', 'set', eth_port, 'up'],
+ _LOGGER, 'Bringing up port...', False)
+
+
+def del_veth_port(port, peer_port):
+ """
+ Delete the veth port; its peer is deleted automatically when the
+ first port is deleted
+ :param port: port name to delete
+ :param peer_port: peer port name
+ :return: None
+ """
+ # delete the file if it exists in the temp area
+ if os.path.exists('/tmp/veth/{}-{}'.format(port, peer_port)):
+ os.remove('/tmp/veth/{}-{}'.format(port, peer_port))
+ tasks.run_task(['sudo', 'ip', 'link', 'del', port],
+ _LOGGER, 'Deleting veth port {} with peer {}...'.format(
+ port, peer_port), False)
+
+
+# pylint: disable=unused-argument
+def validate_add_veth_port(result, port, peer_port):
+ """
+ Validation function for integration testcases
+ """
+ devs = os.listdir('/sys/class/net')
+ return all([port in devs, peer_port in devs])
+
+
+def validate_bring_up_eth_port(result, eth_port, namespace=None):
+ """
+ Validation function for integration testcases
+ """
+ command = list()
+ if namespace:
+ command += ['ip', 'netns', 'exec', namespace]
+ command += ['cat', '/sys/class/net/{}/operstate'.format(eth_port)]
+ out = tasks.run_task(command, _LOGGER, 'Validating port up...', False)
+
+ # since different types of ports may report different statuses, the best
+ # we can do for now is to make sure the state is not reported as 'down'
+ if 'down' in out:
+ return False
+ return True
+
+
+def validate_del_veth_port(result, port, peer_port):
+ """
+ Validation function for integration testcases
+ """
+ devs = os.listdir('/sys/class/net')
+ return not any([port in devs, peer_port in devs])
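
To illustrate how the module fits together, a possible end-to-end use from an integration test is sketched below. It assumes the vsperf configuration has been loaded, passwordless sudo is available and the port names veth_a/veth_b are free; the names are illustrative only.

    from tools import veth

    # create the pair (also drops a tracking file under /tmp/veth)
    veth.add_veth_port('veth_a', 'veth_b')

    # bring both ends up in the default namespace
    veth.bring_up_eth_port('veth_a')
    veth.bring_up_eth_port('veth_b')

    # the validate_* hooks mirror the calls above; the unused first
    # argument is the step result passed in by the integration framework
    assert veth.validate_add_veth_port(None, 'veth_a', 'veth_b')
    print(veth.validate_bring_up_eth_port(None, 'veth_a'))

    # deleting the first port removes its peer and the tracking file
    veth.del_veth_port('veth_a', 'veth_b')
    assert veth.validate_del_veth_port(None, 'veth_a', 'veth_b')
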