path: root/yardstick
Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py      |   6
-rw-r--r--  yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py | 101
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/iniparser.py           | 135
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py        | 517
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py            |  85
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py          |  35
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_prox.py             |  46
7 files changed, 566 insertions(+), 359 deletions(-)
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index f98b1e59e..385702b75 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -86,8 +86,8 @@ class ProxBinSearchProfile(ProxProfile):
# throughput and packet loss from the most recent successful test
successful_pkt_loss = 0.0
for test_value in self.bounds_iterator(LOG):
- result = traffic_gen.resource_helper.run_test(pkt_size, duration,
- test_value, self.tolerated_loss)
+ result, port_samples = traffic_gen.run_test(pkt_size, duration,
+ test_value, self.tolerated_loss)
if result.success:
LOG.debug("Success! Increasing lower bound")
@@ -97,5 +97,5 @@ class ProxBinSearchProfile(ProxProfile):
LOG.debug("Failure... Decreasing upper bound")
self.current_upper = test_value
- samples = result.get_samples(pkt_size, successful_pkt_loss)
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
self.queue.put(samples)
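The hunk above depends on the reworked run_test() later in this patch, which now returns a (result, port_samples) pair; get_samples() then folds the per-port counters into the KPI dict. A minimal, self-contained sketch of that merge — interface names, counter values and the base KPI keys are illustrative only; the real merge happens inside ProxTestDataTuple.get_samples():

# Sketch: fold per-port counters into the KPI samples dict.
def merge_port_samples(samples, port_samples=None):
    merged = dict(samples)
    if port_samples:
        merged.update(port_samples)
    return merged

base = {"TxThroughput": 10.5, "RxThroughput": 10.4, "PktSize": 64}
ports = {"xe0": {"in_packets": 1250000, "out_packets": 1251000},
         "xe1": {"in_packets": 1249000, "out_packets": 1251000}}
print(merge_port_samples(base, ports))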
diff --git a/yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py b/yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py
new file mode 100644
index 000000000..7e3cfa852
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Fixed traffic profile definitions """
+
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+
+LOG = logging.getLogger(__name__)
+
+
+class ProxMplsTagUntagProfile(ProxProfile):
+ """
+ This profile adds a single stream at the beginning of the traffic session
+ """
+
+ def __init__(self, tp_config):
+ super(ProxMplsTagUntagProfile, self).__init__(tp_config)
+ self.current_lower = self.lower_bound
+ self.current_upper = self.upper_bound
+
+ @property
+ def delta(self):
+ return self.current_upper - self.current_lower
+
+ @property
+ def mid_point(self):
+ return (self.current_lower + self.current_upper) / 2
+
+ def bounds_iterator(self, logger=None):
+ self.current_lower = self.lower_bound
+ self.current_upper = self.upper_bound
+
+ test_value = self.current_upper
+ while abs(self.delta) >= self.precision:
+ if logger:
+ logger.debug("New interval [%s, %s), precision: %d", self.current_lower,
+ self.current_upper, self.step_value)
+ logger.info("Testing with value %s", test_value)
+
+ yield test_value
+ test_value = self.mid_point
+
+ def run_test_with_pkt_size(self, traffic_gen, pkt_size, duration):
+ """Run the test for a single packet size.
+
+ :param queue: queue object we put samples into
+ :type queue: Queue
+ :param traffic_gen: traffic generator instance
+ :type traffic_gen: TrafficGen
+ :param pkt_size: The packet size to test with.
+ :type pkt_size: int
+ :param duration: The duration for each try.
+ :type duration: int
+
+ """
+
+ LOG.info("Testing with packet size %d", pkt_size)
+
+ # Binary search assumes the lower value of the interval is
+ # successful and the upper value is a failure.
+ # The first value that is tested, is the maximum value. If that
+ # succeeds, no more searching is needed. If it fails, a regular
+ # binary search is performed.
+ #
+ # The test_value used for the first iteration of binary search
+ # is adjusted so that the delta between this test_value and the
+ # upper bound is a power-of-2 multiple of precision. In the
+ # optimistic situation where this first test_value results in a
+ # success, the binary search will complete on an integer multiple
+ # of the precision, rather than on a fraction of it.
+
+ # throughput and packet loss from the most recent successful test
+ successful_pkt_loss = 0.0
+ for test_value in self.bounds_iterator(LOG):
+ result, port_samples = traffic_gen.run_test(pkt_size, duration,
+ test_value, self.tolerated_loss)
+
+ if result.success:
+ LOG.debug("Success! Increasing lower bound")
+ self.current_lower = test_value
+ successful_pkt_loss = result.pkt_loss
+ else:
+ LOG.debug("Failure... Decreasing upper bound")
+ self.current_upper = test_value
+
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+ self.queue.put(samples)
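The profile above drives a binary search over the generator speed: the first iteration tests the upper bound, and after each run the caller moves one bound while bounds_iterator keeps halving the interval until it is narrower than the configured precision. A self-contained sketch of that control flow — the bounds, precision and success criterion below are illustrative stand-ins for run_test():

# Sketch: interval halving as in bounds_iterator(); the generator reads the
# bounds through shared state, just as the profile reads self.current_lower
# and self.current_upper, which the caller updates after every test.
def bounds_iterator(bounds, precision):
    test_value = bounds["upper"]                 # first try the maximum value
    while abs(bounds["upper"] - bounds["lower"]) >= precision:
        yield test_value
        test_value = (bounds["lower"] + bounds["upper"]) / 2.0

bounds = {"lower": 0.0, "upper": 100.0}
for value in bounds_iterator(bounds, precision=5.0):
    success = value <= 60.0                      # stand-in for run_test()
    if success:
        bounds["lower"] = value                  # raise the lower bound
    else:
        bounds["upper"] = value                  # lower the upper bound
    print("tested %.3f -> %s" % (value, "success" if success else "failure"))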
diff --git a/yardstick/network_services/vnf_generic/vnf/iniparser.py b/yardstick/network_services/vnf_generic/vnf/iniparser.py
index 996441264..70e24de5b 100644
--- a/yardstick/network_services/vnf_generic/vnf/iniparser.py
+++ b/yardstick/network_services/vnf_generic/vnf/iniparser.py
@@ -14,22 +14,18 @@
class ParseError(Exception):
- def __init__(self, message, line_no, line):
+ def __init__(self, message, lineno, line):
self.msg = message
self.line = line
- self.line_no = line_no
+ self.lineno = lineno
def __str__(self):
- return 'at line %d, %s: %r' % (self.line_no, self.msg, self.line)
+ return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)
class BaseParser(object):
-
- PARSE_EXC = ParseError
-
- def __init__(self):
- super(BaseParser, self).__init__()
- self.line_no = 0
+ lineno = 0
+ parse_exc = ParseError
def _assignment(self, key, value):
self.assignment(key, value)
@@ -47,9 +43,9 @@ class BaseParser(object):
colon = line.find(':')
equal = line.find('=')
if colon < 0 and equal < 0:
- return self.error_invalid_assignment(line)
+ return line.strip(), '@'
- if colon < 0 or (0 <= equal < colon):
+ if colon < 0 or (equal >= 0 and equal < colon):
key, value = line[:equal], line[equal + 1:]
else:
key, value = line[:colon], line[colon + 1:]
@@ -59,56 +55,44 @@ class BaseParser(object):
value = value[1:-1]
return key.strip(), [value]
- def _single_line_parse(self, line, key, value):
- self.line_no += 1
-
- if line.startswith(('#', ';')):
- self.comment(line[1:].strip())
- return key, value
+ def parse(self, lineiter):
+ key = None
+ value = []
- active, _, comment = line.partition(';')
- self.comment(comment.strip())
+ for line in lineiter:
+ self.lineno += 1
+
+ line = line.rstrip()
+ lines = line.split(';')
+ line = lines[0]
+ if not line:
+ # Blank line, ends multi-line values
+ if key:
+ key, value = self._assignment(key, value)
+ continue
+ elif line.startswith((' ', '\t')):
+ # Continuation of previous assignment
+ if key is None:
+ self.error_unexpected_continuation(line)
+ else:
+ value.append(line.lstrip())
+ continue
- if not active:
- # Blank line, ends multi-line values
if key:
+ # Flush previous assignment, if any
key, value = self._assignment(key, value)
- return key, value
-
- if active.startswith((' ', '\t')):
- # Continuation of previous assignment
- if key is None:
- return self.error_unexpected_continuation(line)
- value.append(active.lstrip())
- return key, value
-
- if key:
- # Flush previous assignment, if any
- key, value = self._assignment(key, value)
-
- if active.startswith('['):
- # Section start
- section = self._get_section(active)
- if section:
- self.new_section(section)
-
- else:
- key, value = self._split_key_value(active)
- if not key:
- return self.error_empty_key(line)
-
- return key, value
-
- def parse(self, line_iter=None):
- if line_iter is None:
- return
-
- key = None
- value = []
-
- for line in line_iter:
- key, value = self._single_line_parse(line, key, value)
+ if line.startswith('['):
+ # Section start
+ section = self._get_section(line)
+ if section:
+ self.new_section(section)
+ elif line.startswith(('#', ';')):
+ self.comment(line[1:].lstrip())
+ else:
+ key, value = self._split_key_value(line)
+ if not key:
+ return self.error_empty_key(line)
if key:
# Flush previous assignment, if any
@@ -126,23 +110,23 @@ class BaseParser(object):
"""Called when a comment is parsed."""
pass
- def make_parser_error(self, template, line):
- raise self.PARSE_EXC(template, self.line_no, line)
-
def error_invalid_assignment(self, line):
- self.make_parser_error("No ':' or '=' found in assignment", line)
+ raise self.parse_exc("No ':' or '=' found in assignment",
+ self.lineno, line)
def error_empty_key(self, line):
- self.make_parser_error('Key cannot be empty', line)
+ raise self.parse_exc('Key cannot be empty', self.lineno, line)
def error_unexpected_continuation(self, line):
- self.make_parser_error('Unexpected continuation line', line)
+ raise self.parse_exc('Unexpected continuation line',
+ self.lineno, line)
def error_no_section_end_bracket(self, line):
- self.make_parser_error('Invalid section (must end with ])', line)
+ raise self.parse_exc('Invalid section (must end with ])',
+ self.lineno, line)
def error_no_section_name(self, line):
- self.make_parser_error('Empty section name', line)
+ raise self.parse_exc('Empty section name', self.lineno, line)
class ConfigParser(BaseParser):
@@ -158,20 +142,35 @@ class ConfigParser(BaseParser):
self.sections = sections
self.section = None
- def parse(self, line_iter=None):
+ def parse(self):
with open(self.filename) as f:
return super(ConfigParser, self).parse(f)
+ def find_section(self, sections, section):
+ return next((i for i, sect in enumerate(sections) if sect == section), -1)
+
def new_section(self, section):
self.section = section
- self.sections.setdefault(self.section, [])
+ index = self.find_section(self.sections, section)
+ if index == -1:
+ self.sections.append([section, []])
def assignment(self, key, value):
if not self.section:
raise self.error_no_section()
value = '\n'.join(value)
- self.sections[self.section].append([key, value])
+
+ def append(sections, section):
+ entry = [key, value]
+ index = self.find_section(sections, section)
+ sections[index][1].append(entry)
+
+ append(self.sections, self.section)
+
+ def parse_exc(self, msg, lineno, line=None):
+ return ParseError(msg, lineno, line)
def error_no_section(self):
- self.make_parser_error('Section must be started before assignment', '')
+ return self.parse_exc('Section must be started before assignment',
+ self.lineno)
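With this rewrite, ConfigParser no longer fills a dict keyed by section name; it builds an ordered list of [section_name, [[key, value], ...]] entries, so section order is preserved and downstream code iterates name/section pairs instead of dict items. A small illustration of the resulting structure — the section and key names are made up for the example; the '@' placeholder mirrors what _split_key_value() returns for a bare key with no ':' or '=':

# Sketch of the list-based sections structure built by the new parser.
sections = []

def new_section(name):
    # mirrors new_section(): append only if the name is not already present
    if not any(existing == name for existing, _ in sections):
        sections.append([name, []])

def assignment(name, key, value_lines):
    # mirrors assignment(): multi-line values are joined with '\n'
    value = '\n'.join(value_lines)
    index = next(i for i, (existing, _) in enumerate(sections) if existing == name)
    sections[index][1].append([key, value])

new_section("port 0")
assignment("port 0", "mac", ["hardware"])
new_section("core 1")
assignment("core 1", "mode", ["gen"])
assignment("core 1", "name", ["tag_0"])
assignment("core 1", "drop", ["@"])   # bare key stored with the '@' placeholder
print(sections)
# [['port 0', [['mac', 'hardware']]],
#  ['core 1', [['mode', 'gen'], ['name', 'tag_0'], ['drop', '@']]]]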
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index dfed45aa4..d6ec271c9 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -16,26 +16,35 @@ from __future__ import absolute_import
import array
import operator
import logging
+import io
import os
import re
import select
import socket
+
from collections import OrderedDict, namedtuple
import time
from contextlib import contextmanager
from itertools import repeat, chain
+import six
from six.moves import zip, StringIO
+from six.moves import cStringIO
from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file
-from yardstick.common.utils import SocketTopology, ip_to_hex, join_non_strings
+from yardstick.common.utils import SocketTopology, ip_to_hex, join_non_strings, try_int
from yardstick.network_services.vnf_generic.vnf.iniparser import ConfigParser
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+
PROX_PORT = 8474
+SECTION_NAME = 0
+SECTION_CONTENTS = 1
+
LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
TEN_GIGABIT = 1e10
BITS_PER_BYTE = 8
@@ -73,7 +82,7 @@ CONFIGURATION_OPTIONS = (
class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')):
- CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?")
+ CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?$")
def __new__(cls, *args):
try:
@@ -81,7 +90,7 @@ class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')
if matches:
args = matches.groups()
- return super(CoreSocketTuple, cls).__new__(cls, int(args[0]), int(args[1]),
+ return super(CoreSocketTuple, cls).__new__(cls, int(args[0]), try_int(args[1], 0),
'h' if args[2] else '')
except (AttributeError, TypeError, IndexError, ValueError):
@@ -144,10 +153,13 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
def success(self):
return self.drop_total <= self.can_be_lost
- def get_samples(self, pkt_size, pkt_loss=None):
+ def get_samples(self, pkt_size, pkt_loss=None, port_samples=None):
if pkt_loss is None:
pkt_loss = self.pkt_loss
+ if port_samples is None:
+ port_samples = {}
+
latency_keys = [
"LatencyMin",
"LatencyMax",
@@ -162,6 +174,8 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
"RxThroughput": self.mpps,
"PktSize": pkt_size,
}
+ if port_samples:
+ samples.update(port_samples)
samples.update((key, value) for key, value in zip(latency_keys, self.latency))
return samples
@@ -341,7 +355,6 @@ class ProxSocketHelper(object):
status = False
ret_str = ""
for status in iter(is_ready, False):
- LOG.debug("Reading from socket")
decoded_data = self._sock.recv(256).decode('utf-8')
ret_str = self._parse_socket_data(decoded_data, pkt_dump_only)
@@ -351,7 +364,10 @@ class ProxSocketHelper(object):
def put_command(self, to_send):
""" send data to the remote instance """
LOG.debug("Sending data to socket: [%s]", to_send.rstrip('\n'))
- self._sock.sendall(to_send.encode('utf-8'))
+ try:
+ self._sock.sendall(to_send.encode('utf-8'))
+ except:
+ pass
def get_packet_dump(self):
""" get the next packet dump """
@@ -478,11 +494,16 @@ class ProxSocketHelper(object):
def get_all_tot_stats(self):
self.put_command("tot stats\n")
- all_stats = TotStatsTuple(int(v) for v in self.get_data().split(","))
+ all_stats_str = self.get_data().split(",")
+ if len(all_stats_str) != 4:
+ all_stats = [0] * 4
+ return all_stats
+ all_stats = TotStatsTuple(int(v) for v in all_stats_str)
+ self.master_stats = all_stats
return all_stats
def hz(self):
- return self.get_all_tot_stats().hz
+ return self.get_all_tot_stats()[3]
# Deprecated
# TODO: remove
@@ -503,11 +524,11 @@ class ProxSocketHelper(object):
def port_stats(self, ports):
"""get counter values from a specific port"""
- tot_result = list(repeat(0, 12))
+ tot_result = [0] * 12
for port in ports:
self.put_command("port_stats {}\n".format(port))
- for index, n in enumerate(self.get_data().split(',')):
- tot_result[index] += int(n)
+ ret = [try_int(s, 0) for s in self.get_data().split(",")]
+ tot_result = [sum(x) for x in zip(tot_result, ret)]
return tot_result
@contextmanager
@@ -563,53 +584,8 @@ class ProxSocketHelper(object):
class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper):
-
- def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
- super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper)
- self.dpdk_root = "/root/dpdk-17.02"
-
- def setup_vnf_environment(self):
- super(ProxDpdkVnfSetupEnvHelper, self).setup_vnf_environment()
-
- # debug dump after binding
- self.ssh_helper.execute("sudo {} -s".format(self.dpdk_nic_bind))
-
- def rebind_drivers(self, force=True):
- if force:
- force = '--force '
- else:
- force = ''
- cmd_template = "{} {}-b {} {}"
- if not self.used_drivers:
- self._find_used_drivers()
- for vpci, (_, driver) in self.used_drivers.items():
- self.ssh_helper.execute(cmd_template.format(self.dpdk_nic_bind, force, driver, vpci))
-
- def _setup_dpdk(self):
- self._setup_hugepages()
-
- self.ssh_helper.execute("pkill prox")
- self.ssh_helper.execute("sudo modprobe uio")
-
- # for baremetal
- self.ssh_helper.execute("sudo modprobe msr")
-
- # why remove?, just keep it loaded
- # self.connection.execute("sudo rmmod igb_uio")
-
- igb_uio_path = os.path.join(self.dpdk_root, "x86_64-native-linuxapp-gcc/kmod/igb_uio.ko")
- self.ssh_helper.execute("sudo insmod {}".format(igb_uio_path))
-
- # quick hack to allow non-root copy
- self.ssh_helper.execute("sudo chmod 0777 {}".format(self.ssh_helper.bin_path))
-
-
-class ProxResourceHelper(ClientResourceHelper):
-
- PROX_CORE_GEN_MODE = "gen"
- PROX_CORE_LAT_MODE = "lat"
-
- PROX_MODE = ""
+ # the actual app is lowercase
+ APP_NAME = 'prox'
LUA_PARAMETER_NAME = ""
LUA_PARAMETER_PEER = {
@@ -617,12 +593,24 @@ class ProxResourceHelper(ClientResourceHelper):
"sut": "gen",
}
- WAIT_TIME = 3
+ def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
+ self.remote_path = None
+ super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper)
+ self.remote_prox_file_name = None
+ self.prox_config_dict = None
+ self.additional_files = {}
- @staticmethod
- def _replace_quoted_with_value(quoted, value, count=1):
- new_string = re.sub('"[^"]*"', '"{}"'.format(value), quoted, count)
- return new_string
+ def _build_pipeline_kwargs(self):
+ tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
+ self.pipeline_kwargs = {
+ 'tool_path': tool_path,
+ 'tool_dir': os.path.dirname(tool_path),
+ }
+
+ def copy_to_target(self, config_file_path, prox_file):
+ remote_path = os.path.join("/tmp", prox_file)
+ self.ssh_helper.put(config_file_path, remote_path)
+ return remote_path
@staticmethod
def _get_tx_port(section, sections):
@@ -635,14 +623,67 @@ class ProxResourceHelper(ClientResourceHelper):
return int(iface_port[0])
@staticmethod
- def line_rate_to_pps(pkt_size, n_ports):
- # FIXME Don't hardcode 10Gb/s
- return n_ports * TEN_GIGABIT / BITS_PER_BYTE / (pkt_size + 20)
+ def _replace_quoted_with_value(quoted, value, count=1):
+ new_string = re.sub('"[^"]*"', '"{}"'.format(value), quoted, count)
+ return new_string
- @staticmethod
- def find_pci(pci, bound_pci):
- # we have to substring match PCI bus address from the end
- return any(b.endswith(pci) for b in bound_pci)
+ def _insert_additional_file(self, value):
+ file_str = value.split('"')
+ base_name = os.path.basename(file_str[1])
+ file_str[1] = self.additional_files[base_name]
+ return '"'.join(file_str)
+
+ def generate_prox_config_file(self, config_path):
+ sections = []
+ prox_config = ConfigParser(config_path, sections)
+ prox_config.parse()
+
+ # Ensure MAC is set "hardware"
+ ext_intf = self.vnfd_helper.interfaces
+ # we are using enumeration to map logical port numbers to interfaces
+ for port_num, intf in enumerate(ext_intf):
+ port_section_name = "port {}".format(port_num)
+ for section_name, section in sections:
+ if port_section_name != section_name:
+ continue
+
+ for index, section_data in enumerate(section):
+ if section_data[0] == "mac":
+ section_data[1] = "hardware"
+
+ # search for dst mac
+ for _, section in sections:
+ # for index, (item_key, item_val) in enumerate(section):
+ for index, section_data in enumerate(section):
+ item_key, item_val = section_data
+ if item_val.startswith("@@dst_mac"):
+ tx_port_iter = re.finditer(r'\d+', item_val)
+ tx_port_no = int(next(tx_port_iter).group(0))
+ mac = ext_intf[tx_port_no]["virtual-interface"]["dst_mac"]
+ section_data[1] = mac.replace(":", " ", 6)
+
+ if item_key == "dst mac" and item_val.startswith("@@"):
+ tx_port_iter = re.finditer(r'\d+', item_val)
+ tx_port_no = int(next(tx_port_iter).group(0))
+ mac = ext_intf[tx_port_no]["virtual-interface"]["dst_mac"]
+ section_data[1] = mac
+
+ # if addition file specified in prox config
+ if not self.additional_files:
+ return sections
+
+ for section_name, section in sections:
+ for index, section_data in enumerate(section):
+ try:
+ if section_data[0].startswith("dofile"):
+ section_data[0] = self._insert_additional_file(section_data[0])
+
+ if section_data[1].startswith("dofile"):
+ section_data[1] = self._insert_additional_file(section_data[1])
+ except:
+ pass
+
+ return sections
@staticmethod
def write_prox_config(prox_config):
@@ -652,16 +693,122 @@ class ProxResourceHelper(ClientResourceHelper):
a custom method
"""
out = []
- for section_name, section_value in prox_config.items():
+ for i, (section_name, section) in enumerate(prox_config):
out.append("[{}]".format(section_name))
- for key, value in section_value:
+ for index, item in enumerate(section):
+ key, value = item
if key == "__name__":
continue
- if value is not None:
+ if value is not None and value != '@':
key = "=".join((key, str(value).replace('\n', '\n\t')))
- out.append(key)
+ out.append(key)
+ else:
+ key = str(key).replace('\n', '\n\t')
+ out.append(key)
return os.linesep.join(out)
+ def put_string_to_file(self, s, remote_path):
+ file_obj = cStringIO(s)
+ self.ssh_helper.put_file_obj(file_obj, remote_path)
+ return remote_path
+
+ def generate_prox_lua_file(self):
+ p = OrderedDict()
+ ext_intf = self.vnfd_helper.interfaces
+ lua_param = self.LUA_PARAMETER_NAME
+ for intf in ext_intf:
+ peer = self.LUA_PARAMETER_PEER[lua_param]
+ port_num = intf["virtual-interface"]["dpdk_port_num"]
+ local_ip = intf["local_ip"]
+ dst_ip = intf["dst_ip"]
+ local_ip_hex = ip_to_hex(local_ip, separator=' ')
+ dst_ip_hex = ip_to_hex(dst_ip, separator=' ')
+ p.update([
+ ("{}_hex_ip_port_{}".format(lua_param, port_num), local_ip_hex),
+ ("{}_ip_port_{}".format(lua_param, port_num), local_ip),
+ ("{}_hex_ip_port_{}".format(peer, port_num), dst_ip_hex),
+ ("{}_ip_port_{}".format(peer, port_num), dst_ip),
+ ])
+ lua = os.linesep.join(('{}:"{}"'.format(k, v) for k, v in p.items()))
+ return lua
+
+ def upload_prox_lua(self, config_dir, prox_config_dict):
+ # we could have multiple lua directives
+ lau_dict = prox_config_dict.get('lua', {})
+ find_iter = (re.findall(r'\("([^"]+)"\)', k) for k in lau_dict)
+ lua_file = next((found[0] for found in find_iter if found), None)
+ if not lua_file:
+ return ""
+
+ out = self.generate_prox_lua_file()
+ remote_path = os.path.join(config_dir, lua_file)
+ return self.put_string_to_file(out, remote_path)
+
+ def upload_prox_config(self, config_file, prox_config_dict):
+ # prox can't handle spaces around ' = ' so use custom method
+ out = StringIO(self.write_prox_config(prox_config_dict))
+ out.seek(0)
+ remote_path = os.path.join("/tmp", config_file)
+ self.ssh_helper.put_file_obj(out, remote_path)
+
+ return remote_path
+
+ def build_config_file(self):
+ task_path = self.scenario_helper.task_path
+ options = self.scenario_helper.options
+ config_path = options['prox_config']
+ config_file = os.path.basename(config_path)
+ config_path = find_relative_file(config_path, task_path)
+ self.additional_files = {}
+
+ prox_files = options.get('prox_files', [])
+ if isinstance(prox_files, six.string_types):
+ prox_files = [prox_files]
+ for key_prox_file in prox_files:
+ base_prox_file = os.path.basename(key_prox_file)
+ remote_prox_file = self.copy_to_target(key_prox_file, base_prox_file)
+ self.additional_files[base_prox_file] = remote_prox_file
+
+ self.prox_config_dict = self.generate_prox_config_file(config_path)
+ self.remote_path = self.upload_prox_config(config_file, self.prox_config_dict)
+
+ def build_config(self):
+
+ options = self.scenario_helper.options
+
+ prox_args = options['prox_args']
+ LOG.info("Provision and start the %s", self.APP_NAME)
+ self._build_pipeline_kwargs()
+ self.pipeline_kwargs["args"] = " ".join(
+ " ".join([k, v if v else ""]) for k, v in prox_args.items())
+ self.pipeline_kwargs["cfg_file"] = self.remote_path
+
+ cmd_template = "sudo bash -c 'cd {tool_dir}; {tool_path} -o cli {args} -f {cfg_file} '"
+ prox_cmd = cmd_template.format(**self.pipeline_kwargs)
+ return prox_cmd
+
+
+class ProxResourceHelper(ClientResourceHelper):
+
+ RESOURCE_WORD = 'prox'
+ PROX_CORE_GEN_MODE = "gen"
+ PROX_CORE_LAT_MODE = "lat"
+ PROX_CORE_MPLS_TEST = "MPLS tag/untag"
+
+ PROX_MODE = ""
+
+ WAIT_TIME = 3
+
+ @staticmethod
+ def line_rate_to_pps(pkt_size, n_ports):
+ # FIXME Don't hardcode 10Gb/s
+ return n_ports * TEN_GIGABIT / BITS_PER_BYTE / (pkt_size + 20)
+
+ @staticmethod
+ def find_pci(pci, bound_pci):
+ # we have to substring match PCI bus address from the end
+ return any(b.endswith(pci) for b in bound_pci)
+
def __init__(self, setup_helper):
super(ProxResourceHelper, self).__init__(setup_helper)
self.mgmt_interface = self.vnfd_helper.mgmt_interface
@@ -671,43 +818,50 @@ class ProxResourceHelper(ClientResourceHelper):
self.done = False
self._cpu_topology = None
self._vpci_to_if_name_map = None
- self.additional_file = False
+ self.additional_file = {}
self.remote_prox_file_name = None
- self.prox_config_dict = None
self.lower = None
self.upper = None
self._test_cores = None
self._latency_cores = None
+ self._tagged_cores = None
+ self._plain_cores = None
@property
def sut(self):
if not self.client:
- self.client = ProxSocketHelper()
+ self.client = self._connect()
return self.client
@property
def cpu_topology(self):
if not self._cpu_topology:
- stdout = self.ssh_helper.execute("cat /proc/cpuinfo")[1]
- self._cpu_topology = SocketTopology.parse_cpuinfo(stdout)
+ stdout = io.BytesIO()
+ self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout)
+ self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8'))
return self._cpu_topology
@property
- def vpci_to_if_name_map(self):
- if self._vpci_to_if_name_map is None:
- self._vpci_to_if_name_map = {
- interface["virtual-interface"]["vpci"]: interface["name"]
- for interface in self.vnfd_helper.interfaces
- }
- return self._vpci_to_if_name_map
-
- @property
def test_cores(self):
if not self._test_cores:
self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE)
return self._test_cores
@property
+ def mpls_cores(self):
+ if not self._tagged_cores:
+ self._tagged_cores, self._plain_cores = self.get_cores_mpls(self.PROX_CORE_GEN_MODE)
+ return self._tagged_cores, self._plain_cores
+
+ @property
+ def tagged_cores(self):
+ return self.mpls_cores[0]
+
+ @property
+ def plain_cores(self):
+ return self.mpls_cores[1]
+
+ @property
def latency_cores(self):
if not self._latency_cores:
self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
@@ -736,34 +890,8 @@ class ProxResourceHelper(ClientResourceHelper):
pass
def terminate(self):
- super(ProxResourceHelper, self).terminate()
- self.ssh_helper.execute('sudo pkill prox')
- self.setup_helper.rebind_drivers()
-
- def get_process_args(self):
- task_path = self.scenario_helper.task_path
- options = self.scenario_helper.options
-
- prox_args = options['prox_args']
- prox_path = options['prox_path']
- config_path = options['prox_config']
-
- config_file = os.path.basename(config_path)
- config_path = find_relative_file(config_path, task_path)
-
- try:
- prox_file_config_path = options['prox_files']
- prox_file_file = os.path.basename(prox_file_config_path)
- prox_file_config_path = find_relative_file(prox_file_config_path, task_path)
- self.remote_prox_file_name = self.copy_to_target(prox_file_config_path, prox_file_file)
- self.additional_file = True
- except:
- self.additional_file = False
-
- self.prox_config_dict = self.generate_prox_config_file(config_path)
-
- remote_path = self.upload_prox_config(config_file, self.prox_config_dict)
- return prox_args, prox_path, remote_path
+ # should not be called, use VNF terminate
+ raise NotImplementedError()
def up_post(self):
return self.sut # force connection
@@ -773,26 +901,20 @@ class ProxResourceHelper(ClientResourceHelper):
if func:
return func(*args, **kwargs)
- def copy_to_target(self, config_file_path, prox_file):
- remote_path = os.path.join("/tmp", prox_file)
- self.ssh_helper.put(config_file_path, remote_path)
- return remote_path
-
- def upload_prox_config(self, config_file, prox_config_dict):
- # prox can't handle spaces around ' = ' so use custom method
- out = StringIO(self.write_prox_config(prox_config_dict))
- out.seek(0)
- remote_path = os.path.join("/tmp", config_file)
- self.ssh_helper.put_file_obj(out, remote_path)
-
- return remote_path
-
@contextmanager
def traffic_context(self, pkt_size, value):
self.sut.stop_all()
self.sut.reset_stats()
- self.sut.set_pkt_size(self.test_cores, pkt_size)
- self.sut.set_speed(self.test_cores, value)
+ if self.get_test_type() == self.PROX_CORE_MPLS_TEST:
+ self.sut.set_pkt_size(self.tagged_cores, pkt_size)
+ self.sut.set_pkt_size(self.plain_cores, pkt_size - 4)
+ self.sut.set_speed(self.tagged_cores, value)
+ ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
+ self.sut.set_speed(self.plain_cores, value * ratio)
+ else:
+ self.sut.set_pkt_size(self.test_cores, pkt_size)
+ self.sut.set_speed(self.test_cores, value)
+
self.sut.start_all()
try:
yield
@@ -800,12 +922,13 @@ class ProxResourceHelper(ClientResourceHelper):
self.sut.stop_all()
def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
+ # type: (object, object, object, object) -> object
# do this assert in init? unless we expect interface count to
# change from one run to another run...
interfaces = self.vnfd_helper.interfaces
interface_count = len(interfaces)
- assert interface_count in {2, 4}, \
- "Invalid no of ports, 2 or 4 ports only supported at this time"
+ assert interface_count in {1, 2, 4}, \
+ "Invalid number of ports: 1, 2 or 4 ports only supported at this time"
with self.traffic_context(pkt_size, value):
# Getting statistics to calculate PPS at right speed....
@@ -822,99 +945,67 @@ class ProxResourceHelper(ClientResourceHelper):
rx_total, tx_total = self.sut.port_stats(range(interface_count))[6:8]
pps = value / 100.0 * self.line_rate_to_pps(pkt_size, interface_count)
+ samples = {}
+ # we are currently using enumeration to map logical port num to interface
+ for index, iface in enumerate(interfaces):
+ port_rx_total, port_tx_total = self.sut.port_stats([index])[6:8]
+ samples[iface["name"]] = {"in_packets": port_rx_total,
+ "out_packets": port_tx_total}
+
result = ProxTestDataTuple(tolerated_loss, tsc_hz, deltas.rx, deltas.tx,
deltas.tsc, latency, rx_total, tx_total, pps)
-
result.log_data()
- return result
+ return result, samples
- def get_cores(self, mode):
- cores = []
- for section_name, section_data in self.prox_config_dict.items():
- if section_name.startswith("core"):
- for index, item in enumerate(section_data):
- if item[0] == "mode" and item[1] == mode:
- core = CoreSocketTuple(section_name).find_in_topology(self.cpu_topology)
- cores.append(core)
- return cores
+ def get_test_type(self):
+ test_type = None
+ for section_name, section in self.setup_helper.prox_config_dict:
+ if section_name != "global":
+ continue
- def upload_prox_lua(self, config_dir, prox_config_dict):
- # we could have multiple lua directives
- lau_dict = prox_config_dict.get('lua', {})
- find_iter = (re.findall('\("([^"]+)"\)', k) for k in lau_dict)
- lua_file = next((found[0] for found in find_iter if found), None)
- if not lua_file:
- return ""
+ for key, value in section:
+ if key == "name" and value == self.PROX_CORE_MPLS_TEST:
+ test_type = self.PROX_CORE_MPLS_TEST
- out = self.generate_prox_lua_file()
- remote_path = os.path.join(config_dir, lua_file)
- return self.put_string_to_file(out, remote_path)
+ return test_type
- def put_string_to_file(self, s, remote_path):
- self.ssh_helper.run("cat > '{}'".format(remote_path), stdin=s)
- return remote_path
+ def get_cores(self, mode):
+ cores = []
- def generate_prox_lua_file(self):
- p = OrderedDict()
- ext_intf = self.vnfd_helper.interfaces
- lua_param = self.LUA_PARAMETER_NAME
- for intf in ext_intf:
- peer = self.LUA_PARAMETER_PEER[lua_param]
- port_num = intf["virtual-interface"]["dpdk_port_num"]
- local_ip = intf["local_ip"]
- dst_ip = intf["dst_ip"]
- local_ip_hex = ip_to_hex(local_ip, separator=' ')
- dst_ip_hex = ip_to_hex(dst_ip, separator=' ')
- p.update([
- ("{}_hex_ip_port_{}".format(lua_param, port_num), local_ip_hex),
- ("{}_ip_port_{}".format(lua_param, port_num), local_ip),
- ("{}_hex_ip_port_{}".format(peer, port_num), dst_ip_hex),
- ("{}_ip_port_{}".format(peer, port_num), dst_ip),
- ])
- lua = os.linesep.join(('{}:"{}"'.format(k, v) for k, v in p.items()))
- return lua
+ for section_name, section in self.setup_helper.prox_config_dict:
+ if not section_name.startswith("core"):
+ continue
- def generate_prox_config_file(self, config_path):
- sections = {}
- prox_config = ConfigParser(config_path, sections)
- prox_config.parse()
+ for key, value in section:
+ if key == "mode" and value == mode:
+ core_tuple = CoreSocketTuple(section_name)
+ core = core_tuple.find_in_topology(self.cpu_topology)
+ cores.append(core)
- # Ensure MAC is set "hardware"
- ext_intf = self.vnfd_helper.interfaces
- for intf in ext_intf:
- port_num = intf["virtual-interface"]["dpdk_port_num"]
- section_name = "port {}".format(port_num)
- for index, section_data in enumerate(sections.get(section_name, [])):
- if section_data[0] == "mac":
- sections[section_name][index][1] = "hardware"
-
- # search for dest mac
- for section_name, section_data in sections.items():
- for index, section_attr in enumerate(section_data):
- if section_attr[0] != "dst mac":
- continue
+ return cores
- tx_port_no = self._get_tx_port(section_name, sections)
- if tx_port_no == -1:
- raise Exception("Failed ..destination MAC undefined")
+ def get_cores_mpls(self, mode=PROX_CORE_GEN_MODE):
+ cores_tagged = []
+ cores_plain = []
+ for section_name, section in self.setup_helper.prox_config_dict:
+ if not section_name.startswith("core"):
+ continue
- dst_mac = ext_intf[tx_port_no]["virtual-interface"]["dst_mac"]
- section_attr[1] = dst_mac
+ if all(key != "mode" or value != mode for key, value in section):
+ continue
- # if addition file specified in prox config
- if self.additional_file:
- remote_name = self.remote_prox_file_name
- for section_data in sections.values():
- for index, section_attr in enumerate(section_data):
- try:
- if section_attr[1].startswith("dofile"):
- new_string = self._replace_quoted_with_value(section_attr[1],
- remote_name)
- section_attr[1] = new_string
- except:
- pass
+ for item_key, item_value in section:
+ if item_key == "name" and item_value.startswith("tag"):
+ core_tuple = CoreSocketTuple(section_name)
+ core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ cores_tagged.append(core_tag)
- return sections
+ elif item_key == "name" and item_value.startswith("udp"):
+ core_tuple = CoreSocketTuple(section_name)
+ core_udp = core_tuple.find_in_topology(self.cpu_topology)
+ cores_plain.append(core_udp)
+
+ return cores_tagged, cores_plain
def get_latency(self):
"""
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index 88911c3fc..214c9f3a0 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -12,14 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import errno
import logging
-import multiprocessing
-import os
-import time
-from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
-from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
+
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
LOG = logging.getLogger(__name__)
@@ -42,51 +40,21 @@ class ProxApproxVnf(SampleVNF):
super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
resource_helper_type)
- self._result = {}
- self._terminated = multiprocessing.Value('i', 0)
- self._queue = multiprocessing.Value('i', 0)
-
- def instantiate(self, scenario_cfg, context_cfg):
- LOG.info("printing .........prox instantiate ")
-
- self.scenario_helper.scenario_cfg = scenario_cfg
-
- # this won't work we need 1GB hugepages at boot
- self.setup_helper.setup_vnf_environment()
-
- # self.connection.run("cat /proc/cpuinfo")
-
- prox_args, prox_path, remote_path = self.resource_helper.get_process_args()
-
- self.q_in = multiprocessing.Queue()
- self.q_out = multiprocessing.Queue()
- self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, "PROX started")
- self._vnf_process = multiprocessing.Process(target=self._run_prox,
- args=(remote_path, prox_path, prox_args))
- self._vnf_process.start()
def _vnf_up_post(self):
self.resource_helper.up_post()
- def _run_prox(self, file_wrapper, config_path, prox_path, prox_args):
- # This runs in a different process and should not share an SSH connection
- # with the rest of the object
- self.ssh_helper.drop_connection()
-
- time.sleep(self.WAIT_TIME)
-
- args = " ".join(" ".join([k, v if v else ""]) for k, v in prox_args.items())
-
- cmd_template = "sudo bash -c 'cd {}; {} -o cli {} -f {} '"
- prox_cmd = cmd_template.format(os.path.dirname(prox_path), prox_path, args, config_path)
-
- LOG.debug(prox_cmd)
- self.ssh_helper.run(prox_cmd, stdin=file_wrapper, stdout=file_wrapper,
- keep_stdin_open=True, pty=False)
-
- def vnf_execute(self, cmd, wait_time=2):
+ def vnf_execute(self, cmd, *args, **kwargs):
# try to execute with socket commands
- self.resource_helper.execute(cmd)
+ # ignore socket errors, e.g. when using force_quit
+ ignore_errors = kwargs.pop("_ignore_errors", False)
+ try:
+ return self.resource_helper.execute(cmd, *args, **kwargs)
+ except OSError as e:
+ if ignore_errors and e.errno in {errno.EPIPE, errno.ESHUTDOWN}:
+ pass
+ else:
+ raise
def collect_kpi(self):
if self.resource_helper is None:
@@ -98,11 +66,11 @@ class ProxApproxVnf(SampleVNF):
}
return result
- if len(self.vnfd_helper.interfaces) not in {2, 4}:
+ if len(self.vnfd_helper.interfaces) not in {1, 2, 4}:
raise RuntimeError("Failed ..Invalid no of ports .. "
- "2 or 4 ports only supported at this time")
+ "1, 2 or 4 ports only supported at this time")
- port_stats = self.resource_helper.execute('port_stats', self.vnfd_helper.interfaces)
+ port_stats = self.vnf_execute('port_stats', range(len(self.vnfd_helper.interfaces)))
rx_total = port_stats[6]
tx_total = port_stats[7]
result = {
@@ -111,7 +79,28 @@ class ProxApproxVnf(SampleVNF):
"packets_fwd": rx_total,
"collect_stats": self.resource_helper.collect_kpi(),
}
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
return result
def _tear_down(self):
+ # this should be standardized for all VNFs or removed
self.setup_helper.rebind_drivers()
+
+ def terminate(self):
+ # try to quit with socket commands
+ self.vnf_execute("stop_all")
+ self.vnf_execute("quit")
+ # hopefully quit succeeds and socket closes, so ignore force_quit socket errors
+ self.vnf_execute("force_quit", _ignore_errors=True)
+ if self._vnf_process:
+ self._vnf_process.terminate()
+ self.setup_helper.kill_vnf()
+ self._tear_down()
+ self.resource_helper.stop_collect()
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ # build config in parent process so we can access
+ # config from TG subprocesses
+ self.scenario_helper.scenario_cfg = scenario_cfg
+ self.setup_helper.build_config_file()
+ super(ProxApproxVnf, self).instantiate(scenario_cfg, context_cfg)
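The new vnf_execute() keyword makes socket failures non-fatal only on request, which terminate() uses for the final "force_quit" after "quit" has most likely already closed the PROX control socket. A self-contained sketch of that error-filtering pattern — the callable below is a stand-in for resource_helper.execute():

import errno

def execute_ignoring_socket_errors(func, *args, **kwargs):
    # swallow broken-pipe / shut-down-socket errors only when asked to
    ignore_errors = kwargs.pop("_ignore_errors", False)
    try:
        return func(*args, **kwargs)
    except OSError as e:
        if ignore_errors and e.errno in {errno.EPIPE, errno.ESHUTDOWN}:
            return None
        raise

def fake_force_quit():
    # stand-in for a command sent over an already-closed control socket
    raise OSError(errno.EPIPE, "Broken pipe")

execute_ignoring_socket_errors(fake_force_quit, _ignore_errors=True)  # ignored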
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index f41994814..9a7d39913 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -115,7 +115,9 @@ class SetupEnvHelper(object):
def setup_vnf_environment(self):
pass
- # raise NotImplementedError
+
+ def kill_vnf(self):
+ pass
def tear_down(self):
raise NotImplementedError
@@ -297,12 +299,13 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def setup_vnf_environment(self):
self._setup_dpdk()
resource = self._setup_resources()
- self._kill_vnf()
- self._detect_drivers()
+ self.kill_vnf()
+ self._detect_and_bind_drivers()
return resource
- def _kill_vnf(self):
- self.ssh_helper.execute("sudo pkill %s" % self.APP_NAME)
+ def kill_vnf(self):
+ # have to use exact match
+ self.ssh_helper.execute("sudo pkill -x %s" % self.APP_NAME)
def _setup_dpdk(self):
""" setup dpdk environment needed for vnf to run """
@@ -335,7 +338,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
return ResourceProfile(self.vnfd_helper.mgmt_interface,
interfaces=self.vnfd_helper.interfaces, cores=cores)
- def _detect_drivers(self):
+ def _detect_and_bind_drivers(self):
interfaces = self.vnfd_helper.interfaces
self._find_used_drivers()
@@ -351,6 +354,15 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self._bind_dpdk('igb_uio', vpci)
time.sleep(2)
+ # debug dump after binding
+ self.ssh_helper.execute("sudo {} -s".format(self.dpdk_nic_bind))
+
+ def rebind_drivers(self, force=True):
+ if not self.used_drivers:
+ self._find_used_drivers()
+ for vpci, (_, driver) in self.used_drivers.items():
+ self._bind_dpdk(driver, vpci, force)
+
def _bind_dpdk(self, driver, vpci, force=True):
if force:
force = '--force '
@@ -376,6 +388,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
return stdout
def _bind_kernel_devices(self):
+ # only used by PingSetupEnvHelper?
for intf in self.vnfd_helper.interfaces:
vi = intf["virtual-interface"]
stdout = self._detect_and_bind_dpdk(vi["vpci"], vi["driver"])
@@ -533,7 +546,8 @@ class ClientResourceHelper(ResourceHelper):
if not self._queue.empty():
kpi = self._queue.get()
self._result.update(kpi)
- LOG.debug("Collect {0} KPIs {1}".format(self.RESOURCE_WORD, self._result))
+ LOG.debug("Got KPIs from _queue for {0} {1}".format(
+ self.scenario_helper.name, self.RESOURCE_WORD))
return self._result
def _connect(self, client=None):
@@ -829,7 +843,7 @@ class SampleVNF(GenericVNF):
self.ssh_helper.drop_connection()
cmd = self._build_config()
# kill before starting
- self.ssh_helper.execute("pkill {}".format(self.APP_NAME))
+ self.setup_helper.kill_vnf()
LOG.debug(cmd)
self._build_run_kwargs()
@@ -853,7 +867,7 @@ class SampleVNF(GenericVNF):
self.vnf_execute("quit")
if self._vnf_process:
self._vnf_process.terminate()
- self.ssh_helper.execute("sudo pkill %s" % self.APP_NAME)
+ self.setup_helper.kill_vnf()
self._tear_down()
self.resource_helper.stop_collect()
@@ -949,6 +963,9 @@ class SampleVNFTrafficGen(GenericTrafficGen):
return self._tg_process.exitcode
def _traffic_runner(self, traffic_profile):
+ # always drop connections first thing in new processes
+ # so we don't get paramiko errors
+ self.ssh_helper.drop_connection()
LOG.info("Starting %s client...", self.APP_NAME)
self.resource_helper.run_traffic(traffic_profile)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
index b4568bf4b..c266f2c0f 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
@@ -16,9 +16,8 @@ from __future__ import print_function, absolute_import
import logging
-
-from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
-from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
LOG = logging.getLogger(__name__)
@@ -26,8 +25,10 @@ LOG = logging.getLogger(__name__)
class ProxTrafficGen(SampleVNFTrafficGen):
+ APP_NAME = 'ProxTG'
PROX_MODE = "Traffic Gen"
LUA_PARAMETER_NAME = "gen"
+ WAIT_TIME = 1
@staticmethod
def _sort_vpci(vnfd):
@@ -44,26 +45,35 @@ class ProxTrafficGen(SampleVNFTrafficGen):
return sorted(ext_intf, key=key_func)
def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
- if setup_env_helper_type is None:
- setup_env_helper_type = ProxDpdkVnfSetupEnvHelper
-
- if resource_helper_type is None:
- resource_helper_type = ProxResourceHelper
+ # don't call superclass, use custom wrapper of ProxApproxVnf
+ self._vnf_wrapper = ProxApproxVnf(name, vnfd, setup_env_helper_type, resource_helper_type)
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.name = self._vnf_wrapper.name
+ self.ssh_helper = self._vnf_wrapper.ssh_helper
+ self.setup_helper = self._vnf_wrapper.setup_helper
+ self.resource_helper = self._vnf_wrapper.resource_helper
+ self.scenario_helper = self._vnf_wrapper.scenario_helper
+
+ self.runs_traffic = True
+ self.traffic_finished = False
+ self.tg_port_pairs = None
+ self._tg_process = None
+ self._traffic_process = None
- super(ProxTrafficGen, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
- self._result = {}
- # for some reason
+ # used for generating stats
self.vpci_if_name_ascending = self._sort_vpci(vnfd)
- self._traffic_process = None
+ self.resource_helper.vpci_if_name_ascending = self._sort_vpci(vnfd)
def listen_traffic(self, traffic_profile):
pass
def terminate(self):
+ self._vnf_wrapper.terminate()
super(ProxTrafficGen, self).terminate()
- self.resource_helper.terminate()
- if self._traffic_process:
- self._traffic_process.terminate()
- self.ssh_helper.execute("pkill prox")
- self.resource_helper.rebind_drivers()
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._vnf_wrapper.instantiate(scenario_cfg, context_cfg)
+ self._tg_process = self._vnf_wrapper._vnf_process
+
+ def wait_for_instantiate(self):
+ self._vnf_wrapper.wait_for_instantiate()
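ProxTrafficGen now delegates to an internal ProxApproxVnf instance instead of building its own helpers: the wrapper re-exposes the VNF's helpers and forwards instantiate(), wait_for_instantiate() and terminate(). A stripped-down sketch of that composition pattern — the stub class below stands in for ProxApproxVnf and its helpers:

# Sketch: traffic generator wrapping a VNF object and re-exposing its members.
class StubProxVnf(object):
    def __init__(self, name):
        self.name = name
        self.setup_helper = object()
        self.resource_helper = object()
        self._vnf_process = None
    def instantiate(self, scenario_cfg, context_cfg):
        self._vnf_process = "vnf-process-handle"
    def wait_for_instantiate(self):
        return 0
    def terminate(self):
        self._vnf_process = None

class StubProxTrafficGen(object):
    def __init__(self, name):
        self._vnf_wrapper = StubProxVnf(name)
        self.name = self._vnf_wrapper.name
        self.setup_helper = self._vnf_wrapper.setup_helper
        self.resource_helper = self._vnf_wrapper.resource_helper
        self._tg_process = None
    def instantiate(self, scenario_cfg, context_cfg):
        self._vnf_wrapper.instantiate(scenario_cfg, context_cfg)
        self._tg_process = self._vnf_wrapper._vnf_process
    def wait_for_instantiate(self):
        return self._vnf_wrapper.wait_for_instantiate()
    def terminate(self):
        self._vnf_wrapper.terminate()

tg = StubProxTrafficGen("tg__0")
tg.instantiate({}, {})
print(tg.wait_for_instantiate())
tg.terminate()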