From ce9b1175dd9e382c623ee91755b1943eaf0e6cdb Mon Sep 17 00:00:00 2001 From: Abhijit Sinha Date: Tue, 12 Sep 2017 19:08:41 +0100 Subject: Addition of Prox NSB BNG and BNG-QoS test JIRA: YARDSTICK-802 Added Prox BNG and BNG-QoS Test - The tests supports BM, Openstack Heat - Supports 4 ports - Test added for BNG traffic profile - Fixed the Prox heat test cases with proper upstream and downstream links - Grafana Dashboard for BNG & BNG-QoS added - Increased the test Duration to 300 TODO: - Test does not Terminate correctly Update: Added new helper class for run_test: Genric, MPLS and BNG tests. Change-Id: Ib40811bedb45a3c3030643943f32679a4044e076 Signed-off-by: Abhijit Sinha Signed-off-by: Edward MacGillivray Signed-off-by: Ross Brattain Signed-off-by: Daniel Martin Buckley --- .../vnf_generic/vnf/prox_helpers.py | 632 +++++++++++++++------ 1 file changed, 467 insertions(+), 165 deletions(-) (limited to 'yardstick/network_services/vnf_generic/vnf/prox_helpers.py') diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py index d24710132..02ae06170 100644 --- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py +++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py @@ -26,19 +26,19 @@ from collections import OrderedDict, namedtuple import time from contextlib import contextmanager from itertools import repeat, chain +from multiprocessing import Queue import six -from multiprocessing import Queue from six.moves import zip, StringIO from six.moves import cStringIO from yardstick.benchmark.scenarios.networking.vnf_generic import find_relative_file +from yardstick.common import utils from yardstick.common.utils import SocketTopology, ip_to_hex, join_non_strings, try_int from yardstick.network_services.vnf_generic.vnf.iniparser import ConfigParser from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper - PROX_PORT = 8474 SECTION_NAME = 0 @@ -82,7 +82,6 @@ CONFIGURATION_OPTIONS = ( class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread')): - CORE_RE = re.compile(r"core\s+(\d+)(?:s(\d+))?(h)?$") def __new__(cls, *args): @@ -115,7 +114,6 @@ class CoreSocketTuple(namedtuple('CoreTuple', 'core_id, socket_id, hyperthread') class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')): - def __new__(cls, *args): try: assert args[0] is not str(args[0]) @@ -129,7 +127,6 @@ class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')): class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,' 'delta_tx,delta_tsc,' 'latency,rx_total,tx_total,pps')): - @property def pkt_loss(self): try: @@ -191,7 +188,6 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_ class PacketDump(object): - @staticmethod def assert_func(func, value1, value2, template=None): assert func(value1, value2), template.format(value1, value2) @@ -268,6 +264,7 @@ class ProxSocketHelper(object): self._sock = sock self._pkt_dumps = [] + self.master_stats = None def connect(self, ip, port): """Connect to the prox instance on the remote system""" @@ -323,6 +320,7 @@ class ProxSocketHelper(object): def get_data(self, pkt_dump_only=False, timeout=1): """ read data from the socket """ + # This method behaves slightly differently depending on whether it is # called to read the response to a command (pkt_dump_only = 0) or if # it is called specifically to read a packet 
dump (pkt_dump_only = 1). @@ -434,10 +432,15 @@ class ProxSocketHelper(object): LOG.debug("Set value for core(s) %s", cores) self._run_template_over_cores("reset values {} 0\n", cores) - def set_speed(self, cores, speed): + def set_speed(self, cores, speed, tasks=None): """ set speed on the remote instance """ - LOG.debug("Set speed for core(s) %s to %g", cores, speed) - self._run_template_over_cores("speed {} 0 {}\n", cores, speed) + if tasks is None: + tasks = [0] * len(cores) + elif len(tasks) != len(cores): + LOG.error("set_speed: cores and tasks must have the same len") + LOG.debug("Set speed for core(s)/tasks(s) %s to %g", list(zip(cores, tasks)), speed) + for (core, task) in list(zip(cores, tasks)): + self.put_command("speed {} {} {}\n".format(core, task, speed)) def slope_speed(self, cores_speed, duration, n_steps=0): """will start to increase speed from 0 to N where N is taken from @@ -564,7 +567,7 @@ class ProxSocketHelper(object): """Activate dump on rx on the specified core""" LOG.debug("Activating dump on RX for core %d, task %d, count %d", core_id, task_id, count) self.put_command("dump_rx {} {} {}\n".format(core_id, task_id, count)) - time.sleep(1.5) # Give PROX time to set up packet dumping + time.sleep(1.5) # Give PROX time to set up packet dumping def quit(self): self.stop_all() @@ -584,6 +587,9 @@ class ProxSocketHelper(object): time.sleep(3) +_LOCAL_OBJECT = object() + + class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): # the actual app is lowercase APP_NAME = 'prox' @@ -594,6 +600,8 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): "sut": "gen", } + CONFIG_QUEUE_TIMEOUT = 120 + def __init__(self, vnfd_helper, ssh_helper, scenario_helper): self.remote_path = None super(ProxDpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper) @@ -601,6 +609,34 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): self._prox_config_data = None self.additional_files = {} self.config_queue = Queue() + self._global_section = None + + @property + def prox_config_data(self): + if self._prox_config_data is None: + # this will block, but it needs too + self._prox_config_data = self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT) + return self._prox_config_data + + @property + def global_section(self): + if self._global_section is None and self.prox_config_data: + self._global_section = self.find_section("global") + return self._global_section + + def find_section(self, name, default=_LOCAL_OBJECT): + result = next((value for key, value in self.prox_config_data if key == name), default) + if result is _LOCAL_OBJECT: + raise KeyError('{} not found in Prox config'.format(name)) + return result + + def find_in_section(self, section_name, section_key, default=_LOCAL_OBJECT): + section = self.find_section(section_name, []) + result = next((value for key, value in section if key == section_key), default) + if result is _LOCAL_OBJECT: + template = '{} not found in {} section of Prox config' + raise KeyError(template.format(section_key, section_name)) + return result def _build_pipeline_kwargs(self): tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME) @@ -739,9 +775,9 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): lua = os.linesep.join(('{}:"{}"'.format(k, v) for k, v in p.items())) return lua - def upload_prox_lua(self, config_dir, prox_config_dict): + def upload_prox_lua(self, config_dir, prox_config_data): # we could have multiple lua directives - lau_dict = prox_config_dict.get('lua', {}) + lau_dict = 
prox_config_data.get('lua', {}) find_iter = (re.findall(r'\("([^"]+)"\)', k) for k in lau_dict) lua_file = next((found[0] for found in find_iter if found), None) if not lua_file: @@ -751,24 +787,15 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): remote_path = os.path.join(config_dir, lua_file) return self.put_string_to_file(out, remote_path) - def upload_prox_config(self, config_file, prox_config_dict): + def upload_prox_config(self, config_file, prox_config_data): # prox can't handle spaces around ' = ' so use custom method - out = StringIO(self.write_prox_config(prox_config_dict)) + out = StringIO(self.write_prox_config(prox_config_data)) out.seek(0) remote_path = os.path.join("/tmp", config_file) self.ssh_helper.put_file_obj(out, remote_path) return remote_path - CONFIG_QUEUE_TIMEOUT = 120 - - @property - def prox_config_data(self): - if self._prox_config_data is None: - # this will block, but it needs too - self._prox_config_data = self.config_queue.get(True, self.CONFIG_QUEUE_TIMEOUT) - return self._prox_config_data - def build_config_file(self): task_path = self.scenario_helper.task_path options = self.scenario_helper.options @@ -812,19 +839,11 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): class ProxResourceHelper(ClientResourceHelper): RESOURCE_WORD = 'prox' - PROX_CORE_GEN_MODE = "gen" - PROX_CORE_LAT_MODE = "lat" - PROX_CORE_MPLS_TEST = "MPLS tag/untag" PROX_MODE = "" WAIT_TIME = 3 - @staticmethod - def line_rate_to_pps(pkt_size, n_ports): - # FIXME Don't hardcode 10Gb/s - return n_ports * TEN_GIGABIT / BITS_PER_BYTE / (pkt_size + 20) - @staticmethod def find_pci(pci, bound_pci): # we have to substring match PCI bus address from the end @@ -837,16 +856,14 @@ class ProxResourceHelper(ClientResourceHelper): self._ip = self.mgmt_interface["ip"] self.done = False - self._cpu_topology = None self._vpci_to_if_name_map = None self.additional_file = {} self.remote_prox_file_name = None self.lower = None self.upper = None - self._test_cores = None - self._latency_cores = None - self._tagged_cores = None - self._plain_cores = None + self.step_delta = 1 + self.step_time = 0.5 + self._test_type = None @property def sut(self): @@ -855,38 +872,10 @@ class ProxResourceHelper(ClientResourceHelper): return self.client @property - def cpu_topology(self): - if not self._cpu_topology: - stdout = io.BytesIO() - self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout) - self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8')) - return self._cpu_topology - - @property - def test_cores(self): - if not self._test_cores: - self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE) - return self._test_cores - - @property - def mpls_cores(self): - if not self._tagged_cores: - self._tagged_cores, self._plain_cores = self.get_cores_mpls(self.PROX_CORE_GEN_MODE) - return self._tagged_cores, self._plain_cores - - @property - def tagged_cores(self): - return self.mpls_cores[0] - - @property - def plain_cores(self): - return self.mpls_cores[1] - - @property - def latency_cores(self): - if not self._latency_cores: - self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE) - return self._latency_cores + def test_type(self): + if self._test_type is None: + self._test_type = self.setup_helper.find_in_section('global', 'name', None) + return self._test_type def run_traffic(self, traffic_profile): self.lower = 0.0 @@ -931,75 +920,192 @@ class ProxResourceHelper(ClientResourceHelper): if func: return func(*args, **kwargs) - @contextmanager - def traffic_context(self, 
pkt_size, value): - self.sut.stop_all() - self.sut.reset_stats() - if self.get_test_type() == self.PROX_CORE_MPLS_TEST: - self.sut.set_pkt_size(self.tagged_cores, pkt_size) - self.sut.set_pkt_size(self.plain_cores, pkt_size - 4) - self.sut.set_speed(self.tagged_cores, value) - ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20) - self.sut.set_speed(self.plain_cores, value * ratio) - else: - self.sut.set_pkt_size(self.test_cores, pkt_size) - self.sut.set_speed(self.test_cores, value) + def _connect(self, client=None): + """Run and connect to prox on the remote system """ + # De-allocating a large amount of hugepages takes some time. If a new + # PROX instance is started immediately after killing the previous one, + # it might not be able to allocate hugepages, because they are still + # being freed. Hence the -w switch. + # self.connection.execute("sudo killall -w Prox 2>/dev/null") + # prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t + # -f ./handle_none-4.cfg" + # prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir + + # "; " \ + # + "export RTE_TARGET=" + self._dpdk_target + ";" \ + # + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50; + # sudo " \ + # + "./build/Prox " + prox_args + # log.debug("Starting PROX with command [%s]", prox_cmd) + # thread.start_new_thread(self.ssh_check_quit, (self, self._user, + # self._ip, prox_cmd)) + if client is None: + client = ProxSocketHelper() - self.sut.start_all() - try: - yield - finally: - self.sut.stop_all() + # try connecting to Prox for 60s + for _ in range(RETRY_SECONDS): + time.sleep(RETRY_INTERVAL) + try: + client.connect(self._ip, PROX_PORT) + except (socket.gaierror, socket.error): + continue + else: + return client - def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): - # do this assert in init? unless we expect interface count to - # change from one run to another run... - ports = self.vnfd_helper.port_pairs.all_ports - port_count = len(ports) - assert port_count in {1, 2, 4}, \ - "Invalid number of ports: 1, 2 or 4 ports only supported at this time" + msg = "Failed to connect to prox, please check if system {} accepts connections on port {}" + raise Exception(msg.format(self._ip, PROX_PORT)) - with self.traffic_context(pkt_size, value): - # Getting statistics to calculate PPS at right speed.... - tsc_hz = float(self.sut.hz()) - time.sleep(2) - with self.sut.measure_tot_stats() as data: - time.sleep(duration) - # Get stats before stopping the cores. Stopping cores takes some time - # and might skew results otherwise. 
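The reworked _connect() above polls the PROX control socket until the freshly started process accepts connections, because hugepage cleanup from a previous run can delay startup. The standalone sketch below shows that polling pattern; the 8474 default comes from PROX_PORT in this module, while the retry count and interval are illustrative stand-ins for RETRY_SECONDS and RETRY_INTERVAL (the code comment only says roughly 60 s).

import socket
import time

def wait_for_prox(ip, port=8474, retries=60, interval=1.0):
    # Poll until the PROX control port starts accepting connections;
    # the process may still be freeing/allocating hugepages.
    for _ in range(retries):
        time.sleep(interval)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((ip, port))
        except (socket.gaierror, socket.error):
            sock.close()
            continue
        return sock
    raise RuntimeError("PROX on {}:{} not reachable".format(ip, port))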
- latency = self.get_latency() +class ProxDataHelper(object): + + def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss): + super(ProxDataHelper, self).__init__() + self.vnfd_helper = vnfd_helper + self.sut = sut + self.pkt_size = pkt_size + self.value = value + self.tolerated_loss = tolerated_loss + self.port_count = len(self.vnfd_helper.port_pairs.all_ports) + self.tsc_hz = None + self.measured_stats = None + self.latency = None + self._totals_and_pps = None + self.result_tuple = None + + @property + def totals_and_pps(self): + if self._totals_and_pps is None: + rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8] + pps = self.value / 100.0 * self.line_rate_to_pps() + self._totals_and_pps = rx_total, tx_total, pps + return self._totals_and_pps + + @property + def rx_total(self): + return self.totals_and_pps[0] + + @property + def tx_total(self): + return self.totals_and_pps[1] - deltas = data['delta'] - rx_total, tx_total = self.sut.port_stats(range(port_count))[6:8] - pps = value / 100.0 * self.line_rate_to_pps(pkt_size, port_count) + @property + def pps(self): + return self.totals_and_pps[2] + @property + def samples(self): samples = {} - # we are currently using enumeration to map logical port num to interface - for port_name in ports: - port = self.vnfd_helper.port_num(port_name) - port_rx_total, port_tx_total = self.sut.port_stats([port])[6:8] + for port_name, port_num in self.vnfd_helper.ports_iter(): + port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8] samples[port_name] = { "in_packets": port_rx_total, "out_packets": port_tx_total, } + return samples - result = ProxTestDataTuple(tolerated_loss, tsc_hz, deltas.rx, deltas.tx, - deltas.tsc, latency, rx_total, tx_total, pps) - result.log_data() - return result, samples + def __enter__(self): + self.check_interface_count() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.make_tuple() + + def make_tuple(self): + if self.result_tuple: + return + + self.result_tuple = ProxTestDataTuple( + self.tolerated_loss, + self.tsc_hz, + self.measured_stats['delta'].rx, + self.measured_stats['delta'].tx, + self.measured_stats['delta'].tsc, + self.latency, + self.rx_total, + self.tx_total, + self.pps, + ) + self.result_tuple.log_data() - def get_test_type(self): - test_type = None - for section_name, section in self.setup_helper.prox_config_data: - if section_name != "global": - continue + @contextmanager + def measure_tot_stats(self): + with self.sut.measure_tot_stats() as self.measured_stats: + yield - for key, value in section: - if key == "name" and value == self.PROX_CORE_MPLS_TEST: - test_type = self.PROX_CORE_MPLS_TEST + def check_interface_count(self): + # do this assert in init? unless we expect interface count to + # change from one run to another run... 
+ assert self.port_count in {1, 2, 4}, \ + "Invalid number of ports: 1, 2 or 4 ports only supported at this time" + + def capture_tsc_hz(self): + self.tsc_hz = float(self.sut.hz()) + + def line_rate_to_pps(self): + # FIXME Don't hardcode 10Gb/s + return self.port_count * TEN_GIGABIT / BITS_PER_BYTE / (self.pkt_size + 20) + + +class ProxProfileHelper(object): + + __prox_profile_type__ = "Generic" + + PROX_CORE_GEN_MODE = "gen" + PROX_CORE_LAT_MODE = "lat" + + @classmethod + def get_cls(cls, helper_type): + """Return class of specified type.""" + if not helper_type: + return ProxProfileHelper + + for profile_helper_class in utils.itersubclasses(cls): + if helper_type == profile_helper_class.__prox_profile_type__: + return profile_helper_class + + return ProxProfileHelper + + @classmethod + def make_profile_helper(cls, resource_helper): + return cls.get_cls(resource_helper.test_type)(resource_helper) + + def __init__(self, resource_helper): + super(ProxProfileHelper, self).__init__() + self.resource_helper = resource_helper + self._cpu_topology = None + self._test_cores = None + self._latency_cores = None + + @property + def cpu_topology(self): + if not self._cpu_topology: + stdout = io.BytesIO() + self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout) + self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8')) + return self._cpu_topology + + @property + def test_cores(self): + if not self._test_cores: + self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE) + return self._test_cores + + @property + def latency_cores(self): + if not self._latency_cores: + self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE) + return self._latency_cores - return test_type + @contextmanager + def traffic_context(self, pkt_size, value): + self.sut.stop_all() + self.sut.reset_stats() + try: + self.sut.set_pkt_size(self.test_cores, pkt_size) + self.sut.set_speed(self.test_cores, value) + self.sut.start_all() + yield + finally: + self.sut.stop_all() def get_cores(self, mode): cores = [] @@ -1016,68 +1122,264 @@ class ProxResourceHelper(ClientResourceHelper): return cores - def get_cores_mpls(self, mode=PROX_CORE_GEN_MODE): + def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss) + + with data_helper, self.traffic_context(pkt_size, value): + with data_helper.measure_tot_stats(): + time.sleep(duration) + # Getting statistics to calculate PPS at right speed.... 
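ProxProfileHelper.get_cls() above selects a helper subclass by matching __prox_profile_type__ against the test name read from the [global] section of the PROX config (resource_helper.test_type), falling back to the generic helper. The self-contained sketch below mirrors that dispatch; the simplified itersubclasses() stands in for yardstick.common.utils.itersubclasses and the shortened class names are illustrative.

def itersubclasses(cls):
    # depth-first walk of the subclass tree (simplified stand-in for
    # yardstick.common.utils.itersubclasses)
    for sub in cls.__subclasses__():
        yield sub
        for nested in itersubclasses(sub):
            yield nested

class ProfileHelper(object):
    __prox_profile_type__ = "Generic"

    @classmethod
    def get_cls(cls, helper_type):
        if not helper_type:          # no '[global] name=' in the config
            return cls
        for sub in itersubclasses(cls):
            if sub.__prox_profile_type__ == helper_type:
                return sub
        return cls                   # unknown names fall back to generic

class MplsProfileHelper(ProfileHelper):
    __prox_profile_type__ = "MPLS tag/untag"

class BngProfileHelper(ProfileHelper):
    __prox_profile_type__ = "BNG gen"

assert ProfileHelper.get_cls("BNG gen") is BngProfileHelper
assert ProfileHelper.get_cls(None) is ProfileHelper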
+ data_helper.capture_tsc_hz() + data_helper.latency = self.get_latency() + + return data_helper.result_tuple, data_helper.samples + + def get_latency(self): + """ + :return: return lat_min, lat_max, lat_avg + :rtype: list + """ + if self._latency_cores: + return self.sut.lat_stats(self._latency_cores) + return [] + + def terminate(self): + pass + + def __getattr__(self, item): + return getattr(self.resource_helper, item) + + +class ProxMplsProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "MPLS tag/untag" + + def __init__(self, resource_helper): + super(ProxMplsProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + + @property + def mpls_cores(self): + if not self._cores_tuple: + self._cores_tuple = self.get_cores_mpls() + return self._cores_tuple + + @property + def tagged_cores(self): + return self.mpls_cores[0] + + @property + def plain_cores(self): + return self.mpls_cores[1] + + def get_cores_mpls(self): cores_tagged = [] cores_plain = [] - for section_name, section in self.setup_helper.prox_config_data: + for section_name, section in self.resource_helper.setup_helper.prox_config_data: if not section_name.startswith("core"): continue - if all(key != "mode" or value != mode for key, value in section): + if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section): continue for item_key, item_value in section: - if item_key == "name" and item_value.startswith("tag"): + if item_key != 'name': + continue + + if item_value.startswith("tag"): core_tuple = CoreSocketTuple(section_name) core_tag = core_tuple.find_in_topology(self.cpu_topology) cores_tagged.append(core_tag) - elif item_key == "name" and item_value.startswith("udp"): + elif item_value.startswith("udp"): core_tuple = CoreSocketTuple(section_name) core_udp = core_tuple.find_in_topology(self.cpu_topology) cores_plain.append(core_udp) return cores_tagged, cores_plain - def get_latency(self): - """ - :return: return lat_min, lat_max, lat_avg - :rtype: list - """ - if self._latency_cores: - return self.sut.lat_stats(self._latency_cores) - return [] + @contextmanager + def traffic_context(self, pkt_size, value): + self.sut.stop_all() + self.sut.reset_stats() + try: + self.sut.set_pkt_size(self.tagged_cores, pkt_size) + self.sut.set_pkt_size(self.plain_cores, pkt_size - 4) + self.sut.set_speed(self.tagged_cores, value) + ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20) + self.sut.set_speed(self.plain_cores, value * ratio) + self.sut.start_all() + yield + finally: + self.sut.stop_all() - def _connect(self, client=None): - """Run and connect to prox on the remote system """ - # De-allocating a large amount of hugepages takes some time. If a new - # PROX instance is started immediately after killing the previous one, - # it might not be able to allocate hugepages, because they are still - # being freed. Hence the -w switch. 
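In the MPLS traffic_context above, the plain (untagged) generator cores send frames 4 bytes shorter than the tagged ones, and their speed is scaled by (pkt_size - 4 + 20) / (pkt_size + 20) so both streams offer the same packet rate. The check below is illustrative only; the 20-byte term is the usual per-frame Ethernet overhead (preamble/SFD plus inter-frame gap), the same constant line_rate_to_pps() adds.

def plain_core_speed(tagged_speed, pkt_size):
    # Scale the untagged stream so that, with frames 4 bytes shorter, it
    # generates the same packets-per-second as the tagged stream.
    ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
    return tagged_speed * ratio

print(plain_core_speed(100.0, 64))   # ~95.24 for 64-byte tagged frames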
- # self.connection.execute("sudo killall -w Prox 2>/dev/null") - # prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t - # -f ./handle_none-4.cfg" - # prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir + - # "; " \ - # + "export RTE_TARGET=" + self._dpdk_target + ";" \ - # + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50; - # sudo " \ - # + "./build/Prox " + prox_args - # log.debug("Starting PROX with command [%s]", prox_cmd) - # thread.start_new_thread(self.ssh_check_quit, (self, self._user, - # self._ip, prox_cmd)) - if client is None: - client = ProxSocketHelper() - # try connecting to Prox for 60s - for _ in range(RETRY_SECONDS): - time.sleep(RETRY_INTERVAL) - try: - client.connect(self._ip, PROX_PORT) - except (socket.gaierror, socket.error): +class ProxBngProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "BNG gen" + + def __init__(self, resource_helper): + super(ProxBngProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + + @property + def bng_cores(self): + if not self._cores_tuple: + self._cores_tuple = self.get_cores_gen_bng_qos() + return self._cores_tuple + + @property + def cpe_cores(self): + return self.bng_cores[0] + + @property + def inet_cores(self): + return self.bng_cores[1] + + @property + def arp_cores(self): + return self.bng_cores[2] + + @property + def arp_task_cores(self): + return self.bng_cores[3] + + @property + def all_rx_cores(self): + return self.latency_cores + + def get_cores_gen_bng_qos(self): + cpe_cores = [] + inet_cores = [] + arp_cores = [] + arp_tasks_core = [0] + for section_name, section in self.resource_helper.setup_helper.prox_config_data: + if not section_name.startswith("core"): continue - else: - return client - msg = "Failed to connect to prox, please check if system {} accepts connections on port {}" - raise Exception(msg.format(self._ip, PROX_PORT)) + if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section): + continue + + for item_key, item_value in section: + if item_key == "name" and item_value.startswith("cpe"): + core_tuple = CoreSocketTuple(section_name) + core_tag = core_tuple.find_in_topology(self.cpu_topology) + cpe_cores.append(core_tag) + + elif item_key == "name" and item_value.startswith("inet"): + core_tuple = CoreSocketTuple(section_name) + inet_core = core_tuple.find_in_topology(self.cpu_topology) + inet_cores.append(inet_core) + + elif item_key == "name" and item_value.startswith("arp"): + core_tuple = CoreSocketTuple(section_name) + arp_core = core_tuple.find_in_topology(self.cpu_topology) + arp_cores.append(arp_core) + + # We check the tasks/core separately + if item_key == "name" and item_value.startswith("arp_task"): + core_tuple = CoreSocketTuple(section_name) + arp_task_core = core_tuple.find_in_topology(self.cpu_topology) + arp_tasks_core.append(arp_task_core) + + return cpe_cores, inet_cores, arp_cores, arp_tasks_core + + @contextmanager + def traffic_context(self, pkt_size, value): + # Tester is sending packets at the required speed already after + # setup_test(). Just get the current statistics, sleep the required + # amount of time and calculate packet loss. 
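get_cores_gen_bng_qos() above buckets the generator cores by the prefix of each [core ...] section's name option (cpe, inet, arp, arp_task). The fragment below shows the idea on a hypothetical parsed config in the (section_name, [(key, value), ...]) shape used by prox_config_data; the section names and values are made up for illustration.

example_config = [
    ("core 1", [("mode", "gen"), ("name", "cpe0")]),
    ("core 2", [("mode", "gen"), ("name", "inet0")]),
    ("core 3", [("mode", "gen"), ("name", "arp0")]),
    ("core 4", [("mode", "lat"), ("name", "lat0")]),
]

cpe, inet, arp = [], [], []
for section_name, section in example_config:
    if not section_name.startswith("core"):
        continue
    if all(key != "mode" or value != "gen" for key, value in section):
        continue                      # only generator cores are classified
    name = dict(section).get("name", "")
    if name.startswith("cpe"):
        cpe.append(section_name)
    elif name.startswith("inet"):
        inet.append(section_name)
    elif name.startswith("arp"):
        arp.append(section_name)

print(cpe, inet, arp)                 # ['core 1'] ['core 2'] ['core 3']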
+ inet_pkt_size = pkt_size + cpe_pkt_size = pkt_size - 24 + ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20) + + curr_up_speed = curr_down_speed = 0 + max_up_speed = max_down_speed = value + if ratio < 1: + max_down_speed = value * ratio + else: + max_up_speed = value / ratio + + # Initialize cores + self.sut.stop_all() + time.sleep(0.5) + + # Flush any packets in the NIC RX buffers, otherwise the stats will be + # wrong. + self.sut.start(self.all_rx_cores) + time.sleep(0.5) + self.sut.stop(self.all_rx_cores) + time.sleep(0.5) + self.sut.reset_stats() + + self.sut.set_pkt_size(self.inet_cores, inet_pkt_size) + self.sut.set_pkt_size(self.cpe_cores, cpe_pkt_size) + + self.sut.reset_values(self.cpe_cores) + self.sut.reset_values(self.inet_cores) + + # Set correct IP and UDP lengths in packet headers + # CPE + # IP length (byte 24): 26 for MAC(12), EthType(2), QinQ(8), CRC(4) + self.sut.set_value(self.cpe_cores, 24, cpe_pkt_size - 26, 2) + # UDP length (byte 46): 46 for MAC(12), EthType(2), QinQ(8), IP(20), CRC(4) + self.sut.set_value(self.cpe_cores, 46, cpe_pkt_size - 46, 2) + + # INET + # IP length (byte 20): 22 for MAC(12), EthType(2), MPLS(4), CRC(4) + self.sut.set_value(self.inet_cores, 20, inet_pkt_size - 22, 2) + # IP length (byte 48): 50 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), CRC(4) + self.sut.set_value(self.inet_cores, 48, inet_pkt_size - 50, 2) + # UDP length (byte 70): 70 for MAC(12), EthType(2), MPLS(4), IP(20), GRE(8), IP(20), CRC(4) + self.sut.set_value(self.inet_cores, 70, inet_pkt_size - 70, 2) + + # Sending ARP to initialize tables - need a few seconds of generation + # to make sure all CPEs are initialized + LOG.info("Initializing SUT: sending ARP packets") + self.sut.set_speed(self.arp_cores, 1, self.arp_task_cores) + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.cpe_cores, curr_down_speed) + self.sut.start(self.arp_cores) + time.sleep(4) + + # Ramp up the transmission speed. First go to the common speed, then + # increase steps for the faster one. + self.sut.start(self.cpe_cores + self.inet_cores + self.latency_cores) + + LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed) + + while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed): + # The min(..., ...) takes care of 1) floating point rounding errors + # that could make curr_*_speed to be slightly greater than + # max_*_speed and 2) max_*_speed not being an exact multiple of + # self._step_delta. + if curr_up_speed < max_up_speed: + curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed) + if curr_down_speed < max_down_speed: + curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed) + + self.sut.set_speed(self.inet_cores, curr_up_speed) + self.sut.set_speed(self.cpe_cores, curr_down_speed) + time.sleep(self.step_time) + + LOG.info("Target speeds reached. Starting real test.") + + yield + + self.sut.stop(self.arp_cores + self.cpe_cores + self.inet_cores) + LOG.info("Test ended. Flushing NIC buffers") + self.sut.start(self.all_rx_cores) + time.sleep(3) + self.sut.stop(self.all_rx_cores) + + def run_test(self, pkt_size, duration, value, tolerated_loss=0.0): + data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss) + + with data_helper, self.traffic_context(pkt_size, value): + with data_helper.measure_tot_stats(): + time.sleep(duration) + # Getting statistics to calculate PPS at right speed.... 
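The BNG traffic_context above sends 24-byte-smaller frames on the CPE side than on the inet side, so one direction is capped by the size ratio to keep the offered load equal in packets per second before the ramp-up loop starts. A small illustrative helper, using a made-up 78-byte example value:

def bng_speed_caps(value, pkt_size):
    inet_pkt_size = pkt_size
    cpe_pkt_size = pkt_size - 24
    ratio = 1.0 * (cpe_pkt_size + 20) / (inet_pkt_size + 20)
    max_up_speed = max_down_speed = value
    if ratio < 1:
        max_down_speed = value * ratio    # CPE (downstream) side is capped
    else:
        max_up_speed = value / ratio
    return max_up_speed, max_down_speed

print(bng_speed_caps(100.0, 78))          # (100.0, ~75.5)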
+ data_helper.capture_tsc_hz() + data_helper.latency = self.get_latency() + + return data_helper.result_tuple, data_helper.samples
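Putting the refactor together: the resource helper no longer hardcodes MPLS handling; a profile helper is chosen from the [global] name in the PROX config, and its run_test() wraps the measurement window in the new ProxDataHelper context manager. The outline below is illustrative only; resource_helper and the numeric arguments are placeholders, not a runnable Yardstick invocation.

from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper

def run_one_iteration(resource_helper, pkt_size=64, duration=30, value=100.0):
    # picks ProxMplsProfileHelper, ProxBngProfileHelper or the generic
    # ProxProfileHelper depending on '[global] name=' in the PROX config
    profile_helper = ProxProfileHelper.make_profile_helper(resource_helper)

    # run_test() roughly does:
    #   with data_helper, profile_helper.traffic_context(pkt_size, value):
    #       with data_helper.measure_tot_stats():
    #           time.sleep(duration)
    #       data_helper.capture_tsc_hz()
    #       data_helper.latency = profile_helper.get_latency()
    result_tuple, samples = profile_helper.run_test(pkt_size, duration, value,
                                                    tolerated_loss=0.001)
    return result_tuple, samples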