Diffstat (limited to 'yardstick/network_services/vnf_generic')
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py       48
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_irq.py          200
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py          4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py   163
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py            16
5 files changed, 417 insertions, 14 deletions
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index e9d83623b..5d980037a 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -561,6 +561,41 @@ class ProxSocketHelper(object):
tsc = int(ret[3])
return rx, tx, drop, tsc
+ def irq_core_stats(self, cores_tasks):
+        """Get IRQ stats per core"""
+
+ stat = {}
+ core = 0
+ task = 0
+ for core, task in cores_tasks:
+ self.put_command("stats task.core({}).task({}).max_irq,task.core({}).task({}).irq(0),"
+ "task.core({}).task({}).irq(1),task.core({}).task({}).irq(2),"
+ "task.core({}).task({}).irq(3),task.core({}).task({}).irq(4),"
+ "task.core({}).task({}).irq(5),task.core({}).task({}).irq(6),"
+ "task.core({}).task({}).irq(7),task.core({}).task({}).irq(8),"
+ "task.core({}).task({}).irq(9),task.core({}).task({}).irq(10),"
+ "task.core({}).task({}).irq(11),task.core({}).task({}).irq(12)"
+ "\n".format(core, task, core, task, core, task, core, task,
+ core, task, core, task, core, task, core, task,
+ core, task, core, task, core, task, core, task,
+ core, task, core, task))
+ in_data_str = self.get_data().split(",")
+ ret = [try_int(s, 0) for s in in_data_str]
+ key = "core_" + str(core)
+ try:
+ stat[key] = {"cpu": core, "max_irq": ret[0], "bucket_0" : ret[1],
+ "bucket_1" : ret[2], "bucket_2" : ret[3],
+ "bucket_3" : ret[4], "bucket_4" : ret[5],
+ "bucket_5" : ret[6], "bucket_6" : ret[7],
+ "bucket_7" : ret[8], "bucket_8" : ret[9],
+ "bucket_9" : ret[10], "bucket_10" : ret[11],
+ "bucket_11" : ret[12], "bucket_12" : ret[13],
+ "overflow": ret[10] + ret[11] + ret[12] + ret[13]}
+ except (KeyError, IndexError):
+ LOG.error("Corrupted PACKET %s", in_data_str)
+
+ return stat
+
def multi_port_stats(self, ports):
"""get counter values from all ports at once"""
@@ -754,7 +789,6 @@ class ProxSocketHelper(object):
self.put_command("quit_force\n")
time.sleep(3)
-
_LOCAL_OBJECT = object()
@@ -2067,3 +2101,15 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper):
data_helper.latency = self.get_latency()
return data_helper.result_tuple, data_helper.samples
+
+
+class ProxIrqProfileHelper(ProxProfileHelper):
+
+ __prox_profile_type__ = "IRQ Query"
+
+ def __init__(self, resource_helper):
+ super(ProxIrqProfileHelper, self).__init__(resource_helper)
+ self._cores_tuple = None
+ self._ports_tuple = None
+ self.step_delta = 5
+ self.step_time = 0.5
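
For reference, a minimal standalone sketch of the reply parsing that irq_core_stats performs: it assumes a comma-separated PROX reply of 14 integers (max_irq followed by irq buckets 0-12) and mirrors the overflow sum over the last four buckets. parse_irq_reply, the local try_int and the sample reply string are illustrative only, not part of the PROX socket API.

# Illustrative sketch only; names and the sample reply are assumptions.
def try_int(s, default=0):
    try:
        return int(s)
    except (ValueError, TypeError):
        return default

def parse_irq_reply(core, reply):
    ret = [try_int(s) for s in reply.split(",")]
    stat = {"cpu": core, "max_irq": ret[0]}
    # ret[1]..ret[13] carry irq buckets 0..12
    stat.update({"bucket_%d" % i: ret[i + 1] for i in range(13)})
    # buckets 9..12 hold the longest interruptions and are summed as overflow
    stat["overflow"] = sum(ret[10:14])
    return {"core_%d" % core: stat}

print(parse_irq_reply(1, "12,100,50,10,5,2,1,0,0,0,0,0,0,0"))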
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_irq.py b/yardstick/network_services/vnf_generic/vnf/prox_irq.py
new file mode 100644
index 000000000..dda26b0fe
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/prox_irq.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import copy
+import time
+
+from yardstick.common.process import check_if_process_failed
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
+from yardstick.benchmark.contexts.base import Context
+from yardstick.network_services.vnf_generic.vnf.prox_helpers import CoreSocketTuple
+LOG = logging.getLogger(__name__)
+
+
+class ProxIrq(SampleVNFTrafficGen):
+
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ vnfd_cpy = copy.deepcopy(vnfd)
+ super(ProxIrq, self).__init__(name, vnfd_cpy, task_id)
+
+ self._vnf_wrapper = ProxApproxVnf(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.name = self._vnf_wrapper.name
+ self.ssh_helper = self._vnf_wrapper.ssh_helper
+ self.setup_helper = self._vnf_wrapper.setup_helper
+ self.resource_helper = self._vnf_wrapper.resource_helper
+ self.scenario_helper = self._vnf_wrapper.scenario_helper
+ self.irq_cores = None
+
+ def terminate(self):
+ self._vnf_wrapper.terminate()
+ super(ProxIrq, self).terminate()
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._vnf_wrapper.instantiate(scenario_cfg, context_cfg)
+ self._tg_process = self._vnf_wrapper._vnf_process
+
+ def wait_for_instantiate(self):
+ self._vnf_wrapper.wait_for_instantiate()
+
+ def get_irq_cores(self):
+ cores = []
+ mode = "irq"
+
+ for section_name, section in self.setup_helper.prox_config_data:
+ if not section_name.startswith("core"):
+ continue
+ irq_mode = task_present = False
+ task_present_task = 0
+ for key, value in section:
+ if key == "mode" and value == mode:
+ irq_mode = True
+ if key == "task":
+ task_present = True
+ task_present_task = int(value)
+
+ if irq_mode:
+ if not task_present:
+ task_present_task = 0
+ core_tuple = CoreSocketTuple(section_name)
+ core = core_tuple.core_id
+ cores.append((core, task_present_task))
+
+ return cores
+
+class ProxIrqVNF(ProxIrq, SampleVNFTrafficGen):
+
+ APP_NAME = 'ProxIrqVNF'
+
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ ProxIrq.__init__(self, name, vnfd, task_id, setup_env_helper_type,
+ resource_helper_type)
+
+ self.start_test_time = None
+ self.end_test_time = None
+
+ def vnf_execute(self, cmd, *args, **kwargs):
+ ignore_errors = kwargs.pop("_ignore_errors", False)
+ try:
+ return self.resource_helper.execute(cmd, *args, **kwargs)
+ except OSError as e:
+ if e.errno in {errno.EPIPE, errno.ESHUTDOWN, errno.ECONNRESET}:
+ if ignore_errors:
+ LOG.debug("ignoring vnf_execute exception %s for command %s", e, cmd)
+ else:
+ raise
+ else:
+ raise
+
+ def collect_kpi(self):
+ # check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
+ for proc in (self._tg_process, self._traffic_process):
+ check_if_process_failed(proc)
+
+ if self.resource_helper is None:
+ return result
+
+ if self.irq_cores is None:
+ self.setup_helper.build_config_file()
+ self.irq_cores = self.get_irq_cores()
+
+ data = self.vnf_execute('irq_core_stats', self.irq_cores)
+ new_data = copy.deepcopy(data)
+
+ self.end_test_time = time.time()
+ self.vnf_execute('reset_stats')
+
+ if self.start_test_time is None:
+ new_data = {}
+ else:
+ test_time = self.end_test_time - self.start_test_time
+ for index, item in data.items():
+ for counter, value in item.items():
+                    if counter.startswith("bucket_") or \
+ counter.startswith("overflow"):
+                        if value == 0:
+ del new_data[index][counter]
+ else:
+ new_data[index][counter] = float(value) / test_time
+
+ self.start_test_time = time.time()
+
+ result["collect_stats"] = new_data
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+
+ return result
+
+class ProxIrqGen(ProxIrq, SampleVNFTrafficGen):
+
+ APP_NAME = 'ProxIrqGen'
+
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ ProxIrq.__init__(self, name, vnfd, task_id, setup_env_helper_type,
+ resource_helper_type)
+ self.start_test_time = None
+ self.end_test_time = None
+
+ def collect_kpi(self):
+ # check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
+ for proc in (self._tg_process, self._traffic_process):
+ check_if_process_failed(proc)
+
+ if self.resource_helper is None:
+ return result
+
+ if self.irq_cores is None:
+ self.setup_helper.build_config_file()
+ self.irq_cores = self.get_irq_cores()
+
+ data = self.resource_helper.sut.irq_core_stats(self.irq_cores)
+ new_data = copy.deepcopy(data)
+
+ self.end_test_time = time.time()
+ self.resource_helper.sut.reset_stats()
+
+ if self.start_test_time is None:
+ new_data = {}
+ else:
+ test_time = self.end_test_time - self.start_test_time
+ for index, item in data.items():
+ for counter, value in item.items():
+ if counter.startswith("bucket_") or \
+ counter.startswith("overflow"):
+                        if value == 0:
+ del new_data[index][counter]
+ else:
+ new_data[index][counter] = float(value) / test_time
+
+ self.start_test_time = time.time()
+
+ result["collect_stats"] = new_data
+ LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+
+ return result
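
The bucket-to-rate conversion in collect_kpi() is duplicated in ProxIrqVNF and ProxIrqGen; below is a minimal standalone sketch of that post-processing, assuming the stats dict shape produced by irq_core_stats. normalize_irq_stats is an illustrative helper name, not part of the yardstick API.

# Illustrative sketch only; helper name and sample data are assumptions.
import copy

def normalize_irq_stats(data, test_time):
    new_data = copy.deepcopy(data)
    for core, counters in data.items():
        for counter, value in counters.items():
            if counter.startswith("bucket_") or counter.startswith("overflow"):
                if value == 0:
                    # drop empty buckets so only observed latencies are reported
                    del new_data[core][counter]
                else:
                    # convert raw counts to events per second over the interval
                    new_data[core][counter] = float(value) / test_time
    return new_data

sample = {"core_1": {"cpu": 1, "max_irq": 12, "bucket_0": 100, "bucket_1": 0,
                     "overflow": 0}}
print(normalize_irq_stats(sample, 10.0))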
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index ebe3ff774..8833b88f2 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -180,7 +180,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
"""No actions/rules (flows) by default"""
return None
- def _build_pipeline_kwargs(self, cfg_file=None):
+ def _build_pipeline_kwargs(self, cfg_file=None, script=None):
tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
# count the number of actual ports in the list of pairs
# remove duplicate ports
@@ -201,7 +201,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.pipeline_kwargs = {
'cfg_file': cfg_file if cfg_file else self.CFG_CONFIG,
- 'script': self.CFG_SCRIPT,
+ 'script': script if script else self.CFG_SCRIPT,
'port_mask_hex': ports_mask_hex,
'tool_path': tool_path,
'hwlb': hwlb,
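
A small sketch of the new script fallback in _build_pipeline_kwargs(): an explicit script path wins, otherwise the class default is kept. The constants and paths below are stand-ins, not the actual values from sample_vnf.py.

# Illustrative sketch only; constants and paths are assumptions.
CFG_CONFIG = "/tmp/sample_vnf.cfg"
CFG_SCRIPT = "/tmp/sample_vnf.sh"

def build_pipeline_kwargs(cfg_file=None, script=None):
    return {
        "cfg_file": cfg_file if cfg_file else CFG_CONFIG,
        "script": script if script else CFG_SCRIPT,
    }

print(build_pipeline_kwargs())                          # both defaults
print(build_pipeline_kwargs(script="/tmp/vpe_script"))  # explicit script wins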
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 4c13112be..1d37f8f6f 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -43,7 +43,8 @@ class IxiaBasicScenario(object):
def apply_config(self):
pass
- def create_traffic_model(self):
+ def create_traffic_model(self, traffic_profile=None):
+ # pylint: disable=unused-argument
vports = self.client.get_vports()
self._uplink_vports = vports[::2]
self._downlink_vports = vports[1::2]
@@ -71,6 +72,7 @@ class IxiaPppoeClientScenario(object):
self._context_cfg = context_cfg
self._ixia_cfg = ixia_cfg
self.protocols = []
+ self.device_groups = []
def apply_config(self):
vports = self.client.get_vports()
@@ -80,9 +82,15 @@ class IxiaPppoeClientScenario(object):
self._apply_access_network_config()
self._apply_core_network_config()
- def create_traffic_model(self):
- self.client.create_ipv4_traffic_model(self._access_topologies,
- self._core_topologies)
+ def create_traffic_model(self, traffic_profile):
+ endpoints_id_pairs = self._get_endpoints_src_dst_id_pairs(
+ traffic_profile.full_profile)
+ endpoints_obj_pairs = \
+ self._get_endpoints_src_dst_obj_pairs(endpoints_id_pairs)
+ uplink_endpoints = endpoints_obj_pairs[::2]
+ downlink_endpoints = endpoints_obj_pairs[1::2]
+ self.client.create_ipv4_traffic_model(uplink_endpoints,
+ downlink_endpoints)
def run_protocols(self):
LOG.info('PPPoE Scenario - Start Protocols')
@@ -116,6 +124,144 @@ class IxiaPppoeClientScenario(object):
strict=False)
return ip, ipaddr.prefixlen
+ @staticmethod
+ def _get_endpoints_src_dst_id_pairs(flows_params):
+ """Get list of flows src/dst port pairs
+
+ Create list of flows src/dst port pairs based on traffic profile
+ flows data. Each uplink/downlink pair in traffic profile represents
+ specific flows between the pair of ports.
+
+        Example (the 'port' key represents the port on which the flow
+        will be created):
+
+ Input flows data:
+ uplink_0:
+ ipv4:
+ id: 1
+ port: xe0
+ downlink_0:
+ ipv4:
+ id: 2
+ port: xe1
+ uplink_1:
+ ipv4:
+ id: 3
+ port: xe2
+ downlink_1:
+ ipv4:
+ id: 4
+ port: xe3
+
+ Result list: ['xe0', 'xe1', 'xe2', 'xe3']
+
+ Result list means that the following flows pairs will be created:
+ - uplink 0: port xe0 <-> port xe1
+ - downlink 0: port xe1 <-> port xe0
+ - uplink 1: port xe2 <-> port xe3
+ - downlink 1: port xe3 <-> port xe2
+
+ :param flows_params: ordered dict of traffic profile flows params
+ :return: (list) list of flows src/dst ports
+ """
+ if len(flows_params) % 2:
+            raise RuntimeError('Number of uplink and downlink flows'
+                               ' in traffic profile is not equal')
+ endpoint_pairs = []
+ for flow in flows_params:
+ port = flows_params[flow]['ipv4'].get('port')
+ if port is None:
+ continue
+ endpoint_pairs.append(port)
+ return endpoint_pairs
+
+ def _get_endpoints_src_dst_obj_pairs(self, endpoints_id_pairs):
+        """Create list of uplink/downlink device group pairs
+
+ Based on traffic profile options, create list of uplink/downlink
+        device group pairs between which flow groups will be created:
+
+        1. In case uplink/downlink flows in the traffic profile don't have
+        the 'port' key specified, flows will be created between each device
+        group on the access port and the device group on the corresponding
+        core port.
+ E.g.:
+ Device groups created on access port xe0: dg1, dg2, dg3
+ Device groups created on core port xe1: dg4
+ Flows will be created between:
+ dg1 -> dg4
+ dg4 -> dg1
+ dg2 -> dg4
+ dg4 -> dg2
+ dg3 -> dg4
+ dg4 -> dg3
+
+        2. In case uplink/downlink flows in the traffic profile have the
+        'port' key specified, flows will be created between device groups
+        on this port.
+ E.g., for the following traffic profile
+ uplink_0:
+ port: xe0
+ downlink_0:
+ port: xe1
+ uplink_1:
+ port: xe0
+            downlink_1:
+ port: xe3
+ Flows will be created between:
+ Port xe0 (dg1) -> Port xe1 (dg1)
+ Port xe1 (dg1) -> Port xe0 (dg1)
+ Port xe0 (dg2) -> Port xe3 (dg1)
+        Port xe3 (dg1) -> Port xe0 (dg2)
+
+        :param endpoints_id_pairs: (list) list of uplink/downlink flow port
+        pairs
+        :return: (list) list of uplink/downlink device group descriptor pairs
+ """
+ pppoe = self._ixia_cfg['pppoe_client']
+ sessions_per_port = pppoe['sessions_per_port']
+ sessions_per_svlan = pppoe['sessions_per_svlan']
+ svlan_count = int(sessions_per_port / sessions_per_svlan)
+
+ uplink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['src_ip']]
+ downlink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['dst_ip']]
+ uplink_port_topology_map = zip(uplink_ports, self._access_topologies)
+ downlink_port_topology_map = zip(downlink_ports, self._core_topologies)
+
+ port_to_dev_group_mapping = {}
+ for port, topology in uplink_port_topology_map:
+ topology_dgs = self.client.get_topology_device_groups(topology)
+ port_to_dev_group_mapping[port] = topology_dgs
+ for port, topology in downlink_port_topology_map:
+ topology_dgs = self.client.get_topology_device_groups(topology)
+ port_to_dev_group_mapping[port] = topology_dgs
+
+ uplink_endpoints = endpoints_id_pairs[::2]
+ downlink_endpoints = endpoints_id_pairs[1::2]
+
+ uplink_dev_groups = []
+ group_up = [uplink_endpoints[i:i + svlan_count]
+ for i in range(0, len(uplink_endpoints), svlan_count)]
+
+ for group in group_up:
+ for i, port in enumerate(group):
+ uplink_dev_groups.append(port_to_dev_group_mapping[port][i])
+
+ downlink_dev_groups = []
+ for port in downlink_endpoints:
+ downlink_dev_groups.append(port_to_dev_group_mapping[port][0])
+
+ endpoint_obj_pairs = []
+        for up, down in zip(uplink_dev_groups, downlink_dev_groups):
+            endpoint_obj_pairs.extend([up, down])
+
+ if not endpoint_obj_pairs:
+ for up, down in zip(uplink_ports, downlink_ports):
+ uplink_dev_groups = port_to_dev_group_mapping[up]
+ downlink_dev_groups = \
+ port_to_dev_group_mapping[down] * len(uplink_dev_groups)
+            for up, down in zip(uplink_dev_groups, downlink_dev_groups):
+                endpoint_obj_pairs.extend([up, down])
+ return endpoint_obj_pairs
+
def _fill_ixia_config(self):
pppoe = self._ixia_cfg["pppoe_client"]
ipv4 = self._ixia_cfg["ipv4_client"]
@@ -151,6 +297,7 @@ class IxiaPppoeClientScenario(object):
c_vlan = ixnet_api.Vlan(vlan_id=pppoe['c_vlan'], vlan_id_step=1)
name = 'SVLAN {}'.format(s_vlan_id)
dg = self.client.add_device_group(tp, name, sessions_per_svlan)
+ self.device_groups.append(dg)
# add ethernet layer to device group
ethernet = self.client.add_ethernet(dg, 'Ethernet')
self.protocols.append(ethernet)
@@ -181,6 +328,7 @@ class IxiaPppoeClientScenario(object):
for dg_id in range(vlan_count):
name = 'Core port {}'.format(core_tp_id)
dg = self.client.add_device_group(tp, name, sessions_per_vlan)
+ self.device_groups.append(dg)
# add ethernet layer to device group
ethernet = self.client.add_ethernet(dg, 'Ethernet')
self.protocols.append(ethernet)
@@ -295,12 +443,12 @@ class IxiaResourceHelper(ClientResourceHelper):
raise RuntimeError(
"IXIA config type '{}' not supported".format(ixia_config))
- def _initialize_client(self):
+ def _initialize_client(self, traffic_profile):
"""Initialize the IXIA IxNetwork client and configure the server"""
self.client.clear_config()
self.client.assign_ports()
self._ix_scenario.apply_config()
- self._ix_scenario.create_traffic_model()
+ self._ix_scenario.create_traffic_model(traffic_profile)
def run_traffic(self, traffic_profile, *args):
if self._terminated.value:
@@ -312,7 +460,8 @@ class IxiaResourceHelper(ClientResourceHelper):
default = "00:00:00:00:00:00"
self._build_ports()
- self._initialize_client()
+ traffic_profile.update_traffic_profile(self)
+ self._initialize_client(traffic_profile)
mac = {}
for port_name in self.vnfd_helper.port_pairs.all_ports:
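
The uplink/downlink pairing described in the _get_endpoints_src_dst_obj_pairs() docstring can be traced with dummy data; the sketch below uses made-up device-group names in place of the IxNetwork objects returned by get_topology_device_groups, so it is illustrative only.

# Illustrative sketch only; port ids, svlan_count and device-group names are assumptions.
endpoints_id_pairs = ['xe0', 'xe1', 'xe0', 'xe1']   # alternating uplink/downlink ports
svlan_count = 2                                      # sessions_per_port / sessions_per_svlan
port_to_dev_group_mapping = {
    'xe0': ['access_dg1', 'access_dg2'],             # device groups on the access port
    'xe1': ['core_dg1'],                             # device group on the core port
}

uplink_endpoints = endpoints_id_pairs[::2]
downlink_endpoints = endpoints_id_pairs[1::2]

# pick a distinct access device group per uplink flow, svlan_count flows at a time
uplink_dev_groups = []
for start in range(0, len(uplink_endpoints), svlan_count):
    group = uplink_endpoints[start:start + svlan_count]
    for i, port in enumerate(group):
        uplink_dev_groups.append(port_to_dev_group_mapping[port][i])

# downlink flows always target the first device group on the core port
downlink_dev_groups = [port_to_dev_group_mapping[p][0] for p in downlink_endpoints]

endpoint_obj_pairs = []
for up, down in zip(uplink_dev_groups, downlink_dev_groups):
    endpoint_obj_pairs.extend([up, down])

print(endpoint_obj_pairs)
# ['access_dg1', 'core_dg1', 'access_dg2', 'core_dg1']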
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index 349ef7888..dd3221386 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -106,6 +106,7 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
action_bulk_file = vnf_cfg.get('action_bulk_file', '/tmp/action_bulk_512.txt')
full_tm_profile_file = vnf_cfg.get('full_tm_profile_file', '/tmp/full_tm_profile_10G.cfg')
config_file = vnf_cfg.get('file', '/tmp/vpe_config')
+ script_file = vnf_cfg.get('script_file', None)
vpe_vars = {
"bin_path": self.ssh_helper.bin_path,
"socket": self.socket,
@@ -113,8 +114,16 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
self._build_vnf_ports()
vpe_conf = ConfigCreate(self.vnfd_helper, self.socket)
+ if script_file is None:
+ # autogenerate vpe_script if not given
+ vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
+ script_file = self.CFG_SCRIPT
+ else:
+ with utils.open_relative_file(script_file, task_path) as handle:
+ vpe_script = handle.read()
+
config_basename = posixpath.basename(config_file)
- script_basename = posixpath.basename(self.CFG_SCRIPT)
+ script_basename = posixpath.basename(script_file)
with utils.open_relative_file(action_bulk_file, task_path) as handle:
action_bulk = handle.read()
@@ -125,8 +134,6 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
with utils.open_relative_file(config_file, task_path) as handle:
vpe_config = handle.read()
- # vpe_script needs to be autogenerated
- vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces)
# upload the 4 config files to the target server
self.ssh_helper.upload_config_file(config_basename, vpe_config.format(**vpe_vars))
self.ssh_helper.upload_config_file(script_basename, vpe_script.format(**vpe_vars))
@@ -138,7 +145,8 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
LOG.info("Provision and start the %s", self.APP_NAME)
LOG.info(config_file)
LOG.info(self.CFG_SCRIPT)
- self._build_pipeline_kwargs(cfg_file='/tmp/' + config_basename)
+ self._build_pipeline_kwargs(cfg_file='/tmp/' + config_basename,
+ script='/tmp/' + script_basename)
return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
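
A short sketch of the new optional script_file handling in VpeApproxSetupEnvHelper: when the scenario's vnf configuration omits it, the script is autogenerated as before. The vnf_cfg dicts and paths below are made-up examples, not taken from a real test case.

# Illustrative sketch only; vnf_cfg contents and paths are assumptions.
import posixpath

CFG_SCRIPT = "/tmp/vpe_script"

def resolve_script(vnf_cfg):
    script_file = vnf_cfg.get('script_file', None)
    if script_file is None:
        # fall back to the autogenerated script, as in the pre-patch behaviour
        script_file = CFG_SCRIPT
        source = "autogenerated"
    else:
        source = "user-supplied"
    return posixpath.basename(script_file), source

print(resolve_script({'file': '/tmp/vpe_config'}))
print(resolve_script({'file': '/tmp/vpe_config', 'script_file': '/tmp/my_vpe.sh'}))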