Diffstat (limited to 'yardstick/network_services')
-rw-r--r--  yardstick/network_services/helpers/cpu.py                                    |  86
-rw-r--r--  yardstick/network_services/helpers/dpdkbindnic_helper.py                     |  18
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/__init__.py                   |   0
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py  |  53
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py | 688
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py             |  68
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py      |  88
-rw-r--r--  yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py   |  58
-rw-r--r--  yardstick/network_services/traffic_profile/vpp_rfc2544.py                    |  34
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py                      | 301
-rwxr-xr-x  yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py                | 215
-rwxr-xr-x  yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py                      | 273
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vims_vnf.py                       | 105
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpp_helpers.py                    | 675
14 files changed, 2631 insertions, 31 deletions
diff --git a/yardstick/network_services/helpers/cpu.py b/yardstick/network_services/helpers/cpu.py
index 8cdd829a0..279af204a 100644
--- a/yardstick/network_services/helpers/cpu.py
+++ b/yardstick/network_services/helpers/cpu.py
@@ -15,11 +15,15 @@
import io
+# Number of threads per core.
+NR_OF_THREADS = 2
+
class CpuSysCores(object):
def __init__(self, connection=""):
self.core_map = {}
+ self.cpuinfo = {}
self.connection = connection
def _open_cpuinfo(self):
@@ -62,13 +66,14 @@ class CpuSysCores(object):
def get_cpu_layout(self):
_, stdout, _ = self.connection.execute("lscpu -p")
- cpuinfo = {}
- cpuinfo['cpuinfo'] = list()
+ self.cpuinfo = {}
+ self.cpuinfo['cpuinfo'] = list()
for line in stdout.split("\n"):
if line and line[0] != "#":
- cpuinfo['cpuinfo'].append([CpuSysCores._str2int(x) for x in
- line.split(",")])
- return cpuinfo
+ self.cpuinfo['cpuinfo'].append(
+ [CpuSysCores._str2int(x) for x in
+ line.split(",")])
+ return self.cpuinfo
def validate_cpu_cfg(self, vnf_cfg=None):
if vnf_cfg is None:
@@ -89,9 +94,80 @@ class CpuSysCores(object):
return 0
+ def is_smt_enabled(self):
+ return CpuSysCores.smt_enabled(self.cpuinfo)
+
+ def cpu_list_per_node(self, cpu_node, smt_used=False):
+ cpu_node = int(cpu_node)
+ cpu_info = self.cpuinfo.get("cpuinfo")
+ if cpu_info is None:
+ raise RuntimeError("Node cpuinfo not available.")
+
+ smt_enabled = self.is_smt_enabled()
+ if not smt_enabled and smt_used:
+ raise RuntimeError("SMT is not enabled.")
+
+ cpu_list = []
+ for cpu in cpu_info:
+ if cpu[3] == cpu_node:
+ cpu_list.append(cpu[0])
+
+ if smt_enabled and not smt_used:
+ # The code assumes lscpu lists SMT siblings in the second half
+ # of the per-node list; keep only the first thread of each core.
+ cpu_list_len = len(cpu_list)
+ cpu_list = cpu_list[:int(cpu_list_len / NR_OF_THREADS)]
+
+ return cpu_list
+
+ def cpu_slice_of_list_per_node(self, cpu_node, skip_cnt=0, cpu_cnt=0,
+ smt_used=False):
+ cpu_list = self.cpu_list_per_node(cpu_node, smt_used)
+
+ cpu_list_len = len(cpu_list)
+ if cpu_cnt + skip_cnt > cpu_list_len:
+ raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).")
+
+ if cpu_cnt == 0:
+ cpu_cnt = cpu_list_len - skip_cnt
+
+ if smt_used:
+ cpu_list_0 = cpu_list[:int(cpu_list_len / NR_OF_THREADS)]
+ cpu_list_1 = cpu_list[int(cpu_list_len / NR_OF_THREADS):]
+ cpu_list = cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]
+ cpu_list.extend(cpu_list_1[skip_cnt:skip_cnt + cpu_cnt])
+ else:
+ cpu_list = cpu_list[skip_cnt:skip_cnt + cpu_cnt]
+
+ return cpu_list
+
+ def cpu_list_per_node_str(self, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
+ smt_used=False):
+ cpu_list = self.cpu_slice_of_list_per_node(cpu_node,
+ skip_cnt=skip_cnt,
+ cpu_cnt=cpu_cnt,
+ smt_used=smt_used)
+ return sep.join(str(cpu) for cpu in cpu_list)
+
@staticmethod
def _str2int(string):
try:
return int(string)
except ValueError:
return 0
+
+ @staticmethod
+ def smt_enabled(cpuinfo):
+ cpu_info = cpuinfo.get("cpuinfo")
+ if cpu_info is None:
+ raise RuntimeError("Node cpuinfo not available.")
+ cpu_mems = [item[-4:] for item in cpu_info]
+ cpu_mems_len = int(len(cpu_mems) / NR_OF_THREADS)
+ count = 0
+ for cpu_mem in cpu_mems[:cpu_mems_len]:
+ if cpu_mem in cpu_mems[cpu_mems_len:]:
+ count += 1
+ return count == cpu_mems_len
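The new helpers operate purely on self.cpuinfo, so they can be exercised without an SSH connection. A quick sketch (not part of the patch) against a fabricated `lscpu -p` layout: eight CPUs, four cores, two NUMA nodes, SMT siblings listed in the second half. The column layout is an assumption based on the parsing above.

    from yardstick.network_services.helpers.cpu import CpuSysCores

    cores = CpuSysCores(connection=None)   # no SSH needed for these helpers
    # Columns mimic "lscpu -p": CPU,Core,Socket,Node,L1d,L1i,L2,L3.
    cores.cpuinfo = {'cpuinfo': [
        [0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 1, 1, 1, 0],
        [2, 2, 1, 1, 2, 2, 2, 1], [3, 3, 1, 1, 3, 3, 3, 1],
        [4, 0, 0, 0, 0, 0, 0, 0], [5, 1, 0, 0, 1, 1, 1, 0],
        [6, 2, 1, 1, 2, 2, 2, 1], [7, 3, 1, 1, 3, 3, 3, 1],
    ]}
    print(cores.is_smt_enabled())                      # True: core rows repeat
    print(cores.cpu_list_per_node(0))                  # [0, 1], siblings dropped
    print(cores.cpu_list_per_node(0, smt_used=True))   # [0, 1, 4, 5]
    # Skip one core, take one, with both of its SMT threads: "1,5".
    print(cores.cpu_list_per_node_str(0, skip_cnt=1, cpu_cnt=1,
                                      smt_used=True))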
diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py
index 0fc63f88d..33a5e8c1d 100644
--- a/yardstick/network_services/helpers/dpdkbindnic_helper.py
+++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py
@@ -13,7 +13,6 @@
# limitations under the License.
import logging
import os
-
import re
from collections import defaultdict
from itertools import chain
@@ -21,7 +20,6 @@ from itertools import chain
from yardstick.common import exceptions
from yardstick.common.utils import validate_non_string_sequence
-
NETWORK_KERNEL = 'network_kernel'
NETWORK_DPDK = 'network_dpdk'
NETWORK_OTHER = 'network_other'
@@ -288,12 +286,18 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
raise DpdkBindHelperException(template.format(self.dpdk_devbind, res[0]))
return res
- def load_dpdk_driver(self):
+ def load_dpdk_driver(self, dpdk_driver=None):
+ if dpdk_driver is None:
+ dpdk_driver = self.dpdk_driver
cmd_template = "sudo modprobe {} && sudo modprobe {}"
- self.ssh_helper.execute(cmd_template.format(self.UIO_DRIVER, self.dpdk_driver))
-
- def check_dpdk_driver(self):
- return self.ssh_helper.execute("lsmod | grep -i {}".format(self.dpdk_driver))[0]
+ self.ssh_helper.execute(
+ cmd_template.format(self.UIO_DRIVER, dpdk_driver))
+
+ def check_dpdk_driver(self, dpdk_driver=None):
+ if dpdk_driver is None:
+ dpdk_driver = self.dpdk_driver
+ return \
+ self.ssh_helper.execute("lsmod | grep -i {}".format(dpdk_driver))[0]
@property
def _status_cmd(self):
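With this change the driver can be overridden per call, falling back to the instance default. A hedged usage sketch (not part of the patch); ssh_helper stands in for an existing SSH helper instance, and the constructor arguments reflect the current DpdkBindHelper signature as an assumption:

    helper = DpdkBindHelper(ssh_helper, dpdk_driver='igb_uio')
    helper.load_dpdk_driver()            # modprobe uio + igb_uio, as before
    helper.load_dpdk_driver('vfio-pci')  # one-off override for another NIC
    if helper.check_dpdk_driver('vfio-pci') != 0:  # non-zero exit: lsmod miss
        raise RuntimeError('vfio-pci module is not loaded')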
diff --git a/yardstick/network_services/helpers/vpp_helpers/__init__.py b/yardstick/network_services/helpers/vpp_helpers/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/__init__.py
diff --git a/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py b/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py
new file mode 100644
index 000000000..fced05833
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py;hb=HEAD
+
+
+from abc import ABCMeta, abstractmethod
+
+
+class AbstractSearchAlgorithm(object):
+ """Abstract class defining common API for search algorithms."""
+
+ __metaclass__ = ABCMeta
+
+ def __init__(self, measurer):
+ """Store the rate provider.
+
+ :param measurer: Object able to perform trial or composite measurements.
+ :type measurer: AbstractMeasurer.AbstractMeasurer
+ """
+ # TODO: Type check for AbstractMeasurer?
+ self.measurer = measurer
+
+ @abstractmethod
+ def narrow_down_ndr_and_pdr(
+ self, fail_rate, line_rate, packet_loss_ratio):
+ """Perform measurements to narrow down intervals, return them.
+
+ This will be renamed when custom loss ratio lists are supported.
+
+ :param fail_rate: Minimal target transmit rate [pps].
+ :param line_rate: Maximal target transmit rate [pps].
+ :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
+ :type fail_rate: float
+ :type line_rate: float
+ :type packet_loss_ratio: float
+ :returns: Structure containing narrowed down intervals
+ and their measurements.
+ :rtype: NdrPdrResult.NdrPdrResult
+ """
+ # TODO: Do we agree on arguments related to precision or trial duration?
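For orientation, a minimal sketch (not in the patch) of the contract a concrete algorithm must fulfil. FixedRateSearch is a throwaway name for illustration; MultipleLossRatioSearch below is the real subclass.

    from yardstick.network_services.helpers.vpp_helpers.abstract_search_algorithm import \
        AbstractSearchAlgorithm

    class FixedRateSearch(AbstractSearchAlgorithm):
        def narrow_down_ndr_and_pdr(self, fail_rate, line_rate,
                                    packet_loss_ratio):
            # A real implementation drives self.measurer between fail_rate
            # and line_rate and returns an NdrPdrResult.
            raise NotImplementedError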
diff --git a/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py b/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py
new file mode 100644
index 000000000..582e3dc27
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py
@@ -0,0 +1,688 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py;hb=HEAD
+
+import datetime
+import logging
+import math
+import time
+
+from yardstick.network_services.helpers.vpp_helpers.abstract_search_algorithm import \
+ AbstractSearchAlgorithm
+from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \
+ NdrPdrResult
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
+ ReceiveRateInterval
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
+ ReceiveRateMeasurement
+
+LOGGING = logging.getLogger(__name__)
+
+
+class MultipleLossRatioSearch(AbstractSearchAlgorithm):
+ """Optimized binary search algorithm for finding NDR and PDR bounds.
+
+ Traditional binary search algorithm needs initial interval
+ (lower and upper bound), and returns final interval after bisecting
+ (until some exit condition is met).
+ The exit condition is usually related to the interval width,
+ (upper bound value minus lower bound value).
+
+ The optimized algorithm contains several improvements
+ aimed to reduce overall search time.
+
+ One improvement is searching for two intervals at once.
+ The intervals are for NDR (No Drop Rate) and PDR (Partial Drop Rate).
+
+ Next improvement is that the initial interval does not need to be valid.
+ Imagine initial interval (10, 11) where 11 is smaller
+ than the searched value.
+ The algorithm will try (11, 13) interval next, and if 13 is still smaller,
+ (13, 17) and so on, doubling width until the upper bound is valid.
+ The part when interval expands is called external search,
+ the part when interval is bisected is called internal search.
+
+ Next improvement is that trial measurements at small trial duration
+ can be used to find a reasonable interval for full trial duration search.
+ This results in more trials performed, but smaller overall duration
+ in general.
+
+ Next improvement is bisecting in logarithmic quantities,
+ so that exit criteria can be independent of measurement units.
+
+ Next improvement is basing the initial interval on receive rates.
+
+ Final improvement is exiting early if the minimal value
+ is not a valid lower bound.
+
+ The complete search consists of several phases,
+ each phase performing several trial measurements.
+ Initial phase creates initial interval based on receive rates
+ at maximum rate and at maximum receive rate (MRR).
+ The final phase and preceding intermediate phases perform
+ external and internal search steps;
+ each resulting interval is the starting point for the next phase.
+ The resulting interval of final phase is the result of the whole algorithm.
+
+ Each non-initial phase uses its own trial duration and width goal.
+ Any non-initial phase stops searching (for NDR or PDR independently)
+ when minimum is not a valid lower bound (at current duration),
+ or all of the following is true:
+ Both bounds are valid, both bounds are measured at the current
+ phase trial duration, and the interval width is less than
+ the width goal for the current phase.
+
+ TODO: Review and update this docstring according to rst docs.
+ TODO: Support configurable number of Packet Loss Ratios.
+ """
+
+ class ProgressState(object):
+ """Structure containing data to be passed around in recursion."""
+
+ def __init__(
+ self, result, phases, duration, width_goal, packet_loss_ratio,
+ minimum_transmit_rate, maximum_transmit_rate):
+ """Convert and store the argument values.
+
+ :param result: Current measured NDR and PDR intervals.
+ :param phases: How many intermediate phases to perform
+ before the current one.
+ :param duration: Trial duration to use in the current phase [s].
+ :param width_goal: The goal relative width for the current phase.
+ :param packet_loss_ratio: PDR fraction for the current search.
+ :param minimum_transmit_rate: Minimum target transmit rate
+ for the current search [pps].
+ :param maximum_transmit_rate: Maximum target transmit rate
+ for the current search [pps].
+ :type result: NdrPdrResult.NdrPdrResult
+ :type phases: int
+ :type duration: float
+ :type width_goal: float
+ :type packet_loss_ratio: float
+ :type minimum_transmit_rate: float
+ :type maximum_transmit_rate: float
+ """
+ self.result = result
+ self.phases = int(phases)
+ self.duration = float(duration)
+ self.width_goal = float(width_goal)
+ self.packet_loss_ratio = float(packet_loss_ratio)
+ self.minimum_transmit_rate = float(minimum_transmit_rate)
+ self.maximum_transmit_rate = float(maximum_transmit_rate)
+
+ def __init__(self, measurer, latency=False, pkt_size=64,
+ final_relative_width=0.005,
+ final_trial_duration=30.0, initial_trial_duration=1.0,
+ number_of_intermediate_phases=2, timeout=600.0, doublings=1):
+ """Store the measurer object and additional arguments.
+
+ :param measurer: Rate provider to use by this search object.
+ :param final_relative_width: Final lower bound transmit rate
+ cannot be more distant than this multiple of upper bound [1].
+ :param final_trial_duration: Trial duration for the final phase [s].
+ :param initial_trial_duration: Trial duration for the initial phase
+ and also for the first intermediate phase [s].
+ :param number_of_intermediate_phases: Number of intermediate phases
+ to perform before the final phase [1].
+ :param timeout: The search will fail itself when not finished
+ before this overall time [s].
+ :param doublings: How many doublings to do in external search step.
+ Default 1 is suitable for fairly stable tests,
+ less stable tests might get better overall duration with 2 or more.
+ :type measurer: AbstractMeasurer.AbstractMeasurer
+ :type final_relative_width: float
+ :type final_trial_duration: float
+ :type initial_trial_duration: float
+ :type number_of_intermediate_phases: int
+ :type timeout: float
+ :type doublings: int
+ """
+ super(MultipleLossRatioSearch, self).__init__(measurer)
+ self.latency = latency
+ self.pkt_size = int(pkt_size)
+ self.final_trial_duration = float(final_trial_duration)
+ self.final_relative_width = float(final_relative_width)
+ self.number_of_intermediate_phases = int(number_of_intermediate_phases)
+ self.initial_trial_duration = float(initial_trial_duration)
+ self.timeout = float(timeout)
+ self.doublings = int(doublings)
+
+ self.queue = None
+ self.port_pg_id = None
+ self.ports = []
+ self.test_data = {}
+ self.profiles = {}
+
+ @staticmethod
+ def double_relative_width(relative_width):
+ """Return relative width corresponding to double logarithmic width.
+
+ :param relative_width: The base relative width to double.
+ :type relative_width: float
+ :returns: The relative width of double logarithmic size.
+ :rtype: float
+ """
+ return 1.999 * relative_width - relative_width * relative_width
+ # The number should be 2.0, but we want to avoid rounding errors,
+ # and ensure half of double is not larger than the original value.
+
+ @staticmethod
+ def double_step_down(relative_width, current_bound):
+ """Return rate of double logarithmic width below.
+
+ :param relative_width: The base relative width to double.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type current_bound: float
+ :returns: Transmit rate smaller by logarithmically double width [pps].
+ :rtype: float
+ """
+ return current_bound * (
+ 1.0 - MultipleLossRatioSearch.double_relative_width(
+ relative_width))
+
+ @staticmethod
+ def expand_down(relative_width, doublings, current_bound):
+ """Return rate of expanded logarithmic width below.
+
+ :param relative_width: The base relative width to double.
+ :param doublings: How many doublings to do for expansion.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type doublings: int
+ :type current_bound: float
+ :returns: Transmit rate smaller by logarithmically double width [pps].
+ :rtype: float
+ """
+ for _ in range(doublings):
+ relative_width = MultipleLossRatioSearch.double_relative_width(
+ relative_width)
+ return current_bound * (1.0 - relative_width)
+
+ @staticmethod
+ def double_step_up(relative_width, current_bound):
+ """Return rate of double logarithmic width above.
+
+ :param relative_width: The base relative width to double.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type current_bound: float
+ :returns: Transmit rate larger by logarithmically double width [pps].
+ :rtype: float
+ """
+ return current_bound / (
+ 1.0 - MultipleLossRatioSearch.double_relative_width(
+ relative_width))
+
+ @staticmethod
+ def expand_up(relative_width, doublings, current_bound):
+ """Return rate of expanded logarithmic width above.
+
+ :param relative_width: The base relative width to double.
+ :param doublings: How many doublings to do for expansion.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type doublings: int
+ :type current_bound: float
+ :returns: Transmit rate larger by logarithmically double width [pps].
+ :rtype: float
+ """
+ for _ in range(doublings):
+ relative_width = MultipleLossRatioSearch.double_relative_width(
+ relative_width)
+ return current_bound / (1.0 - relative_width)
+
+ @staticmethod
+ def half_relative_width(relative_width):
+ """Return relative width corresponding to half logarithmic width.
+
+ :param relative_width: The base relative width to halve.
+ :type relative_width: float
+ :returns: The relative width of half logarithmic size.
+ :rtype: float
+ """
+ return 1.0 - math.sqrt(1.0 - relative_width)
+
+ @staticmethod
+ def half_step_up(relative_width, current_bound):
+ """Return rate of half logarithmic width above.
+
+ :param relative_width: The base relative width to halve.
+ :param current_bound: The current target transmit rate to move [pps].
+ :type relative_width: float
+ :type current_bound: float
+ :returns: Transmit rate larger by logarithmically half width [pps].
+ :rtype: float
+ """
+ return current_bound / (
+ 1.0 - MultipleLossRatioSearch.half_relative_width(
+ relative_width))
+
+ def init_generator(self, ports, port_pg_id, profiles, test_data, queue):
+ self.ports = ports
+ self.port_pg_id = port_pg_id
+ self.profiles = profiles
+ self.test_data = test_data
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def collect_kpi(self, stats, test_value):
+ samples = self.measurer.generate_samples(stats, self.ports,
+ self.port_pg_id, self.latency)
+ samples.update(self.test_data)
+ LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(),
+ test_value, samples)
+ self.queue.put(samples)
+
+ def narrow_down_ndr_and_pdr(
+ self, minimum_transmit_rate, maximum_transmit_rate,
+ packet_loss_ratio):
+ """Perform initial phase, create state object, proceed with next phases.
+
+ :param minimum_transmit_rate: Minimal target transmit rate [pps].
+ :param maximum_transmit_rate: Maximal target transmit rate [pps].
+ :param packet_loss_ratio: Fraction of packets lost, for PDR [1].
+ :type minimum_transmit_rate: float
+ :type maximum_transmit_rate: float
+ :type packet_loss_ratio: float
+ :returns: Structure containing narrowed down intervals
+ and their measurements.
+ :rtype: NdrPdrResult.NdrPdrResult
+ :raises RuntimeError: If total duration is larger than timeout.
+ """
+ minimum_transmit_rate = float(minimum_transmit_rate)
+ maximum_transmit_rate = float(maximum_transmit_rate)
+ packet_loss_ratio = float(packet_loss_ratio)
+ line_measurement = self.measure(
+ self.initial_trial_duration, maximum_transmit_rate, self.latency)
+ initial_width_goal = self.final_relative_width
+ for _ in range(self.number_of_intermediate_phases):
+ initial_width_goal = self.double_relative_width(initial_width_goal)
+ max_lo = maximum_transmit_rate * (1.0 - initial_width_goal)
+ mrr = max(
+ minimum_transmit_rate,
+ min(max_lo, line_measurement.receive_rate))
+ mrr_measurement = self.measure(
+ self.initial_trial_duration, mrr, self.latency)
+ # Attempt to get narrower width.
+ if mrr_measurement.loss_fraction > 0.0:
+ max2_lo = mrr * (1.0 - initial_width_goal)
+ mrr2 = min(max2_lo, mrr_measurement.receive_rate)
+ else:
+ mrr2 = mrr / (1.0 - initial_width_goal)
+ if minimum_transmit_rate < mrr2 < maximum_transmit_rate:
+ line_measurement = mrr_measurement
+ mrr_measurement = self.measure(
+ self.initial_trial_duration, mrr2, self.latency)
+ if mrr2 > mrr:
+ buf = line_measurement
+ line_measurement = mrr_measurement
+ mrr_measurement = buf
+ starting_interval = ReceiveRateInterval(
+ mrr_measurement, line_measurement)
+ starting_result = NdrPdrResult(starting_interval, starting_interval)
+ state = self.ProgressState(
+ starting_result, self.number_of_intermediate_phases,
+ self.final_trial_duration, self.final_relative_width,
+ packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate)
+ state = self.ndrpdr(state)
+ result = state.result
+ # theor_max_thruput = 0
+ result_samples = {}
+
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'NDR_LOWER', result.ndr_interval.measured_low.transmit_rate,
+ self.pkt_size, result.ndr_interval.measured_low.latency)
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'NDR_UPPER', result.ndr_interval.measured_high.transmit_rate,
+ self.pkt_size)
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'PDR_LOWER', result.pdr_interval.measured_low.transmit_rate,
+ self.pkt_size, result.pdr_interval.measured_low.latency)
+ MultipleLossRatioSearch.display_single_bound(result_samples,
+ 'PDR_UPPER', result.pdr_interval.measured_high.transmit_rate,
+ self.pkt_size)
+ pdr_msg = self.check_ndrpdr_interval_validity(result_samples, "PDR",
+ result.pdr_interval,
+ packet_loss_ratio)
+ ndr_msg = self.check_ndrpdr_interval_validity(result_samples, "NDR",
+ result.ndr_interval)
+ self.queue.put(result_samples)
+
+ LOGGING.debug("result_samples: %s", result_samples)
+ LOGGING.info(pdr_msg)
+ LOGGING.info(ndr_msg)
+
+ self.perform_additional_measurements_based_on_ndrpdr_result(result)
+
+ return result_samples
+
+ def _measure_and_update_state(self, state, transmit_rate):
+ """Perform trial measurement, update bounds, return new state.
+
+ :param state: State before this measurement.
+ :param transmit_rate: Target transmit rate for this measurement [pps].
+ :type state: ProgressState
+ :type transmit_rate: float
+ :returns: State after the measurement.
+ :rtype: ProgressState
+ """
+ # TODO: Implement https://stackoverflow.com/a/24683360
+ # to avoid the string manipulation if log verbosity is too low.
+ LOGGING.info("result before update: %s", state.result)
+ LOGGING.debug(
+ "relative widths in goals: %s", state.result.width_in_goals(
+ self.final_relative_width))
+ measurement = self.measure(state.duration, transmit_rate, self.latency)
+ ndr_interval = self._new_interval(
+ state.result.ndr_interval, measurement, 0.0)
+ pdr_interval = self._new_interval(
+ state.result.pdr_interval, measurement, state.packet_loss_ratio)
+ state.result = NdrPdrResult(ndr_interval, pdr_interval)
+ return state
+
+ @staticmethod
+ def _new_interval(old_interval, measurement, packet_loss_ratio):
+ """Return new interval with bounds updated according to the measurement.
+
+ :param old_interval: The current interval before the measurement.
+ :param measurement: The new measurement to take into account.
+ :param packet_loss_ratio: Fraction for PDR (or zero for NDR).
+ :type old_interval: ReceiveRateInterval.ReceiveRateInterval
+ :type measurement: ReceiveRateMeasurement.ReceiveRateMeasurement
+ :type packet_loss_ratio: float
+ :returns: The updated interval.
+ :rtype: ReceiveRateInterval.ReceiveRateInterval
+ """
+ old_lo, old_hi = old_interval.measured_low, old_interval.measured_high
+ # Priority zero: direct replace if the target Tr is the same.
+ if measurement.target_tr in (old_lo.target_tr, old_hi.target_tr):
+ if measurement.target_tr == old_lo.target_tr:
+ return ReceiveRateInterval(measurement, old_hi)
+ else:
+ return ReceiveRateInterval(old_lo, measurement)
+ # Priority one: invalid lower bound allows only one type of update.
+ if old_lo.loss_fraction > packet_loss_ratio:
+ # We can only expand down, old bound becomes valid upper one.
+ if measurement.target_tr < old_lo.target_tr:
+ return ReceiveRateInterval(measurement, old_lo)
+ else:
+ return old_interval
+ # Lower bound is now valid.
+ # Next priorities depend on target Tr.
+ if measurement.target_tr < old_lo.target_tr:
+ # Lower external measurement, relevant only
+ # if the new measurement has high loss rate.
+ if measurement.loss_fraction > packet_loss_ratio:
+ # Returning the broader interval as old_lo
+ # would be invalid upper bound.
+ return ReceiveRateInterval(measurement, old_hi)
+ elif measurement.target_tr > old_hi.target_tr:
+ # Upper external measurement, only relevant for invalid upper bound.
+ if old_hi.loss_fraction <= packet_loss_ratio:
+ # Old upper bound becomes valid new lower bound.
+ return ReceiveRateInterval(old_hi, measurement)
+ else:
+ # Internal measurement, replaced boundary
+ # depends on measured loss fraction.
+ if measurement.loss_fraction > packet_loss_ratio:
+ # We have found a narrow valid interval,
+ # regardless of whether old upper bound was valid.
+ return ReceiveRateInterval(old_lo, measurement)
+ else:
+ # In ideal world, we would not want to shrink interval
+ # if upper bound is not valid.
+ # In the real world, we want to shrink it for
+ # "invalid upper bound at maximal rate" case.
+ return ReceiveRateInterval(measurement, old_hi)
+ # Fallback, the interval is unchanged by the measurement.
+ return old_interval
+
+ def ndrpdr(self, state):
+ """Pefrom trials for this phase. Return the new state when done.
+
+ :param state: State before this phase.
+ :type state: ProgressState
+ :returns: The updated state.
+ :rtype: ProgressState
+ :raises RuntimeError: If total duration is larger than timeout.
+ """
+ start_time = time.time()
+ if state.phases > 0:
+ # We need to finish preceding intermediate phases first.
+ saved_phases = state.phases
+ state.phases -= 1
+ # Preceding phases have shorter duration.
+ saved_duration = state.duration
+ duration_multiplier = state.duration / self.initial_trial_duration
+ phase_exponent = float(state.phases) / saved_phases
+ state.duration = self.initial_trial_duration * math.pow(
+ duration_multiplier, phase_exponent)
+ # Shorter durations do not need that narrow widths.
+ saved_width = state.width_goal
+ state.width_goal = self.double_relative_width(state.width_goal)
+ # Recurse.
+ state = self.ndrpdr(state)
+ # Restore the state for current phase.
+ state.duration = saved_duration
+ state.width_goal = saved_width
+ state.phases = saved_phases # Not needed, but just in case.
+ LOGGING.info(
+ "starting iterations with duration %s and relative width goal %s",
+ state.duration, state.width_goal)
+ while 1:
+ if time.time() > start_time + self.timeout:
+ raise RuntimeError("Optimized search takes too long.")
+ # Order of priorities: invalid bounds (nl, pl, nh, ph),
+ # then narrowing relative Tr widths.
+ # Durations are not priorities yet,
+ # they will settle on their own hopefully.
+ ndr_lo = state.result.ndr_interval.measured_low
+ ndr_hi = state.result.ndr_interval.measured_high
+ pdr_lo = state.result.pdr_interval.measured_low
+ pdr_hi = state.result.pdr_interval.measured_high
+ ndr_rel_width = max(
+ state.width_goal, state.result.ndr_interval.rel_tr_width)
+ pdr_rel_width = max(
+ state.width_goal, state.result.pdr_interval.rel_tr_width)
+ # If we are hitting maximal or minimal rate, we cannot shift,
+ # but we can re-measure.
+ if ndr_lo.loss_fraction > 0.0:
+ if ndr_lo.target_tr > state.minimum_transmit_rate:
+ new_tr = max(
+ state.minimum_transmit_rate,
+ self.expand_down(
+ ndr_rel_width, self.doublings, ndr_lo.target_tr))
+ LOGGING.info("ndr lo external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif ndr_lo.duration < state.duration:
+ LOGGING.info("ndr lo minimal re-measure")
+ state = self._measure_and_update_state(
+ state, state.minimum_transmit_rate)
+ continue
+ if pdr_lo.loss_fraction > state.packet_loss_ratio:
+ if pdr_lo.target_tr > state.minimum_transmit_rate:
+ new_tr = max(
+ state.minimum_transmit_rate,
+ self.expand_down(
+ pdr_rel_width, self.doublings, pdr_lo.target_tr))
+ LOGGING.info("pdr lo external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif pdr_lo.duration < state.duration:
+ LOGGING.info("pdr lo minimal re-measure")
+ state = self._measure_and_update_state(
+ state, state.minimum_transmit_rate)
+ continue
+ if ndr_hi.loss_fraction <= 0.0:
+ if ndr_hi.target_tr < state.maximum_transmit_rate:
+ new_tr = min(
+ state.maximum_transmit_rate,
+ self.expand_up(
+ ndr_rel_width, self.doublings, ndr_hi.target_tr))
+ LOGGING.info("ndr hi external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif ndr_hi.duration < state.duration:
+ LOGGING.info("ndr hi maximal re-measure")
+ state = self._measure_and_update_state(
+ state, state.maximum_transmit_rate)
+ continue
+ if pdr_hi.loss_fraction <= state.packet_loss_ratio:
+ if pdr_hi.target_tr < state.maximum_transmit_rate:
+ new_tr = min(
+ state.maximum_transmit_rate,
+ self.expand_up(
+ pdr_rel_width, self.doublings, pdr_hi.target_tr))
+ LOGGING.info("pdr hi external %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ elif pdr_hi.duration < state.duration:
+ LOGGING.info("ndr hi maximal re-measure")
+ state = self._measure_and_update_state(
+ state, state.maximum_transmit_rate)
+ continue
+ # If we are hitting maximum_transmit_rate,
+ # it is still worth narrowing width,
+ # hoping large enough loss fraction will happen.
+ # But if we are hitting the minimal rate (at current duration),
+ # no additional measurement will help with that,
+ # so we can stop narrowing in this phase.
+ if (ndr_lo.target_tr <= state.minimum_transmit_rate
+ and ndr_lo.loss_fraction > 0.0):
+ ndr_rel_width = 0.0
+ if (pdr_lo.target_tr <= state.minimum_transmit_rate
+ and pdr_lo.loss_fraction > state.packet_loss_ratio):
+ pdr_rel_width = 0.0
+ if ndr_rel_width > state.width_goal:
+ # We have to narrow NDR width first, as NDR internal search
+ # can invalidate PDR (but not vice versa).
+ new_tr = self.half_step_up(ndr_rel_width, ndr_lo.target_tr)
+ LOGGING.info("Bisecting for NDR at %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ if pdr_rel_width > state.width_goal:
+ # PDR internal search.
+ new_tr = self.half_step_up(pdr_rel_width, pdr_lo.target_tr)
+ LOGGING.info("Bisecting for PDR at %s", new_tr)
+ state = self._measure_and_update_state(state, new_tr)
+ continue
+ # We do not need to improve width, but there still might be
+ # some measurements with smaller duration.
+ # We need to re-measure with full duration, possibly
+ # creating invalid bounds to resolve (thus broadening width).
+ if ndr_lo.duration < state.duration:
+ LOGGING.info("re-measuring NDR lower bound")
+ state = self._measure_and_update_state(state, ndr_lo.target_tr)
+ continue
+ if pdr_lo.duration < state.duration:
+ LOGGING.info("re-measuring PDR lower bound")
+ state = self._measure_and_update_state(state, pdr_lo.target_tr)
+ continue
+ # Except when lower bounds have high loss fraction, in that case
+ # we do not need to re-measure _upper_ bounds.
+ if ndr_hi.duration < state.duration and ndr_rel_width > 0.0:
+ LOGGING.info("re-measuring NDR upper bound")
+ state = self._measure_and_update_state(state, ndr_hi.target_tr)
+ continue
+ if pdr_hi.duration < state.duration and pdr_rel_width > 0.0:
+ LOGGING.info("re-measuring PDR upper bound")
+ state = self._measure_and_update_state(state, pdr_hi.target_tr)
+ continue
+ # Widths are narrow (or lower bound minimal), bound measurements
+ # are long enough, we can return.
+ LOGGING.info("phase done")
+ break
+ return state
+
+ def measure(self, duration, transmit_rate, latency):
+ duration = float(duration)
+ transmit_rate = float(transmit_rate)
+ # Trex needs target Tr per stream, but reports aggregate Tx and Dx.
+ unit_rate = str(transmit_rate / 2.0) + "pps"
+ stats = self.measurer.send_traffic_on_tg(self.ports, self.port_pg_id,
+ duration, unit_rate,
+ latency=latency)
+ self.measurer.client.reset(ports=self.ports)
+ self.measurer.client.clear_stats(ports=self.ports)
+ self.measurer.client.remove_all_streams(ports=self.ports)
+ for port, profile in self.profiles.items():
+ self.measurer.client.add_streams(profile, ports=[port])
+ self.collect_kpi(stats, unit_rate)
+ transmit_count = int(self.measurer.sent)
+ loss_count = int(self.measurer.loss)
+ measurement = ReceiveRateMeasurement(
+ duration, transmit_rate, transmit_count, loss_count)
+ measurement.latency = self.measurer.latency
+ return measurement
+
+ def perform_additional_measurements_based_on_ndrpdr_result(self, result):
+ duration = 5.0
+ rate = "{}{}".format(result.ndr_interval.measured_low.target_tr / 2.0,
+ 'pps')
+ for _ in range(0, 1):
+ stats = self.measurer.send_traffic_on_tg(self.ports,
+ self.port_pg_id, duration,
+ rate)
+ self.collect_kpi(stats, rate)
+ LOGGING.info('Traffic loss occurred: %s', self.measurer.loss)
+
+ @staticmethod
+ def display_single_bound(result_samples, result_type, rate_total, pkt_size,
+ latency=None):
+ bandwidth_total = float(rate_total) * (pkt_size + 20) * 8 / (10 ** 9)
+
+ result_samples["Result_{}".format(result_type)] = {
+ "rate_total_pps": float(rate_total),
+ "bandwidth_total_Gbps": float(bandwidth_total),
+ }
+
+ if latency:
+ for index, item in enumerate(latency):
+ if index == 0:
+ name = "Result_{}_{}".format("stream0", result_type)
+ else:
+ name = "Result_{}_{}".format("stream1", result_type)
+ lat_min, lat_avg, lat_max = item.split('/')
+ result_samples[name] = {
+ "min_latency": float(lat_min),
+ "avg_latency": float(lat_avg),
+ "max_latency": float(lat_max),
+ }
+
+ @staticmethod
+ def check_ndrpdr_interval_validity(result_samples, result_type, interval,
+ packet_loss_ratio=0.0):
+ lower_bound = interval.measured_low
+ lower_bound_lf = lower_bound.loss_fraction
+
+ result_samples["Result_{}_packets_lost".format(result_type)] = {
+ "packet_loss_ratio": float(lower_bound_lf),
+ "packets_lost": float(lower_bound.loss_count),
+ }
+
+ if lower_bound_lf <= packet_loss_ratio:
+ return "Minimal rate loss fraction {} reach target {}".format(
+ lower_bound_lf, packet_loss_ratio)
+ else:
+ message = "Minimal rate loss fraction {} does not reach target {}".format(
+ lower_bound_lf, packet_loss_ratio)
+ if lower_bound_lf >= 1.0:
+ return '{}\nZero packets forwarded!'.format(message)
+ else:
+ return '{}\n{} packets lost.'.format(message,
+ lower_bound.loss_count)
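A small numeric sketch (not part of the patch) of the logarithmic-width arithmetic this class is built on: "doubling" a relative width w squares the remaining fraction, (1 - w') = (1 - w)^2, approximated with the 1.999 factor above to stay on the safe side of rounding, and half_relative_width inverts it. The external search then expands the step by repeated doublings.

    from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \
        MultipleLossRatioSearch as Search

    w = 0.005                                # final relative width goal
    dw = Search.double_relative_width(w)     # 0.00997, about 2*w - w*w
    hw = Search.half_relative_width(dw)      # back to roughly 0.005
    print(dw, hw)

    bound = 1000000.0                        # current lower bound [pps]
    for doublings in (1, 2, 3):
        # Each extra doubling widens the downward expansion step.
        print(doublings, Search.expand_down(w, doublings, bound))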
diff --git a/yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py b/yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py
new file mode 100644
index 000000000..34a97f9fb
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/ndr_pdr_result.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/NdrPdrResult.py;hb=HEAD
+
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
+ ReceiveRateInterval
+
+
+class NdrPdrResult(object):
+ """Two measurement intervals, return value of search algorithms.
+
+ Partial fraction is NOT part of the result. Pdr interval should be valid
+ for all partial fractions implied by the interval."""
+
+ def __init__(self, ndr_interval, pdr_interval):
+ """Store the measured intervals after checking argument types.
+
+ :param ndr_interval: Object containing data for NDR part of the result.
+ :param pdr_interval: Object containing data for PDR part of the result.
+ :type ndr_interval: ReceiveRateInterval.ReceiveRateInterval
+ :type pdr_interval: ReceiveRateInterval.ReceiveRateInterval
+ """
+ # TODO: Type checking is not very pythonic,
+ # perhaps users can fix wrong usage without it?
+ if not isinstance(ndr_interval, ReceiveRateInterval):
+ raise TypeError("ndr_interval, is not a ReceiveRateInterval: "
+ "{ndr!r}".format(ndr=ndr_interval))
+ if not isinstance(pdr_interval, ReceiveRateInterval):
+ raise TypeError("pdr_interval, is not a ReceiveRateInterval: "
+ "{pdr!r}".format(pdr=pdr_interval))
+ self.ndr_interval = ndr_interval
+ self.pdr_interval = pdr_interval
+
+ def width_in_goals(self, relative_width_goal):
+ """Return a debug string related to current widths in logarithmic scale.
+
+ :param relative_width_goal: Upper bound times this is the goal
+ difference between upper bound and lower bound.
+ :type relative_width_goal: float
+ :returns: Message containing NDR and PDR widths in goals.
+ :rtype: str
+ """
+ return "ndr {ndr_in_goals}; pdr {pdr_in_goals}".format(
+ ndr_in_goals=self.ndr_interval.width_in_goals(relative_width_goal),
+ pdr_in_goals=self.pdr_interval.width_in_goals(relative_width_goal))
+
+ def __str__(self):
+ """Return string as tuple of named values."""
+ return "NDR={ndr!s};PDR={pdr!s}".format(
+ ndr=self.ndr_interval, pdr=self.pdr_interval)
+
+ def __repr__(self):
+ """Return string evaluable as a constructor call."""
+ return "NdrPdrResult(ndr_interval={ndr!r},pdr_interval={pdr!r})".format(
+ ndr=self.ndr_interval, pdr=self.pdr_interval)
diff --git a/yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py b/yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py
new file mode 100644
index 000000000..517a99c1f
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/receive_rate_interval.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/ReceiveRateInterval.py;hb=HEAD
+
+import math
+
+from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
+ ReceiveRateMeasurement
+
+
+class ReceiveRateInterval(object):
+ """Structure defining two Rr measurements, and their relation."""
+
+ def __init__(self, measured_low, measured_high):
+ """Store the bound measurements after checking argument types.
+
+ :param measured_low: Measurement for the lower bound.
+ :param measured_high: Measurement for the upper bound.
+ :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
+ :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
+ """
+ # TODO: Type checking is not very pythonic,
+ # perhaps users can fix wrong usage without it?
+ if not isinstance(measured_low, ReceiveRateMeasurement):
+ raise TypeError("measured_low is not a ReceiveRateMeasurement: "
+ "{low!r}".format(low=measured_low))
+ if not isinstance(measured_high, ReceiveRateMeasurement):
+ raise TypeError("measured_high is not a ReceiveRateMeasurement: "
+ "{high!r}".format(high=measured_high))
+ self.measured_low = measured_low
+ self.measured_high = measured_high
+ # Declare secondary quantities to appease pylint.
+ self.abs_tr_width = None
+ """Absolute width of target transmit rate. Upper minus lower."""
+ self.rel_tr_width = None
+ """Relative width of target transmit rate. Absolute divided by upper."""
+ self.sort()
+
+ def sort(self):
+ """Sort bounds by target Tr, compute secondary quantities."""
+ if self.measured_low.target_tr > self.measured_high.target_tr:
+ self.measured_low, self.measured_high = (
+ self.measured_high, self.measured_low)
+ self.abs_tr_width = (
+ self.measured_high.target_tr - self.measured_low.target_tr)
+ self.rel_tr_width = round(
+ self.abs_tr_width / self.measured_high.target_tr, 5)
+
+ def width_in_goals(self, relative_width_goal):
+ """Return float value.
+
+ Relative width goal is some (negative) value on logarithmic scale.
+ Current relative width is another logarithmic value.
+ Return the latter divided by the former.
+ This is useful when investigating how surprising widths came to be.
+
+ :param relative_width_goal: Upper bound times this is the goal
+ difference between upper bound and lower bound.
+ :type relative_width_goal: float
+ :returns: Current width as logarithmic multiple of goal width [1].
+ :rtype: float
+ """
+ return round(math.log(1.0 - self.rel_tr_width) / math.log(
+ 1.0 - relative_width_goal), 5)
+
+ def __str__(self):
+ """Return string as half-open interval."""
+ return "[{low!s};{high!s})".format(
+ low=self.measured_low, high=self.measured_high)
+
+ def __repr__(self):
+ """Return string evaluable as a constructor call."""
+ return ("ReceiveRateInterval(measured_low={low!r}"
+ ",measured_high={high!r})".format(low=self.measured_low,
+ high=self.measured_high))
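A short sketch (not part of the patch) showing that bounds are sorted on construction and how the width quantities are derived; the numbers are arbitrary:

    from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
        ReceiveRateInterval
    from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
        ReceiveRateMeasurement

    low = ReceiveRateMeasurement(1.0, 1000000.0, 1000000, 0)
    high = ReceiveRateMeasurement(1.0, 2000000.0, 2000000, 100)
    interval = ReceiveRateInterval(high, low)  # sort() fixes the order
    print(interval.abs_tr_width)           # 1000000.0 pps
    print(interval.rel_tr_width)           # 0.5 == (2e6 - 1e6) / 2e6
    print(interval.width_in_goals(0.005))  # ~138.28 goal-widths to go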
diff --git a/yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py b/yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py
new file mode 100644
index 000000000..2c59ea104
--- /dev/null
+++ b/yardstick/network_services/helpers/vpp_helpers/receive_rate_measurement.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a modified copy of
+# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py;hb=HEAD
+
+
+class ReceiveRateMeasurement(object):
+ """Structure defining the result of single Rr measurement."""
+
+ def __init__(self, duration, target_tr, transmit_count, loss_count):
+ """Constructor, normalize primary and compute secondary quantities.
+
+ :param duration: Measurement duration [s].
+ :param target_tr: Target transmit rate [pps].
+ If bidirectional traffic is measured, this is bidirectional rate.
+ :param transmit_count: Number of packets transmitted [1].
+ :param loss_count: Number of packets transmitted but not received [1].
+ :type duration: float
+ :type target_tr: float
+ :type transmit_count: int
+ :type loss_count: int
+ """
+ self.duration = float(duration)
+ self.target_tr = float(target_tr)
+ self.transmit_count = int(transmit_count)
+ self.loss_count = int(loss_count)
+ self.receive_count = round(transmit_count - loss_count, 5)
+ self.transmit_rate = round(transmit_count / self.duration, 5)
+ self.loss_rate = round(loss_count / self.duration, 5)
+ self.receive_rate = round(self.receive_count / self.duration, 5)
+ self.loss_fraction = round(
+ float(self.loss_count) / self.transmit_count, 5)
+ # TODO: Do we want to store also the real time (duration + overhead)?
+
+ def __str__(self):
+ """Return string reporting input and loss fraction."""
+ return "d={dur!s},Tr={rate!s},Df={frac!s}".format(
+ dur=self.duration, rate=self.target_tr, frac=self.loss_fraction)
+
+ def __repr__(self):
+ """Return string evaluable as a constructor call."""
+ return ("ReceiveRateMeasurement(duration={dur!r},target_tr={rate!r}"
+ ",transmit_count={trans!r},loss_count={loss!r})".format(
+ dur=self.duration, rate=self.target_tr,
+ trans=self.transmit_count,
+ loss=self.loss_count))
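The derived quantities in a sketch (not part of the patch); the numbers are arbitrary:

    from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
        ReceiveRateMeasurement

    m = ReceiveRateMeasurement(10.0, 1000000.0, 9000000, 9000)
    print(m.receive_rate)    # 899100.0 pps actually received
    print(m.loss_fraction)   # 0.001 of transmitted packets lost
    print(m)                 # d=10.0,Tr=1000000.0,Df=0.001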
diff --git a/yardstick/network_services/traffic_profile/vpp_rfc2544.py b/yardstick/network_services/traffic_profile/vpp_rfc2544.py
index 0f8185f90..412e4e69a 100644
--- a/yardstick/network_services/traffic_profile/vpp_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/vpp_rfc2544.py
@@ -13,17 +13,19 @@
# limitations under the License.
import datetime
+import ipaddress
import logging
-from random import choice
-from string import ascii_letters
+import random
+import string
-from ipaddress import ip_address
from trex_stl_lib import api as Pkt
from trex_stl_lib import trex_stl_client
from trex_stl_lib import trex_stl_packet_builder_scapy
from trex_stl_lib import trex_stl_streams
from yardstick.common import constants
+from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \
+ MultipleLossRatioSearch
from yardstick.network_services.traffic_profile.rfc2544 import RFC2544Profile, \
PortPgIDMap
from yardstick.network_services.traffic_profile.trex_traffic_profile import IP, \
@@ -37,6 +39,10 @@ class VppRFC2544Profile(RFC2544Profile):
def __init__(self, traffic_generator):
super(VppRFC2544Profile, self).__init__(traffic_generator)
+ tp_cfg = traffic_generator["traffic_profile"]
+ self.number_of_intermediate_phases = tp_cfg.get("intermediate_phases",
+ 2)
+
self.duration = self.config.duration
self.precision = self.config.test_precision
self.lower_bound = self.config.lower_bound
@@ -85,7 +91,7 @@ class VppRFC2544Profile(RFC2544Profile):
def _gen_payload(length):
payload = ""
for _ in range(length):
- payload += choice(ascii_letters)
+ payload += random.choice(string.ascii_letters)
return payload
@@ -153,8 +159,9 @@ class VppRFC2544Profile(RFC2544Profile):
dst=dst_start_ip,
proto=outer_l3v4['proto'])
if self.flow > 1:
- dst_start_int = int(ip_address(str(dst_start_ip)))
- dst_end_ip_new = ip_address(dst_start_int + self.flow - 1)
+ dst_start_int = int(ipaddress.ip_address(str(dst_start_ip)))
+ dst_end_ip_new = ipaddress.ip_address(
+ dst_start_int + self.flow - 1)
# self._set_proto_addr(IP, SRC, outer_l3v4['srcip4'], outer_l3v4['count'])
self._set_proto_addr(IP, DST,
"{start_ip}-{end_ip}".format(
@@ -248,8 +255,19 @@ class VppRFC2544Profile(RFC2544Profile):
def binary_search_with_optimized(self, traffic_generator, duration,
timeout, test_data):
- # TODO Support FD.io Multiple Loss Ratio search (MLRsearch)
- pass
+ self.queue.cancel_join_thread()
+ algorithm = MultipleLossRatioSearch(
+ measurer=traffic_generator, latency=self.enable_latency,
+ pkt_size=self.pkt_size,
+ final_trial_duration=duration,
+ final_relative_width=self.step_interval / 100,
+ number_of_intermediate_phases=self.number_of_intermediate_phases,
+ initial_trial_duration=1,
+ timeout=timeout)
+ algorithm.init_generator(self.ports, self.port_pg_id, self.profiles,
+ test_data, self.queue)
+ return algorithm.narrow_down_ndr_and_pdr(10000, self.max_rate,
+ self.tolerance_high)
def binary_search(self, traffic_generator, duration, tolerance_value,
test_data):
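The only new user-facing input in this wiring is the optional intermediate_phases key read in __init__ above; a trivial sketch of the lookup (dict contents are placeholders, everything else in the profile is elided):

    tp_cfg = {"intermediate_phases": 3}            # from the traffic profile
    phases = tp_cfg.get("intermediate_phases", 2)  # falls back to 2 when unset
    assert phases == 3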
diff --git a/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py b/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py
index 75a8cce06..1961ac1b1 100644
--- a/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py
@@ -15,18 +15,46 @@
import logging
import re
import time
+from collections import Counter
+from enum import Enum
from yardstick.benchmark.contexts.base import Context
from yardstick.common.process import check_if_process_failed
from yardstick.network_services import constants
-from yardstick.network_services.helpers.cpu import CpuSysCores
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
from yardstick.network_services.vnf_generic.vnf.vpp_helpers import \
- VppSetupEnvHelper
+ VppSetupEnvHelper, VppConfigGenerator
LOG = logging.getLogger(__name__)
+class CryptoAlg(Enum):
+ """Encryption algorithms."""
+ AES_CBC_128 = ('aes-cbc-128', 'AES-CBC', 16)
+ AES_CBC_192 = ('aes-cbc-192', 'AES-CBC', 24)
+ AES_CBC_256 = ('aes-cbc-256', 'AES-CBC', 32)
+ AES_GCM_128 = ('aes-gcm-128', 'AES-GCM', 20)
+
+ def __init__(self, alg_name, scapy_name, key_len):
+ self.alg_name = alg_name
+ self.scapy_name = scapy_name
+ self.key_len = key_len
+
+
+class IntegAlg(Enum):
+ """Integrity algorithms."""
+ SHA1_96 = ('sha1-96', 'HMAC-SHA1-96', 20)
+ SHA_256_128 = ('sha-256-128', 'SHA2-256-128', 32)
+ SHA_384_192 = ('sha-384-192', 'SHA2-384-192', 48)
+ SHA_512_256 = ('sha-512-256', 'SHA2-512-256', 64)
+ AES_GCM_128 = ('aes-gcm-128', 'AES-GCM', 20)
+
+ def __init__(self, alg_name, scapy_name, key_len):
+ self.alg_name = alg_name
+ self.scapy_name = scapy_name
+ self.key_len = key_len
+
+
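Each enum member bundles the VPP algorithm name, the Scapy-side name, and the key length used when generating keys; a quick sketch (not part of the patch):

    from yardstick.network_services.vnf_generic.vnf.ipsec_vnf import (
        CryptoAlg, IntegAlg)

    assert CryptoAlg.AES_CBC_128.alg_name == 'aes-cbc-128'   # VPP CLI name
    assert CryptoAlg.AES_CBC_128.key_len == 16               # 128-bit key
    assert IntegAlg.SHA1_96.scapy_name == 'HMAC-SHA1-96'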
class VipsecApproxSetupEnvHelper(VppSetupEnvHelper):
DEFAULT_IPSEC_VNF_CFG = {
'crypto_type': 'SW_cryptodev',
@@ -99,17 +127,51 @@ class VipsecApproxSetupEnvHelper(VppSetupEnvHelper):
return flow_src_start_ip
def build_config(self):
- # TODO Implement later
- pass
+ vnf_cfg = self.scenario_helper.options.get('vnf_config',
+ self.DEFAULT_IPSEC_VNF_CFG)
+ rxq = vnf_cfg.get('rxq', 1)
+ phy_cores = vnf_cfg.get('worker_threads', 1)
+ # worker_config = vnf_cfg.get('worker_config', '1C/1T').split('/')[1].lower()
+
+ vpp_cfg = self.create_startup_configuration_of_vpp()
+ self.add_worker_threads_and_rxqueues(vpp_cfg, phy_cores, rxq)
+ self.add_pci_devices(vpp_cfg)
+
+ frame_size_cfg = self.scenario_helper.all_options.get('framesize', {})
+ uplink_cfg = frame_size_cfg.get('uplink', {})
+ downlink_cfg = frame_size_cfg.get('downlink', {})
+ framesize = min(self.calculate_frame_size(uplink_cfg),
+ self.calculate_frame_size(downlink_cfg))
+ if framesize < 1522:
+ vpp_cfg.add_dpdk_no_multi_seg()
+
+ crypto_algorithms = self._get_crypto_algorithms()
+ if crypto_algorithms == 'aes-gcm':
+ self.add_dpdk_cryptodev(vpp_cfg, 'aesni_gcm', phy_cores)
+ elif crypto_algorithms == 'cbc-sha1':
+ self.add_dpdk_cryptodev(vpp_cfg, 'aesni_mb', phy_cores)
+
+ vpp_cfg.add_dpdk_dev_default_rxd(2048)
+ vpp_cfg.add_dpdk_dev_default_txd(2048)
+ self.apply_config(vpp_cfg, True)
+ self.update_vpp_interface_data()
def setup_vnf_environment(self):
resource = super(VipsecApproxSetupEnvHelper,
self).setup_vnf_environment()
self.start_vpp_service()
-
- sys_cores = CpuSysCores(self.ssh_helper)
- self._update_vnfd_helper(sys_cores.get_cpu_layout())
+ # for QAT device DH895xCC, the number of VFs is required as 32
+ if self._get_crypto_type() == 'HW_cryptodev':
+ sriov_numvfs = self.get_sriov_numvfs(
+ self.find_encrypted_data_interface()["vpci"])
+ if sriov_numvfs != 32:
+ self.crypto_device_init(
+ self.find_encrypted_data_interface()["vpci"], 32)
+
+ self._update_vnfd_helper(self.sys_cores.get_cpu_layout())
+ self.update_vpp_interface_data()
+ self.iface_update_numa()
return resource
@@ -144,12 +206,90 @@ class VipsecApproxSetupEnvHelper(VppSetupEnvHelper):
return ipsec_created
def get_vpp_statistics(self):
- # TODO Implement later
- return None
+ cmd = "vppctl show int {intf}"
+ result = {}
+ for interface in self.vnfd_helper.interfaces:
+ iface_name = self.get_value_by_interface_key(
+ interface["virtual-interface"]["ifname"], "vpp_name")
+ command = cmd.format(intf=iface_name)
+ _, stdout, _ = self.ssh_helper.execute(command)
+ result.update(
+ self.parser_vpp_stats(interface["virtual-interface"]["ifname"],
+ iface_name, stdout))
+ self.ssh_helper.execute("vppctl clear interfaces")
+ return result
+
+ @staticmethod
+ def parser_vpp_stats(interface, iface_name, stats):
+ packets_in = 0
+ packets_fwd = 0
+ packets_dropped = 0
+ result = {}
+
+ entries = re.split(r"\n+", stats)
+ tmp = [re.split(r"\s\s+", entry, 5) for entry in entries]
+
+ for item in tmp:
+ if isinstance(item, list):
+ if item[0] == iface_name and len(item) >= 5:
+ if item[3] == 'rx packets':
+ packets_in = int(item[4])
+ elif item[4] == 'rx packets':
+ packets_in = int(item[5])
+ elif len(item) == 3:
+ if item[1] == 'tx packets':
+ packets_fwd = int(item[2])
+ elif item[1] == 'drops' or item[1] == 'rx-miss':
+ packets_dropped = int(item[2])
+ if packets_dropped == 0 and packets_in > 0 and packets_fwd > 0:
+ packets_dropped = abs(packets_fwd - packets_in)
+
+ result[interface] = {
+ 'packets_in': packets_in,
+ 'packets_fwd': packets_fwd,
+ 'packets_dropped': packets_dropped,
+ }
+
+ return result
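Since parser_vpp_stats is a pure function of its string input, it is easy to check against trimmed "vppctl show int" output. A sketch (not part of the patch; the column spacing is an assumption, the parser only requires runs of two or more spaces between fields):

    from yardstick.network_services.vnf_generic.vnf.ipsec_vnf import \
        VipsecApproxSetupEnvHelper

    out = ("              Name   Idx    State  Counter          Count\n"
           "xe0                  1      up     rx packets       2000\n"
           "                                   tx packets       1980\n"
           "                                   drops            20\n")
    # First argument is the key used in the result dict, second the VPP name.
    print(VipsecApproxSetupEnvHelper.parser_vpp_stats('xe0', 'xe0', out))
    # {'xe0': {'packets_in': 2000, 'packets_fwd': 1980, 'packets_dropped': 20}}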
def create_ipsec_tunnels(self):
- # TODO Implement later
- pass
+ self.initialize_ipsec()
+
+ # TODO generate the same key
+ crypto_algorithms = self._get_crypto_algorithms()
+ if crypto_algorithms == 'aes-gcm':
+ encr_alg = CryptoAlg.AES_GCM_128
+ auth_alg = IntegAlg.AES_GCM_128
+ encr_key = 'LNYZXMBQDKESNLREHJMS'
+ auth_key = 'SWGLDTYZSQKVBZZMPIEV'
+ elif crypto_algorithms == 'cbc-sha1':
+ encr_alg = CryptoAlg.AES_CBC_128
+ auth_alg = IntegAlg.SHA1_96
+ encr_key = 'IFEMSHYLCZIYFUTT'
+ auth_key = 'PEALEIPSCPTRHYJSDXLY'
+
+ self.execute_script("enable_dpdk_traces.vat", json_out=False)
+ self.execute_script("enable_vhost_user_traces.vat", json_out=False)
+ self.execute_script("enable_memif_traces.vat", json_out=False)
+
+ node_name = self.find_encrypted_data_interface()["node_name"]
+ n_tunnels = self._get_n_tunnels()
+ n_connections = self._get_n_connections()
+ flow_dst_start_ip = self._get_flow_dst_start_ip()
+ if node_name == "vnf__0":
+ self.vpp_create_ipsec_tunnels(
+ self.find_encrypted_data_interface()["local_ip"],
+ self.find_encrypted_data_interface()["peer_intf"]["local_ip"],
+ self.find_encrypted_data_interface()["ifname"],
+ n_tunnels, n_connections, encr_alg, encr_key, auth_alg,
+ auth_key, flow_dst_start_ip)
+ elif node_name == "vnf__1":
+ self.vpp_create_ipsec_tunnels(
+ self.find_encrypted_data_interface()["local_ip"],
+ self.find_encrypted_data_interface()["peer_intf"]["local_ip"],
+ self.find_encrypted_data_interface()["ifname"],
+ n_tunnels, n_connections, encr_alg, encr_key, auth_alg,
+ auth_key, flow_dst_start_ip, 20000, 10000)
def find_raw_data_interface(self):
try:
@@ -160,6 +300,145 @@ class VipsecApproxSetupEnvHelper(VppSetupEnvHelper):
def find_encrypted_data_interface(self):
return self.vnfd_helper.find_virtual_interface(vld_id="ciphertext")
+ def create_startup_configuration_of_vpp(self):
+ vpp_config_generator = VppConfigGenerator()
+ vpp_config_generator.add_unix_log()
+ vpp_config_generator.add_unix_cli_listen()
+ vpp_config_generator.add_unix_nodaemon()
+ vpp_config_generator.add_unix_coredump()
+ vpp_config_generator.add_dpdk_socketmem('1024,1024')
+ vpp_config_generator.add_dpdk_no_tx_checksum_offload()
+ vpp_config_generator.add_dpdk_log_level('debug')
+ for interface in self.vnfd_helper.interfaces:
+ vpp_config_generator.add_dpdk_uio_driver(
+ interface["virtual-interface"]["driver"])
+ vpp_config_generator.add_heapsize('4G')
+ # TODO Enable configuration depending on VPP version
+ vpp_config_generator.add_statseg_size('4G')
+ vpp_config_generator.add_plugin('disable', ['default'])
+ vpp_config_generator.add_plugin('enable', ['dpdk_plugin.so'])
+ vpp_config_generator.add_ip6_hash_buckets('2000000')
+ vpp_config_generator.add_ip6_heap_size('4G')
+ vpp_config_generator.add_ip_heap_size('4G')
+ return vpp_config_generator
+
+ def add_worker_threads_and_rxqueues(self, vpp_cfg, phy_cores,
+ rx_queues=None):
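+        """Pin VPP workers and RX queues to the NUMA node closest to the
+        test interfaces.
+
+        Worked example (assuming SMT is enabled and rx_queues is None):
+        phy_cores=2 gives 4 worker threads, 2 RX queues per device and
+        num-mbufs = 32768 * 2 = 65536.
+        """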
+ thr_count_int = phy_cores
+ cpu_count_int = phy_cores
+ num_mbufs_int = 32768
+
+ numa_list = []
+
+ if_list = [self.find_encrypted_data_interface()["ifname"],
+ self.find_raw_data_interface()["ifname"]]
+ for if_key in if_list:
+ try:
+ numa_list.append(
+ self.get_value_by_interface_key(if_key, 'numa_node'))
+ except KeyError:
+ pass
+ numa_cnt_mc = Counter(numa_list).most_common()
+
+ if numa_cnt_mc and numa_cnt_mc[0][0] is not None and \
+ numa_cnt_mc[0][0] != -1:
+ numa = numa_cnt_mc[0][0]
+ elif len(numa_cnt_mc) > 1 and numa_cnt_mc[0][0] == -1:
+ numa = numa_cnt_mc[1][0]
+ else:
+ numa = 0
+
+ try:
+ smt_used = self.sys_cores.is_smt_enabled()
+ except KeyError:
+ smt_used = False
+
+ cpu_main = self.sys_cores.cpu_list_per_node_str(numa, skip_cnt=1,
+ cpu_cnt=1)
+ cpu_wt = self.sys_cores.cpu_list_per_node_str(numa, skip_cnt=2,
+ cpu_cnt=cpu_count_int,
+ smt_used=smt_used)
+
+ if smt_used:
+ thr_count_int = 2 * cpu_count_int
+
+ if rx_queues is None:
+ rxq_count_int = int(thr_count_int / 2)
+ else:
+ rxq_count_int = rx_queues
+
+ if rxq_count_int == 0:
+ rxq_count_int = 1
+
+ num_mbufs_int = num_mbufs_int * rxq_count_int
+
+ vpp_cfg.add_cpu_main_core(cpu_main)
+ vpp_cfg.add_cpu_corelist_workers(cpu_wt)
+ vpp_cfg.add_dpdk_dev_default_rxq(rxq_count_int)
+ vpp_cfg.add_dpdk_num_mbufs(num_mbufs_int)
+
+ def add_pci_devices(self, vpp_cfg):
+ pci_devs = [self.find_encrypted_data_interface()["vpci"],
+ self.find_raw_data_interface()["vpci"]]
+ vpp_cfg.add_dpdk_dev(*pci_devs)
+
+    def add_dpdk_cryptodev(self, vpp_cfg, sw_pmd_type, count):
+        crypto_type = self._get_crypto_type()
+        smt_used = self.sys_cores.is_smt_enabled()
+        cryptodev = self.find_encrypted_data_interface()["vpci"]
+        socket_id = self.get_value_by_interface_key(
+            self.find_encrypted_data_interface()["ifname"], "numa_node")
+
+        # With SMT, every physical core carries two worker threads.
+        thr_count_int = count * 2 if smt_used else count
+        if crypto_type == 'HW_cryptodev':
+            vpp_cfg.add_dpdk_cryptodev(thr_count_int, cryptodev)
+        else:
+            vpp_cfg.add_dpdk_sw_cryptodev(sw_pmd_type, socket_id,
+                                          thr_count_int)
+
+ def initialize_ipsec(self):
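+        """Bring both test interfaces up, set MTU and IP addresses, add
+        static ARP entries for the peer interfaces and install the return
+        route for the raw traffic flow."""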
+ flow_src_start_ip = self._get_flow_src_start_ip()
+
+ self.set_interface_state(
+ self.find_encrypted_data_interface()["ifname"], 'up')
+ self.set_interface_state(self.find_raw_data_interface()["ifname"],
+ 'up')
+ self.vpp_interfaces_ready_wait()
+ self.vpp_set_interface_mtu(
+ self.find_encrypted_data_interface()["ifname"])
+ self.vpp_set_interface_mtu(self.find_raw_data_interface()["ifname"])
+ self.vpp_interfaces_ready_wait()
+
+ self.set_ip(self.find_encrypted_data_interface()["ifname"],
+ self.find_encrypted_data_interface()["local_ip"], 24)
+ self.set_ip(self.find_raw_data_interface()["ifname"],
+ self.find_raw_data_interface()["local_ip"],
+ 24)
+
+ self.add_arp_on_dut(self.find_encrypted_data_interface()["ifname"],
+ self.find_encrypted_data_interface()["peer_intf"][
+ "local_ip"],
+ self.find_encrypted_data_interface()["peer_intf"][
+ "local_mac"])
+ self.add_arp_on_dut(self.find_raw_data_interface()["ifname"],
+ self.find_raw_data_interface()["peer_intf"][
+ "local_ip"],
+ self.find_raw_data_interface()["peer_intf"][
+ "local_mac"])
+
+ self.vpp_route_add(flow_src_start_ip, 8,
+ self.find_raw_data_interface()["peer_intf"][
+ "local_ip"],
+ self.find_raw_data_interface()["ifname"])
+
class VipsecApproxVnf(SampleVNF):
""" This class handles vIPSEC VNF model-driver definitions """
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py
new file mode 100755
index 000000000..c6df9d04c
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+import socket
+import yaml
+import os
+
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.common import exceptions
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PktgenHelper(object):
+
+ RETRY_SECONDS = 0.5
+ RETRY_COUNT = 20
+ CONNECT_TIMEOUT = 5
+
+ def __init__(self, host, port=23000):
+ self.host = host
+ self.port = port
+ self.connected = False
+
+ def _connect(self):
+ self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ ret = True
+ try:
+ self._sock.settimeout(self.CONNECT_TIMEOUT)
+ self._sock.connect((self.host, self.port))
+ except (socket.gaierror, socket.error, socket.timeout):
+ self._sock.close()
+ ret = False
+
+ return ret
+
+ def connect(self):
+ if self.connected:
+ return True
+ LOG.info("Connecting to pktgen instance at %s...", self.host)
+ for idx in range(self.RETRY_COUNT):
+ self.connected = self._connect()
+ if self.connected:
+ return True
+            LOG.debug("Connection attempt %d: Unable to connect to %s, "
+                      "retrying in %s seconds",
+                      idx, self.host, self.RETRY_SECONDS)
+ time.sleep(self.RETRY_SECONDS)
+
+ LOG.error("Unable to connect to pktgen instance on %s !",
+ self.host)
+ return False
+
+ def send_command(self, command):
+ if not self.connected:
+ LOG.error("Pktgen socket is not connected")
+ return False
+
+ try:
+ self._sock.sendall((command + "\n").encode())
+ time.sleep(1)
+ except (socket.timeout, socket.error):
+ LOG.error("Error sending command '%s'", command)
+ return False
+
+ return True
+
+
+class VcmtsPktgenSetupEnvHelper(sample_vnf.SetupEnvHelper):
+
+ BASE_PARAMETERS = "export LUA_PATH=/vcmts/Pktgen.lua;"\
+ + "export CMK_PROC_FS=/host/proc;"
+
+ PORTS_COUNT = 8
+
+ def generate_pcap_filename(self, port_cfg):
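+        """Build the PCAP file name from the port configuration;
+        hypothetical values traffic_type='ipv4', num_subs='300' and
+        num_ofdm='4' give 'ipv4_300cms_4ofdm.pcap'."""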
+ return port_cfg['traffic_type'] + "_" + port_cfg['num_subs'] \
+ + "cms_" + port_cfg['num_ofdm'] + "ofdm.pcap"
+
+ def find_port_cfg(self, ports_cfg, port_name):
+ for port_cfg in ports_cfg:
+ if port_name in port_cfg:
+ return port_cfg
+ return None
+
+ def build_pktgen_parameters(self, pod_cfg):
+ ports_cfg = pod_cfg['ports']
+ port_cfg = list()
+
+ for i in range(self.PORTS_COUNT):
+ port_cfg.append(self.find_port_cfg(ports_cfg, 'port_' + str(i)))
+
+ pktgen_parameters = self.BASE_PARAMETERS + " " \
+ + " /pktgen-config/setup.sh " + pod_cfg['pktgen_id'] \
+ + " " + pod_cfg['num_ports']
+
+ for i in range(self.PORTS_COUNT):
+ pktgen_parameters += " " + port_cfg[i]['net_pktgen']
+
+ for i in range(self.PORTS_COUNT):
+ pktgen_parameters += " " + self.generate_pcap_filename(port_cfg[i])
+
+ return pktgen_parameters
+
+ def start_pktgen(self, pod_cfg):
+ self.ssh_helper.drop_connection()
+ cmd = self.build_pktgen_parameters(pod_cfg)
+ LOG.debug("Executing: '%s'", cmd)
+ self.ssh_helper.send_command(cmd)
+ LOG.info("Pktgen executed")
+
+ def setup_vnf_environment(self):
+ pass
+
+
+class VcmtsPktgen(sample_vnf.SampleVNFTrafficGen):
+
+ TG_NAME = 'VcmtsPktgen'
+ APP_NAME = 'VcmtsPktgen'
+ RUN_WAIT = 4
+ DEFAULT_RATE = 8.0
+
+ PKTGEN_BASE_PORT = 23000
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VcmtsPktgenSetupEnvHelper
+ super(VcmtsPktgen, self).__init__(
+ name, vnfd, setup_env_helper_type, resource_helper_type)
+
+ self.pktgen_address = vnfd['mgmt-interface']['ip']
+ LOG.info("Pktgen container '%s', IP: %s", name, self.pktgen_address)
+
+ def extract_pod_cfg(self, pktgen_pods_cfg, pktgen_id):
+ for pod_cfg in pktgen_pods_cfg:
+ if pod_cfg['pktgen_id'] == pktgen_id:
+ return pod_cfg
+ return None
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ super(VcmtsPktgen, self).instantiate(scenario_cfg, context_cfg)
+ self._start_server()
+ options = scenario_cfg.get('options', {})
+ self.pktgen_rate = options.get('pktgen_rate', self.DEFAULT_RATE)
+
+ try:
+ pktgen_values_filepath = options['pktgen_values']
+ except KeyError:
+            raise KeyError("Missing pktgen_values key in scenario options "
+                           "section of the task definition file")
+
+ if not os.path.isfile(pktgen_values_filepath):
+            raise RuntimeError("The pktgen_values file path provided "
+                               "does not exist")
+
+        # The yaml_loader.py (SafeLoader) underlying regex has an issue
+        # with reading PCI addresses (they get parsed as doubles), so the
+        # BaseLoader is used here.
+ with open(pktgen_values_filepath) as stream:
+ pktgen_values = yaml.load(stream, Loader=yaml.BaseLoader)
+
+        if pktgen_values is None:
+ raise RuntimeError("Error reading pktgen_values file provided (" +
+ pktgen_values_filepath + ")")
+
+ self.pktgen_id = int(options[self.name]['pktgen_id'])
+ self.resource_helper.pktgen_id = self.pktgen_id
+
+ self.pktgen_helper = PktgenHelper(self.pktgen_address,
+ self.PKTGEN_BASE_PORT + self.pktgen_id)
+
+ pktgen_pods_cfg = pktgen_values['topology']['pktgen_pods']
+
+ self.pod_cfg = self.extract_pod_cfg(pktgen_pods_cfg,
+ str(self.pktgen_id))
+
+        if self.pod_cfg is None:
+            raise KeyError("Pktgen with id " + str(self.pktgen_id) +
+                           " was not found")
+
+ self.setup_helper.start_pktgen(self.pod_cfg)
+
+ def run_traffic(self, traffic_profile):
+ if not self.pktgen_helper.connect():
+ raise exceptions.PktgenActionError(command="connect")
+ LOG.info("Connected to pktgen instance at %s", self.pktgen_address)
+
+ commands = []
+ for i in range(self.setup_helper.PORTS_COUNT):
+ commands.append('pktgen.set("' + str(i) + '", "rate", ' +
+ "%0.1f" % self.pktgen_rate + ');')
+
+ commands.append('pktgen.start("all");')
+
+ for command in commands:
+ if self.pktgen_helper.send_command(command):
+ LOG.debug("Command '%s' sent to pktgen", command)
+ LOG.info("Traffic started on %s...", self.name)
+ return True
diff --git a/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py
new file mode 100755
index 000000000..0b48ef4e9
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py
@@ -0,0 +1,273 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import yaml
+
+from influxdb import InfluxDBClient
+
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+from yardstick.network_services.utils import get_nsb_option
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InfluxDBHelper(object):
+
+ INITIAL_VALUE = 'now() - 1m'
+
+ def __init__(self, vcmts_influxdb_ip, vcmts_influxdb_port):
+ self._vcmts_influxdb_ip = vcmts_influxdb_ip
+ self._vcmts_influxdb_port = vcmts_influxdb_port
+ self._last_upstream_rx = self.INITIAL_VALUE
+ self._last_values_time = dict()
+
+ def start(self):
+ self._read_client = InfluxDBClient(host=self._vcmts_influxdb_ip,
+ port=self._vcmts_influxdb_port,
+ database='collectd')
+ self._write_client = InfluxDBClient(host=constants.INFLUXDB_IP,
+ port=constants.INFLUXDB_PORT,
+ database='collectd')
+
+ def _get_last_value_time(self, measurement):
+ if measurement in self._last_values_time:
+ return self._last_values_time[measurement]
+ return self.INITIAL_VALUE
+
+ def _set_last_value_time(self, measurement, time):
+ self._last_values_time[measurement] = "'" + time + "'"
+
+ def _query_measurement(self, measurement):
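+        """Read points newer than the last entry copied for measurement.
+
+        On the first call the generated query looks like, e.g. for
+        'cpu_value':
+
+            SELECT * FROM cpu_value WHERE time > now() - 1m ORDER BY time ASC;
+        """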
+ # There is a delay before influxdb flushes the data
+ query = "SELECT * FROM " + measurement + " WHERE time > " \
+ + self._get_last_value_time(measurement) \
+ + " ORDER BY time ASC;"
+ query_result = self._read_client.query(query)
+ if len(query_result.keys()) == 0:
+ return None
+ return query_result.get_points(measurement)
+
+    def _rw_measurement(self, measurement, columns):
+        query_result = self._query_measurement(measurement)
+        if query_result is None:
+ return
+
+ points_to_write = list()
+ for entry in query_result:
+ point = {
+ "measurement": measurement,
+ "tags": {
+ "type": entry['type'],
+ "host": entry['host']
+ },
+ "time": entry['time'],
+ "fields": {}
+ }
+
+ for column in columns:
+ if column == 'value':
+ point["fields"][column] = float(entry[column])
+ else:
+ point["fields"][column] = entry[column]
+
+ points_to_write.append(point)
+ self._set_last_value_time(measurement, entry['time'])
+
+ # Write the points to yardstick database
+ if self._write_client.write_points(points_to_write):
+ LOG.debug("%d new points written to '%s' measurement",
+ len(points_to_write), measurement)
+
+ def copy_kpi(self):
+        self._rw_measurement("cpu_value",
+                             ["instance", "type_instance", "value"])
+        self._rw_measurement("cpufreq_value", ["type_instance", "value"])
+        self._rw_measurement("downstream_rx", ["value"])
+        self._rw_measurement("downstream_tx", ["value"])
+        self._rw_measurement("downstream_value", ["value"])
+        self._rw_measurement("ds_per_cm_value", ["instance", "value"])
+        self._rw_measurement("intel_rdt_value",
+                             ["instance", "type_instance", "value"])
+        self._rw_measurement("turbostat_value",
+                             ["instance", "type_instance", "value"])
+        self._rw_measurement("upstream_rx", ["value"])
+        self._rw_measurement("upstream_tx", ["value"])
+        self._rw_measurement("upstream_value", ["value"])
+
+
+class VcmtsdSetupEnvHelper(SetupEnvHelper):
+
+ BASE_PARAMETERS = "export LD_LIBRARY_PATH=/opt/collectd/lib:;"\
+ + "export CMK_PROC_FS=/host/proc;"
+
+ def build_us_parameters(self, pod_cfg):
+ return self.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + pod_cfg['cpu_socket_id'] \
+ + " --pool=shared" \
+ + " /vcmts-config/run_upstream.sh " + pod_cfg['sg_id'] \
+ + " " + pod_cfg['ds_core_type'] \
+ + " " + pod_cfg['num_ofdm'] + "ofdm" \
+ + " " + pod_cfg['num_subs'] + "cm" \
+ + " " + pod_cfg['cm_crypto'] \
+ + " " + pod_cfg['qat'] \
+ + " " + pod_cfg['net_us'] \
+ + " " + pod_cfg['power_mgmt']
+
+ def build_ds_parameters(self, pod_cfg):
+ return self.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + pod_cfg['cpu_socket_id'] \
+ + " --pool=" + pod_cfg['ds_core_type'] \
+ + " /vcmts-config/run_downstream.sh " + pod_cfg['sg_id'] \
+ + " " + pod_cfg['ds_core_type'] \
+ + " " + pod_cfg['ds_core_pool_index'] \
+ + " " + pod_cfg['num_ofdm'] + "ofdm" \
+ + " " + pod_cfg['num_subs'] + "cm" \
+ + " " + pod_cfg['cm_crypto'] \
+ + " " + pod_cfg['qat'] \
+ + " " + pod_cfg['net_ds'] \
+ + " " + pod_cfg['power_mgmt']
+
+ def build_cmd(self, stream_dir, pod_cfg):
+ if stream_dir == 'ds':
+ return self.build_ds_parameters(pod_cfg)
+ else:
+ return self.build_us_parameters(pod_cfg)
+
+ def run_vcmtsd(self, stream_dir, pod_cfg):
+ cmd = self.build_cmd(stream_dir, pod_cfg)
+ LOG.debug("Executing %s", cmd)
+ self.ssh_helper.send_command(cmd)
+
+ def setup_vnf_environment(self):
+ pass
+
+
+class VcmtsVNF(GenericVNF):
+
+ RUN_WAIT = 4
+
+ def __init__(self, name, vnfd):
+ super(VcmtsVNF, self).__init__(name, vnfd)
+ self.name = name
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.scenario_helper = ScenarioHelper(self.name)
+ self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path)
+
+ self.setup_helper = VcmtsdSetupEnvHelper(self.vnfd_helper,
+ self.ssh_helper,
+ self.scenario_helper)
+
+ def extract_pod_cfg(self, vcmts_pods_cfg, sg_id):
+ for pod_cfg in vcmts_pods_cfg:
+ if pod_cfg['sg_id'] == sg_id:
+ return pod_cfg
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
+ self.scenario_helper.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ options = scenario_cfg.get('options', {})
+
+ try:
+ self.vcmts_influxdb_ip = options['vcmts_influxdb_ip']
+ self.vcmts_influxdb_port = options['vcmts_influxdb_port']
+ except KeyError:
+ raise KeyError("Missing destination InfluxDB details in scenario" \
+ " section of the task definition file")
+
+ try:
+ vcmtsd_values_filepath = options['vcmtsd_values']
+ except KeyError:
+            raise KeyError("Missing vcmtsd_values key in scenario options "
+                           "section of the task definition file")
+
+ if not os.path.isfile(vcmtsd_values_filepath):
+            raise RuntimeError("The vcmtsd_values file path provided "
+                               "does not exist")
+
+        # The yaml_loader.py (SafeLoader) underlying regex has an issue
+        # with reading PCI addresses (they get parsed as doubles), so the
+        # BaseLoader is used here.
+ with open(vcmtsd_values_filepath) as stream:
+ vcmtsd_values = yaml.load(stream, Loader=yaml.BaseLoader)
+
+        if vcmtsd_values is None:
+ raise RuntimeError("Error reading vcmtsd_values file provided (" +
+ vcmtsd_values_filepath + ")")
+
+ vnf_options = options.get(self.name, {})
+ sg_id = str(vnf_options['sg_id'])
+ stream_dir = vnf_options['stream_dir']
+
+ try:
+ vcmts_pods_cfg = vcmtsd_values['topology']['vcmts_pods']
+ except KeyError:
+ raise KeyError("Missing vcmts_pods key in the " \
+ "vcmtsd_values file provided")
+
+ pod_cfg = self.extract_pod_cfg(vcmts_pods_cfg, sg_id)
+        if pod_cfg is None:
+            raise exceptions.IncorrectConfig(
+                error_msg="Service group " + sg_id + " not found")
+
+ self.setup_helper.run_vcmtsd(stream_dir, pod_cfg)
+
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
+ def wait_for_instantiate(self):
+ pass
+
+ def terminate(self):
+ pass
+
+ def scale(self, flavor=""):
+ pass
+
+ def collect_kpi(self):
+ self.influxdb_helper.copy_kpi()
+ return {"n/a": "n/a"}
+
+ def start_collect(self):
+ self.influxdb_helper = InfluxDBHelper(self.vcmts_influxdb_ip,
+ self.vcmts_influxdb_port)
+ self.influxdb_helper.start()
+
+ def stop_collect(self):
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/vims_vnf.py b/yardstick/network_services/vnf_generic/vnf/vims_vnf.py
new file mode 100644
index 000000000..0e339b171
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vims_vnf.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+
+LOG = logging.getLogger(__name__)
+
+
+class VimsSetupEnvHelper(sample_vnf.SetupEnvHelper):
+
+ def setup_vnf_environment(self):
+ LOG.debug('VimsSetupEnvHelper:\n')
+
+
+class VimsResourceHelper(sample_vnf.ClientResourceHelper):
+ pass
+
+
+class VimsPcscfVnf(sample_vnf.SampleVNF):
+
+ APP_NAME = "VimsPcscf"
+ APP_WORD = "VimsPcscf"
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if resource_helper_type is None:
+ resource_helper_type = VimsResourceHelper
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VimsSetupEnvHelper
+ super(VimsPcscfVnf, self).__init__(name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+
+ def wait_for_instantiate(self):
+ pass
+
+ def _run(self):
+ pass
+
+ def start_collect(self):
+ # TODO
+ pass
+
+ def collect_kpi(self):
+ # TODO
+ pass
+
+
+class VimsHssVnf(sample_vnf.SampleVNF):
+
+ APP_NAME = "VimsHss"
+ APP_WORD = "VimsHss"
+ CMD = "sudo /media/generate_user.sh {} {} >> /dev/null 2>&1"
+
+ def __init__(self, name, vnfd, setup_env_helper_type=None,
+ resource_helper_type=None):
+ if resource_helper_type is None:
+ resource_helper_type = VimsResourceHelper
+ if setup_env_helper_type is None:
+ setup_env_helper_type = VimsSetupEnvHelper
+ super(VimsHssVnf, self).__init__(name, vnfd, setup_env_helper_type,
+ resource_helper_type)
+ self.start_user = 1
+ self.end_user = 10000
+ self.WAIT_TIME = 600
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ LOG.debug("scenario_cfg=%s\n", scenario_cfg)
+ self.start_user = scenario_cfg.get("options", {}).get("start_user", self.start_user)
+ self.end_user = scenario_cfg.get("options", {}).get("end_user", self.end_user)
+ # TODO
+ # Need to check HSS services are ready before generating user accounts
+        # For now, sleep for a user-configurable time to wait for the
+        # HSS services.
+        # Note: for heat, the waiting time is long (~ 600s)
+ self.WAIT_TIME = scenario_cfg.get("options", {}).get("wait_time", self.WAIT_TIME)
+ time.sleep(self.WAIT_TIME)
+ LOG.debug("Generate user accounts from %d to %d\n",
+ self.start_user, self.end_user)
+ cmd = self.CMD.format(self.start_user, self.end_user)
+ self.ssh_helper.execute(cmd, None, 3600, False)
+
+ def wait_for_instantiate(self):
+ pass
+
+ def start_collect(self):
+ # TODO
+ pass
+
+ def collect_kpi(self):
+ # TODO
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py b/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py
index 86c42ecfd..fe8e7b2ba 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py
@@ -12,25 +12,197 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import binascii
+import ipaddress
+import json
import logging
+import os
+import re
+import tempfile
+import time
+from collections import OrderedDict
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.helpers.cpu import CpuSysCores
from yardstick.network_services.vnf_generic.vnf.sample_vnf import \
DpdkVnfSetupEnvHelper
LOG = logging.getLogger(__name__)
+class VppConfigGenerator(object):
+ VPP_LOG_FILE = '/tmp/vpe.log'
+
+ def __init__(self):
+ self._nodeconfig = {}
+ self._vpp_config = ''
+
+ def add_config_item(self, config, value, path):
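+        """Set a (possibly nested) startup-config item, e.g.
+        add_config_item(cfg, '/tmp/vpe.log', ['unix', 'log']) results in
+        cfg == {'unix': {'log': '/tmp/vpe.log'}}."""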
+ if len(path) == 1:
+ config[path[0]] = value
+ return
+ if path[0] not in config:
+ config[path[0]] = {}
+ elif isinstance(config[path[0]], str):
+ config[path[0]] = {} if config[path[0]] == '' \
+ else {config[path[0]]: ''}
+ self.add_config_item(config[path[0]], value, path[1:])
+
+ def add_unix_log(self, value=None):
+ path = ['unix', 'log']
+ if value is None:
+ value = self.VPP_LOG_FILE
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_unix_cli_listen(self, value='/run/vpp/cli.sock'):
+ path = ['unix', 'cli-listen']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_unix_nodaemon(self):
+ path = ['unix', 'nodaemon']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_unix_coredump(self):
+ path = ['unix', 'full-coredump']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_dev(self, *devices):
+ for device in devices:
+ if VppConfigGenerator.pci_dev_check(device):
+ path = ['dpdk', 'dev {0}'.format(device)]
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_cryptodev(self, count, cryptodev):
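+        """Add cryptodev VF entries derived from the crypto PF address;
+        a hypothetical PF 0000:86:00.0 with count == 2 yields
+        'dev 0000:86:01.0' and 'dev 0000:86:01.1'."""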
+ for i in range(count):
+ cryptodev_config = 'dev {0}'.format(
+                re.sub(r'\d\.\d$', '1.' + str(i), cryptodev))
+ path = ['dpdk', cryptodev_config]
+ self.add_config_item(self._nodeconfig, '', path)
+ self.add_dpdk_uio_driver('igb_uio')
+
+ def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count):
+ for _ in range(count):
+ cryptodev_config = 'vdev cryptodev_{0}_pmd,socket_id={1}'. \
+ format(sw_pmd_type, str(socket_id))
+ path = ['dpdk', cryptodev_config]
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_dev_default_rxq(self, value):
+ path = ['dpdk', 'dev default', 'num-rx-queues']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_dev_default_rxd(self, value):
+ path = ['dpdk', 'dev default', 'num-rx-desc']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_dev_default_txd(self, value):
+ path = ['dpdk', 'dev default', 'num-tx-desc']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_log_level(self, value):
+ path = ['dpdk', 'log-level']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_socketmem(self, value):
+ path = ['dpdk', 'socket-mem']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_num_mbufs(self, value):
+ path = ['dpdk', 'num-mbufs']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_dpdk_uio_driver(self, value=None):
+ path = ['dpdk', 'uio-driver']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_cpu_main_core(self, value):
+ path = ['cpu', 'main-core']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_cpu_corelist_workers(self, value):
+ path = ['cpu', 'corelist-workers']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_heapsize(self, value):
+ path = ['heapsize']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ip6_hash_buckets(self, value):
+ path = ['ip6', 'hash-buckets']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ip6_heap_size(self, value):
+ path = ['ip6', 'heap-size']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_ip_heap_size(self, value):
+ path = ['ip', 'heap-size']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_statseg_size(self, value):
+ path = ['statseg', 'size']
+ self.add_config_item(self._nodeconfig, value, path)
+
+ def add_plugin(self, state, *plugins):
+ for plugin in plugins:
+ path = ['plugins', 'plugin {0}'.format(plugin), state]
+ self.add_config_item(self._nodeconfig, ' ', path)
+
+ def add_dpdk_no_multi_seg(self):
+ path = ['dpdk', 'no-multi-seg']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def add_dpdk_no_tx_checksum_offload(self):
+ path = ['dpdk', 'no-tx-checksum-offload']
+ self.add_config_item(self._nodeconfig, '', path)
+
+ def dump_config(self, obj=None, level=-1):
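+        """Render the accumulated items as startup.conf text; keys are
+        emitted in sorted order, e.g. {'heapsize': '4G',
+        'unix': {'log': '/tmp/vpe.log'}} dumps as:
+
+            heapsize 4G
+            unix
+            {
+              log /tmp/vpe.log
+            }
+        """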
+ if obj is None:
+ obj = self._nodeconfig
+ obj = OrderedDict(sorted(obj.items()))
+
+ indent = ' '
+ if level >= 0:
+ self._vpp_config += '{}{{\n'.format(level * indent)
+ if isinstance(obj, dict):
+ for key, val in obj.items():
+ if hasattr(val, '__iter__') and not isinstance(val, str):
+ self._vpp_config += '{}{}\n'.format((level + 1) * indent,
+ key)
+ self.dump_config(val, level + 1)
+ else:
+ self._vpp_config += '{}{} {}\n'.format(
+ (level + 1) * indent,
+ key, val)
+ if level >= 0:
+ self._vpp_config += '{}}}\n'.format(level * indent)
+
+ return self._vpp_config
+
+ @staticmethod
+ def pci_dev_check(pci_dev):
+ pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:"
+ "[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$")
+ if not pattern.match(pci_dev):
+ raise ValueError('PCI address {addr} is not in valid format '
+ 'xxxx:xx:xx.x'.format(addr=pci_dev))
+ return True
+
+
class VppSetupEnvHelper(DpdkVnfSetupEnvHelper):
APP_NAME = "vpp"
CFG_CONFIG = "/etc/vpp/startup.conf"
CFG_SCRIPT = ""
PIPELINE_COMMAND = ""
+ QAT_DRIVER = "qat_dh895xcc"
VNF_TYPE = "IPSEC"
VAT_BIN_NAME = 'vpp_api_test'
def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
super(VppSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper,
scenario_helper)
+ self.sys_cores = CpuSysCores(self.ssh_helper)
def kill_vnf(self):
ret_code, _, _ = \
@@ -74,3 +246,506 @@ class VppSetupEnvHelper(DpdkVnfSetupEnvHelper):
ifname=interface).get(key)
except (KeyError, ValueError):
return None
+
+ def crypto_device_init(self, pci_addr, numvfs):
+ # QAT device must be re-bound to kernel driver before initialization.
+ self.dpdk_bind_helper.load_dpdk_driver(self.QAT_DRIVER)
+
+ # Stop VPP to prevent deadlock.
+ self.kill_vnf()
+
+ current_driver = self.get_pci_dev_driver(pci_addr.replace(':', r'\:'))
+ if current_driver is not None:
+ self.pci_driver_unbind(pci_addr)
+
+ # Bind to kernel driver.
+ self.dpdk_bind_helper.bind(pci_addr, self.QAT_DRIVER.replace('qat_', ''))
+
+ # Initialize QAT VFs.
+ if numvfs > 0:
+ self.set_sriov_numvfs(pci_addr, numvfs)
+
+ def get_sriov_numvfs(self, pf_pci_addr):
+ command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'. \
+ format(pci=pf_pci_addr.replace(':', r'\:'))
+ _, stdout, _ = self.ssh_helper.execute(command)
+ try:
+ return int(stdout)
+ except ValueError:
+ LOG.debug('Reading sriov_numvfs info failed')
+ return 0
+
+ def set_sriov_numvfs(self, pf_pci_addr, numvfs=0):
+ command = "sh -c 'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'". \
+ format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
+ self.ssh_helper.execute(command)
+
+ def pci_driver_unbind(self, pci_addr):
+ command = "sh -c 'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'". \
+ format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
+ self.ssh_helper.execute(command)
+
+ def get_pci_dev_driver(self, pci_addr):
+ cmd = 'lspci -vmmks {0}'.format(pci_addr)
+ ret_code, stdout, _ = self.ssh_helper.execute(cmd)
+ if int(ret_code):
+ raise RuntimeError("'{0}' failed".format(cmd))
+ for line in stdout.splitlines():
+ if not line:
+ continue
+            try:
+                name, value = line.split("\t", 1)
+            except ValueError:
+                # A bare "Driver:" line (no tab-separated value) means no
+                # driver is bound to the device.
+                if line == "Driver:":
+                    return None
+                continue
+ if name == 'Driver:':
+ return value
+ return None
+
+ def vpp_create_ipsec_tunnels(self, if1_ip_addr, if2_ip_addr, if_name,
+ n_tunnels, n_connections, crypto_alg,
+ crypto_key, integ_alg, integ_key, addrs_ip,
+ spi_1=10000, spi_2=20000):
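+        """Create n_tunnels IPsec tunnel interfaces via VAT scripts and
+        add routes that spread n_connections destination addresses
+        (starting at addrs_ip) across the tunnels."""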
+ mask_length = 32
+ if n_connections <= n_tunnels:
+ count = 1
+ else:
+ count = int(n_connections / n_tunnels)
+ addr_ip_i = int(ipaddress.ip_address(str(addrs_ip)))
+ dst_start_ip = addr_ip_i
+
+ tmp_fd, tmp_path = tempfile.mkstemp()
+
+ vpp_ifname = self.get_value_by_interface_key(if_name, 'vpp_name')
+ ckey = binascii.hexlify(crypto_key.encode())
+ ikey = binascii.hexlify(integ_key.encode())
+
+ integ = ''
+ if crypto_alg.alg_name != 'aes-gcm-128':
+ integ = 'integ_alg {integ_alg} ' \
+ 'local_integ_key {local_integ_key} ' \
+ 'remote_integ_key {remote_integ_key} ' \
+ .format(integ_alg=integ_alg.alg_name,
+ local_integ_key=ikey,
+ remote_integ_key=ikey)
+ create_tunnels_cmds = 'ipsec_tunnel_if_add_del ' \
+ 'local_spi {local_spi} ' \
+ 'remote_spi {remote_spi} ' \
+ 'crypto_alg {crypto_alg} ' \
+ 'local_crypto_key {local_crypto_key} ' \
+ 'remote_crypto_key {remote_crypto_key} ' \
+ '{integ} ' \
+ 'local_ip {local_ip} ' \
+ 'remote_ip {remote_ip}\n'
+ start_tunnels_cmds = 'ip_add_del_route {raddr}/{mask} via {addr} ipsec{i}\n' \
+ 'exec set interface unnumbered ipsec{i} use {uifc}\n' \
+ 'sw_interface_set_flags ipsec{i} admin-up\n'
+
+ with os.fdopen(tmp_fd, 'w') as tmp_file:
+ for i in range(0, n_tunnels):
+ create_tunnel = create_tunnels_cmds.format(local_spi=spi_1 + i,
+ remote_spi=spi_2 + i,
+ crypto_alg=crypto_alg.alg_name,
+ local_crypto_key=ckey,
+ remote_crypto_key=ckey,
+ integ=integ,
+ local_ip=if1_ip_addr,
+ remote_ip=if2_ip_addr)
+ tmp_file.write(create_tunnel)
+ self.execute_script(tmp_path, json_out=False, copy_on_execute=True)
+ os.remove(tmp_path)
+
+ tmp_fd, tmp_path = tempfile.mkstemp()
+
+ with os.fdopen(tmp_fd, 'w') as tmp_file:
+ for i in range(0, n_tunnels):
+ if count > 1:
+ dst_start_ip = addr_ip_i + i * count
+ dst_end_ip = ipaddress.ip_address(dst_start_ip + count - 1)
+ ips = [ipaddress.ip_address(ip) for ip in
+ [str(ipaddress.ip_address(dst_start_ip)),
+ str(dst_end_ip)]]
+ lowest_ip, highest_ip = min(ips), max(ips)
+ mask_length = self.get_prefix_length(int(lowest_ip),
+ int(highest_ip),
+ lowest_ip.max_prefixlen)
+ # TODO check duplicate route for some IPs
+ elif count == 1:
+ dst_start_ip = addr_ip_i + i
+ start_tunnel = start_tunnels_cmds.format(
+ raddr=str(ipaddress.ip_address(dst_start_ip)),
+ mask=mask_length,
+ addr=if2_ip_addr,
+ i=i, count=count,
+ uifc=vpp_ifname)
+ tmp_file.write(start_tunnel)
+ # TODO add route for remain IPs
+
+ self.execute_script(tmp_path, json_out=False, copy_on_execute=True)
+ os.remove(tmp_path)
+
+ def apply_config(self, vpp_cfg, restart_vpp=True):
+ vpp_config = vpp_cfg.dump_config()
+ ret, _, _ = \
+ self.ssh_helper.execute('echo "{config}" | sudo tee {filename}'.
+ format(config=vpp_config,
+ filename=self.CFG_CONFIG))
+ if ret != 0:
+ raise RuntimeError('Writing config file failed')
+ if restart_vpp:
+ self.start_vpp_service()
+
+ def vpp_route_add(self, network, prefix_len, gateway=None, interface=None,
+ use_sw_index=True, resolve_attempts=10,
+ count=1, vrf=None, lookup_vrf=None, multipath=False,
+ weight=None, local=False):
+ if interface:
+ if use_sw_index:
+ int_cmd = ('sw_if_index {}'.format(
+ self.get_value_by_interface_key(interface,
+ 'vpp_sw_index')))
+ else:
+ int_cmd = interface
+ else:
+ int_cmd = ''
+
+ rap = 'resolve-attempts {}'.format(resolve_attempts) \
+ if resolve_attempts else ''
+
+ via = 'via {}'.format(gateway) if gateway else ''
+
+ cnt = 'count {}'.format(count) \
+ if count else ''
+
+ vrf = 'vrf {}'.format(vrf) if vrf else ''
+
+ lookup_vrf = 'lookup-in-vrf {}'.format(
+ lookup_vrf) if lookup_vrf else ''
+
+ multipath = 'multipath' if multipath else ''
+
+ weight = 'weight {}'.format(weight) if weight else ''
+
+ local = 'local' if local else ''
+
+ with VatTerminal(self.ssh_helper, json_param=False) as vat:
+ vat.vat_terminal_exec_cmd_from_template('add_route.vat',
+ network=network,
+ prefix_length=prefix_len,
+ via=via,
+ vrf=vrf,
+ interface=int_cmd,
+ resolve_attempts=rap,
+ count=cnt,
+ lookup_vrf=lookup_vrf,
+ multipath=multipath,
+ weight=weight,
+ local=local)
+
+ def add_arp_on_dut(self, iface_key, ip_address, mac_address):
+ with VatTerminal(self.ssh_helper) as vat:
+ return vat.vat_terminal_exec_cmd_from_template(
+ 'add_ip_neighbor.vat',
+ sw_if_index=self.get_value_by_interface_key(iface_key,
+ 'vpp_sw_index'),
+ ip_address=ip_address, mac_address=mac_address)
+
+ def set_ip(self, interface, address, prefix_length):
+ with VatTerminal(self.ssh_helper) as vat:
+ return vat.vat_terminal_exec_cmd_from_template(
+ 'add_ip_address.vat',
+ sw_if_index=self.get_value_by_interface_key(interface,
+ 'vpp_sw_index'),
+ address=address, prefix_length=prefix_length)
+
+ def set_interface_state(self, interface, state):
+ sw_if_index = self.get_value_by_interface_key(interface,
+ 'vpp_sw_index')
+
+ if state == 'up':
+ state = 'admin-up link-up'
+ elif state == 'down':
+ state = 'admin-down link-down'
+ else:
+ raise ValueError('Unexpected interface state: {}'.format(state))
+ with VatTerminal(self.ssh_helper) as vat:
+ return vat.vat_terminal_exec_cmd_from_template(
+ 'set_if_state.vat', sw_if_index=sw_if_index, state=state)
+
+ def vpp_set_interface_mtu(self, interface, mtu=9200):
+ sw_if_index = self.get_value_by_interface_key(interface,
+ 'vpp_sw_index')
+ if sw_if_index:
+ with VatTerminal(self.ssh_helper, json_param=False) as vat:
+ vat.vat_terminal_exec_cmd_from_template(
+ "hw_interface_set_mtu.vat", sw_if_index=sw_if_index,
+ mtu=mtu)
+
+ def vpp_interfaces_ready_wait(self, timeout=30):
+ if_ready = False
+ not_ready = []
+ start = time.time()
+ while not if_ready:
+ out = self.vpp_get_interface_data()
+ if time.time() - start > timeout:
+ for interface in out:
+ if interface.get('admin_up_down') == 1:
+ if interface.get('link_up_down') != 1:
+ LOG.debug('%s link-down',
+ interface.get('interface_name'))
+ raise RuntimeError('timeout, not up {0}'.format(not_ready))
+ not_ready = []
+ for interface in out:
+ if interface.get('admin_up_down') == 1:
+ if interface.get('link_up_down') != 1:
+ not_ready.append(interface.get('interface_name'))
+ if not not_ready:
+ if_ready = True
+ else:
+ LOG.debug('Interfaces still in link-down state: %s, '
+ 'waiting...', not_ready)
+ time.sleep(1)
+
+ def vpp_get_interface_data(self, interface=None):
+ with VatTerminal(self.ssh_helper) as vat:
+ response = vat.vat_terminal_exec_cmd_from_template(
+ "interface_dump.vat")
+ data = response[0]
+ if interface is not None:
+ if isinstance(interface, str):
+ param = "interface_name"
+ elif isinstance(interface, int):
+ param = "sw_if_index"
+ else:
+ raise TypeError
+ for data_if in data:
+ if data_if[param] == interface:
+ return data_if
+ return dict()
+ return data
+
+ def update_vpp_interface_data(self):
+ data = {}
+ interface_dump_json = self.execute_script_json_out(
+ "dump_interfaces.vat")
+ interface_list = json.loads(interface_dump_json)
+ for interface in self.vnfd_helper.interfaces:
+ if_mac = interface['virtual-interface']['local_mac']
+ interface_dict = VppSetupEnvHelper.get_vpp_interface_by_mac(
+ interface_list, if_mac)
+ if not interface_dict:
+ LOG.debug('Interface %s not found by MAC %s', interface,
+ if_mac)
+ continue
+ data[interface['virtual-interface']['ifname']] = {
+ 'vpp_name': interface_dict["interface_name"],
+ 'vpp_sw_index': interface_dict["sw_if_index"]
+ }
+ for iface_key, updated_vnfd in data.items():
+ self._update_vnfd_helper(updated_vnfd, iface_key)
+
+ def iface_update_numa(self):
+ iface_numa = {}
+ for interface in self.vnfd_helper.interfaces:
+ cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(
+ interface["virtual-interface"]["vpci"])
+ ret, out, _ = self.ssh_helper.execute(cmd)
+ if ret == 0:
+ try:
+ numa_node = int(out)
+ if numa_node < 0:
+                        # numa_node of -1 means "unknown"; on a
+                        # single-socket host (last CPU on node 0) pin the
+                        # interface to node 0.
+                        if self.vnfd_helper["cpuinfo"][-1][3] == 0:
+ iface_numa[
+ interface['virtual-interface']['ifname']] = {
+ 'numa_node': 0
+ }
+ else:
+ raise ValueError
+ else:
+ iface_numa[
+ interface['virtual-interface']['ifname']] = {
+ 'numa_node': numa_node
+ }
+ except ValueError:
+ LOG.debug(
+ 'Reading numa location failed for: %s',
+ interface["virtual-interface"]["vpci"])
+ for iface_key, updated_vnfd in iface_numa.items():
+ self._update_vnfd_helper(updated_vnfd, iface_key)
+
+ def execute_script(self, vat_name, json_out=True, copy_on_execute=False):
+ if copy_on_execute:
+ self.ssh_helper.put_file(vat_name, vat_name)
+ remote_file_path = vat_name
+ else:
+ vat_path = self.ssh_helper.join_bin_path("vpp", "templates")
+ remote_file_path = '{0}/{1}'.format(vat_path, vat_name)
+
+ cmd = "{vat_bin} {json} in {vat_path} script".format(
+ vat_bin=self.VAT_BIN_NAME,
+            json="json" if json_out else "",
+ vat_path=remote_file_path)
+
+ try:
+ return self.ssh_helper.execute(cmd=cmd)
+ except Exception:
+ raise RuntimeError("VAT script execution failed: {0}".format(cmd))
+
+ def execute_script_json_out(self, vat_name):
+ vat_path = self.ssh_helper.join_bin_path("vpp", "templates")
+ remote_file_path = '{0}/{1}'.format(vat_path, vat_name)
+
+ _, stdout, _ = self.execute_script(vat_name, json_out=True)
+ return self.cleanup_vat_json_output(stdout, vat_file=remote_file_path)
+
+ @staticmethod
+ def cleanup_vat_json_output(json_output, vat_file=None):
+ retval = json_output
+ clutter = ['vat#', 'dump_interface_table error: Misc',
+ 'dump_interface_table:6019: JSON output supported only ' \
+ 'for VPE API calls and dump_stats_table']
+ if vat_file:
+ clutter.append("{0}(2):".format(vat_file))
+ for garbage in clutter:
+ retval = retval.replace(garbage, '')
+ return retval.strip()
+
+ @staticmethod
+ def _convert_mac_to_number_list(mac_address):
+ list_mac = []
+ for num in mac_address.split(":"):
+ list_mac.append(int(num, 16))
+ return list_mac
+
+ @staticmethod
+ def get_vpp_interface_by_mac(interfaces_list, mac_address):
+ interface_dict = {}
+ list_mac_address = VppSetupEnvHelper._convert_mac_to_number_list(
+ mac_address)
+ LOG.debug("MAC address %s converted to list %s.", mac_address,
+ list_mac_address)
+ for interface in interfaces_list:
+ # TODO: create vat json integrity checking and move there
+ if "l2_address" not in interface:
+ raise KeyError(
+ "key l2_address not found in interface dict."
+ "Probably input list is not parsed from correct VAT "
+ "json output.")
+ if "l2_address_length" not in interface:
+ raise KeyError(
+ "key l2_address_length not found in interface "
+ "dict. Probably input list is not parsed from correct "
+ "VAT json output.")
+ mac_from_json = interface["l2_address"][:6]
+ if mac_from_json == list_mac_address:
+ if interface["l2_address_length"] != 6:
+ raise ValueError("l2_address_length value is not 6.")
+ interface_dict = interface
+ break
+ return interface_dict
+
+ @staticmethod
+ def get_prefix_length(number1, number2, bits):
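+        """Length of the common bit prefix of two numbers, e.g. for
+        10.0.0.0 and 10.0.0.3 as integers with bits=32 the result is
+        30."""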
+ for i in range(bits):
+ if number1 >> i == number2 >> i:
+ return bits - i
+ return 0
+
+
+class VatTerminal(object):
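+    """Interactive vpp_api_test (VAT) session over an SSH terminal.
+
+    Typical use, as in the helpers above:
+
+        with VatTerminal(ssh_helper) as vat:
+            vat.vat_terminal_exec_cmd_from_template('interface_dump.vat')
+    """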
+
+ __VAT_PROMPT = ("vat# ",)
+ __LINUX_PROMPT = (":~# ", ":~$ ", "~]$ ", "~]# ")
+
+ def __init__(self, ssh_helper, json_param=True):
+ json_text = ' json' if json_param else ''
+ self.json = json_param
+ self.ssh_helper = ssh_helper
+ EXEC_RETRY = 3
+
+ try:
+ self._tty = self.ssh_helper.interactive_terminal_open()
+ except Exception:
+ raise RuntimeError("Cannot open interactive terminal")
+
+ for _ in range(EXEC_RETRY):
+ try:
+ self.ssh_helper.interactive_terminal_exec_command(
+ self._tty,
+ 'sudo -S {0}{1}'.format(VppSetupEnvHelper.VAT_BIN_NAME,
+ json_text),
+ self.__VAT_PROMPT)
+ except exceptions.SSHTimeout:
+ continue
+ else:
+ break
+
+ self._exec_failure = False
+ self.vat_stdout = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.vat_terminal_close()
+
+ def vat_terminal_exec_cmd(self, cmd):
+ try:
+ out = self.ssh_helper.interactive_terminal_exec_command(self._tty,
+ cmd,
+ self.__VAT_PROMPT)
+ self.vat_stdout = out
+ except exceptions.SSHTimeout:
+ self._exec_failure = True
+ raise RuntimeError(
+ "VPP is not running on node. VAT command {0} execution failed".
+ format(cmd))
+ if self.json:
+ obj_start = out.find('{')
+ obj_end = out.rfind('}')
+ array_start = out.find('[')
+ array_end = out.rfind(']')
+
+ if obj_start == -1 and array_start == -1:
+ raise RuntimeError(
+ "VAT command {0}: no JSON data.".format(cmd))
+
+ if obj_start < array_start or array_start == -1:
+ start = obj_start
+ end = obj_end + 1
+ else:
+ start = array_start
+ end = array_end + 1
+ out = out[start:end]
+ json_out = json.loads(out)
+ return json_out
+ else:
+ return None
+
+ def vat_terminal_close(self):
+ if not self._exec_failure:
+ try:
+ self.ssh_helper.interactive_terminal_exec_command(self._tty,
+ 'quit',
+ self.__LINUX_PROMPT)
+ except exceptions.SSHTimeout:
+ raise RuntimeError("Failed to close VAT console")
+ try:
+ self.ssh_helper.interactive_terminal_close(self._tty)
+ except Exception:
+ raise RuntimeError("Cannot close interactive terminal")
+
+ def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args):
+ file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
+ 'yardstick/resources/templates/',
+ vat_template_file)
+ with open(file_path, 'r') as template_file:
+ cmd_template = template_file.readlines()
+ ret = []
+ for line_tmpl in cmd_template:
+ vat_cmd = line_tmpl.format(**args)
+ ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace('\n', '')))
+ return ret