diff options
Diffstat (limited to 'yardstick')
172 files changed, 28866 insertions, 3385 deletions
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py index c3c5451bd..917aa9c39 100644 --- a/yardstick/benchmark/contexts/heat.py +++ b/yardstick/benchmark/contexts/heat.py @@ -71,6 +71,7 @@ class HeatContext(Context): self.shade_client = None self.heat_timeout = None self.key_filename = None + self.yardstick_gen_key_file = True self.shade_client = None self.operator_client = None self.nodes = [] @@ -105,6 +106,14 @@ class HeatContext(Context): self.template_file = attrs.get("heat_template") + # try looking for external private key when using external heat template + if self.template_file is not None: + self.key_filename = attrs.get("key_filename", None) + if self.key_filename is not None: + # Disable key file generation if an external private key + # has been provided + self.yardstick_gen_key_file = False + self.shade_client = openstack_utils.get_shade_client() self.operator_client = openstack_utils.get_shade_operator_client() @@ -335,14 +344,16 @@ class HeatContext(Context): """deploys template into a stack using cloud""" LOG.info("Deploying context '%s' START", self.name) - self.key_filename = ''.join( - [consts.YARDSTICK_ROOT_PATH, - 'yardstick/resources/files/yardstick_key-', - self.name]) + # Check if there was no external private key provided + if self.key_filename is None: + self.key_filename = ''.join( + [consts.YARDSTICK_ROOT_PATH, + 'yardstick/resources/files/yardstick_key-', + self.name]) # Permissions may have changed since creation; this can be fixed. If we # overwrite the file, we lose future access to VMs using this key. 
# As long as the file exists, even if it is unreadable, keep it intact - if not os.path.exists(self.key_filename): + if self.yardstick_gen_key_file and not os.path.exists(self.key_filename): SSH.gen_keys(self.key_filename) heat_template = HeatTemplate( @@ -442,12 +453,14 @@ class HeatContext(Context): } def _delete_key_file(self): - try: - utils.remove_file(self.key_filename) - utils.remove_file(self.key_filename + ".pub") - except OSError: - LOG.exception("There was an error removing the key file %s", - self.key_filename) + # Only remove the key file if it has been generated by yardstick + if self.yardstick_gen_key_file: + try: + utils.remove_file(self.key_filename) + utils.remove_file(self.key_filename + ".pub") + except OSError: + LOG.exception("There was an error removing the key file %s", + self.key_filename) def undeploy(self): """undeploys stack from cloud""" @@ -496,6 +509,14 @@ class HeatContext(Context): server.private_ip = self.stack.outputs.get( attr_name.get("private_ip_attr", object()), None) + + # Try to find interfaces + for key, value in attr_name.get("interfaces", {}).items(): + value["local_ip"] = server.private_ip + for k in ["local_mac", "netmask", "gateway_ip"]: + # Keep explicit None or missing entry as is + value[k] = self.stack.outputs.get(value[k]) + server.interfaces.update({key: value}) else: try: server = self._server_map[attr_name] @@ -505,13 +526,29 @@ class HeatContext(Context): if server is None: return None - pkey = pkg_resources.resource_string( - 'yardstick.resources', - h_join('files/yardstick_key', self.name)).decode('utf-8') - + # Get the pkey + if self.yardstick_gen_key_file: + pkey = pkg_resources.resource_string( + 'yardstick.resources', + h_join('files/yardstick_key', self.name)).decode('utf-8') + key_filename = pkg_resources.resource_filename('yardstick.resources', + h_join('files/yardstick_key', self.name)) + else: + # make sure the file exists before attempting to open it + if not os.path.exists(self.key_filename): + 
LOG.error("The key_filename provided %s does not exist!", + self.key_filename) + else: + try: + pkey = open(self.key_filename, 'r').read().decode('utf-8') + key_filename = self.key_filename + except IOError: + LOG.error("The key_filename provided (%s) is unreadable.", + self.key_filename) result = { "user": server.context.user, "pkey": pkey, + "key_filename": key_filename, "private_ip": server.private_ip, "interfaces": server.interfaces, "routing_table": self.generate_routing_table(server), diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py index fa78fc1eb..a15426872 100644 --- a/yardstick/benchmark/contexts/standalone/model.py +++ b/yardstick/benchmark/contexts/standalone/model.py @@ -26,6 +26,7 @@ import xml.etree.ElementTree as ET from yardstick import ssh from yardstick.common import constants from yardstick.common import exceptions +from yardstick.common import utils as common_utils from yardstick.common import yaml_loader from yardstick.network_services.utils import PciAddress from yardstick.network_services.helpers.cpu import CpuSysCores @@ -45,7 +46,7 @@ VM_TEMPLATE = """ <vcpu cpuset='{cpuset}'>{vcpu}</vcpu> {cputune} <os> - <type arch="x86_64" machine="pc-i440fx-xenial">hvm</type> + <type arch="x86_64" machine="{machine}">hvm</type> <boot dev="hd" /> </os> <features> @@ -107,7 +108,7 @@ version: 2 ethernets: ens3: match: - mac_address: {mac_address} + macaddress: {mac_address} addresses: - {ip_address} EOF @@ -161,7 +162,8 @@ class Libvirt(object): return vm_pci @classmethod - def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str): + def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str, + queues): """Add a DPDK OVS 'interface' XML node in 'devices' node <devices> @@ -203,7 +205,7 @@ class Libvirt(object): model.set('type', 'virtio') driver = ET.SubElement(interface, 'driver') - driver.set('queues', '4') + driver.set('queues', str(queues)) host = 
ET.SubElement(driver, 'host') host.set('mrg_rxbuf', 'off') @@ -305,6 +307,7 @@ class Libvirt(object): cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket) cputune = extra_spec.get('cputune', '') + machine = extra_spec.get('machine_type', 'pc-i440fx-xenial') mac = StandaloneContextHelper.get_mac_address(0x00) image = cls.create_snapshot_qemu(connection, index, flavor.get("images", None)) @@ -315,7 +318,8 @@ class Libvirt(object): memory=memory, vcpu=vcpu, cpu=cpu, numa_cpus=numa_cpus, socket=socket, threads=threads, - vm_image=image, cpuset=cpuset, cputune=cputune) + vm_image=image, cpuset=cpuset, + machine=machine, cputune=cputune) # Add CD-ROM device vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml) @@ -551,6 +555,16 @@ class StandaloneContextHelper(object): ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node) if ip: node["ip"] = ip + client = ssh.SSH.from_node(node) + LOG.debug("OS version: %s", + common_utils.get_os_version(client)) + LOG.debug("Kernel version: %s", + common_utils.get_kernel_version(client)) + vnfs_data = common_utils.get_sample_vnf_info(client) + for vnf_name, vnf_data in vnfs_data.items(): + LOG.debug("VNF name: '%s', commit ID/branch: '%s'", + vnf_name, vnf_data["branch_commit"]) + LOG.debug("%s", vnf_data["md5_result"]) return nodes @classmethod @@ -564,12 +578,14 @@ class StandaloneContextHelper(object): key_filename = ''.join( [constants.YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-', - id_name]) + id_name, '-', vm_name]) ssh.SSH.gen_keys(key_filename) node['key_filename'] = key_filename # Update image with public key key_filename = node.get('key_filename') ip_netmask = "{0}/{1}".format(node.get('ip'), node.get('netmask')) + ip_netmask = "{0}/{1}".format(node.get('ip'), + IPNetwork(ip_netmask).prefixlen) Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename, mac, ip_netmask) return node diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py 
b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py index 73311f0c2..c6e19f614 100644 --- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py +++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py @@ -24,6 +24,7 @@ from yardstick.benchmark import contexts from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts.standalone import model from yardstick.common import exceptions +from yardstick.common import utils as common_utils from yardstick.network_services import utils from yardstick.network_services.utils import get_nsb_option @@ -46,7 +47,8 @@ class OvsDpdkContext(base.Context): '2.7.0': '16.11.1', '2.7.1': '16.11.2', '2.7.2': '16.11.3', - '2.8.0': '17.05.2' + '2.8.0': '17.05.2', + '2.8.1': '17.05.2' } DEFAULT_OVS = '2.6.0' @@ -72,6 +74,11 @@ class OvsDpdkContext(base.Context): self.wait_for_vswitchd = 10 super(OvsDpdkContext, self).__init__() + def get_dpdk_socket_mem_size(self, socket_id): + """Get the size of OvS DPDK socket memory (Mb)""" + ram = self.ovs_properties.get("ram", {}) + return ram.get('socket_%d' % (socket_id), 2048) + def init(self, attrs): """initializes itself from the supplied arguments""" super(OvsDpdkContext, self).init(attrs) @@ -132,9 +139,6 @@ class OvsDpdkContext(base.Context): if pmd_cpu_mask: pmd_mask = pmd_cpu_mask - socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048") - socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048") - ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . 
other_config:{1}" detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}" @@ -142,16 +146,23 @@ class OvsDpdkContext(base.Context): if lcore_mask: lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask) + max_idle = self.ovs_properties.get("max_idle", '') + if max_idle: + max_idle = ovs_other_config.format("", "max-idle=%s" % max_idle) + cmd_list = [ "mkdir -p /usr/local/var/run/openvswitch", "mkdir -p {}".format(os.path.dirname(log_path)), - "ovsdb-server --remote=punix:/{0}/{1} --pidfile --detach".format(vpath, - ovs_sock_path), + ("ovsdb-server --remote=punix:/{0}/{1} --remote=ptcp:6640" + " --pidfile --detach").format(vpath, ovs_sock_path), ovs_other_config.format("--no-wait ", "dpdk-init=true"), - ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)), + ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%d,%d'" % ( + self.get_dpdk_socket_mem_size(0), + self.get_dpdk_socket_mem_size(1))), lcore_mask, detach_cmd.format(vpath, ovs_sock_path, log_path), ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask), + max_idle, ] for cmd in cmd_list: @@ -161,13 +172,12 @@ class OvsDpdkContext(base.Context): def setup_ovs_bridge_add_flows(self): dpdk_args = "" - dpdk_list = [] vpath = self.ovs_properties.get("vpath", "/usr/local") version = self.ovs_properties.get('version', {}) ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')] ovs_add_port = ('ovs-vsctl add-port {br} {port} -- ' - 'set Interface {port} type={type_}{dpdk_args}') - ovs_add_queue = 'ovs-vsctl set Interface {port} options:n_rxq={queue}' + 'set Interface {port} type={type_}{dpdk_args}' + '{dpdk_rxq}{pmd_rx_aff}') chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*' cmd_list = [ @@ -176,27 +186,43 @@ class OvsDpdkContext(base.Context): 'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'. 
format(MAIN_BRIDGE) ] + dpdk_rxq = "" + queues = self.ovs_properties.get("queues") + if queues: + dpdk_rxq = " options:n_rxq={queue}".format(queue=queues) - ordered_network = collections.OrderedDict(self.networks) + # Sorting the array to make sure we execute dpdk0... in the order + ordered_network = collections.OrderedDict( + sorted(self.networks.items(), key=lambda t: t[1].get('port_num', 0))) + pmd_rx_aff_ports = self.ovs_properties.get("dpdk_pmd-rxq-affinity", {}) for index, vnf in enumerate(ordered_network.values()): if ovs_ver >= [2, 7, 0]: dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port") - dpdk_list.append(ovs_add_port.format( + affinity = pmd_rx_aff_ports.get(vnf.get("port_num", -1), "") + if affinity: + pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \ + '"{affinity}"'.format(affinity=affinity) + else: + pmd_rx_aff = "" + cmd_list.append(ovs_add_port.format( br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0), - type_='dpdk', dpdk_args=dpdk_args)) - dpdk_list.append(ovs_add_queue.format( - port='dpdk%s' % vnf.get("port_num", 0), - queue=self.ovs_properties.get("queues", 1))) - - # Sorting the array to make sure we execute dpdk0... in the order - list.sort(dpdk_list) - cmd_list.extend(dpdk_list) + type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq, + pmd_rx_aff=pmd_rx_aff)) # Need to do two for loop to maintain the dpdk/vhost ports. + pmd_rx_aff_ports = self.ovs_properties.get("vhost_pmd-rxq-affinity", + {}) for index, _ in enumerate(ordered_network): + affinity = pmd_rx_aff_ports.get(index) + if affinity: + pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \ + '"{affinity}"'.format(affinity=affinity) + else: + pmd_rx_aff = "" cmd_list.append(ovs_add_port.format( br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index, - type_='dpdkvhostuser', dpdk_args="")) + type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=dpdk_rxq, + pmd_rx_aff=pmd_rx_aff)) ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s". 
format(MAIN_BRIDGE)) @@ -236,7 +262,6 @@ class OvsDpdkContext(base.Context): def check_ovs_dpdk_env(self): self.cleanup_ovs_dpdk_env() - self._check_hugepages() version = self.ovs_properties.get("version", {}) ovs_ver = version.get("ovs", self.DEFAULT_OVS) @@ -376,6 +401,7 @@ class OvsDpdkContext(base.Context): def _enable_interfaces(self, index, vfs, xml_str): vpath = self.ovs_properties.get("vpath", "/usr/local") + queue = self.ovs_properties.get("queues", 1) vf = self.networks[vfs[0]] port_num = vf.get('port_num', 0) vpci = utils.PciAddress(vf['vpci'].strip()) @@ -384,13 +410,20 @@ class OvsDpdkContext(base.Context): vf['vpci'] = \ "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function) return model.Libvirt.add_ovs_interface( - vpath, port_num, vf['vpci'], vf['mac'], xml_str) + vpath, port_num, vf['vpci'], vf['mac'], xml_str, queue) def setup_ovs_dpdk_context(self): nodes = [] self.configure_nics_for_ovs_dpdk() + hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers) + common_utils.setup_hugepages(self.connection, (hp_total_mb + \ + self.get_dpdk_socket_mem_size(0) + \ + self.get_dpdk_socket_mem_size(1)) * 1024) + + self._check_hugepages() + for index, (key, vnf) in enumerate(collections.OrderedDict( self.servers).items()): cfg = '/tmp/vm_ovs_%d.xml' % index diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py index f1b67a2da..e037dd85a 100644 --- a/yardstick/benchmark/contexts/standalone/sriov.py +++ b/yardstick/benchmark/contexts/standalone/sriov.py @@ -21,6 +21,7 @@ from yardstick import ssh from yardstick.benchmark import contexts from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts.standalone import model +from yardstick.common import utils from yardstick.network_services.utils import get_nsb_option from yardstick.network_services.utils import PciAddress @@ -222,6 +223,9 @@ class SriovContext(base.Context): # 1 : modprobe host_driver with 
num_vfs self.configure_nics_for_sriov() + hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers) + utils.setup_hugepages(self.connection, hp_total_mb * 1024) + for index, (key, vnf) in enumerate(collections.OrderedDict( self.servers).items()): cfg = '/tmp/vm_sriov_%s.xml' % str(index) diff --git a/yardstick/benchmark/core/report.py b/yardstick/benchmark/core/report.py index 199602444..e5dc62050 100644 --- a/yardstick/benchmark/core/report.py +++ b/yardstick/benchmark/core/report.py @@ -1,7 +1,7 @@ -############################################################################# -# Copyright (c) 2017 Rajesh Kudaka +############################################################################## +# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com> +# Copyright (c) 2018-2019 Intel Corporation. # -# Author: Rajesh Kudaka 4k.rajesh@gmail.com # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at @@ -10,33 +10,79 @@ """ Handler for yardstick command 'report' """ -from __future__ import print_function - -from __future__ import absolute_import - -import ast import re +import six import uuid +import jinja2 from api.utils import influx - -from django.conf import settings -from django.template import Context -from django.template import Template - -from oslo_utils import encodeutils from oslo_utils import uuidutils from yardstick.common import constants as consts -from yardstick.common.html_template import template from yardstick.common.utils import cliargs -settings.configure() + +class JSTree(object): + """Data structure to parse data for use with the JS library jsTree""" + def __init__(self): + self._created_nodes = ['#'] + self.jstree_data = [] + + def _create_node(self, _id): + """Helper method for format_for_jstree to create each node. 
+ + Creates the node (and any required parents) and keeps track + of the created nodes. + + :param _id: (string) id of the node to be created + :return: None + """ + components = _id.split(".") + + if len(components) == 1: + text = components[0] + parent_id = "#" + else: + text = components[-1] + parent_id = ".".join(components[:-1]) + # make sure the parent has been created + if not parent_id in self._created_nodes: + self._create_node(parent_id) + + self.jstree_data.append({"id": _id, "text": text, "parent": parent_id}) + self._created_nodes.append(_id) + + def format_for_jstree(self, data): + """Format the data into the required format for jsTree. + + The data format expected is a list of metric names e.g.: + + ['tg__0.DropPackets', 'tg__0.LatencyAvg.5'] + + This data is converted into the format required for jsTree to group and + display the metrics in a hierarchial fashion, including creating a + number of parent nodes e.g.:: + + [{"id": "tg__0", "text": "tg__0", "parent": "#"}, + {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"}, + {"id": "tg__0.LatencyAvg", "text": "LatencyAvg", "parent": "tg__0"}, + {"id": "tg__0.LatencyAvg.5", "text": "5", "parent": "tg__0.LatencyAvg"},] + + :param data: (list) data to be converted + :return: list + """ + self._created_nodes = ['#'] + self.jstree_data = [] + + for metric in data: + self._create_node(metric) + + return self.jstree_data class Report(object): """Report commands. - Set of commands to manage benchmark tasks. + Set of commands to manage reports. 
""" def __init__(self): @@ -64,65 +110,280 @@ class Report(object): if query_exec: return query_exec else: - raise KeyError("Task ID or Test case not found..") + raise KeyError("Test case not found.") - def _get_tasks(self): - task_cmd = "select * from \"%s\" where task_id= '%s'" - task_query = task_cmd % (self.yaml_name, self.task_id) - query_exec = influx.query(task_query) + def _get_metrics(self): + metrics_cmd = "select * from \"%s\" where task_id = '%s'" + metrics_query = metrics_cmd % (self.yaml_name, self.task_id) + query_exec = influx.query(metrics_query) if query_exec: return query_exec else: - raise KeyError("Task ID or Test case not found..") + raise KeyError("Task ID or Test case not found.") + + def _get_task_start_time(self): + # The start time should come from the task or the metadata table. + # The first entry into influx for a task will be AFTER the first TC + # iteration + cmd = "select * from \"%s\" where task_id='%s' ORDER BY time ASC limit 1" + task_query = cmd % (self.yaml_name, self.task_id) + + query_exec = influx.query(task_query) + start_time = query_exec[0]['time'] + return start_time + + def _get_task_end_time(self): + # NOTE(elfoley): when using select first() and select last() for the + # DB query, the timestamp returned is 0, so later queries try to + # return metrics from 1970 + cmd = "select * from \"%s\" where task_id='%s' ORDER BY time DESC limit 1" + task_query = cmd % (self.yaml_name, self.task_id) + query_exec = influx.query(task_query) + end_time = query_exec[0]['time'] + return end_time + + def _get_baro_metrics(self): + start_time = self._get_task_start_time() + end_time = self._get_task_end_time() + metric_list = [ + "cpu_value", "cpufreq_value", "intel_pmu_value", + "virt_value", "memory_value"] + metrics = {} + times = [] + query_exec = {} + for metric in metric_list: + cmd = "select * from \"%s\" where time >= '%s' and time <= '%s'" + query = cmd % (metric, start_time, end_time) + query_exec[metric] = influx.query(query, 
db='collectd') + print("query_exec: {}".format(query_exec)) + + for metric in query_exec: + print("metric in query_exec: {}".format(metric)) + met_values = query_exec[metric] + print("met_values: {}".format(met_values)) + for x in met_values: + x['name'] = metric + metric_name = str('.'.join( + [x[f] for f in [ + 'host', 'name', 'type', 'type_instance', 'instance' + ] if x.get(f)])) + + if not metrics.get(metric_name): + metrics[metric_name] = {} + metric_time = self._get_trimmed_timestamp(x['time']) + times.append(metric_time) + time = metric_time + metrics[metric_name][time] = x['value'] + + times = sorted(list(set(times))) + + metrics['Timestamp'] = times + print("metrics: {}".format(metrics)) + return metrics + + def _get_trimmed_timestamp(self, metric_time, resolution=4): + if not isinstance(metric_time, str): + metric_time = metric_time.encode('utf8') # PY2: unicode to str + metric_time = metric_time[11:] # skip date, keep time + head, _, tail = metric_time.partition('.') # split HH:MM:SS & nsZ + metric_time = head + '.' 
+ tail[:resolution] # join HH:MM:SS & .us + return metric_time + + def _get_timestamps(self, metrics, resolution=6): + # Extract the timestamps from a list of metrics + timestamps = [] + for metric in metrics: + metric_time = self._get_trimmed_timestamp( + metric['time'], resolution) + timestamps.append(metric_time) # HH:MM:SS.micros + return timestamps + + def _format_datasets(self, metric_name, metrics): + values = [] + for metric in metrics: + val = metric.get(metric_name, None) + if val is None: + # keep explicit None or missing entry as is + pass + elif isinstance(val, (int, float)): + # keep plain int or float as is + pass + elif six.PY2 and isinstance(val, + long): # pylint: disable=undefined-variable + # PY2: long value would be rendered with trailing L, + # which JS does not support, so convert it to float + val = float(val) + elif isinstance(val, six.string_types): + s = val + if not isinstance(s, str): + s = s.encode('utf8') # PY2: unicode to str + try: + # convert until failure + val = s + val = float(s) + val = int(s) + if six.PY2 and isinstance(val, + long): # pylint: disable=undefined-variable + val = float(val) # PY2: long to float + except ValueError: + # value may have been converted to a number + pass + finally: + # if val was not converted into a num, then it must be + # text, which shouldn't end up in the report + if isinstance(val, six.string_types): + val = None + else: + raise ValueError("Cannot convert %r" % val) + values.append(val) + return values @cliargs("task_id", type=str, help=" task id", nargs=1) @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1) - def generate(self, args): - """Start report generation.""" + def _generate_common(self, args): + """Actions that are common to both report formats. + + Create the necessary data structure for rendering + the report templates. 
+ """ self._validate(args.yaml_name[0], args.task_id[0]) - self.db_fieldkeys = self._get_fieldkeys() + db_fieldkeys = self._get_fieldkeys() + # list of dicts of: + # - PY2: unicode key and unicode value + # - PY3: str key and str value - self.db_task = self._get_tasks() + db_metrics = self._get_metrics() + # list of dicts of: + # - PY2: unicode key and { None | unicode | float | long | int } value + # - PY3: str key and { None | str | float | int } value - field_keys = [] - temp_series = [] - table_vals = {} + # extract fieldKey entries, and convert them to str where needed + field_keys = [key if isinstance(key, str) # PY3: already str + else key.encode('utf8') # PY2: unicode to str + for key in + [field['fieldKey'] + for field in db_fieldkeys]] - field_keys = [encodeutils.to_utf8(field['fieldKey']) - for field in self.db_fieldkeys] + # extract timestamps + self.Timestamp = self._get_timestamps(db_metrics) + # prepare return values + datasets = [] + table_vals = {'Timestamp': self.Timestamp} + + # extract and convert field values for key in field_keys: - self.Timestamp = [] - series = {} - values = [] - for task in self.db_task: - task_time = encodeutils.to_utf8(task['time']) - if not isinstance(task_time, str): - task_time = str(task_time, 'utf8') - key = str(key, 'utf8') - task_time = task_time[11:] - head, _, tail = task_time.partition('.') - task_time = head + "." 
+ tail[:6] - self.Timestamp.append(task_time) - if task[key] is None: - values.append('') - elif isinstance(task[key], (int, float)) is True: - values.append(task[key]) - else: - values.append(ast.literal_eval(task[key])) - table_vals['Timestamp'] = self.Timestamp + values = self._format_datasets(key, db_metrics) + datasets.append({'label': key, 'data': values}) table_vals[key] = values - series['name'] = key - series['data'] = values - temp_series.append(series) - - Template_html = Template(template) - Context_html = Context({"series": temp_series, - "Timestamp": self.Timestamp, - "task_id": self.task_id, - "table": table_vals}) + + return datasets, table_vals + + @cliargs("task_id", type=str, help=" task id", nargs=1) + @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1) + def generate(self, args): + """Start report generation.""" + datasets, table_vals = self._generate_common(args) + + template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common" + template_environment = jinja2.Environment( + autoescape=False, + loader=jinja2.FileSystemLoader(template_dir)) + + context = { + "datasets": datasets, + "Timestamps": self.Timestamp, + "task_id": self.task_id, + "table": table_vals, + } + + template_html = template_environment.get_template("report.html.j2") + + with open(consts.DEFAULT_HTML_FILE, "w") as file_open: + file_open.write(template_html.render(context)) + + print("Report generated. 
View %s" % consts.DEFAULT_HTML_FILE) + + def _combine_times(self, *args): + times = [] + # Combines an arbitrary number of lists + [times.extend(x) for x in args] + times = list(set(times)) + times.sort() + return times + + def _combine_metrics(self, *args): + baro_data, baro_time, yard_data, yard_time = args + combo_time = self._combine_times(baro_time, yard_time) + + data = {} + [data.update(x) for x in (baro_data, yard_data)] + + table_data = {} + table_data['Timestamp'] = combo_time + combo = {} + keys = sorted(data.keys()) + for met_name in data: + dataset = [] + for point in data[met_name]: + dataset.append({'x': point, 'y': data[met_name][point]}) + # the metrics need to be ordered by time + combo[met_name] = sorted(dataset, key=lambda i: i['x']) + for met_name in data: + table_data[met_name] = [] + for t in combo_time: + table_data[met_name].append(data[met_name].get(t, '')) + return combo, keys, table_data + + @cliargs("task_id", type=str, help=" task id", nargs=1) + @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1) + def generate_nsb(self, args): + """Start NSB report generation.""" + _, report_data = self._generate_common(args) + report_time = report_data.pop('Timestamp') + report_meta = { + "testcase": self.yaml_name, + "task_id": self.task_id, + } + + yardstick_data = {} + for i, t in enumerate(report_time): + for m in report_data: + if not yardstick_data.get(m): + yardstick_data[m] = {} + yardstick_data[m][t] = report_data[m][i] + + baro_data = self._get_baro_metrics() + baro_timestamps = baro_data.pop('Timestamp') + + yard_timestamps = report_time + report_time = self._combine_times(yard_timestamps, baro_timestamps) + + combo_metrics, combo_keys, combo_table = self._combine_metrics( + baro_data, baro_timestamps, yardstick_data, yard_timestamps) + combo_time = self._combine_times(baro_timestamps, yard_timestamps) + combo_tree = JSTree().format_for_jstree(combo_keys) + + template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common" + 
template_environment = jinja2.Environment( + autoescape=False, + loader=jinja2.FileSystemLoader(template_dir), + lstrip_blocks=True) + + combo_data = combo_metrics + context = { + "report_meta": report_meta, + "report_data": combo_data, + "report_time": combo_time, + "report_keys": combo_keys, + "report_tree": combo_tree, + "table_data": combo_table, + } + + template_html = template_environment.get_template("nsb_report.html.j2") + with open(consts.DEFAULT_HTML_FILE, "w") as file_open: - file_open.write(Template_html.render(Context_html)) + file_open.write(template_html.render(context)) - print("Report generated. View /tmp/yardstick.htm") + print("Report generated. View %s" % consts.DEFAULT_HTML_FILE) diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py index 1dfd6c31e..bcca3558f 100644 --- a/yardstick/benchmark/core/task.py +++ b/yardstick/benchmark/core/task.py @@ -11,6 +11,7 @@ import sys import os from collections import OrderedDict +import six import yaml import atexit import ipaddress @@ -313,7 +314,7 @@ class Task(object): # pragma: no cover return {k: self._parse_options(v) for k, v in op.items()} elif isinstance(op, list): return [self._parse_options(v) for v in op] - elif isinstance(op, str): + elif isinstance(op, six.string_types): return self.outputs.get(op[1:]) if op.startswith('$') else op else: return op @@ -620,9 +621,19 @@ class TaskParser(object): # pragma: no cover scenario: nodes: - tg__0: tg_0.yardstick + tg__0: trafficgen_0.yardstick vnf__0: vnf_0.yardstick + scenario: + nodes: + tg__0: + name: trafficgen_0.yardstick + public_ip_attr: "server1_public_ip" + private_ip_attr: "server1_private_ip" + vnf__0: + name: vnf_0.yardstick + public_ip_attr: "server2_public_ip" + private_ip_attr: "server2_private_ip" NOTE: in Kubernetes context, the separator character between the server name and the context name is "-": scenario: @@ -654,7 +665,15 @@ class TaskParser(object): # pragma: no cover scenario['targets'][idx] = 
class ScenarioOutput(dict):
    """Dict-like accumulator for a scenario iteration's result data.

    Keyword arguments given at construction (e.g. ``sequence``,
    ``errors``) become instance attributes; their *current* values are
    re-read via ``getattr`` and attached to every record pushed onto
    the result queue, so callers may update them between pushes.
    """

    # Maximum seconds to block on queue.put() so a full queue cannot
    # hang the worker forever.
    QUEUE_PUT_TIMEOUT = 10

    def __init__(self, queue, **kwargs):
        super(ScenarioOutput, self).__init__()
        self._queue = queue
        # Remember which extra fields to attach on every push().
        self.result_ext = dict(kwargs)
        for name, value in kwargs.items():
            setattr(self, name, value)

    def push(self, data=None, add_timestamp=True):
        """Send one result record to the queue.

        :param data: payload dict; defaults to a snapshot of self
        :param add_timestamp: when True, wrap the payload as
            ``{'timestamp': ..., 'data': payload}``
        """
        payload = dict(self) if data is None else data

        if add_timestamp:
            record = {'timestamp': time.time(), 'data': payload}
        else:
            record = payload

        # Attach the current value of every registered extra field.
        for name in self.result_ext:
            record[name] = getattr(self, name)

        self._queue.put(record, True, self.QUEUE_PUT_TIMEOUT)
DispatcherBase.get(self.config['output_config']) dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb')) dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id) - - -class RunnerProducer(producer.MessagingProducer): - """Class implementing the message producer for runners""" - - def __init__(self, _id): - super(RunnerProducer, self).__init__(messaging.TOPIC_RUNNER, _id=_id) - - def start_iteration(self, version=1, data=None): - data = {} if not data else data - self.send_message( - messaging.RUNNER_METHOD_START_ITERATION, - payloads.RunnerPayload(version=version, data=data)) - - def stop_iteration(self, version=1, data=None): - data = {} if not data else data - self.send_message( - messaging.RUNNER_METHOD_STOP_ITERATION, - payloads.RunnerPayload(version=version, data=data)) diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py index 14fd8bb47..55c3690fd 100644 --- a/yardstick/benchmark/runners/duration.py +++ b/yardstick/benchmark/runners/duration.py @@ -106,7 +106,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg, sequence += 1 - if (errors and sla_action is None) or time.time() > timeout or aborted.is_set(): + if ((errors and sla_action is None) or time.time() > timeout + or aborted.is_set() or benchmark.is_ended()): LOG.info("Worker END") break diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py index 4c88f3671..15dad2cd5 100644 --- a/yardstick/benchmark/runners/iteration.py +++ b/yardstick/benchmark/runners/iteration.py @@ -23,7 +23,6 @@ from __future__ import absolute_import import logging import multiprocessing -import time import traceback import os @@ -40,8 +39,6 @@ QUEUE_PUT_TIMEOUT = 10 def _worker_process(queue, cls, method_name, scenario_cfg, context_cfg, aborted, output_queue): - sequence = 1 - runner_cfg = scenario_cfg['runner'] interval = runner_cfg.get("interval", 1) @@ -53,6 +50,7 @@ def 
_worker_process(queue, cls, method_name, scenario_cfg, runner_cfg['runner_id'] = os.getpid() + scenario_output = base.ScenarioOutput(queue, sequence=1, errors="") benchmark = cls(scenario_cfg, context_cfg) if "setup" in run_step: benchmark.setup() @@ -67,22 +65,21 @@ def _worker_process(queue, cls, method_name, scenario_cfg, LOG.debug("runner=%(runner)s seq=%(sequence)s START", {"runner": runner_cfg["runner_id"], - "sequence": sequence}) - - data = {} - errors = "" + "sequence": scenario_output.sequence}) + scenario_output.clear() + scenario_output.errors = "" benchmark.pre_run_wait_time(interval) try: - result = method(data) + result = method(scenario_output) except y_exc.SLAValidationError as error: # SLA validation failed in scenario, determine what to do now if sla_action == "assert": raise elif sla_action == "monitor": LOG.warning("SLA validation failed: %s", error.args) - errors = error.args + scenario_output.errors = error.args elif sla_action == "rate-control": try: scenario_cfg['options']['rate'] @@ -91,11 +88,12 @@ def _worker_process(queue, cls, method_name, scenario_cfg, scenario_cfg['options']['rate'] = 100 scenario_cfg['options']['rate'] -= delta - sequence = 1 + scenario_output.sequence = 1 continue except Exception: # pylint: disable=broad-except - errors = traceback.format_exc() + scenario_output.errors = traceback.format_exc() LOG.exception("") + raise else: if result: # add timeout for put so we don't block test @@ -104,23 +102,17 @@ def _worker_process(queue, cls, method_name, scenario_cfg, benchmark.post_run_wait_time(interval) - benchmark_output = { - 'timestamp': time.time(), - 'sequence': sequence, - 'data': data, - 'errors': errors - } - - queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT) + if scenario_output: + scenario_output.push() LOG.debug("runner=%(runner)s seq=%(sequence)s END", {"runner": runner_cfg["runner_id"], - "sequence": sequence}) + "sequence": scenario_output.sequence}) - sequence += 1 + scenario_output.sequence += 1 - if 
(errors and sla_action is None) or \ - (sequence > iterations or aborted.is_set()): + if (scenario_output.errors and sla_action is None) or \ + (scenario_output.sequence > iterations or aborted.is_set()): LOG.info("worker END") break if "teardown" in run_step: diff --git a/yardstick/benchmark/runners/iteration_ipc.py b/yardstick/benchmark/runners/iteration_ipc.py deleted file mode 100644 index a0335fdc7..000000000 --- a/yardstick/benchmark/runners/iteration_ipc.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2018: Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A runner that runs a configurable number of times before it returns. Each - iteration has a configurable timeout. The loop control depends on the - feedback received from the running VNFs. The context PIDs from the VNFs - to listen the messages from are given in the scenario "setup" method. 
-""" - -import logging -import multiprocessing -import time -import traceback - -import os - -from yardstick.benchmark.runners import base as base_runner -from yardstick.common import exceptions -from yardstick.common import messaging -from yardstick.common import utils -from yardstick.common.messaging import consumer -from yardstick.common.messaging import payloads - - -LOG = logging.getLogger(__name__) - -QUEUE_PUT_TIMEOUT = 10 -ITERATION_TIMEOUT = 180 - - -class RunnerIterationIPCEndpoint(consumer.NotificationHandler): - """Endpoint class for ``RunnerIterationIPCConsumer``""" - - def tg_method_started(self, ctxt, **kwargs): - if ctxt['id'] in self._ctx_ids: - self._queue.put( - {'id': ctxt['id'], - 'action': messaging.TG_METHOD_STARTED, - 'payload': payloads.TrafficGeneratorPayload.dict_to_obj( - kwargs)}, - QUEUE_PUT_TIMEOUT) - - def tg_method_finished(self, ctxt, **kwargs): - if ctxt['id'] in self._ctx_ids: - self._queue.put( - {'id': ctxt['id'], - 'action': messaging.TG_METHOD_FINISHED, - 'payload': payloads.TrafficGeneratorPayload.dict_to_obj( - kwargs)}) - - def tg_method_iteration(self, ctxt, **kwargs): - if ctxt['id'] in self._ctx_ids: - self._queue.put( - {'id': ctxt['id'], - 'action': messaging.TG_METHOD_ITERATION, - 'payload': payloads.TrafficGeneratorPayload.dict_to_obj( - kwargs)}) - - -class RunnerIterationIPCConsumer(consumer.MessagingConsumer): - """MQ consumer for "IterationIPC" runner""" - - def __init__(self, _id, ctx_ids): - self._id = _id - self._queue = multiprocessing.Queue() - endpoints = [RunnerIterationIPCEndpoint(_id, ctx_ids, self._queue)] - super(RunnerIterationIPCConsumer, self).__init__( - messaging.TOPIC_TG, ctx_ids, endpoints) - self._kpi_per_id = {ctx: [] for ctx in ctx_ids} - self.iteration_index = None - - def is_all_kpis_received_in_iteration(self): - """Check if all producers registered have sent the ITERATION msg - - During the present iteration, all producers (traffic generators) must - start and finish the traffic 
injection, and at the end of the traffic - injection a TG_METHOD_ITERATION must be sent. This function will check - all KPIs in the present iteration are received. E.g.: - self.iteration_index = 2 - - self._kpi_per_id = { - 'ctx1': [kpi0, kpi1, kpi2], - 'ctx2': [kpi0, kpi1]} --> return False - - self._kpi_per_id = { - 'ctx1': [kpi0, kpi1, kpi2], - 'ctx2': [kpi0, kpi1, kpi2]} --> return True - """ - while not self._queue.empty(): - msg = self._queue.get(True, 1) - if msg['action'] == messaging.TG_METHOD_ITERATION: - id_iter_list = self._kpi_per_id[msg['id']] - id_iter_list.append(msg['payload'].kpi) - - return all(len(id_iter_list) == self.iteration_index - for id_iter_list in self._kpi_per_id.values()) - - -def _worker_process(queue, cls, method_name, scenario_cfg, - context_cfg, aborted, output_queue): # pragma: no cover - runner_cfg = scenario_cfg['runner'] - - timeout = runner_cfg.get('timeout', ITERATION_TIMEOUT) - iterations = runner_cfg.get('iterations', 1) - run_step = runner_cfg.get('run_step', 'setup,run,teardown') - LOG.info('Worker START. 
Iterations %d times, class %s', iterations, cls) - - runner_cfg['runner_id'] = os.getpid() - - benchmark = cls(scenario_cfg, context_cfg) - method = getattr(benchmark, method_name) - - if 'setup' not in run_step: - raise exceptions.RunnerIterationIPCSetupActionNeeded() - benchmark.setup() - producer_ctxs = benchmark.get_mq_ids() - if not producer_ctxs: - raise exceptions.RunnerIterationIPCNoCtxs() - - mq_consumer = RunnerIterationIPCConsumer(os.getpid(), producer_ctxs) - mq_consumer.start_rpc_server() - mq_producer = base_runner.RunnerProducer(scenario_cfg['task_id']) - - iteration_index = 1 - while 'run' in run_step: - LOG.debug('runner=%(runner)s seq=%(sequence)s START', - {'runner': runner_cfg['runner_id'], - 'sequence': iteration_index}) - data = {} - result = None - errors = '' - mq_consumer.iteration_index = iteration_index - mq_producer.start_iteration() - - try: - utils.wait_until_true( - mq_consumer.is_all_kpis_received_in_iteration, - timeout=timeout, sleep=2) - result = method(data) - except Exception: # pylint: disable=broad-except - errors = traceback.format_exc() - LOG.exception(errors) - - mq_producer.stop_iteration() - - if result: - output_queue.put(result, True, QUEUE_PUT_TIMEOUT) - benchmark_output = {'timestamp': time.time(), - 'sequence': iteration_index, - 'data': data, - 'errors': errors} - queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT) - - LOG.debug('runner=%(runner)s seq=%(sequence)s END', - {'runner': runner_cfg['runner_id'], - 'sequence': iteration_index}) - - iteration_index += 1 - if iteration_index > iterations or aborted.is_set(): - LOG.info('"IterationIPC" worker END') - break - - if 'teardown' in run_step: - try: - benchmark.teardown() - except Exception: - LOG.exception('Exception during teardown process') - mq_consumer.stop_rpc_server() - raise SystemExit(1) - - LOG.debug('Data queue size = %s', queue.qsize()) - LOG.debug('Output queue size = %s', output_queue.qsize()) - mq_consumer.stop_rpc_server() - - -class 
IterationIPCRunner(base_runner.Runner): - """Run a scenario for a configurable number of times. - - Each iteration has a configurable timeout. The loop control depends on the - feedback received from the running VNFs. The context PIDs from the VNFs to - listen the messages from are given in the scenario "setup" method. - """ - __execution_type__ = 'IterationIPC' - - def _run_benchmark(self, cls, method, scenario_cfg, context_cfg): - name = '{}-{}-{}'.format( - self.__execution_type__, scenario_cfg.get('type'), os.getpid()) - self.process = multiprocessing.Process( - name=name, - target=_worker_process, - args=(self.result_queue, cls, method, scenario_cfg, - context_cfg, self.aborted, self.output_queue)) - self.process.start() diff --git a/yardstick/benchmark/runners/proxduration.py b/yardstick/benchmark/runners/proxduration.py index 61a468fd3..e217904b9 100644 --- a/yardstick/benchmark/runners/proxduration.py +++ b/yardstick/benchmark/runners/proxduration.py @@ -112,7 +112,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg, sequence += 1 - if (errors and sla_action is None) or time.time() > timeout or aborted.is_set(): + if ((errors and sla_action is None) or time.time() > timeout + or aborted.is_set() or benchmark.is_ended()): LOG.info("Worker END") break diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py index 0148a45b2..58ffddd22 100644 --- a/yardstick/benchmark/runners/sequence.py +++ b/yardstick/benchmark/runners/sequence.py @@ -38,8 +38,6 @@ LOG = logging.getLogger(__name__) def _worker_process(queue, cls, method_name, scenario_cfg, context_cfg, aborted, output_queue): - sequence = 1 - runner_cfg = scenario_cfg['runner'] interval = runner_cfg.get("interval", 1) @@ -56,6 +54,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg, LOG.info("worker START, sequence_values(%s, %s), class %s", arg_name, sequence_values, cls) + scenario_output = base.ScenarioOutput(queue, sequence=1, errors="") 
benchmark = cls(scenario_cfg, context_cfg) benchmark.setup() method = getattr(benchmark, method_name) @@ -68,22 +67,23 @@ def _worker_process(queue, cls, method_name, scenario_cfg, options[arg_name] = value LOG.debug("runner=%(runner)s seq=%(sequence)s START", - {"runner": runner_cfg["runner_id"], "sequence": sequence}) + {"runner": runner_cfg["runner_id"], + "sequence": scenario_output.sequence}) - data = {} - errors = "" + scenario_output.clear() + scenario_output.errors = "" try: - result = method(data) + result = method(scenario_output) except y_exc.SLAValidationError as error: # SLA validation failed in scenario, determine what to do now if sla_action == "assert": raise elif sla_action == "monitor": LOG.warning("SLA validation failed: %s", error.args) - errors = error.args + scenario_output.errors = error.args except Exception as e: # pylint: disable=broad-except - errors = traceback.format_exc() + scenario_output.errors = traceback.format_exc() LOG.exception(e) else: if result: @@ -91,21 +91,16 @@ def _worker_process(queue, cls, method_name, scenario_cfg, time.sleep(interval) - benchmark_output = { - 'timestamp': time.time(), - 'sequence': sequence, - 'data': data, - 'errors': errors - } - - queue.put(benchmark_output) + if scenario_output: + scenario_output.push() LOG.debug("runner=%(runner)s seq=%(sequence)s END", - {"runner": runner_cfg["runner_id"], "sequence": sequence}) + {"runner": runner_cfg["runner_id"], + "sequence": scenario_output.sequence}) - sequence += 1 + scenario_output.sequence += 1 - if (errors and sla_action is None) or aborted.is_set(): + if (scenario_output.errors and sla_action is None) or aborted.is_set(): break try: diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash index d34ce9338..cda469cf9 100755 --- a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash +++ 
b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash @@ -16,7 +16,7 @@ set -e process_name=$1 if [ "$process_name" = "keystone" ]; then - for pid in $(ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}'); \ + for pid in $(ps aux | grep "keystone" | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}'); \ do kill -9 "${pid}" done @@ -26,7 +26,7 @@ elif [ "$process_name" = "haproxy" ]; then kill -9 "${pid}" done else - for pid in $(pgrep -fa [^-_a-zA-Z0-9]${process_name} | grep -iv heartbeat | awk '{print $1}'); + for pid in $(pgrep -fa [^-_a-zA-Z0-9]${process_name} | awk '{print $1}'); do kill -9 "${pid}" done diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py index 971bae1e9..8f1f53cde 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py +++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py @@ -62,20 +62,19 @@ class MultiMonitor(basemonitor.BaseMonitor): outage_time = ( last_outage - first_outage if last_outage > first_outage else 0 ) + self._result = {"outage_time": outage_time} LOG.debug("outage_time is: %f", outage_time) max_outage_time = 0 - if "max_outage_time" in self._config["sla"]: - max_outage_time = self._config["sla"]["max_outage_time"] - elif "max_recover_time" in self._config["sla"]: - max_outage_time = self._config["sla"]["max_recover_time"] - else: - raise RuntimeError("'max_outage_time' or 'max_recover_time' " - "config is not found") - self._result = {"outage_time": outage_time} - - if outage_time > max_outage_time: - LOG.error("SLA failure: %f > %f", outage_time, max_outage_time) - return False - else: - return True + if self._config.get("sla"): + if "max_outage_time" in self._config["sla"]: + max_outage_time = self._config["sla"]["max_outage_time"] + elif "max_recover_time" in self._config["sla"]: 
def verify_SLA(self):
    """Check the measured outage time against the configured SLA.

    :return: False when an SLA is configured and the outage time is
        missing or exceeds ``max_recover_time``; True otherwise
        (including when no SLA section is configured at all).
    """
    outage_time = self._result.get('outage_time', None)
    if self._config.get("sla"):
        if outage_time is None:
            # No measurement was recorded. Comparing None against the
            # limit would raise TypeError on Python 3, so treat a
            # missing result as an SLA failure instead of crashing.
            return False
        max_outage_time = self._config["sla"]["max_recover_time"]
        if outage_time > max_outage_time:
            LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
            return False
    return True
b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash index 9f1804819..0f0122e51 100644 --- a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash +++ b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash @@ -25,8 +25,8 @@ run_unixbench() # write the result to stdout in json format output_json() { - single_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | head -1 ) - parallel_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | tail -1 ) + single_score=$(awk '/Score/{print $NF}' $OUTPUT_FILE | head -1 ) + parallel_score=$(awk '/Score/{print $NF}' $OUTPUT_FILE | tail -1 ) echo -e "{ \ \"single_score\":\"$single_score\", \ \"parallel_score\":\"$parallel_score\" \ diff --git a/yardstick/tests/functional/network_services/__init__.py b/yardstick/benchmark/scenarios/energy/__init__.py index e69de29bb..e69de29bb 100644 --- a/yardstick/tests/functional/network_services/__init__.py +++ b/yardstick/benchmark/scenarios/energy/__init__.py diff --git a/yardstick/benchmark/scenarios/energy/energy.py b/yardstick/benchmark/scenarios/energy/energy.py new file mode 100644 index 000000000..7440835be --- /dev/null +++ b/yardstick/benchmark/scenarios/energy/energy.py @@ -0,0 +1,139 @@ +############################################################################## +# Copyright (c) 2019 Lenovo Group Limited Co.,Ltd and others. +# +# All rights reserved. 
class Energy(base.Scenario):
    """Get current energy consumption of a target host.

    This scenario sends a Redfish request to a host BMC to read the
    current power consumption. The response returns a number of Watts,
    usually an average over a rolling window taken from the server's
    internal sensor; the exact semantics are dependent on the server
    vendor.

    This scenario should be used with the node context.

    As this scenario usually runs in the background alongside other
    scenarios, an API query or data-parse error will not terminate the
    task runner. If any error occurs, the reported energy consumption
    is set to -1.

    Parameters: none.
    """

    __scenario_type__ = "Energy"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.target = self.context_cfg['target']
        self.setup_done = False
        self.get_response = False

    def _send_request(self, url):
        """Issue an authenticated GET against the target's Redfish API."""
        LOG.info("Send request to %s", url)
        pod_auth = (self.target["redfish_user"], self.target["redfish_pwd"])
        # BMCs commonly present self-signed certificates, so TLS
        # verification is disabled (warnings are captured at import).
        response = requests.get(url, auth=pod_auth, verify=False)
        return response

    def setup(self):
        """Probe the Redfish service root to confirm it is reachable."""
        url = "https://{}/redfish/v1/".format(self.target["redfish_ip"])
        response = self._send_request(url)
        if response.status_code != 200:
            LOG.info("Don't get right response from %s", url)
            self.get_response = False
        else:
            LOG.info("Get response from %s", url)
            self.get_response = True

        self.setup_done = True

    def load_chassis_list(self):
        """Return the list of chassis resource URIs, or [] on any error."""
        chassis_list = []

        # Get Chassis list
        request_url = "https://" + self.target["redfish_ip"]
        request_url += "/redfish/v1/Chassis/"
        response = self._send_request(request_url)
        if response.status_code != 200:
            LOG.info("Do not get proper response from %s", request_url)
            return chassis_list

        try:
            chassis_data = json.loads(response.text)
        except (TypeError, ValueError) as e:
            LOG.info("Invalid response data, %s", e)
            return chassis_list

        try:
            for chassis in chassis_data['Members']:
                chassis_list.append(chassis["@odata.id"])
        except KeyError:
            # Fixed: the exception object was previously bound but unused.
            LOG.info("Error data format of chassis data or invalid key.")

        return chassis_list

    def get_power(self, chassis_uri):
        """Get PowerConsumedWatts for one chassis from the Redfish API.

        :param chassis_uri: chassis resource path, e.g. returned by
            :meth:`load_chassis_list`
        :return: consumed Watts as reported by the BMC, or -1 on error
        """
        if chassis_uri[-1:] != '/':
            chassis_uri += '/'
        request_url = "https://" + self.target['redfish_ip']
        request_url += chassis_uri
        request_url += "Power/"
        response = self._send_request(request_url)
        if response.status_code != 200:
            LOG.info("Do not get proper response from %s", request_url)
            return -1

        try:
            power_metrics = json.loads(response.text)
        except (TypeError, ValueError) as e:
            LOG.info("Invalid response data, %s", e)
            return -1

        try:
            power = power_metrics["PowerControl"][0]["PowerConsumedWatts"]
        except KeyError:
            # Fixed: the exception object was previously bound but unused.
            LOG.info("Error data format of power metrics or invalid key.")
            power = -1

        return power

    def run(self, result):
        """Execute the benchmark: sum power across all chassis.

        Writes ``{"power": watts}`` into *result*; -1 signals that the
        Redfish endpoint was unreachable or returned no chassis.
        """
        if not self.setup_done:
            self.setup()
        chassis_list = self.load_chassis_list()
        if not self.get_response or not chassis_list:
            power = -1
        else:
            power = sum(self.get_power(chassis) for chassis in chassis_list)
        # Single update point replaces the duplicated dict construction
        # in the original if/else branches; behavior is unchanged.
        result.update({"power": power})
-1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,14 +44,13 @@ traffic_profile.register_modules() LOG = logging.getLogger(__name__) -class NetworkServiceTestCase(scenario_base.Scenario): - """Class handles Generic framework to do pre-deployment VNF & - Network service testing """ +class NetworkServiceBase(scenario_base.Scenario): + """Base class for Network service testing scenarios""" - __scenario_type__ = "NSPerf" + __scenario_type__ = "" def __init__(self, scenario_cfg, context_cfg): # pragma: no cover - super(NetworkServiceTestCase, self).__init__() + super(NetworkServiceBase, self).__init__() self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg @@ -61,7 +60,35 @@ class NetworkServiceTestCase(scenario_base.Scenario): self.traffic_profile = None self.node_netdevs = {} self.bin_path = get_nsb_option('bin_path', '') - self._mq_ids = [] + + def run(self, *args): + pass + + def teardown(self): + """ Stop the collector and terminate VNF & TG instance + + :return + """ + + try: + try: + self.collector.stop() + for vnf in self.vnfs: + LOG.info("Stopping %s", vnf.name) + vnf.terminate() + LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs)) + finally: + terminate_children() + except Exception: + # catch any exception in teardown and convert to simple exception + # never pass exceptions back to multiprocessing, because some exceptions can + # be unpicklable + # https://bugs.python.org/issue9400 + LOG.exception("") + raise RuntimeError("Error in teardown") + + def is_ended(self): + return self.traffic_profile is not None and self.traffic_profile.is_ended() def _get_ip_flow_range(self, ip_start_range): """Retrieve a CIDR first and last viable IPs @@ -137,6 +164,13 @@ class NetworkServiceTestCase(scenario_base.Scenario): imix = {} return imix + 
def _get_ip_priority(self): + try: + priority = self.scenario_cfg['options']['priority'] + except KeyError: + priority = {} + return priority + def _get_traffic_profile(self): profile = self.scenario_cfg["traffic_profile"] path = self.scenario_cfg["task_path"] @@ -148,18 +182,49 @@ class NetworkServiceTestCase(scenario_base.Scenario): return options.get('duration', tprofile_base.TrafficProfileConfig.DEFAULT_DURATION) + def _key_list_to_dict(self, key, value_list): + value_dict = {} + try: + for index, count in enumerate(value_list[key]): + value_dict["{}_{}".format(key, index)] = count + except KeyError: + value_dict = {} + + return value_dict + + def _get_simulated_users(self): + users = self.scenario_cfg.get("options", {}).get("simulated_users", {}) + simulated_users = self._key_list_to_dict("uplink", users) + return {"simulated_users": simulated_users} + + def _get_page_object(self): + objects = self.scenario_cfg.get("options", {}).get("page_object", {}) + page_object = self._key_list_to_dict("uplink", objects) + return {"page_object": page_object} + def _fill_traffic_profile(self): tprofile = self._get_traffic_profile() extra_args = self.scenario_cfg.get('extra_args', {}) tprofile_data = { 'flow': self._get_traffic_flow(), 'imix': self._get_traffic_imix(), + 'priority': self._get_ip_priority(), tprofile_base.TrafficProfile.UPLINK: {}, tprofile_base.TrafficProfile.DOWNLINK: {}, 'extra_args': extra_args, - 'duration': self._get_duration()} + 'duration': self._get_duration(), + 'page_object': self._get_page_object(), + 'simulated_users': self._get_simulated_users()} traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data) - self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd) + + traffic_config = \ + self.scenario_cfg.get("options", {}).get("traffic_config", {}) + + traffic_vnfd.setdefault("traffic_profile", {}) + traffic_vnfd["traffic_profile"].update(traffic_config) + + self.traffic_profile = \ + 
tprofile_base.TrafficProfile.get(traffic_vnfd) def _get_topology(self): topology = self.scenario_cfg["topology"] @@ -413,12 +478,30 @@ class NetworkServiceTestCase(scenario_base.Scenario): pass self.create_interfaces_from_node(vnfd, node) vnf_impl = self.get_vnf_impl(vnfd['id']) - vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id']) + vnf_instance = vnf_impl(node_name, vnfd) vnfs.append(vnf_instance) self.vnfs = vnfs return vnfs + def pre_run_wait_time(self, time_seconds): # pragma: no cover + """Time waited before executing the run method""" + time.sleep(time_seconds) + + def post_run_wait_time(self, time_seconds): # pragma: no cover + """Time waited after executing the run method""" + pass + + +class NetworkServiceTestCase(NetworkServiceBase): + """Class handles Generic framework to do pre-deployment VNF & + Network service testing """ + + __scenario_type__ = "NSPerf" + + def __init__(self, scenario_cfg, context_cfg): # pragma: no cover + super(NetworkServiceTestCase, self).__init__(scenario_cfg, context_cfg) + def setup(self): """Setup infrastructure, provission VNFs & start traffic""" # 1. 
Verify if infrastructure mapping can meet topology @@ -462,11 +545,6 @@ class NetworkServiceTestCase(scenario_base.Scenario): for traffic_gen in traffic_runners: LOG.info("Starting traffic on %s", traffic_gen.name) traffic_gen.run_traffic(self.traffic_profile) - self._mq_ids.append(traffic_gen.get_mq_producer_id()) - - def get_mq_ids(self): # pragma: no cover - """Return stored MQ producer IDs""" - return self._mq_ids def run(self, result): # yardstick API """ Yardstick calls run() at intervals defined in the yaml and @@ -482,33 +560,125 @@ class NetworkServiceTestCase(scenario_base.Scenario): result.update(self.collector.get_kpi()) - def teardown(self): - """ Stop the collector and terminate VNF & TG instance - :return +class NetworkServiceRFC2544(NetworkServiceBase): + """Class handles RFC2544 Network service testing""" + + __scenario_type__ = "NSPerf-RFC2544" + + def __init__(self, scenario_cfg, context_cfg): # pragma: no cover + super(NetworkServiceRFC2544, self).__init__(scenario_cfg, context_cfg) + + def setup(self): + """Setup infrastructure, provision VNFs""" + self.map_topology_to_infrastructure() + self.load_vnf_models() + + traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic] + non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic] + try: + for vnf in chain(traffic_runners, non_traffic_runners): + LOG.info("Instantiating %s", vnf.name) + vnf.instantiate(self.scenario_cfg, self.context_cfg) + LOG.info("Waiting for %s to instantiate", vnf.name) + vnf.wait_for_instantiate() + except: + LOG.exception("") + for vnf in self.vnfs: + vnf.terminate() + raise + + self._generate_pod_yaml() + + def run(self, output): + """ Run experiment + + :param output: scenario output to push results + :return: None """ + self._fill_traffic_profile() + + traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic] + + for traffic_gen in traffic_runners: + traffic_gen.listen_traffic(self.traffic_profile) + + self.collector = Collector(self.vnfs, 
+ context_base.Context.get_physical_nodes()) + self.collector.start() + + test_completed = False + while not test_completed: + for traffic_gen in traffic_runners: + LOG.info("Run traffic on %s", traffic_gen.name) + traffic_gen.run_traffic_once(self.traffic_profile) + + test_completed = True + for traffic_gen in traffic_runners: + # wait for all tg to complete running traffic + status = traffic_gen.wait_on_traffic() + LOG.info("Run traffic on %s complete status=%s", + traffic_gen.name, status) + if status == 'CONTINUE': + # continue running if at least one tg is running + test_completed = False + + output.push(self.collector.get_kpi()) + + self.collector.stop() + +class NetworkServiceRFC3511(NetworkServiceBase): + """Class handles RFC3511 Network service testing""" + + __scenario_type__ = "NSPerf-RFC3511" + + def __init__(self, scenario_cfg, context_cfg): # pragma: no cover + super(NetworkServiceRFC3511, self).__init__(scenario_cfg, context_cfg) + + def setup(self): + """Setup infrastructure, provision VNFs""" + self.map_topology_to_infrastructure() + self.load_vnf_models() + + traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic] + non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic] try: - try: - self.collector.stop() - for vnf in self.vnfs: - LOG.info("Stopping %s", vnf.name) - vnf.terminate() - LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs)) - finally: - terminate_children() - except Exception: - # catch any exception in teardown and convert to simple exception - # never pass exceptions back to multiprocessing, because some exceptions can - # be unpicklable - # https://bugs.python.org/issue9400 + for vnf in chain(traffic_runners, non_traffic_runners): + LOG.info("Instantiating %s", vnf.name) + vnf.instantiate(self.scenario_cfg, self.context_cfg) + LOG.info("Waiting for %s to instantiate", vnf.name) + vnf.wait_for_instantiate() + except: LOG.exception("") - raise RuntimeError("Error in teardown") + for 
vnf in self.vnfs: + vnf.terminate() + raise - def pre_run_wait_time(self, time_seconds): # pragma: no cover - """Time waited before executing the run method""" - time.sleep(time_seconds) + self._generate_pod_yaml() - def post_run_wait_time(self, time_seconds): # pragma: no cover - """Time waited after executing the run method""" - pass + def run(self, output): + """ Run experiment + + :param output: scenario output to push results + :return: None + """ + + self._fill_traffic_profile() + + traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic] + + for traffic_gen in traffic_runners: + traffic_gen.listen_traffic(self.traffic_profile) + + self.collector = Collector(self.vnfs, + context_base.Context.get_physical_nodes()) + self.collector.start() + + for traffic_gen in traffic_runners: + LOG.info("Run traffic on %s", traffic_gen.name) + traffic_gen.run_traffic(self.traffic_profile) + + output.push(self.collector.get_kpi()) + + self.collector.stop() diff --git a/yardstick/benchmark/scenarios/parser/parser.py b/yardstick/benchmark/scenarios/parser/parser.py index 5b2b49c2c..a0f8e9e72 100644 --- a/yardstick/benchmark/scenarios/parser/parser.py +++ b/yardstick/benchmark/scenarios/parser/parser.py @@ -20,7 +20,7 @@ class Parser(base.Scenario): """running Parser Yang-to-Tosca module as a tool validating output against expected outcome - more info https://wiki.opnfv.org/parser + more info https://wiki.opnfv.org/display/parser """ __scenario_type__ = "Parser" diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py index 8093cd2d2..5b8b00075 100644 --- a/yardstick/benchmark/scenarios/storage/storperf.py +++ b/yardstick/benchmark/scenarios/storage/storperf.py @@ -87,7 +87,9 @@ class StorPerf(base.Scenario): env_args = {} env_args_payload_list = ["agent_count", "agent_flavor", "public_network", "agent_image", - "volume_size"] + "volume_size", "volume_type", + "volume_count", "availability_zone", + "stack_name", 
"subnet_CIDR"] for env_argument in env_args_payload_list: try: @@ -100,13 +102,14 @@ class StorPerf(base.Scenario): setup_res = requests.post('http://%s:5000/api/v1.0/configurations' % self.target, json=env_args) - setup_res_content = jsonutils.loads( - setup_res.content) if setup_res.status_code != 200: - raise RuntimeError("Failed to create a stack, error message:", - setup_res_content["message"]) + LOG.error("Failed to create stack. %s: %s", + setup_res.status_code, setup_res.content) + raise RuntimeError("Failed to create stack. %s: %s" % + (setup_res.status_code, setup_res.content)) elif setup_res.status_code == 200: + setup_res_content = jsonutils.loads(setup_res.content) LOG.info("stack_id: %s", setup_res_content["stack_id"]) while not self._query_setup_state(): @@ -120,14 +123,15 @@ class StorPerf(base.Scenario): def _query_job_state(self, job_id): """Query the status of the supplied job_id and report on metrics""" LOG.info("Fetching report for %s...", job_id) - report_res = requests.get('http://{}:5000/api/v1.0/jobs'.format - (self.target), + report_res = requests.get('http://%s:5000/api/v1.0/jobs' % self.target, params={'id': job_id, 'type': 'status'}) report_res_content = jsonutils.loads( report_res.content) if report_res.status_code != 200: + LOG.error("Failed to fetch report, error message: %s", + report_res_content["message"]) raise RuntimeError("Failed to fetch report, error message:", report_res_content["message"]) else: @@ -184,15 +188,15 @@ class StorPerf(base.Scenario): LOG.info("Starting a job with parameters %s", job_args) job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target, - api_version), - json=job_args) - - job_res_content = jsonutils.loads(job_res.content) + api_version), json=job_args) if job_res.status_code != 200: - raise RuntimeError("Failed to start a job, error message:", - job_res_content["message"]) + LOG.error("Failed to start job. 
%s: %s", + job_res.status_code, job_res.content) + raise RuntimeError("Failed to start job. %s: %s" % + (job_res.status_code, job_res.content)) elif job_res.status_code == 200: + job_res_content = jsonutils.loads(job_res.content) job_id = job_res_content["job_id"] LOG.info("Started job id: %s...", job_id) @@ -213,11 +217,20 @@ class StorPerf(base.Scenario): # else: # time.sleep(int(esti_time)/2) + result_res = requests.get('http://%s:5000/api/v1.0/jobs?type=' + 'metadata&id=%s' % (self.target, job_id)) + result_res_content = jsonutils.loads(result_res.content) + if 'report' in result_res_content and \ + 'steady_state' in result_res_content['report']['details']: + res = result_res_content['report']['details']['steady_state'] + steady_state = res.values()[0] + LOG.info("Job %s completed with steady state %s", + job_id, steady_state) + result_res = requests.get('http://%s:5000/api/v1.0/jobs?id=%s' % (self.target, job_id)) result_res_content = jsonutils.loads( result_res.content) - result.update(result_res_content) def initialize_disks(self): @@ -236,13 +249,14 @@ class StorPerf(base.Scenario): job_res = requests.post('http://%s:5000/api/v1.0/initializations' % self.target, json=job_args) - job_res_content = jsonutils.loads(job_res.content) if job_res.status_code != 200: - raise RuntimeError( - "Failed to start initialization job, error message:", - job_res_content["message"]) + LOG.error("Failed to start initialization job, error message: %s: %s", + job_res.status_code, job_res.content) + raise RuntimeError("Failed to start initialization job, error message: %s: %s" % + (job_res.status_code, job_res.content)) elif job_res.status_code == 200: + job_res_content = jsonutils.loads(job_res.content) job_id = job_res_content["job_id"] LOG.info("Started initialization as job id: %s...", job_id) @@ -260,6 +274,8 @@ class StorPerf(base.Scenario): if teardown_res.status_code == 400: teardown_res_content = jsonutils.loads( teardown_res.json_data) + LOG.error("Failed to reset 
environment, error message: %s", + teardown_res_content['message']) raise RuntimeError("Failed to reset environment, error message:", teardown_res_content['message']) diff --git a/yardstick/cmd/commands/report.py b/yardstick/cmd/commands/report.py index 47bf22a1f..4f057a05d 100644 --- a/yardstick/cmd/commands/report.py +++ b/yardstick/cmd/commands/report.py @@ -1,7 +1,7 @@ ############################################################################## -# Copyright (c) 2017 Rajesh Kudaka. +# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com> +# Copyright (c) 2018 Intel Corporation. # -# Author: Rajesh Kudaka (4k.rajesh@gmail.com) # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at @@ -10,11 +10,7 @@ """ Handler for yardstick command 'report' """ -from __future__ import print_function - -from __future__ import absolute_import - -from yardstick.benchmark.core.report import Report +from yardstick.benchmark.core import report from yardstick.cmd.commands import change_osloobj_to_paras from yardstick.common.utils import cliargs @@ -22,12 +18,19 @@ from yardstick.common.utils import cliargs class ReportCommands(object): # pragma: no cover """Report commands. - Set of commands to manage benchmark tasks. + Set of commands to manage reports. 
""" @cliargs("task_id", type=str, help=" task id", nargs=1) @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1) def do_generate(self, args): - """Start a benchmark scenario.""" + """Generate a report.""" + param = change_osloobj_to_paras(args) + report.Report().generate(param) + + @cliargs("task_id", type=str, help=" task id", nargs=1) + @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1) + def do_generate_nsb(self, args): + """Generate a report using the NSB template.""" param = change_osloobj_to_paras(args) - Report().generate(param) + report.Report().generate_nsb(param) diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py index c9d61e45c..010ec6a51 100644 --- a/yardstick/common/exceptions.py +++ b/yardstick/common/exceptions.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -83,10 +83,6 @@ class InvalidType(YardstickException): message = 'Type "%(type_to_convert)s" is not valid' -class InvalidRxfFile(YardstickException): - message = 'Loaded rxf file has unexpected format' - - class InfluxDBConfigurationMissing(YardstickException): message = ('InfluxDB configuration is not available. 
Add "influxdb" as ' 'a dispatcher and the configuration section') @@ -207,15 +203,6 @@ class TaskRenderError(YardstickException): message = 'Failed to render template:\n%(input_task)s' -class RunnerIterationIPCSetupActionNeeded(YardstickException): - message = ('IterationIPC needs the "setup" action to retrieve the VNF ' - 'handling processes PIDs to receive the messages sent') - - -class RunnerIterationIPCNoCtxs(YardstickException): - message = 'Benchmark "setup" action did not return any VNF process PID' - - class TimerTimeout(YardstickException): message = 'Timer timeout expired, %(timeout)s seconds' @@ -405,6 +392,10 @@ class IxNetworkFieldNotPresentInStackItem(YardstickException): message = 'Field "%(field_name)s" not present in stack item %(stack_item)s' +class IncorrectFlowOption(YardstickException): + message = 'Flow option {option} for {link} is incorrect' + + class SLAValidationError(YardstickException): message = '%(case_name)s SLA validation failed. Error: %(error_msg)s' @@ -424,3 +415,15 @@ class InvalidMacAddress(YardstickException): class ValueCheckError(YardstickException): message = 'Constraint "%(value1)s %(operator)s %(value2)s" does not hold' + + +class RestApiError(RuntimeError): + def __init__(self, message): + self._message = message + super(RestApiError, self).__init__(message) + + +class LandslideTclException(RuntimeError): + def __init__(self, message): + self._message = message + super(LandslideTclException, self).__init__(message) diff --git a/yardstick/common/html_template.py b/yardstick/common/html_template.py index e17c76637..c15dd8238 100644 --- a/yardstick/common/html_template.py +++ b/yardstick/common/html_template.py @@ -8,130 +8,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################# -template = """ -<html> -<body> -<head> -<meta charset="utf-8"> -<meta name="viewport" content="width=device-width, initial-scale=1"> -<link rel="stylesheet" 
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\ -/css/bootstrap.min.css"> -<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1\ -/jquery.min.js"></script> -<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\ -/js/bootstrap.min.js"></script> -<script src="https://code.highcharts.com/highcharts.js"></script> -<script src="jquery.min.js"></script> -<script src="highcharts.js"></script> -</head> -<style> - -table{ - overflow-y: scroll; - height: 360px; - display: block; - } - - header,h3{ - font-family:Frutiger; - clear: left; - text-align: center; -} -</style> -<header class="jumbotron text-center"> - <h1>Yardstick User Interface</h1> - <h4>Report of {{task_id}} Generated</h4> -</header> - -<div class="container"> - <div class="row"> - <div class="col-md-4"> - <div class="table-responsive" > - <table class="table table-hover" > </table> - </div> - </div> - <div class="col-md-8" > - <div id="container" ></div> - </div> - </div> -</div> -<script> - var arr, tab, th, tr, td, tn, row, col, thead, tbody; - arr={{table|safe}} - tab = document.getElementsByTagName('table')[0]; - thead=document.createElement('thead'); - tr = document.createElement('tr'); - for(row=0;row<Object.keys(arr).length;row++) - { - th = document.createElement('th'); - tn = document.createTextNode(Object.keys(arr).sort()[row]); - th.appendChild(tn); - tr.appendChild(th); - thead.appendChild(tr); - } - tab.appendChild(thead); - tbody=document.createElement('tbody'); - - for (col = 0; col < arr[Object.keys(arr)[0]].length; col++){ - tr = document.createElement('tr'); - for(row=0;row<Object.keys(arr).length;row++) - { - td = document.createElement('td'); - tn = document.createTextNode(arr[Object.keys(arr).sort()[row]][col]); - td.appendChild(tn); - tr.appendChild(td); - } - tbody.appendChild(tr); - } -tab.appendChild(tbody); - -</script> - -<script language="JavaScript"> - -$(function() { - $('#container').highcharts({ - title: { - text: 'Yardstick test results', - x: -20 
//center - }, - subtitle: { - text: 'Report of {{task_id}} Task Generated', - x: -20 - }, - xAxis: { - title: { - text: 'Timestamp' - }, - categories:{{Timestamp|safe}} - }, - yAxis: { - - plotLines: [{ - value: 0, - width: 1, - color: '#808080' - }] - }, - tooltip: { - valueSuffix: '' - }, - legend: { - layout: 'vertical', - align: 'right', - verticalAlign: 'middle', - borderWidth: 0 - }, - series: {{series|safe}} - }); -}); - -</script> - - -</body> -</html>""" - report_template = """ <html> <head> diff --git a/yardstick/common/messaging/__init__.py b/yardstick/common/messaging/__init__.py index bd700d9b1..089c99c9f 100644 --- a/yardstick/common/messaging/__init__.py +++ b/yardstick/common/messaging/__init__.py @@ -1,3 +1,14 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @@ -15,19 +26,3 @@ TRANSPORT_URL = (MQ_SERVICE + '://' + MQ_USER + ':' + MQ_PASS + '@' + SERVER + # RPC server. RPC_SERVER_EXECUTOR = 'threading' - -# Topics. -TOPIC_TG = 'topic_traffic_generator' -TOPIC_RUNNER = 'topic_runner' - -# Methods. -# Traffic generator consumers methods. Names must match the methods implemented -# in the consumer endpoint class. -TG_METHOD_STARTED = 'tg_method_started' -TG_METHOD_FINISHED = 'tg_method_finished' -TG_METHOD_ITERATION = 'tg_method_iteration' - -# Runner consumers methods. Names must match the methods implemented in the -# consumer endpoint class. 
-RUNNER_METHOD_START_ITERATION = "runner_method_start_iteration" -RUNNER_METHOD_STOP_ITERATION = "runner_method_stop_iteration" diff --git a/yardstick/common/nsb_report.css b/yardstick/common/nsb_report.css new file mode 100644 index 000000000..667f865a5 --- /dev/null +++ b/yardstick/common/nsb_report.css @@ -0,0 +1,34 @@ +/******************************************************************************* + * Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com> + * Copyright (c) 2018 Intel Corporation. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Apache License, Version 2.0 + * which accompanies this distribution, and is available at + * http://www.apache.org/licenses/LICENSE-2.0 + ******************************************************************************/ + +body { + font-family: Frutiger, "Helvetica Neue", Helvetica, Arial, sans-serif; +} + +header { + padding-top: 5px; + text-align: center; + font-weight: bold; +} + +#tblMetrics { + overflow-y: scroll; + height: 360px; + display: block; +} + +#cnvGraph { + width: 100%; + height: 500px; +} + +#divTree { + font-size: 10pt; +} diff --git a/yardstick/common/nsb_report.html.j2 b/yardstick/common/nsb_report.html.j2 new file mode 100644 index 000000000..a6713eb16 --- /dev/null +++ b/yardstick/common/nsb_report.html.j2 @@ -0,0 +1,75 @@ +<!DOCTYPE html> +<html> + +<!-- + Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com> + Copyright (c) 2018-2019 Intel Corporation. + + All rights reserved. 
This program and the accompanying materials + are made available under the terms of the Apache License, Version 2.0 + which accompanies this distribution, and is available at + http://www.apache.org/licenses/LICENSE-2.0 +--> + + <head> + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width, initial-scale=1"> + <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"> + <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jstree/3.3.7/themes/default/style.min.css"> + <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> + <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script> + <script src="https://cdnjs.cloudflare.com/ajax/libs/jstree/3.3.7/jstree.min.js"></script> + <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.3/Chart.bundle.min.js"></script> + <style> + {% include 'nsb_report.css' %} + </style> + <script> + {% include 'nsb_report.js' %} + </script> + </head> + + <body> + <div class="container-fluid"> + <div class="row"> + <header> + Testcase: {{report_meta.testcase}}<br> + Task-ID: {{report_meta.task_id}}<br> + </header> + </div> + <div class="row"> + <div class="col-md-2"> + <div id="divTree"></div> + </div> + <div class="col-md-10"> + <canvas id="cnvGraph"></canvas> + </div> + </div> + <div class="row"> + <div class="col-md-12 table-responsive"> + <table id="tblMetrics" class="table table-condensed table-hover"></table> + </div> + </div> + </div> + + <script> + // Injected metrics, timestamps, keys and hierarchy + var report_data = {{report_data|safe}}; + var report_time = {{report_time|safe}}; + var report_keys = {{report_keys|safe}}; + var report_tree = {{report_tree|safe}}; + var table_data = {{table_data|safe}}; + + // Wait for DOM to be loaded + $(function() { + var tblMetrics = $('#tblMetrics'); + var cnvGraph = $('#cnvGraph'); + var divTree = $('#divTree'); + + create_table(tblMetrics, 
table_data, report_time, report_keys); + var objGraph = create_graph(cnvGraph, report_time); + create_tree(divTree, report_tree); + handle_tree(divTree, tblMetrics, objGraph, report_data, table_data, report_time); + }); + </script> + </body> +</html> diff --git a/yardstick/common/nsb_report.js b/yardstick/common/nsb_report.js new file mode 100644 index 000000000..18141900b --- /dev/null +++ b/yardstick/common/nsb_report.js @@ -0,0 +1,170 @@ +/******************************************************************************* + * Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com> + * Copyright (c) 2018-2019 Intel Corporation. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Apache License, Version 2.0 + * which accompanies this distribution, and is available at + * http://www.apache.org/licenses/LICENSE-2.0 + ******************************************************************************/ + +var None = null; + +function create_tree(divTree, jstree_data) +{ + divTree.jstree({ + plugins: ['checkbox'], + checkbox: { + three_state: false, + whole_node: true, + tie_selection: false, + }, + core: { + themes: { + icons: false, + stripes: true, + }, + data: jstree_data, + }, + }); +} + +function create_table(tblMetrics, table_data, timestamps, table_keys) +{ + var tbody = $('<tbody></tbody>'); + var tr0 = $('<tr></tr>'); + var th0 = $('<th></th>'); + var td0 = $('<td></td>'); + var tr; + + // create table headings using timestamps + tr = tr0.clone().append(th0.clone().text('Timestamp')); + timestamps.forEach(function(t) { + tr.append(th0.clone().text(t)); + }); + tbody.append(tr); + + // for each metric + table_keys.forEach(function(key) { + tr = tr0.clone().append(td0.clone().text(key)); + // add each piece of data as its own column + table_data[key].forEach(function(val) { + tr.append(td0.clone().text(val === None ? 
'' : val)); + }); + tbody.append(tr); + }); + + // re-create table + tblMetrics.empty().append(tbody); +} + +function create_graph(cnvGraph, timestamps) +{ + return new Chart(cnvGraph, { + type: 'line', + data: { + labels: timestamps, + datasets: [], + }, + options: { + elements: { + line: { + borderWidth: 2, + fill: false, + tension: 0, + showline: true, + spanGaps: true, + }, + }, + scales: { + xAxes: [{ + type: 'category', + display: true, + labels: timestamps, + autoSkip: true, + }], + yAxes: [{ + type: 'linear', + }], + }, + tooltips: { + mode: 'point', + intersect: true, + }, + hover: { + mode: 'index', + intersect: false, + animationDuration: 0, + }, + legend: { + position: 'bottom', + labels: { + usePointStyle: true, + }, + }, + animation: { + duration: 0, + }, + responsive: true, + responsiveAnimationDuration: 0, + maintainAspectRatio: false, + }, + }); +} + +function update_graph(objGraph, datasets) +{ + var colors = [ + '#FF0000', // Red + '#228B22', // ForestGreen + '#FF8C00', // DarkOrange + '#00008B', // DarkBlue + '#FF00FF', // Fuchsia + '#9ACD32', // YellowGreen + '#FFD700', // Gold + '#4169E1', // RoyalBlue + '#A0522D', // Sienna + '#20B2AA', // LightSeaGreen + '#8A2BE2', // BlueViolet + ]; + + var points = [ + {s: 'circle', r: 3}, + {s: 'rect', r: 4}, + {s: 'triangle', r: 4}, + {s: 'star', r: 4}, + {s: 'rectRot', r: 5}, + ]; + + datasets.forEach(function(d, i) { + var color = colors[i % colors.length]; + var point = points[i % points.length]; + d.borderColor = color; + d.backgroundColor = color; + d.pointStyle = point.s; + d.pointRadius = point.r; + d.pointHoverRadius = point.r + 1; + }); + objGraph.data.datasets = datasets; + objGraph.update(); +} + +function handle_tree(divTree, tblMetrics, objGraph, graph_data, table_data, timestamps) +{ + divTree.on('check_node.jstree uncheck_node.jstree', function(e, data) { + var selected_keys = []; + var selected_datasets = []; + data.selected.forEach(function(sel) { + var node = 
data.instance.get_node(sel); + if (node.children.length == 0) { + selected_keys.push(node.id); + selected_datasets.push({ + label: node.id, + data: graph_data[node.id], + }); + } + }); + create_table(tblMetrics, table_data, timestamps, selected_keys); + update_graph(objGraph, selected_datasets); + }); +} diff --git a/yardstick/common/packages.py b/yardstick/common/packages.py index f20217fdc..c65eab2ba 100644 --- a/yardstick/common/packages.py +++ b/yardstick/common/packages.py @@ -15,9 +15,9 @@ import logging import re -import pip -from pip import exceptions as pip_exceptions -from pip.operations import freeze +from pip._internal.main import main +from pip._internal import exceptions as pip_exceptions +from pip._internal.operations import freeze from yardstick.common import privsep @@ -36,7 +36,7 @@ def _pip_main(package, action, target=None): cmd = [action, package, '--upgrade'] if target: cmd.append('--target=%s' % target) - return pip.main(cmd) + return main(cmd) def _pip_execute_action(package, action=ACTION_INSTALL, target=None): diff --git a/yardstick/common/report.html.j2 b/yardstick/common/report.html.j2 new file mode 100644 index 000000000..1dc7b1db1 --- /dev/null +++ b/yardstick/common/report.html.j2 @@ -0,0 +1,184 @@ +<!DOCTYPE html> +<html> + +<!-- + Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com> + Copyright (c) 2018 Intel Corporation. + + All rights reserved. 
This program and the accompanying materials + are made available under the terms of the Apache License, Version 2.0 + which accompanies this distribution, and is available at + http://www.apache.org/licenses/LICENSE-2.0 +--> + + <head> + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width, initial-scale=1"> + <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"> + <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> + <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script> + <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.3/Chart.bundle.min.js"></script> + + <style> + table { + overflow-y: scroll; + height: 360px; + display: block; + } + header { + font-family: Frutiger, "Helvetica Neue", Helvetica, Arial, sans-serif; + clear: left; + text-align: center; + } + </style> + </head> + + <body> + <header class="jumbotron text-center"> + <h1>Yardstick User Interface</h1> + <h4>Report of {{task_id}} Generated</h4> + </header> + + <div class="container"> + <div class="row"> + <div class="col-md-4"> + <div class="table-responsive"> + <table class="table table-hover"></table> + </div> + </div> + <div class="col-md-8"> + <canvas id="cnvGraph" style="width: 100%; height: 500px"></canvas> + </div> + </div> + </div> + + <script> + var None = null; + var arr, tab, th, tr, td, tn, row, col, thead, tbody, val; + arr = {{table|safe}}; + tab = document.getElementsByTagName('table')[0]; + + thead = document.createElement('thead'); + tr = document.createElement('tr'); + for (col = 0; col < Object.keys(arr).length; col++) { + th = document.createElement('th'); + tn = document.createTextNode(Object.keys(arr).sort()[col]); + th.appendChild(tn); + tr.appendChild(th); + } + thead.appendChild(tr); + tab.appendChild(thead); + + tbody = document.createElement('tbody'); + for (row = 0; row < arr[Object.keys(arr)[0]].length; row++) { + tr 
= document.createElement('tr'); + for (col = 0; col < Object.keys(arr).length; col++) { + val = arr[Object.keys(arr).sort()[col]][row]; + td = document.createElement('td'); + tn = document.createTextNode(val === None ? '' : val); + td.appendChild(tn); + tr.appendChild(td); + } + tbody.appendChild(tr); + } + tab.appendChild(tbody); + + $(function() { + var datasets = {{datasets|safe}}; + + var colors = [ + '#FF0000', // Red + '#228B22', // ForestGreen + '#FF8C00', // DarkOrange + '#00008B', // DarkBlue + '#FF00FF', // Fuchsia + '#9ACD32', // YellowGreen + '#FFD700', // Gold + '#4169E1', // RoyalBlue + '#A0522D', // Sienna + '#20B2AA', // LightSeaGreen + '#8A2BE2', // BlueViolet + ]; + + var points = [ + {s: 'circle', r: 3}, + {s: 'rect', r: 4}, + {s: 'triangle', r: 4}, + {s: 'star', r: 4}, + {s: 'rectRot', r: 5}, + ]; + + datasets.forEach(function(d, i) { + var color = colors[i % colors.length]; + var point = points[i % points.length]; + d.borderColor = color; + d.backgroundColor = color; + d.pointStyle = point.s; + d.pointRadius = point.r; + d.pointHoverRadius = point.r + 1; + }); + + new Chart($('#cnvGraph'), { + type: 'line', + data: { + labels: {{Timestamps|safe}}, + datasets: datasets, + }, + options: { + elements: { + line: { + borderWidth: 2, + fill: false, + tension: 0, + }, + }, + title: { + text: [ + 'Yardstick test results', + 'Report of {{task_id}} Task Generated', + ], + display: true, + }, + scales: { + xAxes: [{ + type: 'category', + scaleLabel: { + display: true, + labelString: 'Timestamp', + }, + }], + yAxes: [{ + type: 'linear', + scaleLabel: { + display: true, + labelString: 'Values', + }, + }], + }, + tooltips: { + mode: 'point', + intersect: true, + }, + hover: { + mode: 'index', + intersect: false, + animationDuration: 0, + }, + legend: { + position: 'right', + labels: { + usePointStyle: true, + }, + }, + animation: { + duration: 0, + }, + responsive: true, + responsiveAnimationDuration: 0, + maintainAspectRatio: false, + }, + }); + }); + 
</script> + </body> +</html> diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py index 31885c073..7475f6991 100644 --- a/yardstick/common/utils.py +++ b/yardstick/common/utils.py @@ -19,6 +19,7 @@ import datetime import errno import importlib import ipaddress +import json import logging import os import pydoc @@ -30,6 +31,7 @@ import subprocess import sys import time import threading +import math import six from flask import jsonify @@ -292,6 +294,17 @@ def make_ipv4_address(ip_addr): return ipaddress.IPv4Address(six.text_type(ip_addr)) +def get_ip_range_count(iprange): + start_range, end_range = iprange.split("-") + start = int(make_ipv4_address(start_range)) + end = int(make_ipv4_address(end_range)) + return end - start + + +def get_ip_range_start(iprange): + return str(make_ipv4_address(iprange.split("-")[0])) + + def safe_ip_address(ip_addr): """ get ip address version v6 or v4 """ try: @@ -499,6 +512,23 @@ def read_meminfo(ssh_client): return output +def setup_hugepages(ssh_client, size_kb): + """Setup needed number of hugepages for the size specified""" + + NR_HUGEPAGES_PATH = '/proc/sys/vm/nr_hugepages' + meminfo = read_meminfo(ssh_client) + hp_size_kb = int(meminfo['Hugepagesize']) + hp_number = int(math.ceil(size_kb / float(hp_size_kb))) + ssh_client.execute( + 'echo %s | sudo tee %s' % (hp_number, NR_HUGEPAGES_PATH)) + hp = six.BytesIO() + ssh_client.get_file_obj(NR_HUGEPAGES_PATH, hp) + hp_number_set = int(hp.getvalue().decode('utf-8').splitlines()[0]) + logger.info('Hugepages size (kB): %s, number claimed: %s, number set: %s', + hp_size_kb, hp_number, hp_number_set) + return hp_size_kb, hp_number, hp_number_set + + def find_relative_file(path, task_path): """ Find file in one of places: in abs of path or relative to a directory path, @@ -600,3 +630,47 @@ def safe_cast(value, type_to_convert, default_value): return _type(value) except ValueError: return default_value + + +def get_os_version(ssh_client): + """Return OS version. 
+ + :param ssh_client: SSH + :return str: Linux OS versions + """ + os_ver = ssh_client.execute("cat /etc/lsb-release")[1] + return os_ver + + +def get_kernel_version(ssh_client): + """Return kernel version. + + :param ssh_client: SSH + :return str: Linux kernel versions + """ + kernel_ver = ssh_client.execute("uname -a")[1] + return kernel_ver + + +def get_sample_vnf_info(ssh_client, + json_file='/opt/nsb_bin/yardstick_sample_vnf.json'): + """Return sample VNF data. + + :param ssh_client: SSH + :param json_file: str + :return dict: information about sample VNF + """ + rc, json_str, err = ssh_client.execute("cat %s" % json_file) + logger.debug("cat %s: %s, rc: %s, err: %s", json_file, json_str, rc, err) + + if rc: + return {} + json_data = json.loads(json_str) + for vnf_data in json_data.values(): + out = ssh_client.execute("md5sum %s" % vnf_data["path_vnf"])[1] + md5 = out.split()[0].strip() + if md5 == vnf_data["md5"]: + vnf_data["md5_result"] = "MD5 checksum is valid" + else: + vnf_data["md5_result"] = "MD5 checksum is invalid" + return json_data diff --git a/yardstick/network_services/constants.py b/yardstick/network_services/constants.py index 0064b4fc5..5a186be42 100644 --- a/yardstick/network_services/constants.py +++ b/yardstick/network_services/constants.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,3 +17,4 @@ DEFAULT_VNF_TIMEOUT = 3600 PROCESS_JOIN_TIMEOUT = 3 ONE_GIGABIT_IN_BITS = 1000000000 NIC_GBPS_DEFAULT = 10 +RETRY_TIMEOUT = 5 diff --git a/yardstick/network_services/helpers/cpu.py b/yardstick/network_services/helpers/cpu.py index 8c21754ff..279af204a 100644 --- a/yardstick/network_services/helpers/cpu.py +++ b/yardstick/network_services/helpers/cpu.py @@ -15,11 +15,15 @@ import io +# Number of threads per core. 
+NR_OF_THREADS = 2 + class CpuSysCores(object): def __init__(self, connection=""): self.core_map = {} + self.cpuinfo = {} self.connection = connection def _open_cpuinfo(self): @@ -33,11 +37,11 @@ class CpuSysCores(object): core_lines = {} for line in lines: if line.strip(): - name, value = line.split(":", 1) - core_lines[name.strip()] = value.strip() + name, value = line.split(":", 1) + core_lines[name.strip()] = value.strip() else: - core_details.append(core_lines) - core_lines = {} + core_details.append(core_lines) + core_lines = {} return core_details @@ -51,7 +55,7 @@ class CpuSysCores(object): lines = self._open_cpuinfo() core_details = self._get_core_details(lines) for core in core_details: - for k, v in core.items(): + for k, _ in core.items(): if k == "physical id": if core["physical id"] not in self.core_map: self.core_map[core['physical id']] = [] @@ -60,6 +64,17 @@ class CpuSysCores(object): return self.core_map + def get_cpu_layout(self): + _, stdout, _ = self.connection.execute("lscpu -p") + self.cpuinfo = {} + self.cpuinfo['cpuinfo'] = list() + for line in stdout.split("\n"): + if line and line[0] != "#": + self.cpuinfo['cpuinfo'].append( + [CpuSysCores._str2int(x) for x in + line.split(",")]) + return self.cpuinfo + def validate_cpu_cfg(self, vnf_cfg=None): if vnf_cfg is None: vnf_cfg = { @@ -78,3 +93,81 @@ class CpuSysCores(object): return -1 return 0 + + def is_smt_enabled(self): + return CpuSysCores.smt_enabled(self.cpuinfo) + + def cpu_list_per_node(self, cpu_node, smt_used=False): + cpu_node = int(cpu_node) + cpu_info = self.cpuinfo.get("cpuinfo") + if cpu_info is None: + raise RuntimeError("Node cpuinfo not available.") + + smt_enabled = self.is_smt_enabled() + if not smt_enabled and smt_used: + raise RuntimeError("SMT is not enabled.") + + cpu_list = [] + for cpu in cpu_info: + if cpu[3] == cpu_node: + cpu_list.append(cpu[0]) + + if not smt_enabled or smt_enabled and smt_used: + pass + + if smt_enabled and not smt_used: + cpu_list_len = 
len(cpu_list) + cpu_list = cpu_list[:int(cpu_list_len / NR_OF_THREADS)] + + return cpu_list + + def cpu_slice_of_list_per_node(self, cpu_node, skip_cnt=0, cpu_cnt=0, + smt_used=False): + cpu_list = self.cpu_list_per_node(cpu_node, smt_used) + + cpu_list_len = len(cpu_list) + if cpu_cnt + skip_cnt > cpu_list_len: + raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).") + + if cpu_cnt == 0: + cpu_cnt = cpu_list_len - skip_cnt + + if smt_used: + cpu_list_0 = cpu_list[:int(cpu_list_len / NR_OF_THREADS)] + cpu_list_1 = cpu_list[int(cpu_list_len / NR_OF_THREADS):] + cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list_ex = [cpu for cpu in + cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]] + cpu_list.extend(cpu_list_ex) + else: + cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]] + + return cpu_list + + def cpu_list_per_node_str(self, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",", + smt_used=False): + cpu_list = self.cpu_slice_of_list_per_node(cpu_node, + skip_cnt=skip_cnt, + cpu_cnt=cpu_cnt, + smt_used=smt_used) + return sep.join(str(cpu) for cpu in cpu_list) + + @staticmethod + def _str2int(string): + try: + return int(string) + except ValueError: + return 0 + + @staticmethod + def smt_enabled(cpuinfo): + cpu_info = cpuinfo.get("cpuinfo") + if cpu_info is None: + raise RuntimeError("Node cpuinfo not available.") + cpu_mems = [item[-4:] for item in cpu_info] + cpu_mems_len = int(len(cpu_mems) / NR_OF_THREADS) + count = 0 + for cpu_mem in cpu_mems[:cpu_mems_len]: + if cpu_mem in cpu_mems[cpu_mems_len:]: + count += 1 + return count == cpu_mems_len diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py index 1c74355ef..33a5e8c1d 100644 --- a/yardstick/network_services/helpers/dpdkbindnic_helper.py +++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py @@ -13,7 +13,6 @@ # limitations under the License. 
import logging import os - import re from collections import defaultdict from itertools import chain @@ -21,7 +20,6 @@ from itertools import chain from yardstick.common import exceptions from yardstick.common.utils import validate_non_string_sequence - NETWORK_KERNEL = 'network_kernel' NETWORK_DPDK = 'network_dpdk' NETWORK_OTHER = 'network_other' @@ -284,15 +282,22 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \ res = self.ssh_helper.execute(*args, **kwargs) if res[0] != 0: template = '{} command failed with rc={}' + LOG.critical("DPDK_DEVBIND Failure %s", res[1]) raise DpdkBindHelperException(template.format(self.dpdk_devbind, res[0])) return res - def load_dpdk_driver(self): + def load_dpdk_driver(self, dpdk_driver=None): + if dpdk_driver is None: + dpdk_driver = self.dpdk_driver cmd_template = "sudo modprobe {} && sudo modprobe {}" - self.ssh_helper.execute(cmd_template.format(self.UIO_DRIVER, self.dpdk_driver)) - - def check_dpdk_driver(self): - return self.ssh_helper.execute("lsmod | grep -i {}".format(self.dpdk_driver))[0] + self.ssh_helper.execute( + cmd_template.format(self.UIO_DRIVER, dpdk_driver)) + + def check_dpdk_driver(self, dpdk_driver=None): + if dpdk_driver is None: + dpdk_driver = self.dpdk_driver + return \ + self.ssh_helper.execute("lsmod | grep -i {}".format(dpdk_driver))[0] @property def _status_cmd(self): diff --git a/yardstick/tests/functional/network_services/vnf_generic/__init__.py b/yardstick/network_services/helpers/vpp_helpers/__init__.py index e69de29bb..e69de29bb 100644 --- a/yardstick/tests/functional/network_services/vnf_generic/__init__.py +++ b/yardstick/network_services/helpers/vpp_helpers/__init__.py diff --git a/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py b/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py new file mode 100644 index 000000000..fced05833 --- /dev/null +++ 
b/yardstick/network_services/helpers/vpp_helpers/abstract_search_algorithm.py @@ -0,0 +1,53 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This is a modified copy of +# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/AbstractSearchAlgorithm.py;hb=HEAD + + +from abc import ABCMeta, abstractmethod + + +class AbstractSearchAlgorithm(object): + """Abstract class defining common API for search algorithms.""" + + __metaclass__ = ABCMeta + + def __init__(self, measurer): + """Store the rate provider. + + :param measurer: Object able to perform trial or composite measurements. + :type measurer: AbstractMeasurer.AbstractMeasurer + """ + # TODO: Type check for AbstractMeasurer? + self.measurer = measurer + + @abstractmethod + def narrow_down_ndr_and_pdr( + self, fail_rate, line_rate, packet_loss_ratio): + """Perform measurements to narrow down intervals, return them. + + This will be renamed when custom loss ratio lists are supported. + + :param fail_rate: Minimal target transmit rate [pps]. + :param line_rate: Maximal target transmit rate [pps]. + :param packet_loss_ratio: Fraction of packets lost, for PDR [1]. + :type fail_rate: float + :type line_rate: float + :type packet_loss_ratio: float + :returns: Structure containing narrowed down intervals + and their measurements. 
+ :rtype: NdrPdrResult.NdrPdrResult + """ + # TODO: Do we agree on arguments related to precision or trial duration? diff --git a/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py b/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py new file mode 100644 index 000000000..582e3dc27 --- /dev/null +++ b/yardstick/network_services/helpers/vpp_helpers/multiple_loss_ratio_search.py @@ -0,0 +1,688 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This is a modified copy of +# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/MultipleLossRatioSearch.py;hb=HEAD + +import datetime +import logging +import math +import time + +from yardstick.network_services.helpers.vpp_helpers.abstract_search_algorithm import \ + AbstractSearchAlgorithm +from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \ + NdrPdrResult +from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \ + ReceiveRateInterval +from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \ + ReceiveRateMeasurement + +LOGGING = logging.getLogger(__name__) + + +class MultipleLossRatioSearch(AbstractSearchAlgorithm): + """Optimized binary search algorithm for finding NDR and PDR bounds. 
+ + Traditional binary search algorithm needs initial interval + (lower and upper bound), and returns final interval after bisecting + (until some exit condition is met). + The exit condition is usually related to the interval width, + (upper bound value minus lower bound value). + + The optimized algorithm contains several improvements + aimed to reduce overall search time. + + One improvement is searching for two intervals at once. + The intervals are for NDR (No Drop Rate) and PDR (Partial Drop Rate). + + Next improvement is that the initial interval does not need to be valid. + Imagine initial interval (10, 11) where 11 is smaller + than the searched value. + The algorithm will try (11, 13) interval next, and if 13 is still smaller, + (13, 17) and so on, doubling width until the upper bound is valid. + The part when interval expands is called external search, + the part when interval is bisected is called internal search. + + Next improvement is that trial measurements at small trial duration + can be used to find a reasonable interval for full trial duration search. + This results in more trials performed, but smaller overall duration + in general. + + Next improvement is bisecting in logarithmic quantities, + so that exit criteria can be independent of measurement units. + + Next improvement is basing the initial interval on receive rates. + + Final improvement is exiting early if the minimal value + is not a valid lower bound. + + The complete search consist of several phases, + each phase performing several trial measurements. + Initial phase creates initial interval based on receive rates + at maximum rate and at maximum receive rate (MRR). + Final phase and preceding intermediate phases are performing + external and internal search steps, + each resulting interval is the starting point for the next phase. + The resulting interval of final phase is the result of the whole algorithm. + + Each non-initial phase uses its own trial duration and width goal. 
+ Any non-initial phase stops searching (for NDR or PDR independently) + when minimum is not a valid lower bound (at current duration), + or all of the following is true: + Both bounds are valid, bound bounds are measured at the current phase + trial duration, interval width is less than the width goal + for current phase. + + TODO: Review and update this docstring according to rst docs. + TODO: Support configurable number of Packet Loss Ratios. + """ + + class ProgressState(object): + """Structure containing data to be passed around in recursion.""" + + def __init__( + self, result, phases, duration, width_goal, packet_loss_ratio, + minimum_transmit_rate, maximum_transmit_rate): + """Convert and store the argument values. + + :param result: Current measured NDR and PDR intervals. + :param phases: How many intermediate phases to perform + before the current one. + :param duration: Trial duration to use in the current phase [s]. + :param width_goal: The goal relative width for the curreent phase. + :param packet_loss_ratio: PDR fraction for the current search. + :param minimum_transmit_rate: Minimum target transmit rate + for the current search [pps]. + :param maximum_transmit_rate: Maximum target transmit rate + for the current search [pps]. 
+ :type result: NdrPdrResult.NdrPdrResult + :type phases: int + :type duration: float + :type width_goal: float + :type packet_loss_ratio: float + :type minimum_transmit_rate: float + :type maximum_transmit_rate: float + """ + self.result = result + self.phases = int(phases) + self.duration = float(duration) + self.width_goal = float(width_goal) + self.packet_loss_ratio = float(packet_loss_ratio) + self.minimum_transmit_rate = float(minimum_transmit_rate) + self.maximum_transmit_rate = float(maximum_transmit_rate) + + def __init__(self, measurer, latency=False, pkt_size=64, + final_relative_width=0.005, + final_trial_duration=30.0, initial_trial_duration=1.0, + number_of_intermediate_phases=2, timeout=600.0, doublings=1): + """Store the measurer object and additional arguments. + + :param measurer: Rate provider to use by this search object. + :param final_relative_width: Final lower bound transmit rate + cannot be more distant that this multiple of upper bound [1]. + :param final_trial_duration: Trial duration for the final phase [s]. + :param initial_trial_duration: Trial duration for the initial phase + and also for the first intermediate phase [s]. + :param number_of_intermediate_phases: Number of intermediate phases + to perform before the final phase [1]. + :param timeout: The search will fail itself when not finished + before this overall time [s]. + :param doublings: How many doublings to do in external search step. + Default 1 is suitable for fairly stable tests, + less stable tests might get better overal duration with 2 or more. 
+ :type measurer: AbstractMeasurer.AbstractMeasurer + :type final_relative_width: float + :type final_trial_duration: float + :type initial_trial_duration: int + :type number_of_intermediate_phases: int + :type timeout: float + :type doublings: int + """ + super(MultipleLossRatioSearch, self).__init__(measurer) + self.latency = latency + self.pkt_size = int(pkt_size) + self.final_trial_duration = float(final_trial_duration) + self.final_relative_width = float(final_relative_width) + self.number_of_intermediate_phases = int(number_of_intermediate_phases) + self.initial_trial_duration = float(initial_trial_duration) + self.timeout = float(timeout) + self.doublings = int(doublings) + + self.queue = None + self.port_pg_id = None + self.ports = [] + self.test_data = {} + self.profiles = {} + + @staticmethod + def double_relative_width(relative_width): + """Return relative width corresponding to double logarithmic width. + + :param relative_width: The base relative width to double. + :type relative_width: float + :returns: The relative width of double logarithmic size. + :rtype: float + """ + return 1.999 * relative_width - relative_width * relative_width + # The number should be 2.0, but we want to avoid rounding errors, + # and ensure half of double is not larger than the original value. + + @staticmethod + def double_step_down(relative_width, current_bound): + """Return rate of double logarithmic width below. + + :param relative_width: The base relative width to double. + :param current_bound: The current target transmit rate to move [pps]. + :type relative_width: float + :type current_bound: float + :returns: Transmit rate smaller by logarithmically double width [pps]. + :rtype: float + """ + return current_bound * ( + 1.0 - MultipleLossRatioSearch.double_relative_width( + relative_width)) + + @staticmethod + def expand_down(relative_width, doublings, current_bound): + """Return rate of expanded logarithmic width below. 
+ + :param relative_width: The base relative width to double. + :param doublings: How many doublings to do for expansion. + :param current_bound: The current target transmit rate to move [pps]. + :type relative_width: float + :type doublings: int + :type current_bound: float + :returns: Transmit rate smaller by logarithmically double width [pps]. + :rtype: float + """ + for _ in range(doublings): + relative_width = MultipleLossRatioSearch.double_relative_width( + relative_width) + return current_bound * (1.0 - relative_width) + + @staticmethod + def double_step_up(relative_width, current_bound): + """Return rate of double logarithmic width above. + + :param relative_width: The base relative width to double. + :param current_bound: The current target transmit rate to move [pps]. + :type relative_width: float + :type current_bound: float + :returns: Transmit rate larger by logarithmically double width [pps]. + :rtype: float + """ + return current_bound / ( + 1.0 - MultipleLossRatioSearch.double_relative_width( + relative_width)) + + @staticmethod + def expand_up(relative_width, doublings, current_bound): + """Return rate of expanded logarithmic width above. + + :param relative_width: The base relative width to double. + :param doublings: How many doublings to do for expansion. + :param current_bound: The current target transmit rate to move [pps]. + :type relative_width: float + :type doublings: int + :type current_bound: float + :returns: Transmit rate smaller by logarithmically double width [pps]. + :rtype: float + """ + for _ in range(doublings): + relative_width = MultipleLossRatioSearch.double_relative_width( + relative_width) + return current_bound / (1.0 - relative_width) + + @staticmethod + def half_relative_width(relative_width): + """Return relative width corresponding to half logarithmic width. + + :param relative_width: The base relative width to halve. + :type relative_width: float + :returns: The relative width of half logarithmic size. 
+ :rtype: float + """ + return 1.0 - math.sqrt(1.0 - relative_width) + + @staticmethod + def half_step_up(relative_width, current_bound): + """Return rate of half logarithmic width above. + + :param relative_width: The base relative width to halve. + :param current_bound: The current target transmit rate to move [pps]. + :type relative_width: float + :type current_bound: float + :returns: Transmit rate larger by logarithmically half width [pps]. + :rtype: float + """ + return current_bound / ( + 1.0 - MultipleLossRatioSearch.half_relative_width( + relative_width)) + + def init_generator(self, ports, port_pg_id, profiles, test_data, queue): + self.ports = ports + self.port_pg_id = port_pg_id + self.profiles = profiles + self.test_data = test_data + self.queue = queue + self.queue.cancel_join_thread() + + def collect_kpi(self, stats, test_value): + samples = self.measurer.generate_samples(stats, self.ports, + self.port_pg_id, self.latency) + samples.update(self.test_data) + LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(), + test_value, samples) + self.queue.put(samples) + + def narrow_down_ndr_and_pdr( + self, minimum_transmit_rate, maximum_transmit_rate, + packet_loss_ratio): + """Perform initial phase, create state object, proceed with next phases. + + :param minimum_transmit_rate: Minimal target transmit rate [pps]. + :param maximum_transmit_rate: Maximal target transmit rate [pps]. + :param packet_loss_ratio: Fraction of packets lost, for PDR [1]. + :type minimum_transmit_rate: float + :type maximum_transmit_rate: float + :type packet_loss_ratio: float + :returns: Structure containing narrowed down intervals + and their measurements. + :rtype: NdrPdrResult.NdrPdrResult + :raises RuntimeError: If total duration is larger than timeout. 
+ """ + minimum_transmit_rate = float(minimum_transmit_rate) + maximum_transmit_rate = float(maximum_transmit_rate) + packet_loss_ratio = float(packet_loss_ratio) + line_measurement = self.measure( + self.initial_trial_duration, maximum_transmit_rate, self.latency) + initial_width_goal = self.final_relative_width + for _ in range(self.number_of_intermediate_phases): + initial_width_goal = self.double_relative_width(initial_width_goal) + max_lo = maximum_transmit_rate * (1.0 - initial_width_goal) + mrr = max( + minimum_transmit_rate, + min(max_lo, line_measurement.receive_rate)) + mrr_measurement = self.measure( + self.initial_trial_duration, mrr, self.latency) + # Attempt to get narrower width. + if mrr_measurement.loss_fraction > 0.0: + max2_lo = mrr * (1.0 - initial_width_goal) + mrr2 = min(max2_lo, mrr_measurement.receive_rate) + else: + mrr2 = mrr / (1.0 - initial_width_goal) + if mrr2 > minimum_transmit_rate and mrr2 < maximum_transmit_rate: + line_measurement = mrr_measurement + mrr_measurement = self.measure( + self.initial_trial_duration, mrr2, self.latency) + if mrr2 > mrr: + buf = line_measurement + line_measurement = mrr_measurement + mrr_measurement = buf + starting_interval = ReceiveRateInterval( + mrr_measurement, line_measurement) + starting_result = NdrPdrResult(starting_interval, starting_interval) + state = self.ProgressState( + starting_result, self.number_of_intermediate_phases, + self.final_trial_duration, self.final_relative_width, + packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate) + state = self.ndrpdr(state) + result = state.result + # theor_max_thruput = 0 + result_samples = {} + + MultipleLossRatioSearch.display_single_bound(result_samples, + 'NDR_LOWER', result.ndr_interval.measured_low.transmit_rate, + self.pkt_size, result.ndr_interval.measured_low.latency) + MultipleLossRatioSearch.display_single_bound(result_samples, + 'NDR_UPPER', result.ndr_interval.measured_high.transmit_rate, + self.pkt_size) + 
MultipleLossRatioSearch.display_single_bound(result_samples, + 'PDR_LOWER', result.pdr_interval.measured_low.transmit_rate, + self.pkt_size, result.pdr_interval.measured_low.latency) + MultipleLossRatioSearch.display_single_bound(result_samples, + 'PDR_UPPER', result.pdr_interval.measured_high.transmit_rate, + self.pkt_size) + pdr_msg = self.check_ndrpdr_interval_validity(result_samples, "PDR", + result.pdr_interval, + packet_loss_ratio) + ndr_msg = self.check_ndrpdr_interval_validity(result_samples, "NDR", + result.ndr_interval) + self.queue.put(result_samples) + + LOGGING.debug("result_samples: %s", result_samples) + LOGGING.info(pdr_msg) + LOGGING.info(ndr_msg) + + self.perform_additional_measurements_based_on_ndrpdr_result(result) + + return result_samples + + def _measure_and_update_state(self, state, transmit_rate): + """Perform trial measurement, update bounds, return new state. + + :param state: State before this measurement. + :param transmit_rate: Target transmit rate for this measurement [pps]. + :type state: ProgressState + :type transmit_rate: float + :returns: State after the measurement. + :rtype: ProgressState + """ + # TODO: Implement https://stackoverflow.com/a/24683360 + # to avoid the string manipulation if log verbosity is too low. + LOGGING.info("result before update: %s", state.result) + LOGGING.debug( + "relative widths in goals: %s", state.result.width_in_goals( + self.final_relative_width)) + measurement = self.measure(state.duration, transmit_rate, self.latency) + ndr_interval = self._new_interval( + state.result.ndr_interval, measurement, 0.0) + pdr_interval = self._new_interval( + state.result.pdr_interval, measurement, state.packet_loss_ratio) + state.result = NdrPdrResult(ndr_interval, pdr_interval) + return state + + @staticmethod + def _new_interval(old_interval, measurement, packet_loss_ratio): + """Return new interval with bounds updated according to the measurement. 
+ + :param old_interval: The current interval before the measurement. + :param measurement: The new meaqsurement to take into account. + :param packet_loss_ratio: Fraction for PDR (or zero for NDR). + :type old_interval: ReceiveRateInterval.ReceiveRateInterval + :type measurement: ReceiveRateMeasurement.ReceiveRateMeasurement + :type packet_loss_ratio: float + :returns: The updated interval. + :rtype: ReceiveRateInterval.ReceiveRateInterval + """ + old_lo, old_hi = old_interval.measured_low, old_interval.measured_high + # Priority zero: direct replace if the target Tr is the same. + if measurement.target_tr in (old_lo.target_tr, old_hi.target_tr): + if measurement.target_tr == old_lo.target_tr: + return ReceiveRateInterval(measurement, old_hi) + else: + return ReceiveRateInterval(old_lo, measurement) + # Priority one: invalid lower bound allows only one type of update. + if old_lo.loss_fraction > packet_loss_ratio: + # We can only expand down, old bound becomes valid upper one. + if measurement.target_tr < old_lo.target_tr: + return ReceiveRateInterval(measurement, old_lo) + else: + return old_interval + # Lower bound is now valid. + # Next priorities depend on target Tr. + if measurement.target_tr < old_lo.target_tr: + # Lower external measurement, relevant only + # if the new measurement has high loss rate. + if measurement.loss_fraction > packet_loss_ratio: + # Returning the broader interval as old_lo + # would be invalid upper bound. + return ReceiveRateInterval(measurement, old_hi) + elif measurement.target_tr > old_hi.target_tr: + # Upper external measurement, only relevant for invalid upper bound. + if old_hi.loss_fraction <= packet_loss_ratio: + # Old upper bound becomes valid new lower bound. + return ReceiveRateInterval(old_hi, measurement) + else: + # Internal measurement, replaced boundary + # depends on measured loss fraction. 
+ if measurement.loss_fraction > packet_loss_ratio: + # We have found a narrow valid interval, + # regardless of whether old upper bound was valid. + return ReceiveRateInterval(old_lo, measurement) + else: + # In ideal world, we would not want to shrink interval + # if upper bound is not valid. + # In the real world, we want to shrink it for + # "invalid upper bound at maximal rate" case. + return ReceiveRateInterval(measurement, old_hi) + # Fallback, the interval is unchanged by the measurement. + return old_interval + + def ndrpdr(self, state): + """Pefrom trials for this phase. Return the new state when done. + + :param state: State before this phase. + :type state: ProgressState + :returns: The updated state. + :rtype: ProgressState + :raises RuntimeError: If total duration is larger than timeout. + """ + start_time = time.time() + if state.phases > 0: + # We need to finish preceding intermediate phases first. + saved_phases = state.phases + state.phases -= 1 + # Preceding phases have shorter duration. + saved_duration = state.duration + duration_multiplier = state.duration / self.initial_trial_duration + phase_exponent = float(state.phases) / saved_phases + state.duration = self.initial_trial_duration * math.pow( + duration_multiplier, phase_exponent) + # Shorter durations do not need that narrow widths. + saved_width = state.width_goal + state.width_goal = self.double_relative_width(state.width_goal) + # Recurse. + state = self.ndrpdr(state) + # Restore the state for current phase. + state.duration = saved_duration + state.width_goal = saved_width + state.phases = saved_phases # Not needed, but just in case. + LOGGING.info( + "starting iterations with duration %s and relative width goal %s", + state.duration, state.width_goal) + while 1: + if time.time() > start_time + self.timeout: + raise RuntimeError("Optimized search takes too long.") + # Order of priorities: invalid bounds (nl, pl, nh, ph), + # then narrowing relative Tr widths. 
+ # Durations are not priorities yet, + # they will settle on their own hopefully. + ndr_lo = state.result.ndr_interval.measured_low + ndr_hi = state.result.ndr_interval.measured_high + pdr_lo = state.result.pdr_interval.measured_low + pdr_hi = state.result.pdr_interval.measured_high + ndr_rel_width = max( + state.width_goal, state.result.ndr_interval.rel_tr_width) + pdr_rel_width = max( + state.width_goal, state.result.pdr_interval.rel_tr_width) + # If we are hitting maximal or minimal rate, we cannot shift, + # but we can re-measure. + if ndr_lo.loss_fraction > 0.0: + if ndr_lo.target_tr > state.minimum_transmit_rate: + new_tr = max( + state.minimum_transmit_rate, + self.expand_down( + ndr_rel_width, self.doublings, ndr_lo.target_tr)) + LOGGING.info("ndr lo external %s", new_tr) + state = self._measure_and_update_state(state, new_tr) + continue + elif ndr_lo.duration < state.duration: + LOGGING.info("ndr lo minimal re-measure") + state = self._measure_and_update_state( + state, state.minimum_transmit_rate) + continue + if pdr_lo.loss_fraction > state.packet_loss_ratio: + if pdr_lo.target_tr > state.minimum_transmit_rate: + new_tr = max( + state.minimum_transmit_rate, + self.expand_down( + pdr_rel_width, self.doublings, pdr_lo.target_tr)) + LOGGING.info("pdr lo external %s", new_tr) + state = self._measure_and_update_state(state, new_tr) + continue + elif pdr_lo.duration < state.duration: + LOGGING.info("pdr lo minimal re-measure") + state = self._measure_and_update_state( + state, state.minimum_transmit_rate) + continue + if ndr_hi.loss_fraction <= 0.0: + if ndr_hi.target_tr < state.maximum_transmit_rate: + new_tr = min( + state.maximum_transmit_rate, + self.expand_up( + ndr_rel_width, self.doublings, ndr_hi.target_tr)) + LOGGING.info("ndr hi external %s", new_tr) + state = self._measure_and_update_state(state, new_tr) + continue + elif ndr_hi.duration < state.duration: + LOGGING.info("ndr hi maximal re-measure") + state = self._measure_and_update_state( + state, 
state.maximum_transmit_rate) + continue + if pdr_hi.loss_fraction <= state.packet_loss_ratio: + if pdr_hi.target_tr < state.maximum_transmit_rate: + new_tr = min( + state.maximum_transmit_rate, + self.expand_up( + pdr_rel_width, self.doublings, pdr_hi.target_tr)) + LOGGING.info("pdr hi external %s", new_tr) + state = self._measure_and_update_state(state, new_tr) + continue + elif pdr_hi.duration < state.duration: + LOGGING.info("ndr hi maximal re-measure") + state = self._measure_and_update_state( + state, state.maximum_transmit_rate) + continue + # If we are hitting maximum_transmit_rate, + # it is still worth narrowing width, + # hoping large enough loss fraction will happen. + # But if we are hitting the minimal rate (at current duration), + # no additional measurement will help with that, + # so we can stop narrowing in this phase. + if (ndr_lo.target_tr <= state.minimum_transmit_rate + and ndr_lo.loss_fraction > 0.0): + ndr_rel_width = 0.0 + if (pdr_lo.target_tr <= state.minimum_transmit_rate + and pdr_lo.loss_fraction > state.packet_loss_ratio): + pdr_rel_width = 0.0 + if ndr_rel_width > state.width_goal: + # We have to narrow NDR width first, as NDR internal search + # can invalidate PDR (but not vice versa). + new_tr = self.half_step_up(ndr_rel_width, ndr_lo.target_tr) + LOGGING.info("Bisecting for NDR at %s", new_tr) + state = self._measure_and_update_state(state, new_tr) + continue + if pdr_rel_width > state.width_goal: + # PDR iternal search. + new_tr = self.half_step_up(pdr_rel_width, pdr_lo.target_tr) + LOGGING.info("Bisecting for PDR at %s", new_tr) + state = self._measure_and_update_state(state, new_tr) + continue + # We do not need to improve width, but there still might be + # some measurements with smaller duration. + # We need to re-measure with full duration, possibly + # creating invalid bounds to resolve (thus broadening width). 
+ if ndr_lo.duration < state.duration: + LOGGING.info("re-measuring NDR lower bound") + state = self._measure_and_update_state(state, ndr_lo.target_tr) + continue + if pdr_lo.duration < state.duration: + LOGGING.info("re-measuring PDR lower bound") + state = self._measure_and_update_state(state, pdr_lo.target_tr) + continue + # Except when lower bounds have high loss fraction, in that case + # we do not need to re-measure _upper_ bounds. + if ndr_hi.duration < state.duration and ndr_rel_width > 0.0: + LOGGING.info("re-measuring NDR upper bound") + state = self._measure_and_update_state(state, ndr_hi.target_tr) + continue + if pdr_hi.duration < state.duration and pdr_rel_width > 0.0: + LOGGING.info("re-measuring PDR upper bound") + state = self._measure_and_update_state(state, pdr_hi.target_tr) + continue + # Widths are narrow (or lower bound minimal), bound measurements + # are long enough, we can return. + LOGGING.info("phase done") + break + return state + + def measure(self, duration, transmit_rate, latency): + duration = float(duration) + transmit_rate = float(transmit_rate) + # Trex needs target Tr per stream, but reports aggregate Tx and Dx. 
+ unit_rate = str(transmit_rate / 2.0) + "pps" + stats = self.measurer.send_traffic_on_tg(self.ports, self.port_pg_id, + duration, unit_rate, + latency=latency) + self.measurer.client.reset(ports=self.ports) + self.measurer.client.clear_stats(ports=self.ports) + self.measurer.client.remove_all_streams(ports=self.ports) + for port, profile in self.profiles.items(): + self.measurer.client.add_streams(profile, ports=[port]) + self.collect_kpi(stats, unit_rate) + transmit_count = int(self.measurer.sent) + loss_count = int(self.measurer.loss) + measurement = ReceiveRateMeasurement( + duration, transmit_rate, transmit_count, loss_count) + measurement.latency = self.measurer.latency + return measurement + + def perform_additional_measurements_based_on_ndrpdr_result(self, result): + duration = 5.0 + rate = "{}{}".format(result.ndr_interval.measured_low.target_tr / 2.0, + 'pps') + for _ in range(0, 1): + stats = self.measurer.send_traffic_on_tg(self.ports, + self.port_pg_id, duration, + rate) + self.collect_kpi(stats, rate) + LOGGING.info('Traffic loss occurred: %s', self.measurer.loss) + + @staticmethod + def display_single_bound(result_samples, result_type, rate_total, pkt_size, + latency=None): + bandwidth_total = float(rate_total) * (pkt_size + 20) * 8 / (10 ** 9) + + result_samples["Result_{}".format(result_type)] = { + "rate_total_pps": float(rate_total), + "bandwidth_total_Gbps": float(bandwidth_total), + } + + if latency: + for item in latency: + if latency.index(item) == 0: + name = "Result_{}_{}".format("stream0", result_type) + else: + name = "Result_{}_{}".format("stream1", result_type) + lat_min, lat_avg, lat_max = item.split('/') + result_samples[name] = { + "min_latency": float(lat_min), + "avg_latency": float(lat_avg), + "max_latency": float(lat_max), + } + + @staticmethod + def check_ndrpdr_interval_validity(result_samples, result_type, interval, + packet_loss_ratio=0.0): + lower_bound = interval.measured_low + lower_bound_lf = lower_bound.loss_fraction + + 
# Copyright (c) 2019 Viosoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a modified copy of
# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/NdrPdrResult.py;hb=HEAD

from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \
    ReceiveRateInterval


class NdrPdrResult(object):
    """Two measurement intervals, return value of search algorithms.

    Partial fraction is NOT part of the result. Pdr interval should be valid
    for all partial fractions implied by the interval."""

    def __init__(self, ndr_interval, pdr_interval):
        """Store the measured intervals after checking argument types.

        :param ndr_interval: Object containing data for NDR part of the result.
        :param pdr_interval: Object containing data for PDR part of the result.
        :type ndr_interval: ReceiveRateInterval.ReceiveRateInterval
        :type pdr_interval: ReceiveRateInterval.ReceiveRateInterval
        :raises TypeError: If either argument is not a ReceiveRateInterval.
        """
        # Validate both arguments the same way before storing them.
        for label, candidate in (("ndr", ndr_interval), ("pdr", pdr_interval)):
            if not isinstance(candidate, ReceiveRateInterval):
                raise TypeError(
                    "{lbl}_interval, is not a ReceiveRateInterval: "
                    "{val!r}".format(lbl=label, val=candidate))
        self.ndr_interval = ndr_interval
        self.pdr_interval = pdr_interval

    def width_in_goals(self, relative_width_goal):
        """Return a debug string related to current widths in logarithmic scale.

        :param relative_width_goal: Upper bound times this is the goal
            difference between upper bound and lower bound.
        :type relative_width_goal: float
        :returns: Message containing NDR and PDR widths in goals.
        :rtype: str
        """
        ndr_goals = self.ndr_interval.width_in_goals(relative_width_goal)
        pdr_goals = self.pdr_interval.width_in_goals(relative_width_goal)
        return "ndr {ndr_in_goals}; pdr {pdr_in_goals}".format(
            ndr_in_goals=ndr_goals, pdr_in_goals=pdr_goals)

    def __str__(self):
        """Return string as tuple of named values."""
        return "NDR=%s;PDR=%s" % (self.ndr_interval, self.pdr_interval)

    def __repr__(self):
        """Return string evaluable as a constructor call."""
        return "NdrPdrResult(ndr_interval=%r,pdr_interval=%r)" % (
            self.ndr_interval, self.pdr_interval)
# Copyright (c) 2019 Viosoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a modified copy of
# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/ReceiveRateInterval.py;hb=HEAD

import math

from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \
    ReceiveRateMeasurement


class ReceiveRateInterval(object):
    """Structure defining two Rr measurements, and their relation."""

    def __init__(self, measured_low, measured_high):
        """Store the bound measurements after checking argument types.

        :param measured_low: Measurement for the lower bound.
        :param measured_high: Measurement for the upper bound.
        :type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
        :type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
        :raises TypeError: If either bound is not a ReceiveRateMeasurement.
        """
        # Validate both bounds with one loop instead of two if-blocks.
        for label, bound in (("measured_low", measured_low),
                             ("measured_high", measured_high)):
            if not isinstance(bound, ReceiveRateMeasurement):
                raise TypeError(
                    "{lbl} is not a ReceiveRateMeasurement: "
                    "{val!r}".format(lbl=label, val=bound))
        self.measured_low = measured_low
        self.measured_high = measured_high
        # Secondary quantities, filled in by sort().
        self.abs_tr_width = None
        # Relative width of target transmit rate: absolute divided by upper.
        self.rel_tr_width = None
        self.sort()

    def sort(self):
        """Sort bounds by target Tr, compute secondary quantities."""
        low, high = self.measured_low, self.measured_high
        if low.target_tr > high.target_tr:
            # Swap so that measured_low always has the smaller target Tr.
            low, high = high, low
            self.measured_low, self.measured_high = low, high
        self.abs_tr_width = high.target_tr - low.target_tr
        self.rel_tr_width = round(self.abs_tr_width / high.target_tr, 5)

    def width_in_goals(self, relative_width_goal):
        """Return float value.

        Relative width goal is some (negative) value on logarithmic scale.
        Current relative width is another logarithmic value.
        Return the latter divided by the former.
        This is useful when investigating how did surprising widths come to be.

        :param relative_width_goal: Upper bound times this is the goal
            difference between upper bound and lower bound.
        :type relative_width_goal: float
        :returns: Current width as logarithmic multiple of goal width [1].
        :rtype: float
        """
        current = math.log(1.0 - self.rel_tr_width)
        goal = math.log(1.0 - relative_width_goal)
        return round(current / goal, 5)

    def __str__(self):
        """Return string as half-open interval."""
        return "[%s;%s)" % (self.measured_low, self.measured_high)

    def __repr__(self):
        """Return string evaluable as a constructor call."""
        return "ReceiveRateInterval(measured_low=%r,measured_high=%r)" % (
            self.measured_low, self.measured_high)
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a modified copy of
# https://gerrit.fd.io/r/gitweb?p=csit.git;a=blob_plain;f=resources/libraries/python/MLRsearch/ReceiveRateMeasurement.py;hb=HEAD


class ReceiveRateMeasurement(object):
    """Structure defining the result of single Rr measurement."""

    def __init__(self, duration, target_tr, transmit_count, loss_count):
        """Constructor, normalize primary and compute secondary quantities.

        :param duration: Measurement duration [s].
        :param target_tr: Target transmit rate [pps].
            If bidirectional traffic is measured, this is bidirectional rate.
        :param transmit_count: Number of packets transmitted [1].
        :param loss_count: Number of packets transmitted but not received [1].
        :type duration: float
        :type target_tr: float
        :type transmit_count: int
        :type loss_count: int
        """
        # Normalized primary quantities.
        self.duration = float(duration)
        self.target_tr = float(target_tr)
        self.transmit_count = int(transmit_count)
        self.loss_count = int(loss_count)
        # Derived quantities; note the raw (un-normalized) arguments are
        # used for the counts/rates, matching the upstream CSIT code.
        seconds = self.duration
        self.receive_count = round(transmit_count - loss_count, 5)
        self.transmit_rate = round(transmit_count / seconds, 5)
        self.loss_rate = round(loss_count / seconds, 5)
        self.receive_rate = round(self.receive_count / seconds, 5)
        self.loss_fraction = round(
            float(self.loss_count) / self.transmit_count, 5)
        # TODO: Do we want to store also the real time (duration + overhead)?

    def __str__(self):
        """Return string reporting input and loss fraction."""
        return "d=%s,Tr=%s,Df=%s" % (
            self.duration, self.target_tr, self.loss_fraction)

    def __repr__(self):
        """Return string evaluable as a constructor call."""
        return ("ReceiveRateMeasurement(duration=%r,target_tr=%r"
                ",transmit_count=%r,loss_count=%r)" % (
                    self.duration, self.target_tr,
                    self.transmit_count, self.loss_count))
@@ -14,6 +14,8 @@ import ipaddress import logging +import re +import collections import IxNetwork @@ -41,28 +43,59 @@ C_VLAN = 1 ETHER_TYPE_802_1ad = '0x88a8' -IP_VERSION_4_MASK = 24 -IP_VERSION_6_MASK = 64 - TRAFFIC_STATUS_STARTED = 'started' TRAFFIC_STATUS_STOPPED = 'stopped' +PROTOCOL_STATUS_UP = 'up' +PROTOCOL_STATUS_DOWN = ['down', 'notStarted'] + SUPPORTED_PROTO = [PROTO_UDP] +SUPPORTED_DSCP_CLASSES = [ + 'defaultPHB', + 'classSelectorPHB', + 'assuredForwardingPHB', + 'expeditedForwardingPHB'] + +SUPPORTED_TOS_FIELDS = [ + 'precedence', + 'delay', + 'throughput', + 'reliability' +] + +IP_PRIORITY_PATTERN = r'[^\w+]*.+(Raw priority|' \ + 'Precedence|' \ + 'Default PHB|' \ + 'Class selector PHB|' \ + 'Assured forwarding selector PHB|' \ + 'Expedited forwarding PHB)' + + +class Vlan(object): + def __init__(self, + vlan_id, vlan_id_step=None, vlan_id_direction='increment', + prio=None, prio_step=None, prio_direction='increment', + tp_id=None): + self.vlan_id = vlan_id + self.vlan_id_step = vlan_id_step + self.vlan_id_direction = vlan_id_direction + self.prio = prio + self.prio_step = prio_step + self.prio_direction = prio_direction + self.tp_id = tp_id + # NOTE(ralonsoh): this pragma will be removed in the last patch of this series class IxNextgen(object): # pragma: no cover PORT_STATS_NAME_MAP = { "stat_name": 'Stat Name', + "port_name": 'Port Name', "Frames_Tx": 'Frames Tx.', "Valid_Frames_Rx": 'Valid Frames Rx.', - "Frames_Tx_Rate": 'Frames Tx. Rate', - "Valid_Frames_Rx_Rate": 'Valid Frames Rx. Rate', - "Tx_Rate_Kbps": 'Tx. Rate (Kbps)', - "Rx_Rate_Kbps": 'Rx. Rate (Kbps)', - "Tx_Rate_Mbps": 'Tx. Rate (Mbps)', - "Rx_Rate_Mbps": 'Rx. Rate (Mbps)', + "Bytes_Tx": 'Bytes Tx.', + "Bytes_Rx": 'Bytes Rx.' 
} LATENCY_NAME_MAP = { @@ -71,6 +104,42 @@ class IxNextgen(object): # pragma: no cover "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)', } + FLOWS_STATS_NAME_MAP = { + "Tx_Port": 'Tx Port', + "VLAN-ID": 'VLAN:VLAN-ID', + "IP_Priority": re.compile(IP_PRIORITY_PATTERN), + "Flow_Group": 'Flow Group', + "Tx_Frames": 'Tx Frames', + "Rx_Frames": 'Rx Frames', + "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)', + "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)', + "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)' + } + + PPPOX_CLIENT_PER_PORT_NAME_MAP = { + 'subs_port': 'Port', + 'Sessions_Up': 'Sessions Up', + 'Sessions_Down': 'Sessions Down', + 'Sessions_Not_Started': 'Sessions Not Started', + 'Sessions_Total': 'Sessions Total' + } + + PORT_STATISTICS = '::ixNet::OBJ-/statistics/view:"Port Statistics"' + FLOW_STATISTICS = '::ixNet::OBJ-/statistics/view:"Flow Statistics"' + PPPOX_CLIENT_PER_PORT = '::ixNet::OBJ-/statistics/view:"PPPoX Client Per Port"' + + PPPOE_SCENARIO_STATS = { + 'port_statistics': PORT_STATISTICS, + 'flow_statistic': FLOW_STATISTICS, + 'pppox_client_per_port': PPPOX_CLIENT_PER_PORT + } + + PPPOE_SCENARIO_STATS_MAP = { + 'port_statistics': PORT_STATS_NAME_MAP, + 'flow_statistic': FLOWS_STATS_NAME_MAP, + 'pppox_client_per_port': PPPOX_CLIENT_PER_PORT_NAME_MAP + } + @staticmethod def get_config(tg_cfg): card = [] @@ -107,6 +176,14 @@ class IxNextgen(object): # pragma: no cover return self._ixnet raise exceptions.IxNetworkClientNotConnected() + def get_vports(self): + """Return the list of assigned ports (vport objects)""" + vports = self.ixnet.getList(self.ixnet.getRoot(), 'vport') + return vports + + def get_static_interface(self, vport): + return self.ixnet.getList(vport, 'interface') + def _get_config_element_by_flow_group_name(self, flow_group_name): """Get a config element using the flow group name @@ -161,6 +238,24 @@ class IxNextgen(object): # pragma: no cover return 
def _get_protocol_status(self, proto):
    """Get protocol status

    :param proto: IxNet protocol str representation, e.g.:
        '::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14'
    :return: (list) protocol status: list of sessions protocol
        statuses which include states 'up', 'down' and 'notStarted'
    """
    return self.ixnet.getAttribute(proto, '-sessionStatus')

def get_topology_device_groups(self, topology):
    """Get list of device groups in topology

    :param topology: (str) topology descriptor
    :return: (list) list of device groups descriptors
    """
    return self.ixnet.getList(topology, 'deviceGroup')

def is_traffic_running(self):
    """Returns true if traffic state == TRAFFIC_STATUS_STARTED"""
    return self._get_traffic_state() == TRAFFIC_STATUS_STARTED

def is_traffic_stopped(self):
    """Returns true if traffic state == TRAFFIC_STATUS_STOPPED"""
    return self._get_traffic_state() == TRAFFIC_STATUS_STOPPED

def is_protocols_running(self, protocols):
    """Returns true if all protocols statuses are PROTOCOL_STATUS_UP

    :param protocols: list of protocols str representations, e.g.:
        ['::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14', ...]
    :return: (bool) True if all protocols status is 'up', False if any
        protocol status is 'down' or 'notStarted'
    """
    # Use '==' (equality), not 'is' (identity): status strings returned
    # by the IxNetwork API are not guaranteed to be interned, so an
    # identity check can report False for a session that is really 'up'.
    return all(session_status == PROTOCOL_STATUS_UP for proto in protocols
               for session_status in self._get_protocol_status(proto))

def is_protocols_stopped(self, protocols):
    """Returns true if all protocols statuses are in PROTOCOL_STATUS_DOWN

    :param protocols: list of protocols str representations, e.g.:
        ['::ixNet::OBJ-/topology:2/deviceGroup:1/ethernet:1/ipv4:L14', ...]
    :return: (bool) True if all protocols status is 'down' or 'notStarted',
        False if any protocol status is 'up'
    """
    return all(session_status in PROTOCOL_STATUS_DOWN for proto in protocols
               for session_status in self._get_protocol_status(proto))
+ :return: (bool) True if all protocols status is 'down' or 'notStarted', + False if any protocol status is 'up' + """ + return all(session_status in PROTOCOL_STATUS_DOWN for proto in protocols + for session_status in self._get_protocol_status(proto)) + @staticmethod def _parse_framesize(framesize): """Parse "framesize" config param. to return a list of weighted pairs @@ -225,15 +342,20 @@ class IxNextgen(object): # pragma: no cover zip(self._cfg['cards'], self._cfg['ports'])] log.info('Create and assign vports: %s', ports) - for port in ports: - vport = self.ixnet.add(self.ixnet.getRoot(), 'vport') - self.ixnet.commit() - self.ixnet.execute('assignPorts', [port], [], [vport], True) + + vports = [] + for _ in ports: + vports.append(self.ixnet.add(self.ixnet.getRoot(), 'vport')) self.ixnet.commit() + + self.ixnet.execute('assignPorts', ports, [], vports, True) + self.ixnet.commit() + + for vport in vports: if self.ixnet.getAttribute(vport, '-state') != 'up': log.warning('Port %s is down', vport) - def _create_traffic_item(self): + def _create_traffic_item(self, traffic_type='raw'): """Create the traffic item to hold the flow groups The traffic item tracking by "Traffic Item" is enabled to retrieve the @@ -243,7 +365,7 @@ class IxNextgen(object): # pragma: no cover traffic_item = self.ixnet.add(self.ixnet.getRoot() + '/traffic', 'trafficItem') self.ixnet.setMultiAttribute(traffic_item, '-name', 'RFC2544', - '-trafficType', 'raw') + '-trafficType', traffic_type) self.ixnet.commit() traffic_item_id = self.ixnet.remapIds(traffic_item)[0] @@ -251,27 +373,25 @@ class IxNextgen(object): # pragma: no cover '-trackBy', 'trafficGroupId0') self.ixnet.commit() - def _create_flow_groups(self): - """Create the flow groups between the assigned ports""" + def _create_flow_groups(self, uplink, downlink): + """Create the flow groups between the endpoints""" traffic_item_id = self.ixnet.getList(self.ixnet.getRoot() + 'traffic', 'trafficItem')[0] log.info('Create the flow groups') - 
vports = self.ixnet.getList(self.ixnet.getRoot(), 'vport') - uplink_ports = vports[::2] - downlink_ports = vports[1::2] + index = 0 - for up, down in zip(uplink_ports, downlink_ports): + for up, down in zip(uplink, downlink): log.info('FGs: %s <--> %s', up, down) endpoint_set_1 = self.ixnet.add(traffic_item_id, 'endpointSet') endpoint_set_2 = self.ixnet.add(traffic_item_id, 'endpointSet') self.ixnet.setMultiAttribute( endpoint_set_1, '-name', str(index + 1), - '-sources', [up + '/protocols'], - '-destinations', [down + '/protocols']) + '-sources', [up], + '-destinations', [down]) self.ixnet.setMultiAttribute( endpoint_set_2, '-name', str(index + 2), - '-sources', [down + '/protocols'], - '-destinations', [up + '/protocols']) + '-sources', [down], + '-destinations', [up]) self.ixnet.commit() index += 2 @@ -281,7 +401,25 @@ class IxNextgen(object): # pragma: no cover '/traffic/protocolTemplate:"{}"'.format(protocol_name)) self.ixnet.execute('append', previous_element, protocol) - def _setup_config_elements(self): + def is_qinq(self, flow_data): + for traffic_type in flow_data: + if flow_data[traffic_type]['outer_l2'].get('QinQ'): + return True + return False + + def _flows_settings(self, cfg): + flows_data = [] + res = [key for key in cfg.keys() if key.split('_')[0] in ['uplink', 'downlink']] + for i in range(len(res)): + uplink = 'uplink_{}'.format(i) + downlink = 'downlink_{}'.format(i) + if uplink in res: + flows_data.append(cfg[uplink]) + if downlink in res: + flows_data.append(cfg[downlink]) + return flows_data + + def _setup_config_elements(self, traffic_profile, add_default_proto=True): """Setup the config elements The traffic item is configured to allow individual configurations per @@ -297,18 +435,26 @@ class IxNextgen(object): # pragma: no cover 'trafficItem')[0] log.info('Split the frame rate distribution per config element') config_elements = self.ixnet.getList(traffic_item_id, 'configElement') - for config_element in config_elements: + flows = 
self._flows_settings(traffic_profile.params) + # TODO: check length of both lists, it should be equal!!! + for config_element, flow_data in zip(config_elements, flows): self.ixnet.setAttribute(config_element + '/frameRateDistribution', '-portDistribution', 'splitRateEvenly') self.ixnet.setAttribute(config_element + '/frameRateDistribution', '-streamDistribution', 'splitRateEvenly') self.ixnet.commit() - self._append_procotol_to_stack( - PROTO_UDP, config_element + '/stack:"ethernet-1"') - self._append_procotol_to_stack( - PROTO_IPV4, config_element + '/stack:"ethernet-1"') + if add_default_proto: + self._append_procotol_to_stack( + PROTO_UDP, config_element + '/stack:"ethernet-1"') + self._append_procotol_to_stack( + PROTO_IPV4, config_element + '/stack:"ethernet-1"') + if self.is_qinq(flow_data): + self._append_procotol_to_stack( + PROTO_VLAN, config_element + '/stack:"ethernet-1"') + self._append_procotol_to_stack( + PROTO_VLAN, config_element + '/stack:"ethernet-1"') - def create_traffic_model(self): + def create_traffic_model(self, uplink_ports, downlink_ports, traffic_profile): """Create a traffic item and the needed flow groups Each flow group inside the traffic item (only one is present) @@ -319,9 +465,28 @@ class IxNextgen(object): # pragma: no cover FlowGroup3: port3 -> port4 FlowGroup4: port3 <- port4 """ - self._create_traffic_item() - self._create_flow_groups() - self._setup_config_elements() + self._create_traffic_item('raw') + uplink_endpoints = [port + '/protocols' for port in uplink_ports] + downlink_endpoints = [port + '/protocols' for port in downlink_ports] + self._create_flow_groups(uplink_endpoints, downlink_endpoints) + self._setup_config_elements(traffic_profile=traffic_profile) + + def create_ipv4_traffic_model(self, uplink_endpoints, downlink_endpoints, + traffic_profile): + """Create a traffic item and the needed flow groups + + Each flow group inside the traffic item (only one is present) + represents the traffic between two topologies: + 
(uplink) (downlink) + FlowGroup1: uplink1 -> downlink1 + FlowGroup2: uplink1 <- downlink1 + FlowGroup3: uplink2 -> downlink2 + FlowGroup4: uplink2 <- downlink2 + """ + self._create_traffic_item('ipv4') + self._create_flow_groups(uplink_endpoints, downlink_endpoints) + self._setup_config_elements(traffic_profile=traffic_profile, + add_default_proto=False) def _update_frame_mac(self, ethernet_descriptor, field, mac_address): """Set the MAC address in a config element stack Ethernet field @@ -366,16 +531,15 @@ class IxNextgen(object): # pragma: no cover raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id) type = traffic_param.get('traffic_type', 'fixedDuration') - rate = traffic_param['rate'] rate_unit = ( 'framesPerSecond' if traffic_param['rate_unit'] == tp_base.TrafficProfileConfig.RATE_FPS else 'percentLineRate') weighted_range_pairs = self._parse_framesize( - traffic_param['outer_l2']['framesize']) - srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01')) - dstmac = str(traffic_param.get('dstmac', '00:00:00:00:00:02')) + traffic_param['outer_l2'].get('framesize', {})) + srcmac = str(traffic_param['outer_l2'].get('srcmac', '00:00:00:00:00:01')) + dstmac = str(traffic_param['outer_l2'].get('dstmac', '00:00:00:00:00:02')) - if traffic_param['outer_l2']['QinQ']: + if traffic_param['outer_l2'].get('QinQ'): s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN'] c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN'] @@ -389,32 +553,33 @@ class IxNextgen(object): # pragma: no cover '-fieldValue', ETHER_TYPE_802_1ad, '-valueType', SINGLE_VALUE) - self._append_procotol_to_stack( - PROTO_VLAN, config_element + '/stack:"ethernet-1"') - self._append_procotol_to_stack( - PROTO_VLAN, config_element + '/stack:"ethernet-1"') - self._update_vlan_tag(fg_id, s_vlan, S_VLAN) self._update_vlan_tag(fg_id, c_vlan, C_VLAN) self.ixnet.setMultiAttribute( config_element + '/transmissionControl', '-type', type, '-duration', duration) + self.ixnet.setMultiAttribute( config_element + 
'/frameRate', - '-rate', rate, '-type', rate_unit) - self.ixnet.setMultiAttribute( - config_element + '/frameSize', - '-type', 'weightedPairs', - '-weightedRangePairs', weighted_range_pairs) + '-rate', traffic_param['rate'], '-type', rate_unit) + + if len(weighted_range_pairs): + self.ixnet.setMultiAttribute( + config_element + '/frameSize', + '-type', 'weightedPairs', + '-weightedRangePairs', weighted_range_pairs) + self.ixnet.commit() - self._update_frame_mac( - self._get_stack_item(fg_id, PROTO_ETHERNET)[0], - 'destinationAddress', dstmac) - self._update_frame_mac( - self._get_stack_item(fg_id, PROTO_ETHERNET)[0], - 'sourceAddress', srcmac) + if dstmac: + self._update_frame_mac( + self._get_stack_item(fg_id, PROTO_ETHERNET)[0], + 'destinationAddress', dstmac) + if srcmac: + self._update_frame_mac( + self._get_stack_item(fg_id, PROTO_ETHERNET)[0], + 'sourceAddress', srcmac) def _update_vlan_tag(self, fg_id, params, vlan=0): field_to_param_map = { @@ -474,20 +639,78 @@ class IxNextgen(object): # pragma: no cover if not self._get_config_element_by_flow_group_name(fg_id): raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id) - count = traffic_param['outer_l3']['count'] - srcip = str(traffic_param['outer_l3']['srcip']) - dstip = str(traffic_param['outer_l3']['dstip']) - srcseed = traffic_param['outer_l3']['srcseed'] - dstseed = traffic_param['outer_l3']['dstseed'] - srcmask = traffic_param['outer_l3']['srcmask'] or IP_VERSION_4_MASK - dstmask = traffic_param['outer_l3']['dstmask'] or IP_VERSION_4_MASK - - self._update_ipv4_address( - self._get_stack_item(fg_id, PROTO_IPV4)[0], - 'srcIp', srcip, srcseed, srcmask, count) - self._update_ipv4_address( - self._get_stack_item(fg_id, PROTO_IPV4)[0], - 'dstIp', dstip, dstseed, dstmask, count) + if traffic_param['outer_l3']: + count = traffic_param['outer_l3']['count'] + srcip = traffic_param['outer_l3']['srcip'] + dstip = traffic_param['outer_l3']['dstip'] + srcseed = traffic_param['outer_l3']['srcseed'] + dstseed = 
traffic_param['outer_l3']['dstseed'] + srcmask = traffic_param['outer_l3']['srcmask'] \ + or ipaddress.IPV4LENGTH + dstmask = traffic_param['outer_l3']['dstmask'] \ + or ipaddress.IPV4LENGTH + priority = traffic_param['outer_l3']['priority'] + + if srcip: + self._update_ipv4_address( + self._get_stack_item(fg_id, PROTO_IPV4)[0], + 'srcIp', str(srcip), srcseed, srcmask, count) + if dstip: + self._update_ipv4_address( + self._get_stack_item(fg_id, PROTO_IPV4)[0], + 'dstIp', str(dstip), dstseed, dstmask, count) + if priority: + self._update_ipv4_priority( + self._get_stack_item(fg_id, PROTO_IPV4)[0], priority) + + def _update_ipv4_priority(self, ip_descriptor, priority): + """Set the IPv4 priority in a config element stack IP field + + :param ip_descriptor: (str) IP descriptor, e.g.: + /traffic/trafficItem:1/configElement:1/stack:"ipv4-2" + :param priority: (dict) priority configuration from traffic profile, e.g.: + {'tos': + 'precedence': [1, 4, 7] + } + """ + if priority.get('raw'): + priority_field = self._get_field_in_stack_item(ip_descriptor, + 'priority.raw') + self._set_priority_field(priority_field, priority['raw']) + + elif priority.get('dscp'): + for field, value in priority['dscp'].items(): + if field in SUPPORTED_DSCP_CLASSES: + priority_field = self._get_field_in_stack_item( + ip_descriptor, + 'priority.ds.phb.{field}.{field}'.format(field=field)) + self._set_priority_field(priority_field, value) + + elif priority.get('tos'): + for field, value in priority['tos'].items(): + if field in SUPPORTED_TOS_FIELDS: + priority_field = self._get_field_in_stack_item( + ip_descriptor, 'priority.tos.' 
+ field) + self._set_priority_field(priority_field, value) + + def _set_priority_field(self, field_descriptor, value): + """Set the priority field described by field_descriptor + + :param field_descriptor: (str) field descriptor, e.g.: + /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/ \ + field:"ipv4.header.priority.raw-3 + :param value: (list, int) list of integers or single integer value + """ + if isinstance(value, list): + self.ixnet.setMultiAttribute(field_descriptor, + '-valueList', value, + '-activeFieldChoice', 'true', + '-valueType', 'valueList') + else: + self.ixnet.setMultiAttribute(field_descriptor, + '-activeFieldChoice', 'true', + '-singleValue', str(value)) + self.ixnet.commit() def update_l4(self, traffic): """Update the L4 headers @@ -501,7 +724,10 @@ class IxNextgen(object): # pragma: no cover if not self._get_config_element_by_flow_group_name(fg_id): raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id) - proto = traffic_param['outer_l3']['proto'] + proto = traffic_param['outer_l3'].get('proto') + if not (proto and traffic_param['outer_l4']): + continue + if proto not in SUPPORTED_PROTO: raise exceptions.IXIAUnsupportedProtocol(protocol=proto) @@ -514,12 +740,15 @@ class IxNextgen(object): # pragma: no cover dstport = traffic_param['outer_l4']['dstport'] dstmask = traffic_param['outer_l4']['dstportmask'] - if proto in SUPPORTED_PROTO: - self._update_udp_port(self._get_stack_item(fg_id, proto)[0], - 'srcPort', srcport, seed, srcmask, count) - - self._update_udp_port(self._get_stack_item(fg_id, proto)[0], - 'dstPort', dstport, seed, dstmask, count) + if proto == PROTO_UDP: + if srcport: + self._update_udp_port( + self._get_stack_item(fg_id, proto)[0], + 'srcPort', srcport, seed, srcmask, count) + if dstport: + self._update_udp_port( + self._get_stack_item(fg_id, proto)[0], + 'dstPort', dstport, seed, dstmask, count) def _update_udp_port(self, descriptor, field, value, seed=1, mask=0, count=1): @@ -553,6 +782,52 @@ class 
IxNextgen(object): # pragma: no cover 'getColumnValues', view_obj, data_ixia) for data_yardstick, data_ixia in name_map.items()} + def _get_view_page_stats(self, view_obj): + """Get full view page stats + + :param view_obj: view object, e.g. + '::ixNet::OBJ-/statistics/view:"Port Statistics"' + :return: (list) List of dicts. Each dict represents view page row + """ + view = view_obj + '/page' + column_headers = self.ixnet.getAttribute(view, '-columnCaptions') + view_rows = self.ixnet.getAttribute(view, '-rowValues') + view_page = [dict(zip(column_headers, row[0])) for row in view_rows] + return view_page + + def _set_egress_flow_tracking(self, encapsulation, offset): + """Set egress flow tracking options + + :param encapsulation: encapsulation type + :type encapsulation: str, e.g. 'Ethernet' + :param offset: offset type + :type offset: str, e.g. 'IPv4 TOS Precedence (3 bits)' + """ + traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic', + 'trafficItem')[0] + # Enable Egress Tracking + self.ixnet.setAttribute(traffic_item, '-egressEnabled', True) + self.ixnet.commit() + + # Set encapsulation type + enc_obj = self.ixnet.getList(traffic_item, 'egressTracking')[0] + self.ixnet.setAttribute(enc_obj, '-encapsulation', encapsulation) + + # Set offset + self.ixnet.setAttribute(enc_obj, '-offset', offset) + self.ixnet.commit() + + def set_flow_tracking(self, track_by): + """Set flow tracking options + + :param track_by: list of tracking fields + :type track_by: list, e.g. ['vlanVlanId0','ipv4Precedence0'] + """ + traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic', + 'trafficItem')[0] + self.ixnet.setAttribute(traffic_item + '/tracking', '-trackBy', track_by) + self.ixnet.commit() + def get_statistics(self): """Retrieve port and flow statistics @@ -562,14 +837,53 @@ class IxNextgen(object): # pragma: no cover :return: dictionary with the statistics; the keys of this dictionary are PORT_STATS_NAME_MAP and LATENCY_NAME_MAP keys. 
""" - port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"' - flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"' - stats = self._build_stats_map(port_statistics, + stats = self._build_stats_map(self.PORT_STATISTICS, self.PORT_STATS_NAME_MAP) - stats.update(self._build_stats_map(flow_statistics, - self.LATENCY_NAME_MAP)) + stats.update(self._build_stats_map(self.FLOW_STATISTICS, + self.LATENCY_NAME_MAP)) return stats + def get_pppoe_scenario_statistics(self): + """Retrieve port, flow and PPPoE subscribers statistics""" + stats = collections.defaultdict(list) + result = collections.defaultdict(list) + for stat, view in self.PPPOE_SCENARIO_STATS.items(): + # Get view total pages number + total_pages = self.ixnet.getAttribute( + view + '/page', '-totalPages') + # Collect stats from all view pages + for page in range(1, int(total_pages) + 1): + current_page = int(self.ixnet.getAttribute( + view + '/page', '-currentPage')) + if page != int(current_page): + self.ixnet.setAttribute(view + '/page', '-currentPage', + str(page)) + self.ixnet.commit() + page_data = self._get_view_page_stats(view) + stats[stat].extend(page_data) + # Filter collected views stats + for stat in stats: + for view_row in stats[stat]: + filtered_row = {} + for key, value in self.PPPOE_SCENARIO_STATS_MAP[stat].items(): + if isinstance(value, str): + filtered_row.update({key: view_row[value]}) + # Handle keys which values are represented by regex + else: + for k in view_row.keys(): + if value.match(k): + value = value.match(k).group() + filtered_row.update({key: view_row[value]}) + break + result[stat].append(filtered_row) + return result + + def start_protocols(self): + self.ixnet.execute('startAllProtocols') + + def stop_protocols(self): + self.ixnet.execute('stopAllProtocols') + def start_traffic(self): """Start the traffic injection in the traffic item @@ -587,3 +901,232 @@ class IxNextgen(object): # pragma: no cover self.ixnet.execute('start', '/traffic') # pylint: 
disable=unnecessary-lambda utils.wait_until_true(lambda: self.is_traffic_running()) + + def add_topology(self, name, vports): + log.debug("add_topology: name='%s' ports='%s'", name, vports) + obj = self.ixnet.add(self.ixnet.getRoot(), 'topology') + self.ixnet.setMultiAttribute(obj, '-name', name, '-vports', vports) + self.ixnet.commit() + return obj + + def add_device_group(self, topology, name, multiplier): + log.debug("add_device_group: tpl='%s', name='%s', multiplier='%s'", + topology, name, multiplier) + + obj = self.ixnet.add(topology, 'deviceGroup') + self.ixnet.setMultiAttribute(obj, '-name', name, '-multiplier', + multiplier) + self.ixnet.commit() + return obj + + def add_ethernet(self, dev_group, name): + log.debug( + "add_ethernet: device_group='%s' name='%s'", dev_group, name) + obj = self.ixnet.add(dev_group, 'ethernet') + self.ixnet.setMultiAttribute(obj, '-name', name) + self.ixnet.commit() + return obj + + def _create_vlans(self, ethernet, count): + self.ixnet.setMultiAttribute(ethernet, '-useVlans', 'true') + self.ixnet.setMultiAttribute(ethernet, '-vlanCount', count) + self.ixnet.commit() + + def _configure_vlans(self, ethernet, vlans): + vlans_obj = self.ixnet.getList(ethernet, 'vlan') + for i, vlan_obj in enumerate(vlans_obj): + if vlans[i].vlan_id_step is not None: + vlan_id_obj = self.ixnet.getAttribute(vlan_obj, '-vlanId') + self.ixnet.setMultiAttribute(vlan_id_obj, '-clearOverlays', + 'true', '-pattern', 'counter') + vlan_id_counter = self.ixnet.add(vlan_id_obj, 'counter') + self.ixnet.setMultiAttribute(vlan_id_counter, '-start', + vlans[i].vlan_id, '-step', + vlans[i].vlan_id_step, + '-direction', + vlans[i].vlan_id_direction) + else: + vlan_id_obj = self.ixnet.getAttribute(vlan_obj, '-vlanId') + self.ixnet.setMultiAttribute(vlan_id_obj + '/singleValue', + '-value', vlans[i].vlan_id) + + if vlans[i].prio_step is not None: + prio_obj = self.ixnet.getAttribute(vlan_obj, '-priority') + self.ixnet.setMultiAttribute(prio_obj, '-clearOverlays', 
'true', + '-pattern', 'counter') + prio_counter = self.ixnet.add(prio_obj, 'counter') + self.ixnet.setMultiAttribute(prio_counter, + '-start', vlans[i].prio, + '-step', vlans[i].prio_step, + '-direction', vlans[i].prio_direction) + elif vlans[i].prio is not None: + prio_obj = self.ixnet.getAttribute(vlan_obj, '-priority') + self.ixnet.setMultiAttribute(prio_obj + '/singleValue', + '-value', vlans[i].prio) + + if vlans[i].tp_id is not None: + tp_id_obj = self.ixnet.getAttribute(vlan_obj, '-tpid') + self.ixnet.setMultiAttribute(tp_id_obj + '/singleValue', + '-value', vlans[i].tp_id) + + self.ixnet.commit() + + def add_vlans(self, ethernet, vlans): + log.debug("add_vlans: ethernet='%s'", ethernet) + + if vlans is None or len(vlans) == 0: + raise RuntimeError( + "Invalid 'vlans' argument. Expected list of Vlan instances.") + + self._create_vlans(ethernet, len(vlans)) + self._configure_vlans(ethernet, vlans) + + def add_ipv4(self, ethernet, name='', + addr=None, addr_step=None, addr_direction='increment', + prefix=None, prefix_step=None, prefix_direction='increment', + gateway=None, gw_step=None, gw_direction='increment'): + log.debug("add_ipv4: ethernet='%s' name='%s'", ethernet, name) + obj = self.ixnet.add(ethernet, 'ipv4') + if name != '': + self.ixnet.setAttribute(obj, '-name', name) + self.ixnet.commit() + + if addr_step is not None: + # handle counter pattern + _address = self.ixnet.getAttribute(obj, '-address') + self.ixnet.setMultiAttribute(_address, '-clearOverlays', 'true', + '-pattern', 'counter') + + address_counter = self.ixnet.add(_address, 'counter') + self.ixnet.setMultiAttribute(address_counter, + '-start', addr, + '-step', addr_step, + '-direction', addr_direction) + elif addr is not None: + # handle single value + _address = self.ixnet.getAttribute(obj, '-address') + self.ixnet.setMultiAttribute(_address + '/singleValue', '-value', + addr) + + if prefix_step is not None: + # handle counter pattern + _prefix = self.ixnet.getAttribute(obj, '-prefix') + 
self.ixnet.setMultiAttribute(_prefix, '-clearOverlays', 'true', + '-pattern', 'counter') + prefix_counter = self.ixnet.add(_prefix, 'counter') + self.ixnet.setMultiAttribute(prefix_counter, + '-start', prefix, + '-step', prefix_step, + '-direction', prefix_direction) + elif prefix is not None: + # handle single value + _prefix = self.ixnet.getAttribute(obj, '-prefix') + self.ixnet.setMultiAttribute(_prefix + '/singleValue', '-value', + prefix) + + if gw_step is not None: + # handle counter pattern + _gateway = self.ixnet.getAttribute(obj, '-gatewayIp') + self.ixnet.setMultiAttribute(_gateway, '-clearOverlays', 'true', + '-pattern', 'counter') + + gateway_counter = self.ixnet.add(_gateway, 'counter') + self.ixnet.setMultiAttribute(gateway_counter, + '-start', gateway, + '-step', gw_step, + '-direction', gw_direction) + elif gateway is not None: + # handle single value + _gateway = self.ixnet.getAttribute(obj, '-gatewayIp') + self.ixnet.setMultiAttribute(_gateway + '/singleValue', '-value', + gateway) + + self.ixnet.commit() + return obj + + def add_pppox_client(self, xproto, auth, user, pwd, enable_redial=True): + log.debug( + "add_pppox_client: xproto='%s', auth='%s', user='%s', pwd='%s'", + xproto, auth, user, pwd) + obj = self.ixnet.add(xproto, 'pppoxclient') + self.ixnet.commit() + + if auth == 'pap': + auth_type = self.ixnet.getAttribute(obj, '-authType') + self.ixnet.setMultiAttribute(auth_type + '/singleValue', '-value', + auth) + pap_user = self.ixnet.getAttribute(obj, '-papUser') + self.ixnet.setMultiAttribute(pap_user + '/singleValue', '-value', + user) + pap_pwd = self.ixnet.getAttribute(obj, '-papPassword') + self.ixnet.setMultiAttribute(pap_pwd + '/singleValue', '-value', + pwd) + else: + raise NotImplementedError() + + if enable_redial: + redial = self.ixnet.getAttribute(obj, '-enableRedial') + self.ixnet.setAttribute(redial + '/singleValue', '-value', 'true') + + self.ixnet.commit() + return obj + + def add_bgp(self, ipv4, dut_ip, local_as, 
bgp_type=None): + """Add BGP protocol""" + log.debug("add_bgp: ipv4='%s', dut_ip='%s', local_as='%s'", ipv4, + dut_ip, local_as) + obj = self.ixnet.add(ipv4, 'bgpIpv4Peer') + self.ixnet.commit() + + # Set DUT IP address + dut_ip_addr = self.ixnet.getAttribute(obj, '-dutIp') + self.ixnet.setAttribute(dut_ip_addr + '/singleValue', + '-value', dut_ip) + + # Set local AS number + local_as_number = self.ixnet.getAttribute(obj, '-localAs2Bytes') + self.ixnet.setAttribute(local_as_number + '/singleValue', + '-value', local_as) + + if bgp_type: + # Set BGP type. If not specified, default value is using. + # Default type is "internal" + bgp_type_field = self.ixnet.getAttribute(obj, '-type') + self.ixnet.setAttribute(bgp_type_field + '/singleValue', + '-value', bgp_type) + self.ixnet.commit() + return obj + + def add_interface(self, vport, ip, mac=None, gateway=None): + """Add protocol interface to the vport""" + log.debug("add_interface: mac='%s', ip='%s', gateway='%s'", mac, ip, + gateway) + obj = self.ixnet.add(vport, 'interface') + self.ixnet.commit() + + if mac is not None: + self.ixnet.setMultiAttribute(obj + '/ethernet', '-macAddress', mac) + + ipv4 = self.ixnet.add(obj, 'ipv4') + self.ixnet.setMultiAttribute(ipv4, '-ip', ip) + + if gateway is not None: + self.ixnet.setMultiAttribute(ipv4, '-gateway', gateway) + + self.ixnet.commit() + + self.ixnet.setMultiAttribute(obj, '-enabled', 'true') + self.ixnet.commit() + + return obj + + def add_static_ipv4(self, iface, vport, start_ip, count, mask='24'): + """Add static IP range to the interface""" + log.debug("add_static_ipv4: start_ip:'%s', count:'%s'", + start_ip, count) + obj = self.ixnet.add(vport + '/protocols/static', 'ip') + + self.ixnet.setMultiAttribute(obj, '-protocolInterface', iface, + '-ipStart', start_ip, '-count', count, + '-mask', mask, '-enabled', 'true') + self.ixnet.commit() diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py index 5922bd3b9..ba49ab5b4 
100644 --- a/yardstick/network_services/nfvi/resource.py +++ b/yardstick/network_services/nfvi/resource.py @@ -48,6 +48,7 @@ class ResourceProfile(object): This profile adds a resource at the beginning of the test session """ COLLECTD_CONF = "collectd.conf" + BAR_COLLECTD_CONF_PATH = "/opt/collectd/etc/collectd.conf.d/" AMPQ_PORT = 5672 DEFAULT_INTERVAL = 25 DEFAULT_TIMEOUT = 3600 @@ -248,6 +249,8 @@ class ResourceProfile(object): "plugins": self.plugins, } self._provide_config_file(config_file_path, self.COLLECTD_CONF, kwargs) + self._provide_config_file(self.BAR_COLLECTD_CONF_PATH, + self.COLLECTD_CONF, kwargs) def _setup_ovs_stats(self, connection): try: diff --git a/yardstick/network_services/pipeline.py b/yardstick/network_services/pipeline.py index 7155480d4..4fbe7967f 100644 --- a/yardstick/network_services/pipeline.py +++ b/yardstick/network_services/pipeline.py @@ -22,7 +22,7 @@ from yardstick.common import utils FIREWALL_ADD_DEFAULT = "p {0} firewall add default 1"
FIREWALL_ADD_PRIO = """\
-p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 6 0xFF port 0"""
+p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 17 0xFF port 0"""
FLOW_ADD_QINQ_RULES = """\
p {0} flow add qinq 128 512 port 0 id 1
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py index a1b26a24d..85b3d54a0 100644 --- a/yardstick/network_services/traffic_profile/__init__.py +++ b/yardstick/network_services/traffic_profile/__init__.py @@ -23,11 +23,15 @@ def register_modules(): 'yardstick.network_services.traffic_profile.http_ixload', 'yardstick.network_services.traffic_profile.ixia_rfc2544', 'yardstick.network_services.traffic_profile.prox_ACL', + 'yardstick.network_services.traffic_profile.prox_irq', 'yardstick.network_services.traffic_profile.prox_binsearch', 'yardstick.network_services.traffic_profile.prox_profile', 'yardstick.network_services.traffic_profile.prox_ramp', 'yardstick.network_services.traffic_profile.rfc2544', 'yardstick.network_services.traffic_profile.pktgen', + 'yardstick.network_services.traffic_profile.landslide_profile', + 'yardstick.network_services.traffic_profile.vpp_rfc2544', + 'yardstick.network_services.traffic_profile.sip', ] for module in modules: diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py index 4fbceea9b..2fdf6ce4a 100644 --- a/yardstick/network_services/traffic_profile/base.py +++ b/yardstick/network_services/traffic_profile/base.py @@ -36,7 +36,7 @@ class TrafficProfileConfig(object): self.description = tp_config.get('description') tprofile = tp_config['traffic_profile'] self.traffic_type = tprofile.get('traffic_type') - self.frame_rate, self.rate_unit = self._parse_rate( + self.frame_rate, self.rate_unit = self.parse_rate( tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE)) self.test_precision = tprofile.get('test_precision') self.packet_sizes = tprofile.get('packet_sizes') @@ -46,7 +46,7 @@ class TrafficProfileConfig(object): self.step_interval = tprofile.get('step_interval') self.enable_latency = tprofile.get('enable_latency', False) - def _parse_rate(self, rate): + def parse_rate(self, rate): """Parse 
traffic profile rate The line rate can be defined in fps or percentage over the maximum line @@ -97,6 +97,9 @@ class TrafficProfile(object): self.params = tp_config self.config = TrafficProfileConfig(tp_config) + def is_ended(self): + return False + def execute_traffic(self, traffic_generator, **kawrgs): """ This methods defines the behavior of the traffic generator. It will be called in a loop until the traffic generator exits. diff --git a/yardstick/network_services/traffic_profile/http_ixload.py b/yardstick/network_services/traffic_profile/http_ixload.py index 3ccec637d..ec0762500 100644 --- a/yardstick/network_services/traffic_profile/http_ixload.py +++ b/yardstick/network_services/traffic_profile/http_ixload.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,14 +24,26 @@ try: except ImportError: import json as jsonutils -from yardstick.common import exceptions + +class ErrorClass(object): + + def __init__(self, *args, **kwargs): + if 'test' not in kwargs: + raise RuntimeError + + def __getattr__(self, item): + raise AttributeError + + +class InvalidRxfFile(Exception): + message = 'Loaded rxf file has unexpected format' + try: from IxLoad import IxLoad, StatCollectorUtils except ImportError: - IxLoad = exceptions.ErrorClass - StatCollectorUtils = exceptions.ErrorClass - + IxLoad = ErrorClass + StatCollectorUtils = ErrorClass LOG = logging.getLogger(__name__) CSV_FILEPATH_NAME = 'IxL_statResults.csv' @@ -197,7 +209,7 @@ class IXLOADHttpTest(object): ipAddress=address, gatewayAddress=gateway) except Exception: - raise exceptions.InvalidRxfFile + raise InvalidRxfFile def update_network_mac_address(self, net_traffic, mac): """Update MACaddress for net_traffic object @@ -225,7 +237,7 @@ class IXLOADHttpTest(object): "MacRange") mac_range.config(mac=mac) except 
Exception: - raise exceptions.InvalidRxfFile + raise InvalidRxfFile def update_network_param(self, net_traffic, param): """Update net_traffic by parameters specified in param""" @@ -256,6 +268,61 @@ class IXLOADHttpTest(object): continue self.update_network_param(net_traffic, param["ip"]) + if "uplink" in name: + self.update_http_client_param(net_traffic, param["http_client"]) + + def update_http_client_param(self, net_traffic, param): + """Update http client object in net_traffic + + Update http client object in net_traffic by parameters + specified in param. + Do not return anything. + + :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object + :param param: (dict) http_client section from traffic profile + :return: + """ + page = param.get("page_object") + if page: + self.update_page_size(net_traffic, page) + users = param.get("simulated_users") + if users: + self.update_user_count(net_traffic, users) + + def update_page_size(self, net_traffic, page_object): + """Update page_object field in http client object in net_traffic + + This function update field which configure page_object + which will be loaded from server + Do not return anything. + + :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object + :param page_object: (str) path to object on server e.g. "/4k.html" + :return: + """ + try: + activity = net_traffic.activityList[0] + ix_http_command = activity.agent.actionList[0] + ix_http_command.config(pageObject=page_object) + except Exception: + raise InvalidRxfFile + + def update_user_count(self, net_traffic, user_count): + """Update userObjectiveValue field in activity object in net_traffic + + This function update field which configure users count + which will be simulated by client. + Do not return anything. 
+ + :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object + :param user_count: (int) number of simulated users + :return: + """ + try: + activity = net_traffic.activityList[0] + activity.config(userObjectiveValue=user_count) + except Exception: + raise InvalidRxfFile def start_http_test(self): self.ix_load = IxLoad() diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py index 44bf2eafc..ca45b500d 100644 --- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py +++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,7 @@ # limitations under the License. import logging +import collections from yardstick.common import utils from yardstick.network_services.traffic_profile import base as tp_base @@ -27,12 +28,15 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): UPLINK = 'uplink' DOWNLINK = 'downlink' DROP_PERCENT_ROUND = 6 - RATE_ROUND = 5 + STATUS_SUCCESS = "Success" + STATUS_FAIL = "Failure" def __init__(self, yaml_data): super(IXIARFC2544Profile, self).__init__(yaml_data) self.rate = self.config.frame_rate self.rate_unit = self.config.rate_unit + self.iteration = 0 + self.full_profile = {} def _get_ip_and_mask(self, ip_range): _ip_range = ip_range.split('-') @@ -56,76 +60,102 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): if not traffickey.startswith((self.UPLINK, self.DOWNLINK)): continue + # values should be single-item dict, so just grab the first item try: - # values should be single-item dict, so just grab the first item - try: - key, value = next(iter(values.items())) - except StopIteration: - result[traffickey] = {} - continue - - port_id = value.get('id', 1) - 
port_index = port_id - 1 - - if value.get('outer_l3v4'): - ip = value['outer_l3v4'] - src_key, dst_key = 'srcip4', 'dstip4' - else: - ip = value['outer_l3v6'] - src_key, dst_key = 'srcip6', 'dstip6' - - srcip, srcmask = self._get_ip_and_mask(ip[src_key]) - dstip, dstmask = self._get_ip_and_mask(ip[dst_key]) - - outer_l4 = value.get('outer_l4') - src_port, src_port_mask = self._get_fixed_and_mask(outer_l4['srcport']) - dst_port, dst_port_mask = self._get_fixed_and_mask(outer_l4['dstport']) - result[traffickey] = { - 'bidir': False, - 'id': port_id, - 'rate': self.rate, - 'rate_unit': self.rate_unit, - 'outer_l2': { - 'framesize': value['outer_l2']['framesize'], - 'framesPerSecond': True, - 'QinQ': value['outer_l2'].get('QinQ'), - 'srcmac': mac['src_mac_{}'.format(port_index)], - 'dstmac': mac['dst_mac_{}'.format(port_index)], - }, - 'outer_l3': { - 'count': ip['count'], - 'dscp': ip['dscp'], - 'ttl': ip['ttl'], - 'srcseed': ip.get('srcseed', 1), - 'dstseed': ip.get('dstseed', 1), - 'srcip': srcip, - 'dstip': dstip, - 'srcmask': srcmask, - 'dstmask': dstmask, - 'type': key, - 'proto': ip['proto'], - }, - 'outer_l4': { - 'srcport': src_port, - 'dstport': dst_port, - 'srcportmask': src_port_mask, - 'dstportmask': dst_port_mask, - 'count': outer_l4['count'], - 'seed': outer_l4.get('seed', 1) - } - - } - except KeyError: + key, value = next(iter(values.items())) + except StopIteration: + result[traffickey] = {} continue + port_id = value.get('id', 1) + port_index = port_id - 1 + + result[traffickey] = { + 'bidir': False, + 'id': port_id, + 'rate': self.rate, + 'rate_unit': self.rate_unit, + 'outer_l2': {}, + 'outer_l3': {}, + 'outer_l4': {}, + } + + frame_rate = value.get('frame_rate') + if frame_rate: + flow_rate, flow_rate_unit = self.config.parse_rate(frame_rate) + result[traffickey]['rate'] = flow_rate + result[traffickey]['rate_unit'] = flow_rate_unit + + outer_l2 = value.get('outer_l2') + if outer_l2: + result[traffickey]['outer_l2'].update({ + 'framesize': 
outer_l2.get('framesize'), + 'framesPerSecond': True, + 'QinQ': outer_l2.get('QinQ'), + 'srcmac': mac.get('src_mac_{}'.format(port_index)), + 'dstmac': mac.get('dst_mac_{}'.format(port_index)), + }) + + if value.get('outer_l3v4'): + outer_l3 = value['outer_l3v4'] + src_key, dst_key = 'srcip4', 'dstip4' + else: + outer_l3 = value.get('outer_l3v6') + src_key, dst_key = 'srcip6', 'dstip6' + if outer_l3: + srcip = srcmask = dstip = dstmask = None + if outer_l3.get(src_key): + srcip, srcmask = self._get_ip_and_mask(outer_l3[src_key]) + if outer_l3.get(dst_key): + dstip, dstmask = self._get_ip_and_mask(outer_l3[dst_key]) + + result[traffickey]['outer_l3'].update({ + 'count': outer_l3.get('count', 1), + 'dscp': outer_l3.get('dscp'), + 'ttl': outer_l3.get('ttl'), + 'srcseed': outer_l3.get('srcseed', 1), + 'dstseed': outer_l3.get('dstseed', 1), + 'srcip': srcip, + 'dstip': dstip, + 'srcmask': srcmask, + 'dstmask': dstmask, + 'type': key, + 'proto': outer_l3.get('proto'), + 'priority': outer_l3.get('priority') + }) + + outer_l4 = value.get('outer_l4') + if outer_l4: + src_port = src_port_mask = dst_port = dst_port_mask = None + if outer_l4.get('srcport'): + src_port, src_port_mask = ( + self._get_fixed_and_mask(outer_l4['srcport'])) + + if outer_l4.get('dstport'): + dst_port, dst_port_mask = ( + self._get_fixed_and_mask(outer_l4['dstport'])) + + result[traffickey]['outer_l4'].update({ + 'srcport': src_port, + 'dstport': dst_port, + 'srcportmask': src_port_mask, + 'dstportmask': dst_port_mask, + 'count': outer_l4.get('count', 1), + 'seed': outer_l4.get('seed', 1), + }) + return result - def _ixia_traffic_generate(self, traffic, ixia_obj): + def _ixia_traffic_generate(self, traffic, ixia_obj, traffic_gen): ixia_obj.update_frame(traffic, self.config.duration) ixia_obj.update_ip_packet(traffic) ixia_obj.update_l4(traffic) + self._update_traffic_tracking_options(traffic_gen) ixia_obj.start_traffic() + def _update_traffic_tracking_options(self, traffic_gen): + 
traffic_gen.update_tracking_options() + def update_traffic_profile(self, traffic_generator): def port_generator(): for vld_id, intfs in sorted(traffic_generator.networks.items()): @@ -146,31 +176,38 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): first_run = self.first_run if self.first_run: self.first_run = False - self.full_profile = {} self.pg_id = 0 - self.update_traffic_profile(traffic_generator) self.max_rate = self.rate self.min_rate = 0.0 else: - self.rate = round(float(self.max_rate + self.min_rate) / 2.0, - self.RATE_ROUND) + self.rate = self._get_next_rate() + self.iteration = traffic_generator.rfc_helper.iteration.value traffic = self._get_ixia_traffic_profile(self.full_profile, mac) - self._ixia_traffic_generate(traffic, ixia_obj) + self._ixia_traffic_generate(traffic, ixia_obj, traffic_generator) return first_run - def get_drop_percentage(self, samples, tol_min, tolerance, - first_run=False): + # pylint: disable=unused-argument + def get_drop_percentage(self, samples, tol_min, tolerance, precision, + resolution, first_run=False, tc_rfc2544_opts=None): completed = False - drop_percent = 100 + drop_percent = 100.0 num_ifaces = len(samples) duration = self.config.duration in_packets_sum = sum( - [samples[iface]['in_packets'] for iface in samples]) + [samples[iface]['InPackets'] for iface in samples]) out_packets_sum = sum( - [samples[iface]['out_packets'] for iface in samples]) + [samples[iface]['OutPackets'] for iface in samples]) + in_bytes_sum = sum( + [samples[iface]['InBytes'] for iface in samples]) + out_bytes_sum = sum( + [samples[iface]['OutBytes'] for iface in samples]) rx_throughput = round(float(in_packets_sum) / duration, 3) tx_throughput = round(float(out_packets_sum) / duration, 3) + # Rx throughput in Bps + rx_throughput_bps = round(float(in_bytes_sum) / duration, 3) + # Tx throughput in Bps + tx_throughput_bps = round(float(out_bytes_sum) / duration, 3) packet_drop = abs(out_packets_sum - in_packets_sum) try: @@ -193,21 
+230,196 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): else: completed = True - latency_ns_avg = float( - sum([samples[iface]['Store-Forward_Avg_latency_ns'] - for iface in samples])) / num_ifaces - latency_ns_min = float( - sum([samples[iface]['Store-Forward_Min_latency_ns'] - for iface in samples])) / num_ifaces - latency_ns_max = float( - sum([samples[iface]['Store-Forward_Max_latency_ns'] - for iface in samples])) / num_ifaces + last_rate = self.rate + next_rate = self._get_next_rate() + if abs(next_rate - self.rate) < resolution: + LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate, + next_rate, resolution) + # stop test if the difference between the rate transmission + # in two iterations is smaller than the value of the resolution + completed = True + + LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s " + "completed=%s", tolerance, precision, drop_percent, + completed) + + latency_ns_avg = float(sum( + [samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces + latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples]) + latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples]) + + samples['Status'] = self.STATUS_FAIL + if round(drop_percent, precision) <= tolerance: + samples['Status'] = self.STATUS_SUCCESS samples['TxThroughput'] = tx_throughput samples['RxThroughput'] = rx_throughput + samples['TxThroughputBps'] = tx_throughput_bps + samples['RxThroughputBps'] = rx_throughput_bps samples['DropPercentage'] = drop_percent - samples['latency_ns_avg'] = latency_ns_avg - samples['latency_ns_min'] = latency_ns_min - samples['latency_ns_max'] = latency_ns_max + samples['LatencyAvg'] = latency_ns_avg + samples['LatencyMin'] = latency_ns_min + samples['LatencyMax'] = latency_ns_max + samples['Rate'] = last_rate + samples['PktSize'] = self._get_framesize() + samples['Iteration'] = self.iteration + + return completed, samples + + +class 
IXIARFC2544PppoeScenarioProfile(IXIARFC2544Profile): + """Class handles BNG PPPoE scenario tests traffic profile""" + + def __init__(self, yaml_data): + super(IXIARFC2544PppoeScenarioProfile, self).__init__(yaml_data) + self.full_profile = collections.OrderedDict() + + def _get_flow_groups_params(self): + flows_data = [key for key in self.params.keys() + if key.split('_')[0] in [self.UPLINK, self.DOWNLINK]] + for i in range(len(flows_data)): + uplink = '_'.join([self.UPLINK, str(i)]) + downlink = '_'.join([self.DOWNLINK, str(i)]) + if uplink in flows_data: + self.full_profile.update({uplink: self.params[uplink]}) + if downlink in flows_data: + self.full_profile.update({downlink: self.params[downlink]}) + + def update_traffic_profile(self, traffic_generator): + + networks = collections.OrderedDict() + + # Sort network interfaces pairs + for i in range(len(traffic_generator.networks)): + uplink = '_'.join([self.UPLINK, str(i)]) + downlink = '_'.join([self.DOWNLINK, str(i)]) + if uplink in traffic_generator.networks: + networks[uplink] = traffic_generator.networks[uplink] + if downlink in traffic_generator.networks: + networks[downlink] = traffic_generator.networks[downlink] + + def port_generator(): + for intfs in networks.values(): + for intf in intfs: + yield traffic_generator.vnfd_helper.port_num(intf) + + self._get_flow_groups_params() + self.ports = [port for port in port_generator()] + + def _get_prio_flows_drop_percentage(self, stats): + drop_percent = 100 + for prio_id in stats: + prio_flow = stats[prio_id] + sum_packet_drop = abs(prio_flow['OutPackets'] - prio_flow['InPackets']) + try: + drop_percent = round( + (sum_packet_drop / float(prio_flow['OutPackets'])) * 100, + self.DROP_PERCENT_ROUND) + except ZeroDivisionError: + LOG.info('No traffic is flowing') + prio_flow['DropPercentage'] = drop_percent + return stats + + def _get_summary_pppoe_subs_counters(self, samples): + result = {} + keys = ['SessionsUp', + 'SessionsDown', + 'SessionsNotStarted', + 
'SessionsTotal'] + for key in keys: + result[key] = \ + sum([samples[port][key] for port in samples + if key in samples[port]]) + return result + + def get_drop_percentage(self, samples, tol_min, tolerance, precision, + resolution, first_run=False, tc_rfc2544_opts=None): + completed = False + sum_drop_percent = 100 + num_ifaces = len(samples) + duration = self.config.duration + last_rate = self.rate + priority_stats = samples.pop('priority_stats') + priority_stats = self._get_prio_flows_drop_percentage(priority_stats) + summary_subs_stats = self._get_summary_pppoe_subs_counters(samples) + in_packets_sum = sum( + [samples[iface]['InPackets'] for iface in samples]) + out_packets_sum = sum( + [samples[iface]['OutPackets'] for iface in samples]) + in_bytes_sum = sum( + [samples[iface]['InBytes'] for iface in samples]) + out_bytes_sum = sum( + [samples[iface]['OutBytes'] for iface in samples]) + rx_throughput = round(float(in_packets_sum) / duration, 3) + tx_throughput = round(float(out_packets_sum) / duration, 3) + # Rx throughput in Bps + rx_throughput_bps = round(float(in_bytes_sum) / duration, 3) + # Tx throughput in Bps + tx_throughput_bps = round(float(out_bytes_sum) / duration, 3) + sum_packet_drop = abs(out_packets_sum - in_packets_sum) + + try: + sum_drop_percent = round( + (sum_packet_drop / float(out_packets_sum)) * 100, + self.DROP_PERCENT_ROUND) + except ZeroDivisionError: + LOG.info('No traffic is flowing') + + latency_ns_avg = float(sum( + [samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces + latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples]) + latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples]) + + samples['TxThroughput'] = tx_throughput + samples['RxThroughput'] = rx_throughput + samples['TxThroughputBps'] = tx_throughput_bps + samples['RxThroughputBps'] = rx_throughput_bps + samples['DropPercentage'] = sum_drop_percent + samples['LatencyAvg'] = latency_ns_avg + samples['LatencyMin'] = 
latency_ns_min + samples['LatencyMax'] = latency_ns_max + samples['Priority'] = priority_stats + samples['Rate'] = last_rate + samples['PktSize'] = self._get_framesize() + samples['Iteration'] = self.iteration + samples.update(summary_subs_stats) + + if tc_rfc2544_opts: + priority = tc_rfc2544_opts.get('priority') + if priority: + drop_percent = samples['Priority'][priority]['DropPercentage'] + else: + drop_percent = sum_drop_percent + else: + drop_percent = sum_drop_percent + + if first_run: + completed = True if drop_percent <= tolerance else False + if (first_run and + self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS): + self.rate = float(out_packets_sum) / duration / num_ifaces + + if drop_percent > tolerance: + self.max_rate = self.rate + elif drop_percent < tol_min: + self.min_rate = self.rate + else: + completed = True + + next_rate = self._get_next_rate() + if abs(next_rate - self.rate) < resolution: + LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate, + next_rate, resolution) + # stop test if the difference between the rate transmission + # in two iterations is smaller than the value of the resolution + completed = True + + LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s " + "completed=%s", tolerance, precision, drop_percent, + completed) + + samples['Status'] = self.STATUS_FAIL + if round(drop_percent, precision) <= tolerance: + samples['Status'] = self.STATUS_SUCCESS return completed, samples diff --git a/yardstick/network_services/traffic_profile/landslide_profile.py b/yardstick/network_services/traffic_profile/landslide_profile.py new file mode 100644 index 000000000..f79226fb4 --- /dev/null +++ b/yardstick/network_services/traffic_profile/landslide_profile.py @@ -0,0 +1,47 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Spirent Landslide traffic profile definitions """ + +from yardstick.network_services.traffic_profile import base + + +class LandslideProfile(base.TrafficProfile): + """ + This traffic profile handles attributes of Landslide data stream + """ + + def __init__(self, tp_config): + super(LandslideProfile, self).__init__(tp_config) + + # for backward compatibility support dict and list of dicts + if isinstance(tp_config["dmf_config"], dict): + self.dmf_config = [tp_config["dmf_config"]] + else: + self.dmf_config = tp_config["dmf_config"] + + def execute(self, traffic_generator): + pass + + def update_dmf(self, options): + if 'dmf' in options: + if isinstance(options['dmf'], dict): + _dmfs = [options['dmf']] + else: + _dmfs = options['dmf'] + + for index, _dmf in enumerate(_dmfs): + try: + self.dmf_config[index].update(_dmf) + except IndexError: + pass diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py index 16a0411ec..402bf741c 100644 --- a/yardstick/network_services/traffic_profile/prox_binsearch.py +++ b/yardstick/network_services/traffic_profile/prox_binsearch.py @@ -66,6 +66,9 @@ class ProxBinSearchProfile(ProxProfile): yield test_value test_value = self.mid_point + def is_ended(self): + return self.done.is_set() + def run_test_with_pkt_size(self, traffic_gen, pkt_size, duration): """Run the test for a single packet size. 
@@ -93,7 +96,7 @@ class ProxBinSearchProfile(ProxProfile): # success, the binary search will complete on an integer multiple # of the precision, rather than on a fraction of it. - theor_max_thruput = 0 + theor_max_thruput = 0.0 result_samples = {} @@ -165,8 +168,8 @@ class ProxBinSearchProfile(ProxProfile): samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples) - if theor_max_thruput < samples["TxThroughput"]: - theor_max_thruput = samples['TxThroughput'] + if theor_max_thruput < samples["RequestedTxThroughput"]: + theor_max_thruput = samples['RequestedTxThroughput'] samples['theor_max_throughput'] = theor_max_thruput samples["rx_total"] = int(result.rx_total) @@ -195,9 +198,9 @@ class ProxBinSearchProfile(ProxProfile): LOG.info( ">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s", - pkt_size, theor_max_thruput, result_samples.get("RxThroughput", 0)) + pkt_size, theor_max_thruput, result_samples.get("RxThroughput", 0.0)) result_samples["Status"] = STATUS_RESULT result_samples["Next_Step"] = "" - result_samples["Actual_throughput"] = result_samples.get("RxThroughput", 0) + result_samples["Actual_throughput"] = result_samples.get("RxThroughput", 0.0) result_samples["theor_max_throughput"] = theor_max_thruput self.queue.put(result_samples) diff --git a/yardstick/network_services/traffic_profile/prox_irq.py b/yardstick/network_services/traffic_profile/prox_irq.py new file mode 100644 index 000000000..0ea294914 --- /dev/null +++ b/yardstick/network_services/traffic_profile/prox_irq.py @@ -0,0 +1,48 @@ +# Copyright (c) 2016-2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Fixed traffic profile definitions """ + +import logging +import time + +from yardstick.network_services.traffic_profile.prox_profile import ProxProfile + +LOG = logging.getLogger(__name__) + + +class ProxIrqProfile(ProxProfile): + """ + This profile adds a single stream at the beginning of the traffic session + """ + + def __init__(self, tp_config): + super(ProxIrqProfile, self).__init__(tp_config) + + def init(self, queue): + self.queue = queue + self.queue.cancel_join_thread() + + def execute_traffic(self, traffic_generator): + LOG.debug("Prox_IRQ Execute Traffic....") + time.sleep(5) + + def is_ended(self): + return False + + def run_test(self): + """Run the test + """ + + LOG.info("Prox_IRQ ....") diff --git a/yardstick/network_services/traffic_profile/prox_profile.py b/yardstick/network_services/traffic_profile/prox_profile.py index 343ef1da2..be450c9f7 100644 --- a/yardstick/network_services/traffic_profile/prox_profile.py +++ b/yardstick/network_services/traffic_profile/prox_profile.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,6 +16,8 @@ from __future__ import absolute_import import logging +import multiprocessing +import time from yardstick.network_services.traffic_profile.base import TrafficProfile from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper @@ -56,7 +58,7 @@ class ProxProfile(TrafficProfile): def __init__(self, tp_config): super(ProxProfile, self).__init__(tp_config) self.queue = None - self.done = False + self.done = multiprocessing.Event() self.results = [] # TODO: get init values from tp_config @@ -116,7 +118,8 @@ class ProxProfile(TrafficProfile): try: pkt_size = next(self.pkt_size_iterator) except StopIteration: - self.done = True + time.sleep(5) + self.done.set() return # Adjust packet size upwards if it's less than the minimum diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py index e33c437c9..aaa491b75 100644 --- a/yardstick/network_services/traffic_profile/rfc2544.py +++ b/yardstick/network_services/traffic_profile/rfc2544.py @@ -23,7 +23,7 @@ from yardstick.common import constants from yardstick.network_services.traffic_profile import trex_traffic_profile -LOGGING = logging.getLogger(__name__) +LOG = logging.getLogger(__name__) SRC_PORT = 'sport' DST_PORT = 'dport' @@ -72,14 +72,16 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile): """TRex RFC2544 traffic profile""" TOLERANCE_LIMIT = 0.01 + STATUS_SUCCESS = "Success" + STATUS_FAIL = "Failure" def __init__(self, traffic_generator): super(RFC2544Profile, self).__init__(traffic_generator) self.generator = None + self.iteration = 0 self.rate = self.config.frame_rate self.max_rate = self.config.frame_rate self.min_rate = 0 - self.drop_percent_max = 0 def register_generator(self, generator): self.generator = generator @@ -126,6 +128,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile): self.generator.client.start(ports=ports, duration=self.config.duration, force=True) + self.iteration = 
self.generator.rfc2544_helper.iteration.value return ports, port_pg_id def _create_profile(self, profile_data, rate, port_pg_id, enable_latency): @@ -142,7 +145,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile): return trex_stl_streams.STLProfile(streams) def _create_imix_data(self, imix, - weight_mode=constants.DISTRIBUTION_IN_PACKETS): + weight_mode=constants.DISTRIBUTION_IN_BYTES): """Generate the IMIX distribution for a STL profile The input information is the framesize dictionary in a test case @@ -192,13 +195,13 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile): imix_dip = {size: float(weight) / weight_normalize for size, weight in imix_count.items()} - if weight_mode == constants.DISTRIBUTION_IN_BYTES: + if weight_mode == constants.DISTRIBUTION_IN_PACKETS: return imix_dip byte_total = sum([int(size) * weight - for size, weight in imix_dip.items()]) - return {size: (int(size) * weight * 100) / byte_total - for size, weight in imix_dip.items()} + for size, weight in imix_count.items()]) + return {size: float(int(size) * weight * 100) / byte_total + for size, weight in imix_count.items()} def _create_vm(self, packet_definition): """Create the STL Raw instructions""" @@ -271,17 +274,24 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile): return streams def get_drop_percentage(self, samples, tol_low, tol_high, - correlated_traffic): + correlated_traffic, resolution): # pylint: disable=unused-argument """Calculate the drop percentage and run the traffic""" completed = False + status = self.STATUS_FAIL out_pkt_end = sum(port['out_packets'] for port in samples[-1].values()) in_pkt_end = sum(port['in_packets'] for port in samples[-1].values()) out_pkt_ini = sum(port['out_packets'] for port in samples[0].values()) in_pkt_ini = sum(port['in_packets'] for port in samples[0].values()) + in_bytes_ini = sum(port['in_bytes'] for port in samples[0].values()) + out_bytes_ini = sum(port['out_bytes'] for port in samples[0].values()) + in_bytes_end = 
sum(port['in_bytes'] for port in samples[-1].values()) + out_bytes_end = sum(port['out_bytes'] for port in samples[-1].values()) time_diff = (list(samples[-1].values())[0]['timestamp'] - list(samples[0].values())[0]['timestamp']).total_seconds() out_packets = out_pkt_end - out_pkt_ini in_packets = in_pkt_end - in_pkt_ini + out_bytes = out_bytes_end - out_bytes_ini + in_bytes = in_bytes_end - in_bytes_ini tx_rate_fps = float(out_packets) / time_diff rx_rate_fps = float(in_packets) / time_diff drop_percent = 100.0 @@ -298,26 +308,55 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile): elif drop_percent < tol_low: self.min_rate = self.rate else: + status = self.STATUS_SUCCESS completed = True last_rate = self.rate - self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5) - - throughput = rx_rate_fps * 2 if correlated_traffic else rx_rate_fps - - if drop_percent > self.drop_percent_max: - self.drop_percent_max = drop_percent - - latency = {port_num: value['latency'] - for port_num, value in samples[-1].items()} + self.rate = self._get_next_rate() + if abs(last_rate - self.rate) < resolution: + # stop test if the difference between the rate transmission + # in two iterations is smaller than the value of the resolution + completed = True + LOG.debug("rate=%s, next_rate=%s, resolution=%s, completed=%s", + last_rate, self.rate, resolution, completed) + + ports = samples[-1].keys() + num_ports = len(ports) + + output = {} + for port in ports: + output[port] = {} + first = samples[0][port] + last = samples[-1][port] + output[port]['InPackets'] = last['in_packets'] - first['in_packets'] + output[port]['OutPackets'] = last['out_packets'] - first['out_packets'] + output[port]['InBytes'] = last['in_bytes'] - first['in_bytes'] + output[port]['OutBytes'] = last['out_bytes'] - first['out_bytes'] + if self.config.enable_latency: + output[port]['LatencyAvg'] = float(sum( + [last['latency'][id]['average'] for id in + last['latency']]) * 1000) / len(last['latency']) + 
output[port]['LatencyMin'] = min( + [last['latency'][id]['total_min'] for id in + last['latency']]) * 1000 + output[port]['LatencyMax'] = max( + [last['latency'][id]['total_max'] for id in + last['latency']]) * 1000 + + output['TxThroughput'] = tx_rate_fps + output['RxThroughput'] = rx_rate_fps + output['RxThroughputBps'] = round(float(in_bytes) / time_diff, 3) + output['TxThroughputBps'] = round(float(out_bytes) / time_diff, 3) + output['DropPercentage'] = drop_percent + output['Rate'] = last_rate + output['PktSize'] = self._get_framesize() + output['Iteration'] = self.iteration + output['Status'] = status + + if self.config.enable_latency: + output['LatencyAvg'] = float( + sum([output[port]['LatencyAvg'] for port in ports])) / num_ports + output['LatencyMin'] = min([output[port]['LatencyMin'] for port in ports]) + output['LatencyMax'] = max([output[port]['LatencyMax'] for port in ports]) - output = { - 'TxThroughput': tx_rate_fps, - 'RxThroughput': rx_rate_fps, - 'CurrentDropPercentage': drop_percent, - 'Throughput': throughput, - 'DropPercentage': self.drop_percent_max, - 'Rate': last_rate, - 'Latency': latency - } return completed, output diff --git a/yardstick/network_services/traffic_profile/sip.py b/yardstick/network_services/traffic_profile/sip.py new file mode 100644 index 000000000..d18574090 --- /dev/null +++ b/yardstick/network_services/traffic_profile/sip.py @@ -0,0 +1,32 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from yardstick.network_services.traffic_profile import base + + +class SipProfile(base.TrafficProfile): + """ Sipp Traffic profile """ + + def __init__(self, yaml_data): + super(SipProfile, self).__init__(yaml_data) + self.generator = None + + def execute_traffic(self, traffic_generator=None): + if traffic_generator is not None and self.generator is None: + self.generator = traffic_generator + + def is_ended(self): + if self.generator is not None: + return self.generator.is_ended() + return False diff --git a/yardstick/network_services/traffic_profile/trex_traffic_profile.py b/yardstick/network_services/traffic_profile/trex_traffic_profile.py index ed0355fa5..cf538d488 100644 --- a/yardstick/network_services/traffic_profile/trex_traffic_profile.py +++ b/yardstick/network_services/traffic_profile/trex_traffic_profile.py @@ -52,6 +52,7 @@ class TrexProfile(base.TrafficProfile): IPv6: ('ip6_packet', Pkt.IPv6), UDP: ('udp_packet', Pkt.UDP), } + RATE_ROUND = 5 def _general_single_action_partial(self, protocol): def f(field): @@ -186,6 +187,8 @@ class TrexProfile(base.TrafficProfile): self.qinq = False self.vm_flow_vars = [] self.packets = [] + self.max_rate = 0 + self.min_rate = 0 self._map_proto_actions = { # the tuple is (single value function, range value function, if the values should be @@ -337,6 +340,25 @@ class TrexProfile(base.TrafficProfile): if 'dstport' in outer_l4: self._set_proto_addr(UDP, DST_PORT, outer_l4['dstport'], outer_l4['count']) + def _get_next_rate(self): + rate = round(float(self.max_rate + self.min_rate)/2.0, self.RATE_ROUND) + return rate + + def _get_framesize(self): + framesizes = [] + for traffickey, value in self.params.items(): + if not traffickey.startswith((self.UPLINK, self.DOWNLINK)): + continue + for _, data in value.items(): + framesize = data['outer_l2']['framesize'] + for size in (s for s, w in framesize.items() if int(w) != 
0): + framesizes.append(size) + if len(set(framesizes)) == 0: + return '' + elif len(set(framesizes)) == 1: + return framesizes[0] + return 'IMIX' + @classmethod def _count_ip(cls, start_ip, end_ip): start = ipaddress.ip_address(six.u(start_ip)) diff --git a/yardstick/network_services/traffic_profile/vpp_rfc2544.py b/yardstick/network_services/traffic_profile/vpp_rfc2544.py new file mode 100644 index 000000000..412e4e69a --- /dev/null +++ b/yardstick/network_services/traffic_profile/vpp_rfc2544.py @@ -0,0 +1,339 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import ipaddress +import logging +import random +import string + +from trex_stl_lib import api as Pkt +from trex_stl_lib import trex_stl_client +from trex_stl_lib import trex_stl_packet_builder_scapy +from trex_stl_lib import trex_stl_streams + +from yardstick.common import constants +from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \ + MultipleLossRatioSearch +from yardstick.network_services.traffic_profile.rfc2544 import RFC2544Profile, \ + PortPgIDMap +from yardstick.network_services.traffic_profile.trex_traffic_profile import IP, \ + DST + +LOGGING = logging.getLogger(__name__) + + +class VppRFC2544Profile(RFC2544Profile): + + def __init__(self, traffic_generator): + super(VppRFC2544Profile, self).__init__(traffic_generator) + + tp_cfg = traffic_generator["traffic_profile"] + self.number_of_intermediate_phases = tp_cfg.get("intermediate_phases", + 2) + + self.duration = self.config.duration + self.precision = self.config.test_precision + self.lower_bound = self.config.lower_bound + self.upper_bound = self.config.upper_bound + self.step_interval = self.config.step_interval + self.enable_latency = self.config.enable_latency + + self.pkt_size = None + self.flow = None + + self.tolerance_low = 0 + self.tolerance_high = 0 + + self.queue = None + self.port_pg_id = None + + self.current_lower = self.lower_bound + self.current_upper = self.upper_bound + + self.ports = [] + self.profiles = {} + + @property + def delta(self): + return self.current_upper - self.current_lower + + @property + def mid_point(self): + return (self.current_lower + self.current_upper) / 2 + + @staticmethod + def calculate_frame_size(imix): + if not imix: + return 64, 100 + + imix_count = {size.upper().replace('B', ''): int(weight) + for size, weight in imix.items()} + imix_sum = sum(imix_count.values()) + if imix_sum <= 0: + return 64, 100 + packets_total = sum([int(size) * weight + for size, weight in imix_count.items()]) + return 
packets_total / imix_sum, imix_sum + + @staticmethod + def _gen_payload(length): + payload = "" + for _ in range(length): + payload += random.choice(string.ascii_letters) + + return payload + + def bounds_iterator(self, logger=None): + self.current_lower = self.lower_bound + self.current_upper = self.upper_bound + + test_value = self.current_upper + while abs(self.delta) >= self.precision: + if logger: + logger.debug("New interval [%s, %s), precision: %d", + self.current_lower, + self.current_upper, self.step_interval) + logger.info("Testing with value %s", test_value) + + yield test_value + test_value = self.mid_point + + def register_generator(self, generator): + super(VppRFC2544Profile, self).register_generator(generator) + self.init_traffic_params(generator) + + def init_queue(self, queue): + self.queue = queue + self.queue.cancel_join_thread() + + def init_traffic_params(self, generator): + if generator.rfc2544_helper.latency: + self.enable_latency = True + self.tolerance_low = generator.rfc2544_helper.tolerance_low + self.tolerance_high = generator.rfc2544_helper.tolerance_high + self.max_rate = generator.scenario_helper.all_options.get('vpp_config', + {}).get( + 'max_rate', self.rate) + + def create_profile(self, profile_data, current_port): + streams = [] + for packet_name in profile_data: + imix = (profile_data[packet_name]. 
+ get('outer_l2', {}).get('framesize')) + self.pkt_size, imix_sum = self.calculate_frame_size(imix) + self._create_vm(profile_data[packet_name]) + if self.max_rate > 100: + imix_data = self._create_imix_data(imix, + constants.DISTRIBUTION_IN_PACKETS) + else: + imix_data = self._create_imix_data(imix) + _streams = self._create_single_stream(current_port, imix_data, + imix_sum) + streams.extend(_streams) + return trex_stl_streams.STLProfile(streams) + + def _set_outer_l3v4_fields(self, outer_l3v4): + """ setup outer l3v4 fields from traffic profile """ + ip_params = {} + if 'proto' in outer_l3v4: + ip_params['proto'] = outer_l3v4['proto'] + self._set_proto_fields(IP, **ip_params) + + self.flow = int(outer_l3v4['count']) + src_start_ip, _ = outer_l3v4['srcip4'].split('-') + dst_start_ip, _ = outer_l3v4['dstip4'].split('-') + + self.ip_packet = Pkt.IP(src=src_start_ip, + dst=dst_start_ip, + proto=outer_l3v4['proto']) + if self.flow > 1: + dst_start_int = int(ipaddress.ip_address(str(dst_start_ip))) + dst_end_ip_new = ipaddress.ip_address( + dst_start_int + self.flow - 1) + # self._set_proto_addr(IP, SRC, outer_l3v4['srcip4'], outer_l3v4['count']) + self._set_proto_addr(IP, DST, + "{start_ip}-{end_ip}".format( + start_ip=dst_start_ip, + end_ip=str(dst_end_ip_new)), + self.flow) + + def _create_single_packet(self, size=64): + ether_packet = self.ether_packet + ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet + base_pkt = ether_packet / ip_packet + payload_len = max(0, size - len(base_pkt) - 4) + packet = trex_stl_packet_builder_scapy.STLPktBuilder( + pkt=base_pkt / self._gen_payload(payload_len), + vm=self.trex_vm) + packet_lat = trex_stl_packet_builder_scapy.STLPktBuilder( + pkt=base_pkt / self._gen_payload(payload_len)) + + return packet, packet_lat + + def _create_single_stream(self, current_port, imix_data, imix_sum, + isg=0.0): + streams = [] + for size, weight in ((int(size), float(weight)) for (size, weight) + in imix_data.items() if 
float(weight) > 0): + if current_port == 1: + isg += 10.0 + if self.max_rate > 100: + mode = trex_stl_streams.STLTXCont( + pps=int(weight * imix_sum / 100)) + mode_lat = mode + else: + mode = trex_stl_streams.STLTXCont( + percentage=weight * self.max_rate / 100) + mode_lat = trex_stl_streams.STLTXCont(pps=9000) + + packet, packet_lat = self._create_single_packet(size) + streams.append( + trex_stl_client.STLStream(isg=isg, packet=packet, mode=mode)) + if self.enable_latency: + pg_id = self.port_pg_id.increase_pg_id(current_port) + stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id) + stream_lat = trex_stl_client.STLStream(isg=isg, + packet=packet_lat, + mode=mode_lat, + flow_stats=stl_flow) + streams.append(stream_lat) + return streams + + def execute_traffic(self, traffic_generator=None): + if traffic_generator is not None and self.generator is None: + self.generator = traffic_generator + + self.ports = [] + self.profiles = {} + self.port_pg_id = PortPgIDMap() + for vld_id, intfs in sorted(self.generator.networks.items()): + profile_data = self.params.get(vld_id) + if not profile_data: + continue + if (vld_id.startswith(self.DOWNLINK) and + self.generator.rfc2544_helper.correlated_traffic): + continue + for intf in intfs: + current_port = int(self.generator.port_num(intf)) + self.port_pg_id.add_port(current_port) + profile = self.create_profile(profile_data, current_port) + self.generator.client.add_streams(profile, + ports=[current_port]) + + self.ports.append(current_port) + self.profiles[current_port] = profile + + timeout = self.generator.scenario_helper.scenario_cfg["runner"][ + "duration"] + test_data = { + "test_duration": timeout, + "test_precision": self.precision, + "tolerated_loss": self.tolerance_high, + "duration": self.duration, + "packet_size": self.pkt_size, + "flow": self.flow + } + + if self.max_rate > 100: + self.binary_search_with_optimized(self.generator, self.duration, + timeout, test_data) + else: + self.binary_search(self.generator, 
self.duration, + self.tolerance_high, test_data) + + def binary_search_with_optimized(self, traffic_generator, duration, + timeout, test_data): + self.queue.cancel_join_thread() + algorithm = MultipleLossRatioSearch( + measurer=traffic_generator, latency=self.enable_latency, + pkt_size=self.pkt_size, + final_trial_duration=duration, + final_relative_width=self.step_interval / 100, + number_of_intermediate_phases=self.number_of_intermediate_phases, + initial_trial_duration=1, + timeout=timeout) + algorithm.init_generator(self.ports, self.port_pg_id, self.profiles, + test_data, self.queue) + return algorithm.narrow_down_ndr_and_pdr(10000, self.max_rate, + self.tolerance_high) + + def binary_search(self, traffic_generator, duration, tolerance_value, + test_data): + theor_max_thruput = 0 + result_samples = {} + + for test_value in self.bounds_iterator(LOGGING): + stats = traffic_generator.send_traffic_on_tg(self.ports, + self.port_pg_id, + duration, + str( + test_value / self.max_rate / 2), + latency=self.enable_latency) + traffic_generator.client.reset(ports=self.ports) + traffic_generator.client.clear_stats(ports=self.ports) + traffic_generator.client.remove_all_streams(ports=self.ports) + for port, profile in self.profiles.items(): + traffic_generator.client.add_streams(profile, ports=[port]) + + loss_ratio = (float(traffic_generator.loss) / float( + traffic_generator.sent)) * 100 + + samples = traffic_generator.generate_samples(stats, self.ports, + self.port_pg_id, + self.enable_latency) + samples.update(test_data) + LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(), + test_value, samples) + self.queue.put(samples) + + if float(loss_ratio) > float(tolerance_value): + LOGGING.debug("Failure... Decreasing upper bound") + self.current_upper = test_value + else: + LOGGING.debug("Success! 
Increasing lower bound") + self.current_lower = test_value + + rate_total = float(traffic_generator.sent) / float(duration) + bandwidth_total = float(rate_total) * ( + float(self.pkt_size) + 20) * 8 / (10 ** 9) + + success_samples = {'Result_' + key: value for key, value in + samples.items()} + success_samples["Result_{}".format('PDR')] = { + "rate_total_pps": float(rate_total), + "bandwidth_total_Gbps": float(bandwidth_total), + "packet_loss_ratio": float(loss_ratio), + "packets_lost": int(traffic_generator.loss), + } + self.queue.put(success_samples) + + # Store Actual throughput for result samples + for intf in traffic_generator.vnfd_helper.interfaces: + name = intf["name"] + result_samples[name] = { + "Result_Actual_throughput": float( + success_samples["Result_{}".format(name)][ + "rx_throughput_bps"]), + } + + for intf in traffic_generator.vnfd_helper.interfaces: + name = intf["name"] + if theor_max_thruput < samples[name]["tx_throughput_bps"]: + theor_max_thruput = samples[name]['tx_throughput_bps'] + self.queue.put({'theor_max_throughput': theor_max_thruput}) + + result_samples["Result_theor_max_throughput"] = theor_max_thruput + self.queue.put(result_samples) + return result_samples diff --git a/yardstick/network_services/utils.py b/yardstick/network_services/utils.py index 4b987fafe..9c64fecde 100644 --- a/yardstick/network_services/utils.py +++ b/yardstick/network_services/utils.py @@ -36,6 +36,9 @@ OPTS = [ cfg.StrOpt('trex_client_lib', default=os.path.join(NSB_ROOT, 'trex_client/stl'), help='trex python library path.'), + cfg.StrOpt('jre_path_i386', + default='', + help='path to installation of 32-bit Java 1.7+.'), ] CONF.register_opts(OPTS, group="nsb") diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py index 11a602472..69d29bf76 100644 --- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py @@ -1,4 +1,4 @@ -# 
Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -246,9 +246,12 @@ class AclApproxVnf(SampleVNF): 'packets_dropped': 2, } - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = AclApproxSetupEnvSetupEnvHelper - super(AclApproxVnf, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(AclApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type) + + def wait_for_instantiate(self): + """Wait for VNF to initialize""" + self.wait_for_initialize() diff --git a/yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py b/yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py new file mode 100644 index 000000000..d1d9667db --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/agnostic_vnf.py @@ -0,0 +1,46 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from yardstick.network_services.vnf_generic.vnf import base + +LOG = logging.getLogger(__name__) + + +class AgnosticVnf(base.GenericVNF): + """ AgnosticVnf implementation. 
""" + def __init__(self, name, vnfd): + super(AgnosticVnf, self).__init__(name, vnfd) + + def instantiate(self, scenario_cfg, context_cfg): + pass + + def wait_for_instantiate(self): + pass + + def terminate(self): + pass + + def scale(self, flavor=""): + pass + + def collect_kpi(self): + pass + + def start_collect(self): + pass + + def stop_collect(self): + pass diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py index 0fb310075..8ef96b744 100644 --- a/yardstick/network_services/vnf_generic/vnf/base.py +++ b/yardstick/network_services/vnf_generic/vnf/base.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,10 +17,6 @@ import abc import logging import six -from yardstick.common import messaging -from yardstick.common.messaging import consumer -from yardstick.common.messaging import payloads -from yardstick.common.messaging import producer from yardstick.network_services.helpers.samplevnf_helper import PortPairs @@ -98,7 +94,7 @@ class VnfdHelper(dict): for interface in self.interfaces: virtual_intf = interface["virtual-interface"] if virtual_intf[key] == value: - return interface + return virtual_intf raise KeyError() def find_interface(self, **kwargs): @@ -141,70 +137,6 @@ class VnfdHelper(dict): yield port_name, port_num -class TrafficGeneratorProducer(producer.MessagingProducer): - """Class implementing the message producer for traffic generators - - This message producer must be instantiated in the process created - "run_traffic" process. 
- """ - def __init__(self, _id): - super(TrafficGeneratorProducer, self).__init__(messaging.TOPIC_TG, - _id=_id) - - def tg_method_started(self, version=1): - """Send a message to inform the traffic generation has started""" - self.send_message( - messaging.TG_METHOD_STARTED, - payloads.TrafficGeneratorPayload(version=version, iteration=0, - kpi={})) - - def tg_method_finished(self, version=1): - """Send a message to inform the traffic generation has finished""" - self.send_message( - messaging.TG_METHOD_FINISHED, - payloads.TrafficGeneratorPayload(version=version, iteration=0, - kpi={})) - - def tg_method_iteration(self, iteration, version=1, kpi=None): - """Send a message, with KPI, once an iteration has finished""" - kpi = {} if kpi is None else kpi - self.send_message( - messaging.TG_METHOD_ITERATION, - payloads.TrafficGeneratorPayload(version=version, - iteration=iteration, kpi=kpi)) - - -@six.add_metaclass(abc.ABCMeta) -class GenericVNFEndpoint(consumer.NotificationHandler): - """Endpoint class for ``GenericVNFConsumer``""" - - @abc.abstractmethod - def runner_method_start_iteration(self, ctxt, **kwargs): - """Endpoint when RUNNER_METHOD_START_ITERATION is received - - :param ctxt: (dict) {'id': <Producer ID>} - :param kwargs: (dict) ``payloads.RunnerPayload`` context - """ - - @abc.abstractmethod - def runner_method_stop_iteration(self, ctxt, **kwargs): - """Endpoint when RUNNER_METHOD_STOP_ITERATION is received - - :param ctxt: (dict) {'id': <Producer ID>} - :param kwargs: (dict) ``payloads.RunnerPayload`` context - """ - - -class GenericVNFConsumer(consumer.MessagingConsumer): - """MQ consumer for ``GenericVNF`` derived classes""" - - def __init__(self, ctx_ids, endpoints): - if not isinstance(endpoints, list): - endpoints = [endpoints] - super(GenericVNFConsumer, self).__init__(messaging.TOPIC_RUNNER, - ctx_ids, endpoints) - - @six.add_metaclass(abc.ABCMeta) class GenericVNF(object): """Class providing file-like API for generic VNF implementation @@ 
-217,9 +149,8 @@ class GenericVNF(object): UPLINK = PortPairs.UPLINK DOWNLINK = PortPairs.DOWNLINK - def __init__(self, name, vnfd, task_id): + def __init__(self, name, vnfd): self.name = name - self._task_id = task_id self.vnfd_helper = VnfdHelper(vnfd) # List of statistics we can obtain from this VNF # - ETSI MANO 6.3.1.1 monitoring_parameter @@ -280,11 +211,10 @@ class GenericVNF(object): class GenericTrafficGen(GenericVNF): """Class providing file-like API for generic traffic generator""" - def __init__(self, name, vnfd, task_id): - super(GenericTrafficGen, self).__init__(name, vnfd, task_id) + def __init__(self, name, vnfd): + super(GenericTrafficGen, self).__init__(name, vnfd) self.runs_traffic = True self.traffic_finished = False - self._mq_producer = None @abc.abstractmethod def run_traffic(self, traffic_profile): @@ -355,16 +285,3 @@ class GenericTrafficGen(GenericVNF): :return: True/False """ pass - - @staticmethod - def _setup_mq_producer(id): - """Setup the TG MQ producer to send messages between processes - - :return: (derived class from ``MessagingProducer``) MQ producer object - """ - return TrafficGeneratorProducer(id) - - def get_mq_producer_id(self): - """Return the MQ producer ID if initialized""" - if self._mq_producer: - return self._mq_producer.id diff --git a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py index 14f1e2e97..ee4a581b1 100644 --- a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -85,12 +85,12 @@ class CgnaptApproxVnf(SampleVNF): "packets_dropped": 4, } - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = CgnaptApproxSetupEnvHelper - super(CgnaptApproxVnf, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(CgnaptApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) def _vnf_up_post(self): super(CgnaptApproxVnf, self)._vnf_up_post() @@ -120,3 +120,7 @@ class CgnaptApproxVnf(SampleVNF): self.vnf_execute(cmd) time.sleep(WAIT_FOR_STATIC_NAPT) + + def wait_for_instantiate(self): + """Wait for VNF to initialize""" + self.wait_for_initialize() diff --git a/yardstick/network_services/vnf_generic/vnf/epc_vnf.py b/yardstick/network_services/vnf_generic/vnf/epc_vnf.py new file mode 100644 index 000000000..8112963e9 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/epc_vnf.py @@ -0,0 +1,53 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from yardstick.network_services.vnf_generic.vnf import base + +LOG = logging.getLogger(__name__) + + +class EPCVnf(base.GenericVNF): + + def __init__(self, name, vnfd): + super(EPCVnf, self).__init__(name, vnfd) + + def instantiate(self, scenario_cfg, context_cfg): + """Prepare VNF for operation and start the VNF process/VM + + :param scenario_cfg: Scenario config + :param context_cfg: Context config + """ + pass + + def wait_for_instantiate(self): + """Wait for VNF to start""" + pass + + def terminate(self): + """Kill all VNF processes""" + pass + + def scale(self, flavor=""): + pass + + def collect_kpi(self): + pass + + def start_collect(self): + pass + + def stop_collect(self): + pass diff --git a/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py b/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py new file mode 100644 index 000000000..1961ac1b1 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/ipsec_vnf.py @@ -0,0 +1,498 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import re +import time +from collections import Counter +from enum import Enum + +from yardstick.benchmark.contexts.base import Context +from yardstick.common.process import check_if_process_failed +from yardstick.network_services import constants +from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF +from yardstick.network_services.vnf_generic.vnf.vpp_helpers import \ + VppSetupEnvHelper, VppConfigGenerator + +LOG = logging.getLogger(__name__) + + +class CryptoAlg(Enum): + """Encryption algorithms.""" + AES_CBC_128 = ('aes-cbc-128', 'AES-CBC', 16) + AES_CBC_192 = ('aes-cbc-192', 'AES-CBC', 24) + AES_CBC_256 = ('aes-cbc-256', 'AES-CBC', 32) + AES_GCM_128 = ('aes-gcm-128', 'AES-GCM', 20) + + def __init__(self, alg_name, scapy_name, key_len): + self.alg_name = alg_name + self.scapy_name = scapy_name + self.key_len = key_len + + +class IntegAlg(Enum): + """Integrity algorithms.""" + SHA1_96 = ('sha1-96', 'HMAC-SHA1-96', 20) + SHA_256_128 = ('sha-256-128', 'SHA2-256-128', 32) + SHA_384_192 = ('sha-384-192', 'SHA2-384-192', 48) + SHA_512_256 = ('sha-512-256', 'SHA2-512-256', 64) + AES_GCM_128 = ('aes-gcm-128', 'AES-GCM', 20) + + def __init__(self, alg_name, scapy_name, key_len): + self.alg_name = alg_name + self.scapy_name = scapy_name + self.key_len = key_len + + +class VipsecApproxSetupEnvHelper(VppSetupEnvHelper): + DEFAULT_IPSEC_VNF_CFG = { + 'crypto_type': 'SW_cryptodev', + 'rxq': 1, + 'worker_config': '1C/1T', + 'worker_threads': 1, + } + + def __init__(self, vnfd_helper, ssh_helper, scenario_helper): + super(VipsecApproxSetupEnvHelper, self).__init__( + vnfd_helper, ssh_helper, scenario_helper) + + def _get_crypto_type(self): + vnf_cfg = self.scenario_helper.options.get('vnf_config', + self.DEFAULT_IPSEC_VNF_CFG) + return vnf_cfg.get('crypto_type', 'SW_cryptodev') + + def _get_crypto_algorithms(self): + vpp_cfg = self.scenario_helper.all_options.get('vpp_config', {}) + return vpp_cfg.get('crypto_algorithms', 'aes-gcm') + 
+ def _get_n_tunnels(self): + vpp_cfg = self.scenario_helper.all_options.get('vpp_config', {}) + return vpp_cfg.get('tunnels', 1) + + def _get_n_connections(self): + try: + flow_cfg = self.scenario_helper.all_options['flow'] + return flow_cfg['count'] + except KeyError: + raise KeyError("Missing flow definition in scenario section" + + " of the task definition file") + + def _get_flow_src_start_ip(self): + node_name = self.find_encrypted_data_interface()["node_name"] + try: + flow_cfg = self.scenario_helper.all_options['flow'] + src_ips = flow_cfg['src_ip'] + dst_ips = flow_cfg['dst_ip'] + except KeyError: + raise KeyError("Missing flow definition in scenario section" + + " of the task definition file") + + for src, dst in zip(src_ips, dst_ips): + flow_src_start_ip, _ = src.split('-') + flow_dst_start_ip, _ = dst.split('-') + + if node_name == "vnf__0": + return flow_src_start_ip + elif node_name == "vnf__1": + return flow_dst_start_ip + + def _get_flow_dst_start_ip(self): + node_name = self.find_encrypted_data_interface()["node_name"] + try: + flow_cfg = self.scenario_helper.all_options['flow'] + src_ips = flow_cfg['src_ip'] + dst_ips = flow_cfg['dst_ip'] + except KeyError: + raise KeyError("Missing flow definition in scenario section" + + " of the task definition file") + + for src, dst in zip(src_ips, dst_ips): + flow_src_start_ip, _ = src.split('-') + flow_dst_start_ip, _ = dst.split('-') + + if node_name == "vnf__0": + return flow_dst_start_ip + elif node_name == "vnf__1": + return flow_src_start_ip + + def build_config(self): + vnf_cfg = self.scenario_helper.options.get('vnf_config', + self.DEFAULT_IPSEC_VNF_CFG) + rxq = vnf_cfg.get('rxq', 1) + phy_cores = vnf_cfg.get('worker_threads', 1) + # worker_config = vnf_cfg.get('worker_config', '1C/1T').split('/')[1].lower() + + vpp_cfg = self.create_startup_configuration_of_vpp() + self.add_worker_threads_and_rxqueues(vpp_cfg, phy_cores, rxq) + self.add_pci_devices(vpp_cfg) + + frame_size_cfg = 
self.scenario_helper.all_options.get('framesize', {}) + uplink_cfg = frame_size_cfg.get('uplink', {}) + downlink_cfg = frame_size_cfg.get('downlink', {}) + framesize = min(self.calculate_frame_size(uplink_cfg), + self.calculate_frame_size(downlink_cfg)) + if framesize < 1522: + vpp_cfg.add_dpdk_no_multi_seg() + + crypto_algorithms = self._get_crypto_algorithms() + if crypto_algorithms == 'aes-gcm': + self.add_dpdk_cryptodev(vpp_cfg, 'aesni_gcm', phy_cores) + elif crypto_algorithms == 'cbc-sha1': + self.add_dpdk_cryptodev(vpp_cfg, 'aesni_mb', phy_cores) + + vpp_cfg.add_dpdk_dev_default_rxd(2048) + vpp_cfg.add_dpdk_dev_default_txd(2048) + self.apply_config(vpp_cfg, True) + self.update_vpp_interface_data() + + def setup_vnf_environment(self): + resource = super(VipsecApproxSetupEnvHelper, + self).setup_vnf_environment() + + self.start_vpp_service() + # for QAT device DH895xCC, the number of VFs is required as 32 + if self._get_crypto_type() == 'HW_cryptodev': + sriov_numvfs = self.get_sriov_numvfs( + self.find_encrypted_data_interface()["vpci"]) + if sriov_numvfs != 32: + self.crypto_device_init( + self.find_encrypted_data_interface()["vpci"], 32) + + self._update_vnfd_helper(self.sys_cores.get_cpu_layout()) + self.update_vpp_interface_data() + self.iface_update_numa() + + return resource + + @staticmethod + def calculate_frame_size(frame_cfg): + if not frame_cfg: + return 64 + + imix_count = {size.upper().replace('B', ''): int(weight) + for size, weight in frame_cfg.items()} + imix_sum = sum(imix_count.values()) + if imix_sum <= 0: + return 64 + packets_total = sum([int(size) * weight + for size, weight in imix_count.items()]) + return packets_total / imix_sum + + def check_status(self): + ipsec_created = False + cmd = "vppctl show int" + _, stdout, _ = self.ssh_helper.execute(cmd) + entries = re.split(r"\n+", stdout) + tmp = [re.split(r"\s\s+", entry, 5) for entry in entries] + + for item in tmp: + if isinstance(item, list): + if item[0] and item[0] != 'local0': + 
if "ipsec" in item[0] and not ipsec_created: + ipsec_created = True + if len(item) > 2 and item[2] == 'down': + return False + return ipsec_created + + def get_vpp_statistics(self): + cmd = "vppctl show int {intf}" + result = {} + for interface in self.vnfd_helper.interfaces: + iface_name = self.get_value_by_interface_key( + interface["virtual-interface"]["ifname"], "vpp_name") + command = cmd.format(intf=iface_name) + _, stdout, _ = self.ssh_helper.execute(command) + result.update( + self.parser_vpp_stats(interface["virtual-interface"]["ifname"], + iface_name, stdout)) + self.ssh_helper.execute("vppctl clear interfaces") + return result + + @staticmethod + def parser_vpp_stats(interface, iface_name, stats): + packets_in = 0 + packets_fwd = 0 + packets_dropped = 0 + result = {} + + entries = re.split(r"\n+", stats) + tmp = [re.split(r"\s\s+", entry, 5) for entry in entries] + + for item in tmp: + if isinstance(item, list): + if item[0] == iface_name and len(item) >= 5: + if item[3] == 'rx packets': + packets_in = int(item[4]) + elif item[4] == 'rx packets': + packets_in = int(item[5]) + elif len(item) == 3: + if item[1] == 'tx packets': + packets_fwd = int(item[2]) + elif item[1] == 'drops' or item[1] == 'rx-miss': + packets_dropped = int(item[2]) + if packets_dropped == 0 and packets_in > 0 and packets_fwd > 0: + packets_dropped = abs(packets_fwd - packets_in) + + result[interface] = { + 'packets_in': packets_in, + 'packets_fwd': packets_fwd, + 'packets_dropped': packets_dropped, + } + + return result + + def create_ipsec_tunnels(self): + self.initialize_ipsec() + + # TODO generate the same key + crypto_algorithms = self._get_crypto_algorithms() + if crypto_algorithms == 'aes-gcm': + encr_alg = CryptoAlg.AES_GCM_128 + auth_alg = IntegAlg.AES_GCM_128 + encr_key = 'LNYZXMBQDKESNLREHJMS' + auth_key = 'SWGLDTYZSQKVBZZMPIEV' + elif crypto_algorithms == 'cbc-sha1': + encr_alg = CryptoAlg.AES_CBC_128 + auth_alg = IntegAlg.SHA1_96 + encr_key = 'IFEMSHYLCZIYFUTT' + 
auth_key = 'PEALEIPSCPTRHYJSDXLY' + + self.execute_script("enable_dpdk_traces.vat", json_out=False) + self.execute_script("enable_vhost_user_traces.vat", json_out=False) + self.execute_script("enable_memif_traces.vat", json_out=False) + + node_name = self.find_encrypted_data_interface()["node_name"] + n_tunnels = self._get_n_tunnels() + n_connections = self._get_n_connections() + flow_dst_start_ip = self._get_flow_dst_start_ip() + if node_name == "vnf__0": + self.vpp_create_ipsec_tunnels( + self.find_encrypted_data_interface()["local_ip"], + self.find_encrypted_data_interface()["peer_intf"]["local_ip"], + self.find_encrypted_data_interface()["ifname"], + n_tunnels, n_connections, encr_alg, encr_key, auth_alg, + auth_key, flow_dst_start_ip) + elif node_name == "vnf__1": + self.vpp_create_ipsec_tunnels( + self.find_encrypted_data_interface()["local_ip"], + self.find_encrypted_data_interface()["peer_intf"]["local_ip"], + self.find_encrypted_data_interface()["ifname"], + n_tunnels, n_connections, encr_alg, encr_key, auth_alg, + auth_key, flow_dst_start_ip, 20000, 10000) + + def find_raw_data_interface(self): + try: + return self.vnfd_helper.find_virtual_interface(vld_id="uplink_0") + except KeyError: + return self.vnfd_helper.find_virtual_interface(vld_id="downlink_0") + + def find_encrypted_data_interface(self): + return self.vnfd_helper.find_virtual_interface(vld_id="ciphertext") + + def create_startup_configuration_of_vpp(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_unix_log() + vpp_config_generator.add_unix_cli_listen() + vpp_config_generator.add_unix_nodaemon() + vpp_config_generator.add_unix_coredump() + vpp_config_generator.add_dpdk_socketmem('1024,1024') + vpp_config_generator.add_dpdk_no_tx_checksum_offload() + vpp_config_generator.add_dpdk_log_level('debug') + for interface in self.vnfd_helper.interfaces: + vpp_config_generator.add_dpdk_uio_driver( + interface["virtual-interface"]["driver"]) + 
vpp_config_generator.add_heapsize('4G') + # TODO Enable configuration depend on VPP version + vpp_config_generator.add_statseg_size('4G') + vpp_config_generator.add_plugin('disable', ['default']) + vpp_config_generator.add_plugin('enable', ['dpdk_plugin.so']) + vpp_config_generator.add_ip6_hash_buckets('2000000') + vpp_config_generator.add_ip6_heap_size('4G') + vpp_config_generator.add_ip_heap_size('4G') + return vpp_config_generator + + def add_worker_threads_and_rxqueues(self, vpp_cfg, phy_cores, + rx_queues=None): + thr_count_int = phy_cores + cpu_count_int = phy_cores + num_mbufs_int = 32768 + + numa_list = [] + + if_list = [self.find_encrypted_data_interface()["ifname"], + self.find_raw_data_interface()["ifname"]] + for if_key in if_list: + try: + numa_list.append( + self.get_value_by_interface_key(if_key, 'numa_node')) + except KeyError: + pass + numa_cnt_mc = Counter(numa_list).most_common() + + if numa_cnt_mc and numa_cnt_mc[0][0] is not None and \ + numa_cnt_mc[0][0] != -1: + numa = numa_cnt_mc[0][0] + elif len(numa_cnt_mc) > 1 and numa_cnt_mc[0][0] == -1: + numa = numa_cnt_mc[1][0] + else: + numa = 0 + + try: + smt_used = self.sys_cores.is_smt_enabled() + except KeyError: + smt_used = False + + cpu_main = self.sys_cores.cpu_list_per_node_str(numa, skip_cnt=1, + cpu_cnt=1) + cpu_wt = self.sys_cores.cpu_list_per_node_str(numa, skip_cnt=2, + cpu_cnt=cpu_count_int, + smt_used=smt_used) + + if smt_used: + thr_count_int = 2 * cpu_count_int + + if rx_queues is None: + rxq_count_int = int(thr_count_int / 2) + else: + rxq_count_int = rx_queues + + if rxq_count_int == 0: + rxq_count_int = 1 + + num_mbufs_int = num_mbufs_int * rxq_count_int + + vpp_cfg.add_cpu_main_core(cpu_main) + vpp_cfg.add_cpu_corelist_workers(cpu_wt) + vpp_cfg.add_dpdk_dev_default_rxq(rxq_count_int) + vpp_cfg.add_dpdk_num_mbufs(num_mbufs_int) + + def add_pci_devices(self, vpp_cfg): + pci_devs = [self.find_encrypted_data_interface()["vpci"], + self.find_raw_data_interface()["vpci"]] + 
vpp_cfg.add_dpdk_dev(*pci_devs) + + def add_dpdk_cryptodev(self, vpp_cfg, sw_pmd_type, count): + crypto_type = self._get_crypto_type() + smt_used = self.sys_cores.is_smt_enabled() + cryptodev = self.find_encrypted_data_interface()["vpci"] + socket_id = self.get_value_by_interface_key( + self.find_encrypted_data_interface()["ifname"], "numa_node") + + if smt_used: + thr_count_int = count * 2 + if crypto_type == 'HW_cryptodev': + vpp_cfg.add_dpdk_cryptodev(thr_count_int, cryptodev) + else: + vpp_cfg.add_dpdk_sw_cryptodev(sw_pmd_type, socket_id, + thr_count_int) + else: + thr_count_int = count + if crypto_type == 'HW_cryptodev': + vpp_cfg.add_dpdk_cryptodev(thr_count_int, cryptodev) + else: + vpp_cfg.add_dpdk_sw_cryptodev(sw_pmd_type, socket_id, + thr_count_int) + + def initialize_ipsec(self): + flow_src_start_ip = self._get_flow_src_start_ip() + + self.set_interface_state( + self.find_encrypted_data_interface()["ifname"], 'up') + self.set_interface_state(self.find_raw_data_interface()["ifname"], + 'up') + self.vpp_interfaces_ready_wait() + self.vpp_set_interface_mtu( + self.find_encrypted_data_interface()["ifname"]) + self.vpp_set_interface_mtu(self.find_raw_data_interface()["ifname"]) + self.vpp_interfaces_ready_wait() + + self.set_ip(self.find_encrypted_data_interface()["ifname"], + self.find_encrypted_data_interface()["local_ip"], 24) + self.set_ip(self.find_raw_data_interface()["ifname"], + self.find_raw_data_interface()["local_ip"], + 24) + + self.add_arp_on_dut(self.find_encrypted_data_interface()["ifname"], + self.find_encrypted_data_interface()["peer_intf"][ + "local_ip"], + self.find_encrypted_data_interface()["peer_intf"][ + "local_mac"]) + self.add_arp_on_dut(self.find_raw_data_interface()["ifname"], + self.find_raw_data_interface()["peer_intf"][ + "local_ip"], + self.find_raw_data_interface()["peer_intf"][ + "local_mac"]) + + self.vpp_route_add(flow_src_start_ip, 8, + self.find_raw_data_interface()["peer_intf"][ + "local_ip"], + 
self.find_raw_data_interface()["ifname"]) + + +class VipsecApproxVnf(SampleVNF): + """ This class handles vIPSEC VNF model-driver definitions """ + + APP_NAME = 'vIPSEC' + APP_WORD = 'vipsec' + WAIT_TIME = 20 + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if setup_env_helper_type is None: + setup_env_helper_type = VipsecApproxSetupEnvHelper + super(VipsecApproxVnf, self).__init__( + name, vnfd, setup_env_helper_type, + resource_helper_type) + + def _run(self): + # we can't share ssh paramiko objects to force new connection + self.ssh_helper.drop_connection() + # kill before starting + self.setup_helper.kill_vnf() + self._build_config() + self.setup_helper.create_ipsec_tunnels() + + def wait_for_instantiate(self): + time.sleep(self.WAIT_TIME) + while True: + status = self.setup_helper.check_status() + if not self._vnf_process.is_alive() and not status: + raise RuntimeError("%s VNF process died." % self.APP_NAME) + LOG.info("Waiting for %s VNF to start.. 
", self.APP_NAME) + time.sleep(self.WAIT_TIME_FOR_SCRIPT) + status = self.setup_helper.check_status() + if status: + LOG.info("%s VNF is up and running.", self.APP_NAME) + self._vnf_up_post() + return self._vnf_process.exitcode + + def terminate(self): + self.setup_helper.kill_vnf() + self._tear_down() + self.resource_helper.stop_collect() + if self._vnf_process is not None: + # be proper and join first before we kill + LOG.debug("joining before terminate %s", self._vnf_process.name) + self._vnf_process.join(constants.PROCESS_JOIN_TIMEOUT) + self._vnf_process.terminate() + + def collect_kpi(self): + # we can't get KPIs if the VNF is down + check_if_process_failed(self._vnf_process, 0.01) + physical_node = Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + result = {"physical_node": physical_node} + result["collect_stats"] = self.setup_helper.get_vpp_statistics() + LOG.debug("%s collect KPIs %s", self.APP_NAME, result) + return result diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py index 3241719e8..3507315f2 100644 --- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py +++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2018-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ import re import select import socket import time + from collections import OrderedDict, namedtuple from contextlib import contextmanager from itertools import repeat, chain @@ -325,7 +326,28 @@ class ProxSocketHelper(object): return ret_str, False - def get_data(self, pkt_dump_only=False, timeout=0.01): + def get_string(self, pkt_dump_only=False, timeout=0.01): + + def is_ready_string(): + # recv() is blocking, so avoid calling it when no data is waiting. 
+ ready = select.select([self._sock], [], [], timeout) + return bool(ready[0]) + + status = False + ret_str = "" + while status is False: + for status in iter(is_ready_string, False): + decoded_data = self._sock.recv(256).decode('utf-8') + ret_str, done = self._parse_socket_data(decoded_data, + pkt_dump_only) + if (done): + status = True + break + + LOG.debug("Received data from socket: [%s]", ret_str) + return status, ret_str + + def get_data(self, pkt_dump_only=False, timeout=10.0): """ read data from the socket """ # This method behaves slightly differently depending on whether it is @@ -394,7 +416,6 @@ class ProxSocketHelper(object): """ stop all cores on the remote instance """ LOG.debug("Stop all") self.put_command("stop all\n") - time.sleep(3) def stop(self, cores, task=''): """ stop specific cores on the remote instance """ @@ -406,7 +427,6 @@ class ProxSocketHelper(object): LOG.debug("Stopping cores %s", tmpcores) self.put_command("stop {} {}\n".format(join_non_strings(',', tmpcores), task)) - time.sleep(3) def start_all(self): """ start all cores on the remote instance """ @@ -423,13 +443,11 @@ class ProxSocketHelper(object): LOG.debug("Starting cores %s", tmpcores) self.put_command("start {}\n".format(join_non_strings(',', tmpcores))) - time.sleep(3) def reset_stats(self): """ reset the statistics on the remote instance """ LOG.debug("Reset stats") self.put_command("reset stats\n") - time.sleep(1) def _run_template_over_cores(self, template, cores, *args): for core in cores: @@ -440,7 +458,6 @@ class ProxSocketHelper(object): LOG.debug("Set packet size for core(s) %s to %d", cores, pkt_size) pkt_size -= 4 self._run_template_over_cores("pkt_size {} 0 {}\n", cores, pkt_size) - time.sleep(1) def set_value(self, cores, offset, value, length): """ set value on the remote instance """ @@ -544,50 +561,173 @@ class ProxSocketHelper(object): tsc = int(ret[3]) return rx, tx, drop, tsc - def multi_port_stats(self, ports): - """get counter values from all ports 
port""" + def irq_core_stats(self, cores_tasks): + """ get IRQ stats per core""" + + stat = {} + core = 0 + task = 0 + for core, task in cores_tasks: + self.put_command("stats task.core({}).task({}).max_irq,task.core({}).task({}).irq(0)," + "task.core({}).task({}).irq(1),task.core({}).task({}).irq(2)," + "task.core({}).task({}).irq(3),task.core({}).task({}).irq(4)," + "task.core({}).task({}).irq(5),task.core({}).task({}).irq(6)," + "task.core({}).task({}).irq(7),task.core({}).task({}).irq(8)," + "task.core({}).task({}).irq(9),task.core({}).task({}).irq(10)," + "task.core({}).task({}).irq(11),task.core({}).task({}).irq(12)" + "\n".format(core, task, core, task, core, task, core, task, + core, task, core, task, core, task, core, task, + core, task, core, task, core, task, core, task, + core, task, core, task)) + in_data_str = self.get_data().split(",") + ret = [try_int(s, 0) for s in in_data_str] + key = "core_" + str(core) + try: + stat[key] = {"cpu": core, "max_irq": ret[0], "bucket_0" : ret[1], + "bucket_1" : ret[2], "bucket_2" : ret[3], + "bucket_3" : ret[4], "bucket_4" : ret[5], + "bucket_5" : ret[6], "bucket_6" : ret[7], + "bucket_7" : ret[8], "bucket_8" : ret[9], + "bucket_9" : ret[10], "bucket_10" : ret[11], + "bucket_11" : ret[12], "bucket_12" : ret[13], + "overflow": ret[10] + ret[11] + ret[12] + ret[13]} + except (KeyError, IndexError): + LOG.error("Corrupted PACKET %s", in_data_str) + + return stat - ports_str = "" - for port in ports: - ports_str = ports_str + str(port) + "," - ports_str = ports_str[:-1] + def multi_port_stats(self, ports): + """get counter values from all ports at once""" + ports_str = ",".join(map(str, ports)) ports_all_data = [] tot_result = [0] * len(ports) - retry_counter = 0 port_index = 0 - while (len(ports) is not len(ports_all_data)) and (retry_counter < 10): + while (len(ports) is not len(ports_all_data)): self.put_command("multi port stats {}\n".format(ports_str)) - ports_all_data = self.get_data().split(";") + status, 
ports_all_data_str = self.get_string() + + if not status: + return False, [] + + ports_all_data = ports_all_data_str.split(";") if len(ports) is len(ports_all_data): for port_data_str in ports_all_data: + tmpdata = [] try: - tot_result[port_index] = [try_int(s, 0) for s in port_data_str.split(",")] + tmpdata = [try_int(s, 0) for s in port_data_str.split(",")] except (IndexError, TypeError): - LOG.error("Port Index error %d %s - retrying ", port_index, port_data_str) - - if (len(tot_result[port_index]) is not 6) or \ - tot_result[port_index][0] is not ports[port_index]: - ports_all_data = [] - tot_result = [0] * len(ports) - port_index = 0 - time.sleep(0.1) + LOG.error("Unpacking data error %s", port_data_str) + return False, [] + + if (len(tmpdata) < 6) or tmpdata[0] not in ports: LOG.error("Corrupted PACKET %s - retrying", port_data_str) - break + return False, [] else: + tot_result[port_index] = tmpdata port_index = port_index + 1 else: LOG.error("Empty / too much data - retry -%s-", ports_all_data) - ports_all_data = [] - tot_result = [0] * len(ports) - port_index = 0 - time.sleep(0.1) + return False, [] - retry_counter = retry_counter + 1 - return tot_result + LOG.debug("Multi port packet ..OK.. %s", tot_result) + return True, tot_result + + @staticmethod + def multi_port_stats_tuple(stats, ports): + """ + Create a statistics tuple from port stats. + + Returns a dict with contains the port stats indexed by port name + + :param stats: (List) - List of List of port stats in pps + :param ports (Iterator) - to List of Ports + + :return: (Dict) of port stats indexed by port_name + """ + + samples = {} + port_names = {} + try: + port_names = {port_num: port_name for port_name, port_num in ports} + except (TypeError, IndexError, KeyError): + LOG.critical("Ports are not initialized or number of port is ZERO ... 
CRITICAL ERROR") + return {} + + try: + for stat in stats: + port_num = stat[0] + samples[port_names[port_num]] = { + "in_packets": stat[1], + "out_packets": stat[2]} + except (TypeError, IndexError, KeyError): + LOG.error("Ports data and samples data is incompatable ....") + return {} + + return samples + + @staticmethod + def multi_port_stats_diff(prev_stats, new_stats, hz): + """ + Create a statistics tuple from difference between prev port stats + and current port stats. And store results in pps. + + :param prev_stats: (List) - Previous List of port statistics + :param new_stats: (List) - Current List of port statistics + :param hz (float) - speed of system in Hz + + :return: sample (List) - Difference of prev_port_stats and + new_port_stats in pps + """ + + RX_TOTAL_INDEX = 1 + TX_TOTAL_INDEX = 2 + TSC_INDEX = 5 + + stats = [] + + if len(prev_stats) is not len(new_stats): + for port_index, stat in enumerate(new_stats): + stats.append([port_index, float(0), float(0), 0, 0, 0]) + return stats + + try: + for port_index, stat in enumerate(new_stats): + if stat[RX_TOTAL_INDEX] > prev_stats[port_index][RX_TOTAL_INDEX]: + rx_total = stat[RX_TOTAL_INDEX] - \ + prev_stats[port_index][RX_TOTAL_INDEX] + else: + rx_total = stat[RX_TOTAL_INDEX] + + if stat[TX_TOTAL_INDEX] > prev_stats[port_index][TX_TOTAL_INDEX]: + tx_total = stat[TX_TOTAL_INDEX] - prev_stats[port_index][TX_TOTAL_INDEX] + else: + tx_total = stat[TX_TOTAL_INDEX] + + if stat[TSC_INDEX] > prev_stats[port_index][TSC_INDEX]: + tsc = stat[TSC_INDEX] - prev_stats[port_index][TSC_INDEX] + else: + tsc = stat[TSC_INDEX] + + if tsc is 0: + rx_total = tx_total = float(0) + else: + if hz is 0: + LOG.error("HZ is ZERO ..") + rx_total = tx_total = float(0) + else: + rx_total = float(rx_total * hz / tsc) + tx_total = float(tx_total * hz / tsc) + + stats.append([port_index, rx_total, tx_total, 0, 0, tsc]) + except (TypeError, IndexError, KeyError): + stats = [] + LOG.info("Current Port Stats incompatable to previous Port 
stats .. Discarded") + + return stats def port_stats(self, ports): """get counter values from a specific port""" @@ -649,7 +789,6 @@ class ProxSocketHelper(object): self.put_command("quit_force\n") time.sleep(3) - _LOCAL_OBJECT = object() @@ -731,6 +870,30 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): file_str[1] = self.additional_files[base_name] return '"'.join(file_str) + def _make_core_list(self, inputStr): + + my_input = inputStr.split("core ", 1)[1] + ok_list = set() + + substrs = [x.strip() for x in my_input.split(',')] + for i in substrs: + try: + ok_list.add(int(i)) + + except ValueError: + try: + substr = [int(k.strip()) for k in i.split('-')] + if len(substr) > 1: + startstr = substr[0] + endstr = substr[len(substr) - 1] + for z in range(startstr, endstr + 1): + ok_list.add(z) + except ValueError: + LOG.error("Error in cores list ... resuming ") + return ok_list + + return ok_list + def generate_prox_config_file(self, config_path): sections = [] prox_config = ConfigParser(config_path, sections) @@ -750,6 +913,18 @@ class ProxDpdkVnfSetupEnvHelper(DpdkVnfSetupEnvHelper): if section_data[0] == "mac": section_data[1] = "hardware" + # adjust for range of cores + new_sections = [] + for section_name, section in sections: + if section_name.startswith('core') and section_name.find('$') == -1: + core_list = self._make_core_list(section_name) + for core in core_list: + new_sections.append(["core " + str(core), section]) + else: + new_sections.append([section_name, section]) + + sections = new_sections + # search for dst mac for _, section in sections: for section_data in section: @@ -956,6 +1131,8 @@ class ProxResourceHelper(ClientResourceHelper): self.step_delta = 1 self.step_time = 0.5 self._test_type = None + self.prev_multi_port = [] + self.prev_hz = 0 @property def sut(self): @@ -969,7 +1146,7 @@ class ProxResourceHelper(ClientResourceHelper): self._test_type = self.setup_helper.find_in_section('global', 'name', None) return self._test_type - 
def run_traffic(self, traffic_profile, *args): + def run_traffic(self, traffic_profile): self._queue.cancel_join_thread() self.lower = 0.0 self.upper = 100.0 @@ -984,7 +1161,7 @@ class ProxResourceHelper(ClientResourceHelper): def _run_traffic_once(self, traffic_profile): traffic_profile.execute_traffic(self) - if traffic_profile.done: + if traffic_profile.done.is_set(): self._queue.put({'done': True}) LOG.debug("tg_prox done") self._terminated.value = 1 @@ -994,11 +1171,40 @@ class ProxResourceHelper(ClientResourceHelper): def collect_collectd_kpi(self): return self._collect_resource_kpi() + def collect_live_stats(self): + ports = [] + for _, port_num in self.vnfd_helper.ports_iter(): + ports.append(port_num) + + ok, curr_port_stats = self.sut.multi_port_stats(ports) + if not ok: + return False, {} + + hz = self.sut.hz() + if hz is 0: + hz = self.prev_hz + else: + self.prev_hz = hz + + new_all_port_stats = \ + self.sut.multi_port_stats_diff(self.prev_multi_port, curr_port_stats, hz) + + self.prev_multi_port = curr_port_stats + + live_stats = self.sut.multi_port_stats_tuple(new_all_port_stats, + self.vnfd_helper.ports_iter()) + return True, live_stats + def collect_kpi(self): result = super(ProxResourceHelper, self).collect_kpi() # add in collectd kpis manually if result: result['collect_stats'] = self._collect_resource_kpi() + + ok, live_stats = self.collect_live_stats() + if ok: + result.update({'live_stats': live_stats}) + return result def terminate(self): @@ -1070,41 +1276,70 @@ class ProxDataHelper(object): def totals_and_pps(self): if self._totals_and_pps is None: rx_total = tx_total = 0 - all_ports = self.sut.multi_port_stats(range(self.port_count)) - for port in all_ports: - rx_total = rx_total + port[1] - tx_total = tx_total + port[2] - requested_pps = self.value / 100.0 * self.line_rate_to_pps() - self._totals_and_pps = rx_total, tx_total, requested_pps + ok = False + timeout = time.time() + constants.RETRY_TIMEOUT + while not ok: + ok, all_ports = 
self.sut.multi_port_stats([ + self.vnfd_helper.port_num(port_name) + for port_name in self.vnfd_helper.port_pairs.all_ports]) + if time.time() > timeout: + break + if ok: + for port in all_ports: + rx_total = rx_total + port[1] + tx_total = tx_total + port[2] + requested_pps = self.value / 100.0 * self.line_rate_to_pps() + self._totals_and_pps = rx_total, tx_total, requested_pps return self._totals_and_pps @property def rx_total(self): - return self.totals_and_pps[0] + try: + ret_val = self.totals_and_pps[0] + except (AttributeError, ValueError, TypeError, LookupError): + ret_val = 0 + return ret_val @property def tx_total(self): - return self.totals_and_pps[1] + try: + ret_val = self.totals_and_pps[1] + except (AttributeError, ValueError, TypeError, LookupError): + ret_val = 0 + return ret_val @property def requested_pps(self): - return self.totals_and_pps[2] + try: + ret_val = self.totals_and_pps[2] + except (AttributeError, ValueError, TypeError, LookupError): + ret_val = 0 + return ret_val @property def samples(self): samples = {} ports = [] - port_names = [] + port_names = {} for port_name, port_num in self.vnfd_helper.ports_iter(): ports.append(port_num) - port_names.append(port_name) - - results = self.sut.multi_port_stats(ports) - for result in results: - port_num = result[0] - samples[port_names[port_num]] = { - "in_packets": result[1], - "out_packets": result[2]} + port_names[port_num] = port_name + + ok = False + timeout = time.time() + constants.RETRY_TIMEOUT + while not ok: + ok, results = self.sut.multi_port_stats(ports) + if time.time() > timeout: + break + if ok: + for result in results: + port_num = result[0] + try: + samples[port_names[port_num]] = { + "in_packets": result[1], + "out_packets": result[2]} + except (IndexError, KeyError): + pass return samples def __enter__(self): @@ -1902,3 +2137,15 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper): data_helper.latency = self.get_latency() return data_helper.result_tuple, data_helper.samples + + 
+class ProxIrqProfileHelper(ProxProfileHelper): + + __prox_profile_type__ = "IRQ Query" + + def __init__(self, resource_helper): + super(ProxIrqProfileHelper, self).__init__(resource_helper) + self._cores_tuple = None + self._ports_tuple = None + self.step_delta = 5 + self.step_time = 0.5 diff --git a/yardstick/network_services/vnf_generic/vnf/prox_irq.py b/yardstick/network_services/vnf_generic/vnf/prox_irq.py new file mode 100644 index 000000000..614066e46 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/prox_irq.py @@ -0,0 +1,200 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import errno +import logging +import copy +import time + +from yardstick.common.process import check_if_process_failed +from yardstick.network_services.utils import get_nsb_option +from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf +from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen +from yardstick.benchmark.contexts.base import Context +from yardstick.network_services.vnf_generic.vnf.prox_helpers import CoreSocketTuple +LOG = logging.getLogger(__name__) + + +class ProxIrq(SampleVNFTrafficGen): + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + vnfd_cpy = copy.deepcopy(vnfd) + super(ProxIrq, self).__init__(name, vnfd_cpy) + + self._vnf_wrapper = ProxApproxVnf( + name, vnfd, setup_env_helper_type, resource_helper_type) + self.bin_path = get_nsb_option('bin_path', '') + self.name = self._vnf_wrapper.name + self.ssh_helper = self._vnf_wrapper.ssh_helper + self.setup_helper = self._vnf_wrapper.setup_helper + self.resource_helper = self._vnf_wrapper.resource_helper + self.scenario_helper = self._vnf_wrapper.scenario_helper + self.irq_cores = None + + def terminate(self): + self._vnf_wrapper.terminate() + super(ProxIrq, self).terminate() + + def instantiate(self, scenario_cfg, context_cfg): + self._vnf_wrapper.instantiate(scenario_cfg, context_cfg) + self._tg_process = self._vnf_wrapper._vnf_process + + def wait_for_instantiate(self): + self._vnf_wrapper.wait_for_instantiate() + + def get_irq_cores(self): + cores = [] + mode = "irq" + + for section_name, section in self.setup_helper.prox_config_data: + if not section_name.startswith("core"): + continue + irq_mode = task_present = False + task_present_task = 0 + for key, value in section: + if key == "mode" and value == mode: + irq_mode = True + if key == "task": + task_present = True + task_present_task = int(value) + + if irq_mode: + if not task_present: + task_present_task = 0 + core_tuple = 
CoreSocketTuple(section_name) + core = core_tuple.core_id + cores.append((core, task_present_task)) + + return cores + +class ProxIrqVNF(ProxIrq, SampleVNFTrafficGen): + + APP_NAME = 'ProxIrqVNF' + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + ProxIrq.__init__(self, name, vnfd, setup_env_helper_type, + resource_helper_type) + + self.start_test_time = None + self.end_test_time = None + + def vnf_execute(self, cmd, *args, **kwargs): + ignore_errors = kwargs.pop("_ignore_errors", False) + try: + return self.resource_helper.execute(cmd, *args, **kwargs) + except OSError as e: + if e.errno in {errno.EPIPE, errno.ESHUTDOWN, errno.ECONNRESET}: + if ignore_errors: + LOG.debug("ignoring vnf_execute exception %s for command %s", e, cmd) + else: + raise + else: + raise + + def collect_kpi(self): + # check if the tg processes have exited + physical_node = Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + + result = {"physical_node": physical_node} + for proc in (self._tg_process, self._traffic_process): + check_if_process_failed(proc) + + if self.resource_helper is None: + return result + + if self.irq_cores is None: + self.setup_helper.build_config_file() + self.irq_cores = self.get_irq_cores() + + data = self.vnf_execute('irq_core_stats', self.irq_cores) + new_data = copy.deepcopy(data) + + self.end_test_time = time.time() + self.vnf_execute('reset_stats') + + if self.start_test_time is None: + new_data = {} + else: + test_time = self.end_test_time - self.start_test_time + for index, item in data.items(): + for counter, value in item.items(): + if counter.startswith("bucket_")or \ + counter.startswith("overflow"): + if value is 0: + del new_data[index][counter] + else: + new_data[index][counter] = float(value) / test_time + + self.start_test_time = time.time() + + result["collect_stats"] = new_data + LOG.debug("%s collect KPIs %s", self.APP_NAME, result) + + return result + +class 
ProxIrqGen(ProxIrq, SampleVNFTrafficGen): + + APP_NAME = 'ProxIrqGen' + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + ProxIrq.__init__(self, name, vnfd, setup_env_helper_type, + resource_helper_type) + self.start_test_time = None + self.end_test_time = None + + def collect_kpi(self): + # check if the tg processes have exited + physical_node = Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + + result = {"physical_node": physical_node} + for proc in (self._tg_process, self._traffic_process): + check_if_process_failed(proc) + + if self.resource_helper is None: + return result + + if self.irq_cores is None: + self.setup_helper.build_config_file() + self.irq_cores = self.get_irq_cores() + + data = self.resource_helper.sut.irq_core_stats(self.irq_cores) + new_data = copy.deepcopy(data) + + self.end_test_time = time.time() + self.resource_helper.sut.reset_stats() + + if self.start_test_time is None: + new_data = {} + else: + test_time = self.end_test_time - self.start_test_time + for index, item in data.items(): + for counter, value in item.items(): + if counter.startswith("bucket_") or \ + counter.startswith("overflow"): + if value is 0: + del new_data[index][counter] + else: + new_data[index][counter] = float(value) / test_time + + self.start_test_time = time.time() + + result["collect_stats"] = new_data + LOG.debug("%s collect KPIs %s", self.APP_NAME, result) + + return result diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py index 839f30967..c9abc757e 100644 --- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2018-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,6 +15,7 @@ import errno import logging import datetime +import time from yardstick.common.process import check_if_process_failed from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper @@ -34,8 +35,7 @@ class ProxApproxVnf(SampleVNF): VNF_PROMPT = "PROX started" LUA_PARAMETER_NAME = "sut" - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = ProxDpdkVnfSetupEnvHelper @@ -46,8 +46,8 @@ class ProxApproxVnf(SampleVNF): self.prev_packets_sent = 0 self.prev_tsc = 0 self.tsc_hz = 0 - super(ProxApproxVnf, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) def _vnf_up_post(self): self.resource_helper.up_post() @@ -81,6 +81,8 @@ class ProxApproxVnf(SampleVNF): "packets_in": 0, "packets_dropped": 0, "packets_fwd": 0, + "curr_packets_in": 0, + "curr_packets_fwd": 0, "collect_stats": {"core": {}}, }) return result @@ -97,15 +99,26 @@ class ProxApproxVnf(SampleVNF): raise RuntimeError("Failed ..Invalid no of ports .. 
" "1, 2 or 4 ports only supported at this time") - all_port_stats = self.vnf_execute('multi_port_stats', range(port_count)) - rx_total = tx_total = tsc = 0 - try: - for single_port_stats in all_port_stats: - rx_total = rx_total + single_port_stats[1] - tx_total = tx_total + single_port_stats[2] - tsc = tsc + single_port_stats[5] - except (TypeError, IndexError): - LOG.error("Invalid data ...") + tmpPorts = [self.vnfd_helper.port_num(port_name) + for port_name in self.vnfd_helper.port_pairs.all_ports] + ok = False + timeout = time.time() + constants.RETRY_TIMEOUT + while not ok: + ok, all_port_stats = self.vnf_execute('multi_port_stats', tmpPorts) + if time.time() > timeout: + break + + if ok: + rx_total = tx_total = tsc = 0 + try: + for single_port_stats in all_port_stats: + rx_total = rx_total + single_port_stats[1] + tx_total = tx_total + single_port_stats[2] + tsc = tsc + single_port_stats[5] + except (TypeError, IndexError): + LOG.error("Invalid data ...") + return {} + else: return {} tsc = tsc / port_count diff --git a/yardstick/network_services/vnf_generic/vnf/router_vnf.py b/yardstick/network_services/vnf_generic/vnf/router_vnf.py index e99de9cb3..f1486bdb4 100644 --- a/yardstick/network_services/vnf_generic/vnf/router_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/router_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -34,8 +34,7 @@ class RouterVNF(SampleVNF): WAIT_TIME = 1 - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = DpdkVnfSetupEnvHelper @@ -43,8 +42,7 @@ class RouterVNF(SampleVNF): vnfd['mgmt-interface'].pop("pkey", "") vnfd['mgmt-interface']['password'] = 'password' - super(RouterVNF, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + super(RouterVNF, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type) def instantiate(self, scenario_cfg, context_cfg): self.scenario_helper.scenario_cfg = scenario_cfg diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py index a09f2a7a9..a369a3ae6 100644 --- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2018 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,15 +13,14 @@ # limitations under the License. 
import logging -from multiprocessing import Queue, Value, Process +import decimal +from multiprocessing import Queue, Value, Process, JoinableQueue import os import posixpath import re -import uuid import subprocess import time -import six from trex_stl_lib.trex_stl_client import LoggerApi from trex_stl_lib.trex_stl_client import STLClient @@ -113,19 +112,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): self.used_drivers = None self.dpdk_bind_helper = DpdkBindHelper(ssh_helper) - def _setup_hugepages(self): - meminfo = utils.read_meminfo(self.ssh_helper) - hp_size_kb = int(meminfo['Hugepagesize']) - hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16) - nr_hugepages = int(abs(hugepages_gb * 1024 * 1024 / hp_size_kb)) - self.ssh_helper.execute('echo %s | sudo tee %s' % - (nr_hugepages, self.NR_HUGEPAGES_PATH)) - hp = six.BytesIO() - self.ssh_helper.get_file_obj(self.NR_HUGEPAGES_PATH, hp) - nr_hugepages_set = int(hp.getvalue().decode('utf-8').splitlines()[0]) - LOG.info('Hugepages size (kB): %s, number claimed: %s, number set: %s', - hp_size_kb, nr_hugepages, nr_hugepages_set) - def build_config(self): vnf_cfg = self.scenario_helper.vnf_cfg task_path = self.scenario_helper.task_path @@ -193,7 +179,7 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): """No actions/rules (flows) by default""" return None - def _build_pipeline_kwargs(self): + def _build_pipeline_kwargs(self, cfg_file=None, script=None): tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME) # count the number of actual ports in the list of pairs # remove duplicate ports @@ -213,8 +199,8 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper): hwlb = ' --hwlb %s' % worker_threads self.pipeline_kwargs = { - 'cfg_file': self.CFG_CONFIG, - 'script': self.CFG_SCRIPT, + 'cfg_file': cfg_file if cfg_file else self.CFG_CONFIG, + 'script': script if script else self.CFG_SCRIPT, 'port_mask_hex': ports_mask_hex, 'tool_path': tool_path, 'hwlb': hwlb, @@ -238,12 +224,16 @@ class 
DpdkVnfSetupEnvHelper(SetupEnvHelper): def _setup_dpdk(self): """Setup DPDK environment needed for VNF to run""" - self._setup_hugepages() + hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16) + utils.setup_hugepages(self.ssh_helper, hugepages_gb * 1024 * 1024) self.dpdk_bind_helper.load_dpdk_driver() exit_status = self.dpdk_bind_helper.check_dpdk_driver() if exit_status == 0: return + else: + LOG.critical("DPDK Driver not installed") + return def _setup_resources(self): # what is this magic? how do we know which socket is for which port? @@ -409,26 +399,26 @@ class ClientResourceHelper(ResourceHelper): time.sleep(self.QUEUE_WAIT_TIME) self._queue.put(samples) - def run_traffic(self, traffic_profile, mq_producer): + def run_traffic(self, traffic_profile): # if we don't do this we can hang waiting for the queue to drain # have to do this in the subprocess self._queue.cancel_join_thread() # fixme: fix passing correct trex config file, # instead of searching the default path - mq_producer.tg_method_started() try: self._build_ports() self.client = self._connect() + if self.client is None: + LOG.critical("Failure to Connect ... unable to continue") + return + self.client.reset(ports=self.all_ports) self.client.remove_all_streams(self.all_ports) # remove all streams traffic_profile.register_generator(self) - iteration_index = 0 while self._terminated.value == 0: - iteration_index += 1 if self._run_traffic_once(traffic_profile): self._terminated.value = 1 - mq_producer.tg_method_iteration(iteration_index) self.client.stop(self.all_ports) self.client.disconnect() @@ -439,8 +429,6 @@ class ClientResourceHelper(ResourceHelper): return # return if trex/tg server is stopped. 
raise - mq_producer.tg_method_finished() - def terminate(self): self._terminated.value = 1 # stop client @@ -473,22 +461,35 @@ class ClientResourceHelper(ResourceHelper): server=self.vnfd_helper.mgmt_interface["ip"], verbose_level=LoggerApi.VERBOSE_QUIET) - # try to connect with 5s intervals, 30s max + # try to connect with 5s intervals for idx in range(6): try: client.connect() - break + for idx2 in range(6): + if client.is_connected(): + return client + LOG.info("Waiting to confirm connection %s .. Attempt %s", + idx, idx2) + time.sleep(1) + client.disconnect(stop_traffic=True, release_ports=True) except STLError: LOG.info("Unable to connect to Trex Server.. Attempt %s", idx) time.sleep(5) - return client + if client.is_connected(): + return client + else: + LOG.critical("Connection failure ..TRex username: %s server: %s", + self.vnfd_helper.mgmt_interface["user"], + self.vnfd_helper.mgmt_interface["ip"]) + return None class Rfc2544ResourceHelper(object): DEFAULT_CORRELATED_TRAFFIC = False DEFAULT_LATENCY = False DEFAULT_TOLERANCE = '0.0001 - 0.0001' + DEFAULT_RESOLUTION = '0.1' def __init__(self, scenario_helper): super(Rfc2544ResourceHelper, self).__init__() @@ -499,6 +500,8 @@ class Rfc2544ResourceHelper(object): self._rfc2544 = None self._tolerance_low = None self._tolerance_high = None + self._tolerance_precision = None + self._resolution = None @property def rfc2544(self): @@ -519,6 +522,12 @@ class Rfc2544ResourceHelper(object): return self._tolerance_high @property + def tolerance_precision(self): + if self._tolerance_precision is None: + self.get_rfc_tolerance() + return self._tolerance_precision + + @property def correlated_traffic(self): if self._correlated_traffic is None: self._correlated_traffic = \ @@ -532,14 +541,25 @@ class Rfc2544ResourceHelper(object): self._latency = self.get_rfc2544('latency', self.DEFAULT_LATENCY) return self._latency + @property + def resolution(self): + if self._resolution is None: + self._resolution = 
float(self.get_rfc2544('resolution', + self.DEFAULT_RESOLUTION)) + return self._resolution + def get_rfc2544(self, name, default=None): return self.rfc2544.get(name, default) def get_rfc_tolerance(self): tolerance_str = self.get_rfc2544('allowed_drop_rate', self.DEFAULT_TOLERANCE) - tolerance_iter = iter(sorted(float(t.strip()) for t in tolerance_str.split('-'))) - self._tolerance_low = next(tolerance_iter) - self._tolerance_high = next(tolerance_iter, self.tolerance_low) + tolerance_iter = iter(sorted( + decimal.Decimal(t.strip()) for t in tolerance_str.split('-'))) + tolerance_low = next(tolerance_iter) + tolerance_high = next(tolerance_iter, tolerance_low) + self._tolerance_precision = abs(tolerance_high.as_tuple().exponent) + self._tolerance_high = float(tolerance_high) + self._tolerance_low = float(tolerance_low) class SampleVNFDeployHelper(object): @@ -620,7 +640,6 @@ class ScenarioHelper(object): test_timeout = self.options.get('timeout', constants.DEFAULT_VNF_TIMEOUT) return test_duration if test_duration > test_timeout else test_timeout - class SampleVNF(GenericVNF): """ Class providing file-like API for generic VNF implementation """ @@ -630,9 +649,8 @@ class SampleVNF(GenericVNF): APP_NAME = "SampleVNF" # we run the VNF interactively, so the ssh command will timeout after this long - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): - super(SampleVNF, self).__init__(name, vnfd, task_id) + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): + super(SampleVNF, self).__init__(name, vnfd) self.bin_path = get_nsb_option('bin_path', '') self.scenario_helper = ScenarioHelper(self.name) @@ -701,8 +719,8 @@ class SampleVNF(GenericVNF): scenarios: - type: NSPerf nodes: - tg__0: trafficgen_1.yardstick - vnf__0: vnf.yardstick + tg__0: trafficgen_0.yardstick + vnf__0: vnf_0.yardstick options: collectd: <options> # COLLECTD priority 3 @@ -765,6 +783,53 @@ class SampleVNF(GenericVNF): # 
by other VNF output self.q_in.put('\r\n') + def wait_for_initialize(self): + buf = [] + vnf_prompt_found = False + prompt_command = '\r\n' + script_name = 'non_existent_script_name' + done_string = 'Cannot open file "{}"'.format(script_name) + time.sleep(self.WAIT_TIME) # Give some time for config to load + while True: + if not self._vnf_process.is_alive(): + raise RuntimeError("%s VNF process died." % self.APP_NAME) + while self.q_out.qsize() > 0: + buf.append(self.q_out.get()) + message = ''.join(buf) + + if self.VNF_PROMPT in message and not vnf_prompt_found: + # Once we got VNF promt, it doesn't mean that the VNF is + # up and running/initialized completely. But we can run + # addition (any) VNF command and wait for it to complete + # as it will be finished ONLY at the end of the VNF + # initialization. So, this approach can be used to + # indentify that VNF is completely initialized. + LOG.info("Got %s VNF prompt.", self.APP_NAME) + prompt_command = "run {}\r\n".format(script_name) + self.q_in.put(prompt_command) + # Cut the buffer since we are not interesting to find + # the VNF prompt anymore + prompt_pos = message.find(self.VNF_PROMPT) + buf = [message[prompt_pos + len(self.VNF_PROMPT):]] + vnf_prompt_found = True + continue + + if done_string in message: + LOG.info("%s VNF is up and running.", self.APP_NAME) + self._vnf_up_post() + self.queue_wrapper.clear() + return self._vnf_process.exitcode + + if "PANIC" in message: + raise RuntimeError("Error starting %s VNF." % + self.APP_NAME) + + LOG.info("Waiting for %s VNF to start.. 
", self.APP_NAME) + time.sleep(self.WAIT_TIME_FOR_SCRIPT) + # Send command again to display the expected prompt in case the + # expected text was corrupted by other VNF output + self.q_in.put(prompt_command) + def start_collect(self): self.resource_helper.start_collect() @@ -863,9 +928,8 @@ class SampleVNFTrafficGen(GenericTrafficGen): APP_NAME = 'Sample' RUN_WAIT = 1 - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): - super(SampleVNFTrafficGen, self).__init__(name, vnfd, task_id) + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): + super(SampleVNFTrafficGen, self).__init__(name, vnfd) self.bin_path = get_nsb_option('bin_path', '') self.scenario_helper = ScenarioHelper(self.name) @@ -887,6 +951,39 @@ class SampleVNFTrafficGen(GenericTrafficGen): self.traffic_finished = False self._tg_process = None self._traffic_process = None + self._tasks_queue = JoinableQueue() + self._result_queue = Queue() + + def _test_runner(self, traffic_profile, tasks, results): + self.resource_helper.run_test(traffic_profile, tasks, results) + + def _init_traffic_process(self, traffic_profile): + name = '{}-{}-{}-{}'.format(self.name, self.APP_NAME, + traffic_profile.__class__.__name__, + os.getpid()) + self._traffic_process = Process(name=name, target=self._test_runner, + args=( + traffic_profile, self._tasks_queue, + self._result_queue)) + + self._traffic_process.start() + while self.resource_helper.client_started.value == 0: + time.sleep(1) + if not self._traffic_process.is_alive(): + break + + def run_traffic_once(self, traffic_profile): + if self.resource_helper.client_started.value == 0: + self._init_traffic_process(traffic_profile) + + # continue test - run next iteration + LOG.info("Run next iteration ...") + self._tasks_queue.put('RUN_TRAFFIC') + + def wait_on_traffic(self): + self._tasks_queue.join() + result = self._result_queue.get() + return result def _start_server(self): # we can't share 
ssh paramiko objects to force new connection @@ -899,6 +996,8 @@ class SampleVNFTrafficGen(GenericTrafficGen): self.scenario_helper.nodes[self.name] ) + self.resource_helper.context_cfg = context_cfg + self.resource_helper.setup() # must generate_cfg after DPDK bind because we need port number self.resource_helper.generate_cfg() @@ -922,13 +1021,12 @@ class SampleVNFTrafficGen(GenericTrafficGen): LOG.info("%s TG Server is up and running.", self.APP_NAME) return self._tg_process.exitcode - def _traffic_runner(self, traffic_profile, mq_id): + def _traffic_runner(self, traffic_profile): # always drop connections first thing in new processes # so we don't get paramiko errors self.ssh_helper.drop_connection() LOG.info("Starting %s client...", self.APP_NAME) - self._mq_producer = self._setup_mq_producer(mq_id) - self.resource_helper.run_traffic(traffic_profile, self._mq_producer) + self.resource_helper.run_traffic(traffic_profile) def run_traffic(self, traffic_profile): """ Generate traffic on the wire according to the given params. 
@@ -938,12 +1036,10 @@ class SampleVNFTrafficGen(GenericTrafficGen): :param traffic_profile: :return: True/False """ - name = '{}-{}-{}-{}'.format(self.name, self.APP_NAME, - traffic_profile.__class__.__name__, + name = "{}-{}-{}-{}".format(self.name, self.APP_NAME, traffic_profile.__class__.__name__, os.getpid()) - self._traffic_process = Process( - name=name, target=self._traffic_runner, - args=(traffic_profile, uuid.uuid1().int)) + self._traffic_process = Process(name=name, target=self._traffic_runner, + args=(traffic_profile,)) self._traffic_process.start() # Wait for traffic process to start while self.resource_helper.client_started.value == 0: @@ -952,6 +1048,8 @@ class SampleVNFTrafficGen(GenericTrafficGen): if not self._traffic_process.is_alive(): break + return self._traffic_process.is_alive() + def collect_kpi(self): # check if the tg processes have exited physical_node = Context.get_physical_node_from_server( diff --git a/yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py b/yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py new file mode 100644 index 000000000..70557b848 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/tg_imsbench_sipp.py @@ -0,0 +1,143 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from collections import deque + +from yardstick.network_services.vnf_generic.vnf import sample_vnf + +LOG = logging.getLogger(__name__) + + +class SippSetupEnvHelper(sample_vnf.SetupEnvHelper): + APP_NAME = "ImsbenchSipp" + + +class SippResourceHelper(sample_vnf.ClientResourceHelper): + pass + + +class SippVnf(sample_vnf.SampleVNFTrafficGen): + """ + This class calls the test script from TG machine, then gets the result file + from IMS machine. After that, the result file is handled line by line, and + is updated to database. + """ + + APP_NAME = "ImsbenchSipp" + APP_WORD = "ImsbenchSipp" + VNF_TYPE = "ImsbenchSipp" + HW_OFFLOADING_NFVI_TYPES = {'baremetal', 'sriov'} + RESULT = "/tmp/final_result.dat" + SIPP_RESULT = "/tmp/sipp_dat_files/final_result.dat" + LOCAL_PATH = "/tmp" + CMD = "./SIPp_benchmark.bash {} {} {} '{}'" + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if resource_helper_type is None: + resource_helper_type = SippResourceHelper + if setup_env_helper_type is None: + setup_env_helper_type = SippSetupEnvHelper + super(SippVnf, self).__init__( + name, vnfd, setup_env_helper_type, resource_helper_type) + self.params = "" + self.pcscf_ip = self.vnfd_helper.interfaces[0]["virtual-interface"]\ + ["peer_intf"]["local_ip"] + self.sipp_ip = self.vnfd_helper.interfaces[0]["virtual-interface"]\ + ["local_ip"] + self.media_ip = self.vnfd_helper.interfaces[1]["virtual-interface"]\ + ["local_ip"] + self.queue = "" + self.count = 0 + + def instantiate(self, scenario_cfg, context_cfg): + super(SippVnf, self).instantiate(scenario_cfg, context_cfg) + scenario_cfg = {} + _params = [("port", 5060), ("start_user", 1), ("end_user", 10000), + ("init_reg_cps", 50), ("init_reg_max", 5000), ("reg_cps", 50), + ("reg_step", 10), ("rereg_cps", 10), ("rereg_step", 5), + ("dereg_cps", 10), ("dereg_step", 5), ("msgc_cps", 10), + ("msgc_step", 2), ("run_mode", "rtp"), ("call_cps", 10), + ("hold_time", 15), 
("call_step", 5)] + + self.params = ';'.join([str(scenario_cfg.get("options", {}).get(k, v)) + for k, v in dict(_params).items()]) + + def wait_for_instantiate(self): + pass + + def get_result_files(self): + self.ssh_helper.get(self.SIPP_RESULT, self.LOCAL_PATH, True) + + # Example of result file: + # cat /tmp/final_result.dat + # timestamp:1000 reg:100 reg_saps:0 + # timestamp:2000 reg:100 reg_saps:50 + # timestamp:3000 reg:100 reg_saps:50 + # timestamp:4000 reg:100 reg_saps:50 + # ... + # reg_Requested_prereg:50 + # reg_Effective_prereg:49.49 + # reg_DOC:0 + # ... + @staticmethod + def handle_result_files(filename): + with open(filename, 'r') as f: + content = f.readlines() + result = [{k: round(float(v), 2) for k, v in [i.split(":", 1) for i in x.split()]} + for x in content if x] + return deque(result) + + def run_traffic(self, traffic_profile): + traffic_profile.execute_traffic(self) + cmd = self.CMD.format(self.sipp_ip, self.media_ip, + self.pcscf_ip, self.params) + self.ssh_helper.execute(cmd, None, 3600, False) + self.get_result_files() + self.queue = self.handle_result_files(self.RESULT) + + def collect_kpi(self): + result = {} + try: + result = self.queue.popleft() + except IndexError: + pass + return result + + @staticmethod + def count_line_num(fname): + try: + with open(fname, 'r') as f: + return sum(1 for line in f) + except IOError: + return 0 + + def is_ended(self): + """ + The test will end when the results are pushed into database. + It does not depend on the "duration" value, so this value will be set + enough big to make sure that the test will end before duration. 
+ """ + num_lines = self.count_line_num(self.RESULT) + if self.count == num_lines: + LOG.debug('TG IS ENDED.....................') + self.count = 0 + return True + self.count += 1 + return False + + def terminate(self): + LOG.debug('TERMINATE:.....................') + self.resource_helper.terminate() diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py index d25402740..38b00a4b2 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py @@ -126,13 +126,12 @@ class IxLoadResourceHelper(sample_vnf.ClientResourceHelper): class IxLoadTrafficGen(sample_vnf.SampleVNFTrafficGen): - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if resource_helper_type is None: resource_helper_type = IxLoadResourceHelper - super(IxLoadTrafficGen, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + super(IxLoadTrafficGen, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) self._result = {} def update_gateways(self, links): @@ -143,7 +142,12 @@ class IxLoadTrafficGen(sample_vnf.SampleVNFTrafficGen): "external-interface"] if intf["virtual-interface"]["vld_id"] == name) - links[name]["ip"]["gateway"] = gateway + try: + links[name]["ip"]["gateway"] = gateway + except KeyError: + LOG.error("Invalid traffic profile: No IP section defined for %s", name) + raise + except StopIteration: LOG.debug("Cant find gateway for link %s", name) links[name]["ip"]["gateway"] = "0.0.0.0" diff --git a/yardstick/network_services/vnf_generic/vnf/tg_landslide.py b/yardstick/network_services/vnf_generic/vnf/tg_landslide.py new file mode 100644 index 000000000..285374a92 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/tg_landslide.py @@ -0,0 +1,1226 @@ +# Copyright 
(c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import logging +import requests +import six +import time + +from yardstick.common import exceptions +from yardstick.common import utils as common_utils +from yardstick.common import yaml_loader +from yardstick.network_services import utils as net_serv_utils +from yardstick.network_services.vnf_generic.vnf import sample_vnf + +try: + from lsapi import LsApi +except ImportError: + LsApi = common_utils.ErrorClass + +LOG = logging.getLogger(__name__) + + +class LandslideTrafficGen(sample_vnf.SampleVNFTrafficGen): + APP_NAME = 'LandslideTG' + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if resource_helper_type is None: + resource_helper_type = LandslideResourceHelper + super(LandslideTrafficGen, self).__init__(name, vnfd, + setup_env_helper_type, + resource_helper_type) + + self.bin_path = net_serv_utils.get_nsb_option('bin_path') + self.name = name + self.runs_traffic = True + self.traffic_finished = False + self.session_profile = None + + def listen_traffic(self, traffic_profile): + pass + + def terminate(self): + self.resource_helper.disconnect() + + def instantiate(self, scenario_cfg, context_cfg): + super(LandslideTrafficGen, self).instantiate(scenario_cfg, context_cfg) + self.resource_helper.connect() + + # Create test servers + test_servers = [x['test_server'] for x in self.vnfd_helper['config']] + 
self.resource_helper.create_test_servers(test_servers) + + # Create SUTs + [self.resource_helper.create_suts(x['suts']) for x in + self.vnfd_helper['config']] + + # Fill in test session based on session profile and test case options + self._load_session_profile() + + def run_traffic(self, traffic_profile): + self.resource_helper.abort_running_tests() + # Update DMF profile with related test case options + traffic_profile.update_dmf(self.scenario_helper.all_options) + # Create DMF in test user library + self.resource_helper.create_dmf(traffic_profile.dmf_config) + # Create/update test session in test user library + self.resource_helper.create_test_session(self.session_profile) + # Start test session + self.resource_helper.create_running_tests(self.session_profile['name']) + + def collect_kpi(self): + return self.resource_helper.collect_kpi() + + def wait_for_instantiate(self): + pass + + @staticmethod + def _update_session_suts(suts, testcase): + """ Create SUT entry. Update related EPC block in session profile. 
""" + for sut in suts: + # Update session profile EPC element with SUT info from pod file + tc_role = testcase['parameters'].get(sut['role']) + if tc_role: + _param = {} + if tc_role['class'] == 'Sut': + _param['name'] = sut['name'] + elif tc_role['class'] == 'TestNode': + _param.update({x: sut[x] for x in {'ip', 'phy', 'nextHop'} + if x in sut and sut[x]}) + testcase['parameters'][sut['role']].update(_param) + else: + LOG.info('Unexpected SUT role in pod file: "%s".', sut['role']) + return testcase + + def _update_session_test_servers(self, test_server, _tsgroup_index): + """ Update tsId, reservations, pre-resolved ARP in session profile """ + # Update test server name + test_groups = self.session_profile['tsGroups'] + test_groups[_tsgroup_index]['tsId'] = test_server['name'] + + # Update preResolvedArpAddress + arp_key = 'preResolvedArpAddress' + _preresolved_arp = test_server.get(arp_key) # list of dicts + if _preresolved_arp: + test_groups[_tsgroup_index][arp_key] = _preresolved_arp + + # Update reservations + if 'phySubnets' in test_server: + reservation = {'tsId': test_server['name'], + 'tsIndex': _tsgroup_index, + 'tsName': test_server['name'], + 'phySubnets': test_server['phySubnets']} + if 'reservations' in self.session_profile: + self.session_profile['reservations'].append(reservation) + else: + self.session_profile['reservePorts'] = 'true' + self.session_profile['reservations'] = [reservation] + + def _update_session_library_name(self, test_session): + """Update DMF library name in session profile""" + for _ts_group in test_session['tsGroups']: + for _tc in _ts_group['testCases']: + try: + for _mainflow in _tc['parameters']['Dmf']['mainflows']: + _mainflow['library'] = \ + self.vnfd_helper.mgmt_interface['user'] + except KeyError: + pass + + @staticmethod + def _update_session_tc_params(tc_options, testcase): + for _param_key in tc_options: + if _param_key == 'AssociatedPhys': + testcase[_param_key] = tc_options[_param_key] + continue + 
testcase['parameters'][_param_key] = tc_options[_param_key] + return testcase + + def _load_session_profile(self): + + with common_utils.open_relative_file( + self.scenario_helper.scenario_cfg['session_profile'], + self.scenario_helper.task_path) as stream: + self.session_profile = yaml_loader.yaml_load(stream) + + # Raise exception if number of entries differs in following files, + _config_files = ['pod file', 'session_profile file', 'test_case file'] + # Count testcases number in all tsGroups of session profile + session_tests_num = [xx for x in self.session_profile['tsGroups'] + for xx in x['testCases']] + # Create a set containing number of list elements in each structure + _config_files_blocks_num = [ + len(x) for x in + (self.vnfd_helper['config'], # test_servers and suts info + session_tests_num, + self.scenario_helper.all_options['test_cases'])] # test case file + + if len(set(_config_files_blocks_num)) != 1: + raise RuntimeError('Unequal number of elements. {}'.format( + dict(six.moves.zip_longest(_config_files, + _config_files_blocks_num)))) + + ts_names = set() + _tsgroup_idx = -1 + _testcase_idx = 0 + + # Iterate over data structures to overwrite session profile defaults + # _config: single list element holding test servers and SUTs info + # _tc_options: single test case parameters + for _config, tc_options in zip( + self.vnfd_helper['config'], # test servers and SUTS + self.scenario_helper.all_options['test_cases']): # testcase + + _ts_config = _config['test_server'] + + # Calculate test group/test case indexes based on test server name + if _ts_config['name'] in ts_names: + _testcase_idx += 1 + else: + _tsgroup_idx += 1 + _testcase_idx = 0 + + _testcase = \ + self.session_profile['tsGroups'][_tsgroup_idx]['testCases'][ + _testcase_idx] + + if _testcase['type'] != _ts_config['role']: + raise RuntimeError( + 'Test type mismatch in TC#{} of test server {}'.format( + _testcase_idx, _ts_config['name'])) + + # Fill session profile with test servers 
parameters + if _ts_config['name'] not in ts_names: + self._update_session_test_servers(_ts_config, _tsgroup_idx) + ts_names.add(_ts_config['name']) + + # Fill session profile with suts parameters + self.session_profile['tsGroups'][_tsgroup_idx]['testCases'][ + _testcase_idx].update( + self._update_session_suts(_config['suts'], _testcase)) + + # Update test case parameters + self.session_profile['tsGroups'][_tsgroup_idx]['testCases'][ + _testcase_idx].update( + self._update_session_tc_params(tc_options, _testcase)) + + self._update_session_library_name(self.session_profile) + + +class LandslideResourceHelper(sample_vnf.ClientResourceHelper): + """Landslide TG helper class""" + + REST_STATUS_CODES = {'OK': 200, 'CREATED': 201, 'NO CHANGE': 409} + REST_API_CODES = {'NOT MODIFIED': 500810} + + def __init__(self, setup_helper): + super(LandslideResourceHelper, self).__init__(setup_helper) + self._result = {} + self.vnfd_helper = setup_helper.vnfd_helper + self.scenario_helper = setup_helper.scenario_helper + + # TAS Manager config initialization + self._url = None + self._user_id = None + self.session = None + self.license_data = {} + + # TCL session initialization + self._tcl = LandslideTclClient(LsTclHandler(), self) + + self.session = requests.Session() + self.running_tests_uri = 'runningTests' + self.test_session_uri = 'testSessions' + self.test_serv_uri = 'testServers' + self.suts_uri = 'suts' + self.users_uri = 'users' + self.user_lib_uri = None + self.run_id = None + + def abort_running_tests(self, timeout=60, delay=5): + """ Abort running test sessions, if any """ + _start_time = time.time() + while time.time() < _start_time + timeout: + run_tests_states = {x['id']: x['testStateOrStep'] + for x in self.get_running_tests()} + if not set(run_tests_states.values()).difference( + {'COMPLETE', 'COMPLETE_ERROR'}): + break + else: + [self.stop_running_tests(running_test_id=_id, force=True) + for _id, _state in run_tests_states.items() + if 'COMPLETE' not in _state] + 
time.sleep(delay) + else: + raise RuntimeError( + 'Some test runs not stopped during {} seconds'.format(timeout)) + + def _build_url(self, resource, action=None): + """ Build URL string + + :param resource: REST API resource name + :type resource: str + :param action: actions name and value + :type action: dict('name': <str>, 'value': <str>) + :returns str: REST API resource name with optional action info + """ + # Action is optional and accepted only in presence of resource param + if action and not resource: + raise ValueError("Resource name not provided") + # Concatenate actions + _action = ''.join(['?{}={}'.format(k, v) for k, v in + action.items()]) if action else '' + + return ''.join([self._url, resource, _action]) + + def get_response_params(self, method, resource, params=None): + """ Retrieve params from JSON response of specific resource URL + + :param method: one of supported REST API methods + :type method: str + :param resource: URI, requested resource name + :type resource: str + :param params: attributes to be found in JSON response + :type params: list(str) + """ + _res = [] + params = params if params else [] + response = self.exec_rest_request(method, resource) + # Get substring between last slash sign and question mark (if any) + url_last_part = resource.rsplit('/', 1)[-1].rsplit('?', 1)[0] + _response_json = response.json() + # Expect dict(), if URL last part and top dict key don't match + # Else, if they match, expect list() + k, v = list(_response_json.items())[0] + if k != url_last_part: + v = [v] # v: list(dict(str: str)) + # Extract params, or whole list of dicts (without top level key) + for x in v: + _res.append({param: x[param] for param in params} if params else x) + return _res + + def _create_user(self, auth, level=1): + """ Create new user + + :param auth: data to create user account on REST server + :type auth: dict + :param level: Landslide user permissions level + :type level: int + :returns int: user id + """ + # Set expiration 
date in two years since account creation date + _exp_date = time.strftime( + '{}/%m/%d %H:%M %Z'.format(time.gmtime().tm_year + 2)) + _username = auth['user'] + _fields = {"contactInformation": "", "expiresOn": _exp_date, + "fullName": "Test User", + "isActive": "true", "level": level, + "password": auth['password'], + "username": _username} + _response = self.exec_rest_request('post', self.users_uri, + json_data=_fields, raise_exc=False) + _resp_json = _response.json() + if _response.status_code == self.REST_STATUS_CODES['CREATED']: + # New user created + _id = _resp_json['id'] + LOG.info("New user created: username='%s', id='%s'", _username, + _id) + elif _resp_json.get('apiCode') == self.REST_API_CODES['NOT MODIFIED']: + # User already exists + LOG.info("Account '%s' already exists.", _username) + # Get user id + _id = self._modify_user(_username, {"isActive": "true"})['id'] + else: + raise exceptions.RestApiError( + 'Error during new user "{}" creation'.format(_username)) + return _id + + def _modify_user(self, username, fields): + """ Modify information about existing user + + :param username: user name of account to be modified + :type username: str + :param fields: data to modify user account on REST server + :type fields: dict + :returns dict: user info + """ + _response = self.exec_rest_request('post', self.users_uri, + action={'username': username}, + json_data=fields, raise_exc=False) + if _response.status_code == self.REST_STATUS_CODES['OK']: + _response = _response.json() + else: + raise exceptions.RestApiError( + 'Error during user "{}" data update: {}'.format( + username, + _response.status_code)) + LOG.info("User account '%s' modified: '%s'", username, _response) + return _response + + def _delete_user(self, username): + """ Delete user account + + :param username: username field + :type username: str + :returns bool: True if succeeded + """ + self.exec_rest_request('delete', self.users_uri, + action={'username': username}) + + def _get_users(self, 
username=None): + """ Get user records from REST server + + :param username: username field + :type username: None|str + :returns list(dict): empty list, or user record, or list of all users + """ + _response = self.get_response_params('get', self.users_uri) + _res = [u for u in _response if + u['username'] == username] if username else _response + return _res + + def exec_rest_request(self, method, resource, action=None, json_data=None, + logs=True, raise_exc=True): + """ Execute REST API request, return response object + + :param method: one of supported requests ('post', 'get', 'delete') + :type method: str + :param resource: URL of resource + :type resource: str + :param action: data used to provide URI located after question mark + :type action: dict + :param json_data: mandatory only for 'post' method + :type json_data: dict + :param logs: debug logs display flag + :type raise_exc: bool + :param raise_exc: if True, raise exception on REST API call error + :returns requests.Response(): REST API call response object + """ + json_data = json_data if json_data else {} + action = action if action else {} + _method = method.upper() + method = method.lower() + if method not in ('post', 'get', 'delete'): + raise ValueError("Method '{}' not supported".format(_method)) + + if method == 'post' and not action: + if not (json_data and isinstance(json_data, collections.Mapping)): + raise ValueError( + 'JSON data missing in {} request'.format(_method)) + + r = getattr(self.session, method)(self._build_url(resource, action), + json=json_data) + if raise_exc and not r.ok: + msg = 'Failed to "{}" resource "{}". 
Reason: "{}"'.format( + method, self._build_url(resource, action), r.reason) + raise exceptions.RestApiError(msg) + + if logs: + LOG.debug("RC: %s | Request: %s | URL: %s", r.status_code, method, + r.request.url) + LOG.debug("Response: %s", r.json()) + return r + + def connect(self): + """Connect to RESTful server using test user account""" + tas_info = self.vnfd_helper['mgmt-interface'] + # Supported REST Server ports: HTTP - 8080, HTTPS - 8181 + _port = '8080' if tas_info['proto'] == 'http' else '8181' + tas_info.update({'port': _port}) + self._url = '{proto}://{ip}:{port}/api/'.format(**tas_info) + self.session.headers.update({'Accept': 'application/json', + 'Content-type': 'application/json'}) + # Login with super user to create test user + self.session.auth = ( + tas_info['super-user'], tas_info['super-user-password']) + LOG.info("Connect using superuser: server='%s'", self._url) + auth = {x: tas_info[x] for x in ('user', 'password')} + self._user_id = self._create_user(auth) + # Login with test user + self.session.auth = auth['user'], auth['password'] + # Test user validity + self.exec_rest_request('get', '') + + self.user_lib_uri = 'libraries/{{}}/{}'.format(self.test_session_uri) + LOG.info("Login with test user: server='%s'", self._url) + # Read existing license + self.license_data['lic_id'] = tas_info['license'] + + # Tcl client init + self._tcl.connect(tas_info['ip'], *self.session.auth) + + return self.session + + def disconnect(self): + self.session = None + self._tcl.disconnect() + + def terminate(self): + self._terminated.value = 1 + + def create_dmf(self, dmf): + if isinstance(dmf, dict): + dmf = [dmf] + for _dmf in dmf: + # Update DMF library name in traffic profile + _dmf['dmf'].update( + {'library': self.vnfd_helper.mgmt_interface['user']}) + # Create DMF on Landslide server + self._tcl.create_dmf(_dmf) + + def delete_dmf(self, dmf): + if isinstance(dmf, list): + for _dmf in dmf: + self._tcl.delete_dmf(_dmf) + else: + self._tcl.delete_dmf(dmf) + 
+ def create_suts(self, suts): + # Keep only supported keys in suts object + for _sut in suts: + sut_entry = {k: v for k, v in _sut.items() + if k not in {'phy', 'nextHop', 'role'}} + _response = self.exec_rest_request( + 'post', self.suts_uri, json_data=sut_entry, + logs=False, raise_exc=False) + if _response.status_code != self.REST_STATUS_CODES['CREATED']: + LOG.info(_response.reason) # Failed to create + _name = sut_entry.pop('name') + # Modify existing SUT + self.configure_sut(sut_name=_name, json_data=sut_entry) + else: + LOG.info("SUT created: %s", sut_entry) + + def get_suts(self, suts_id=None): + if suts_id: + _suts = self.exec_rest_request( + 'get', '{}/{}'.format(self.suts_uri, suts_id)).json() + else: + _suts = self.get_response_params('get', self.suts_uri) + + return _suts + + def configure_sut(self, sut_name, json_data): + """ Modify information of specific SUTs + + :param sut_name: name of existing SUT + :type sut_name: str + :param json_data: SUT settings + :type json_data: dict() + """ + LOG.info("Modifying SUT information...") + _response = self.exec_rest_request('post', + self.suts_uri, + action={'name': sut_name}, + json_data=json_data, + raise_exc=False) + if _response.status_code not in {self.REST_STATUS_CODES[x] for x in + {'OK', 'NO CHANGE'}}: + raise exceptions.RestApiError(_response.reason) + + LOG.info("Modified SUT: %s", sut_name) + + def delete_suts(self, suts_ids=None): + if not suts_ids: + _curr_suts = self.get_response_params('get', self.suts_uri) + suts_ids = [x['id'] for x in _curr_suts] + LOG.info("Deleting SUTs with following IDs: %s", suts_ids) + for _id in suts_ids: + self.exec_rest_request('delete', + '{}/{}'.format(self.suts_uri, _id)) + LOG.info("\tDone for SUT id: %s", _id) + + def _check_test_servers_state(self, test_servers_ids=None, delay=10, + timeout=300): + LOG.info("Waiting for related test servers state change to READY...") + # Wait on state change + _start_time = time.time() + while time.time() - _start_time < 
timeout: + ts_ids_not_ready = {x['id'] for x in + self.get_test_servers(test_servers_ids) + if x['state'] != 'READY'} + if ts_ids_not_ready == set(): + break + time.sleep(delay) + else: + raise RuntimeError( + 'Test servers not in READY state after {} seconds.'.format( + timeout)) + + def create_test_servers(self, test_servers): + """ Create test servers + + :param test_servers: input data for test servers creation + mandatory fields: managementIp + optional fields: name + :type test_servers: list(dict) + """ + _ts_ids = [] + for _ts in test_servers: + _msg = 'Created test server "%(name)s"' + _ts_ids.append(self._tcl.create_test_server(_ts)) + if _ts.get('thread_model'): + _msg += ' in mode: "%(thread_model)s"' + LOG.info(_msg, _ts) + + self._check_test_servers_state(_ts_ids) + + def get_test_servers(self, test_server_ids=None): + if not test_server_ids: # Get all test servers info + _test_servers = self.exec_rest_request( + 'get', self.test_serv_uri).json()[self.test_serv_uri] + LOG.info("Current test servers configuration: %s", _test_servers) + return _test_servers + + _test_servers = [] + for _id in test_server_ids: + _test_servers.append(self.exec_rest_request( + 'get', '{}/{}'.format(self.test_serv_uri, _id)).json()) + LOG.info("Current test servers configuration: %s", _test_servers) + return _test_servers + + def configure_test_servers(self, action, json_data=None, + test_server_ids=None): + if not test_server_ids: + test_server_ids = [x['id'] for x in self.get_test_servers()] + elif isinstance(test_server_ids, int): + test_server_ids = [test_server_ids] + for _id in test_server_ids: + self.exec_rest_request('post', + '{}/{}'.format(self.test_serv_uri, _id), + action=action, json_data=json_data) + LOG.info("Test server (id: %s) configuration done: %s", _id, + action) + return test_server_ids + + def delete_test_servers(self, test_servers_ids=None): + # Delete test servers + for _ts in self.get_test_servers(test_servers_ids): + 
self.exec_rest_request('delete', '{}/{}'.format(self.test_serv_uri, + _ts['id'])) + LOG.info("Deleted test server: %s", _ts['name']) + + def create_test_session(self, test_session): + # Use tcl client to create session + test_session['library'] = self._user_id + + # If no traffic duration set in test case, use predefined default value + # in session profile + test_session['duration'] = self.scenario_helper.all_options.get( + 'traffic_duration', + test_session['duration']) + + LOG.debug("Creating session='%s'", test_session['name']) + self._tcl.create_test_session(test_session) + + def get_test_session(self, test_session_name=None): + if test_session_name: + uri = 'libraries/{}/{}/{}'.format(self._user_id, + self.test_session_uri, + test_session_name) + else: + uri = self.user_lib_uri.format(self._user_id) + _test_sessions = self.exec_rest_request('get', uri).json() + return _test_sessions + + def configure_test_session(self, template_name, test_session): + # Override specified test session parameters + LOG.info('Update test session parameters: %s', test_session['name']) + test_session.update({'library': self._user_id}) + return self.exec_rest_request( + method='post', + action={'action': 'overrideAndSaveAs'}, + json_data=test_session, + resource='{}/{}'.format(self.user_lib_uri.format(self._user_id), + template_name)) + + def delete_test_session(self, test_session): + return self.exec_rest_request('delete', '{}/{}'.format( + self.user_lib_uri.format(self._user_id), test_session)) + + def create_running_tests(self, test_session_name): + r = self.exec_rest_request('post', + self.running_tests_uri, + json_data={'library': self._user_id, + 'name': test_session_name}) + if r.status_code != self.REST_STATUS_CODES['CREATED']: + raise exceptions.RestApiError('Failed to start test session.') + self.run_id = r.json()['id'] + + def get_running_tests(self, running_test_id=None): + """Get JSON structure of specified running test entity + + :param running_test_id: ID of created 
running test entity + :type running_test_id: int + :returns list: running tests entity + """ + if not running_test_id: + running_test_id = '' + _res_name = '{}/{}'.format(self.running_tests_uri, running_test_id) + _res = self.exec_rest_request('get', _res_name, logs=False).json() + # If no run_id specified, skip top level key in response dict. + # Else return JSON as list + return _res.get('runningTests', [_res]) + + def delete_running_tests(self, running_test_id=None): + if not running_test_id: + running_test_id = '' + _res_name = '{}/{}'.format(self.running_tests_uri, running_test_id) + self.get_response_params('delete', _res_name) + LOG.info("Deleted running test with id: %s", running_test_id) + + def _running_tests_action(self, running_test_id, action, json_data=None): + if not json_data: + json_data = {} + # Supported actions: + # 'stop', 'abort', 'continue', 'update', 'sendTcCommand', 'sendOdc' + _res_name = '{}/{}'.format(self.running_tests_uri, running_test_id) + self.exec_rest_request('post', _res_name, {'action': action}, + json_data) + LOG.debug("Executed action: '%s' on running test id: %s", action, + running_test_id) + + def stop_running_tests(self, running_test_id, json_data=None, force=False): + _action = 'abort' if force else 'stop' + self._running_tests_action(running_test_id, _action, + json_data=json_data) + LOG.info('Performed action: "%s" to test run with id: %s', _action, + running_test_id) + + def check_running_test_state(self, run_id): + r = self.exec_rest_request('get', + '{}/{}'.format(self.running_tests_uri, + run_id)) + return r.json().get("testStateOrStep") + + def get_running_tests_results(self, run_id): + _res = self.exec_rest_request( + 'get', + '{}/{}/{}'.format(self.running_tests_uri, + run_id, + 'measurements')).json() + return _res + + def _write_results(self, results): + # Avoid None value at test session start + _elapsed_time = results['elapsedTime'] if results['elapsedTime'] else 0 + + _res_tabs = results.get('tabs') + # Avoid 
parsing 'tab' dict key initially (missing or empty) + if not _res_tabs: + return + + # Flatten nested dict holding Landslide KPIs of current test run + flat_kpis_dict = {} + for _tab, _kpis in six.iteritems(_res_tabs): + for _kpi, _value in six.iteritems(_kpis): + # Combine table name and KPI name using delimiter "::" + _key = '::'.join([_tab, _kpi]) + try: + # Cast value from str to float + # Remove comma and/or measure units, e.g. "us" + flat_kpis_dict[_key] = float( + _value.split(' ')[0].replace(',', '')) + except ValueError: # E.g. if KPI represents datetime + pass + LOG.info("Polling test results of test run id: %s. Elapsed time: %s " + "seconds", self.run_id, _elapsed_time) + return flat_kpis_dict + + def collect_kpi(self): + if 'COMPLETE' in self.check_running_test_state(self.run_id): + self._result.update({'done': True}) + return self._result + _res = self.get_running_tests_results(self.run_id) + _kpis = self._write_results(_res) + if _kpis: + _kpis.update({'run_id': int(self.run_id)}) + _kpis.update({'iteration': _res['iteration']}) + self._result.update(_kpis) + return self._result + + +class LandslideTclClient(object): + """Landslide TG TCL client class""" + + DEFAULT_TEST_NODE = { + 'ethStatsEnabled': True, + 'forcedEthInterface': '', + 'innerVlanId': 0, + 'ip': '', + 'mac': '', + 'mtu': 1500, + 'nextHop': '', + 'numLinksOrNodes': 1, + 'numVlan': 1, + 'phy': '', + 'uniqueVlanAddr': False, + 'vlanDynamic': 0, + 'vlanId': 0, + 'vlanUserPriority': 0, + 'vlanTagType': 0 + } + + TEST_NODE_CMD = \ + 'ls::create -TestNode-{} -under $p_ -Type "eth"' \ + ' -Phy "{phy}" -Ip "{ip}" -NumLinksOrNodes {numLinksOrNodes}' \ + ' -NextHop "{nextHop}" -Mac "{mac}" -MTU {mtu}' \ + ' -ForcedEthInterface "{forcedEthInterface}"' \ + ' -EthStatsEnabled {ethStatsEnabled}' \ + ' -VlanId {vlanId} -VlanUserPriority {vlanUserPriority}' \ + ' -NumVlan {numVlan} -UniqueVlanAddr {uniqueVlanAddr}' \ + ';' + + def __init__(self, tcl_handler, ts_context): + self.tcl_server_ip = None + 
self._user = None + self._library_id = None + self._basic_library_id = None + self._tcl = tcl_handler + self._ts_context = ts_context + self.ts_ids = set() + + # Test types names expected in session profile, test case and pod files + self._tc_types = {"SGW_Nodal", "SGW_Node", "MME_Nodal", "PGW_Node", + "PCRF_Node"} + + self._class_param_config_handler = { + "Array": self._configure_array_param, + "TestNode": self._configure_test_node_param, + "Sut": self._configure_sut_param, + "Dmf": self._configure_dmf_param + } + + def connect(self, tcl_server_ip, username, password): + """ Connect to TCL server with username and password + + :param tcl_server_ip: TCL server IP address + :type tcl_server_ip: str + :param username: existing username on TCL server + :type username: str + :param password: password related to username on TCL server + :type password: str + """ + LOG.info("connect: server='%s' user='%s'", tcl_server_ip, username) + res = self._tcl.execute( + "ls::login {} {} {}".format(tcl_server_ip, username, password)) + if 'java0x' not in res: # handle assignment reflects login success + raise exceptions.LandslideTclException( + "connect: login failed ='{}'.".format(res)) + self._library_id = self._tcl.execute( + "ls::get [ls::query LibraryInfo -userLibraryName {}] -Id".format( + username)) + self._basic_library_id = self._get_library_id('Basic') + self.tcl_server_ip = tcl_server_ip + self._user = username + LOG.debug("connect: user='%s' me='%s' basic='%s'", self._user, + self._library_id, + self._basic_library_id) + + def disconnect(self): + """ Disconnect from TCL server. 
Drop TCL connection configuration """ + LOG.info("disconnect: server='%s' user='%s'", + self.tcl_server_ip, self._user) + self._tcl.execute("ls::logout") + self.tcl_server_ip = None + self._user = None + self._library_id = None + self._basic_library_id = None + + def _add_test_server(self, name, ip): + try: + # Check if test server exists with name equal to _ts_name + ts_id = int(self.resolve_test_server_name(name)) + except ValueError: + # Such test server does not exist. Attempt to create it + ts_id = self._tcl.execute( + 'ls::perform AddTs -Name "{}" -Ip "{}"'.format(name, ip)) + try: + int(ts_id) + except ValueError: + # Failed to create test server, e.g. limit reached + raise RuntimeError( + 'Failed to create test server: "{}". {}'.format(name, + ts_id)) + return ts_id + + def _update_license(self, name): + """ Setup/update test server license + + :param name: test server name + :type name: str + """ + # Retrieve current TsInfo configuration, result stored in handle "ts" + self._tcl.execute( + 'set ts [ls::retrieve TsInfo -Name "{}"]'.format(name)) + + # Set license ID, if it differs from current one, update test server + _curr_lic_id = self._tcl.execute('ls::get $ts -RequestedLicense') + if _curr_lic_id != self._ts_context.license_data['lic_id']: + self._tcl.execute('ls::config $ts -RequestedLicense {}'.format( + self._ts_context.license_data['lic_id'])) + self._tcl.execute('ls::perform ModifyTs $ts') + + def _set_thread_model(self, name, thread_model): + # Retrieve test server configuration, store it in handle "tsc" + _cfguser_password = self._ts_context.vnfd_helper['mgmt-interface'][ + 'cfguser_password'] + self._tcl.execute( + 'set tsc [ls::perform RetrieveTsConfiguration ' + '-name "{}" {}]'.format(name, _cfguser_password)) + # Configure ThreadModel, if it differs from current one + thread_model_map = {'Legacy': 'V0', + 'Max': 'V1', + 'Fireball': 'V1_FB3'} + _model = thread_model_map[thread_model] + _curr_model = self._tcl.execute('ls::get $tsc 
-ThreadModel') + if _curr_model != _model: + self._tcl.execute( + 'ls::config $tsc -ThreadModel "{}"'.format(_model)) + self._tcl.execute( + 'ls::perform ApplyTsConfiguration $tsc {}'.format( + _cfguser_password)) + + def create_test_server(self, test_server): + _ts_thread_model = test_server.get('thread_model') + _ts_name = test_server['name'] + + ts_id = self._add_test_server(_ts_name, test_server['ip']) + + self._update_license(_ts_name) + + # Skip below code modifying thread_model if it is not defined + if _ts_thread_model: + self._set_thread_model(_ts_name, _ts_thread_model) + + return ts_id + + def create_test_session(self, test_session): + """ Create, configure and save Landslide test session object. + + :param test_session: Landslide TestSession object + :type test_session: dict + """ + LOG.info("create_test_session: name='%s'", test_session['name']) + self._tcl.execute('set test_ [ls::create TestSession]') + self._tcl.execute('ls::config $test_ -Library {} -Name "{}"'.format( + self._library_id, test_session['name'])) + self._tcl.execute('ls::config $test_ -Description "{}"'.format( + test_session['description'])) + if 'keywords' in test_session: + self._tcl.execute('ls::config $test_ -Keywords "{}"'.format( + test_session['keywords'])) + if 'duration' in test_session: + self._tcl.execute('ls::config $test_ -Duration "{}"'.format( + test_session['duration'])) + if 'iterations' in test_session: + self._tcl.execute('ls::config $test_ -Iterations "{}"'.format( + test_session['iterations'])) + if 'reservePorts' in test_session: + if test_session['reservePorts'] == 'true': + self._tcl.execute('ls::config $test_ -Reserve Ports') + + if 'reservations' in test_session: + for _reservation in test_session['reservations']: + self._configure_reservation(_reservation) + + if 'reportOptions' in test_session: + self._configure_report_options(test_session['reportOptions']) + + for _index, _group in enumerate(test_session['tsGroups']): + self._configure_ts_group(_group, 
_index) + + self._save_test_session() + + def create_dmf(self, dmf): + """ Create, configure and save Landslide Data Message Flow object. + + :param dmf: Landslide Data Message Flow object + :type: dmf: dict + """ + self._tcl.execute('set dmf_ [ls::create Dmf]') + _lib_id = self._get_library_id(dmf['dmf']['library']) + self._tcl.execute('ls::config $dmf_ -Library {} -Name "{}"'.format( + _lib_id, + dmf['dmf']['name'])) + for _param_key in dmf: + if _param_key == 'dmf': + continue + _param_value = dmf[_param_key] + if isinstance(_param_value, dict): + # Configure complex parameter + _tcl_cmd = 'ls::config $dmf_' + for _sub_param_key in _param_value: + _sub_param_value = _param_value[_sub_param_key] + if isinstance(_sub_param_value, str): + _tcl_cmd += ' -{} "{}"'.format(_sub_param_key, + _sub_param_value) + else: + _tcl_cmd += ' -{} {}'.format(_sub_param_key, + _sub_param_value) + + self._tcl.execute(_tcl_cmd) + else: + # Configure simple parameter + if isinstance(_param_value, str): + self._tcl.execute( + 'ls::config $dmf_ -{} "{}"'.format(_param_key, + _param_value)) + else: + self._tcl.execute( + 'ls::config $dmf_ -{} {}'.format(_param_key, + _param_value)) + self._save_dmf() + + def configure_dmf(self, dmf): + # Use create to reconfigure and overwrite existing dmf + self.create_dmf(dmf) + + def delete_dmf(self, dmf): + raise NotImplementedError + + def _save_dmf(self): + # Call 'Validate' to set default values for missing parameters + res = self._tcl.execute('ls::perform Validate -Dmf $dmf_') + if res == 'Invalid': + res = self._tcl.execute('ls::get $dmf_ -ErrorsAndWarnings') + LOG.error("_save_dmf: %s", res) + raise exceptions.LandslideTclException("_save_dmf: {}".format(res)) + else: + res = self._tcl.execute('ls::save $dmf_ -overwrite') + LOG.debug("_save_dmf: result (%s)", res) + + def _configure_report_options(self, options): + for _option_key in options: + _option_value = options[_option_key] + if _option_key == 'format': + _format = 0 + if _option_value 
== 'CSV': + _format = 1 + self._tcl.execute( + 'ls::config $test_.ReportOptions -Format {} ' + '-Ts -3 -Tc -3'.format(_format)) + else: + self._tcl.execute( + 'ls::config $test_.ReportOptions -{} {}'.format( + _option_key, + _option_value)) + + def _configure_ts_group(self, ts_group, ts_group_index): + try: + _ts_id = int(self.resolve_test_server_name(ts_group['tsId'])) + except ValueError: + raise RuntimeError('Test server name "{}" does not exist.'.format( + ts_group['tsId'])) + if _ts_id not in self.ts_ids: + self._tcl.execute( + 'set tss_ [ls::create TsGroup -under $test_ -tsId {} ]'.format( + _ts_id)) + self.ts_ids.add(_ts_id) + for _case in ts_group.get('testCases', []): + self._configure_tc_type(_case, ts_group_index) + + self._configure_preresolved_arp(ts_group.get('preResolvedArpAddress')) + + def _configure_tc_type(self, tc, ts_group_index): + if tc['type'] not in self._tc_types: + raise RuntimeError('Test type {} not supported.'.format( + tc['type'])) + tc['type'] = tc['type'].replace('_', ' ') + res = self._tcl.execute( + 'set tc_ [ls::retrieve testcase -libraryId {0} "{1}"]'.format( + self._basic_library_id, tc['type'])) + if 'Invalid' in res: + raise RuntimeError('Test type {} not found in "Basic" ' + 'library.'.format(tc['type'])) + self._tcl.execute( + 'ls::config $test_.TsGroup({}) -children-Tc $tc_'.format( + ts_group_index)) + self._tcl.execute('ls::config $tc_ -Library {0} -Name "{1}"'.format( + self._basic_library_id, tc['name'])) + self._tcl.execute( + 'ls::config $tc_ -Description "{}"'.format(tc['type'])) + self._tcl.execute( + 'ls::config $tc_ -Keywords "GTP LTE {}"'.format(tc['type'])) + if 'linked' in tc: + self._tcl.execute( + 'ls::config $tc_ -Linked {}'.format(tc['linked'])) + if 'AssociatedPhys' in tc: + self._tcl.execute('ls::config $tc_ -AssociatedPhys "{}"'.format( + tc['AssociatedPhys'])) + if 'parameters' in tc: + self._configure_parameters(tc['parameters']) + + def _configure_parameters(self, params): + self._tcl.execute('set p_ 
[ls::get $tc_ -children-Parameters(0)]') + for _param_key in sorted(params): + _param_value = params[_param_key] + if isinstance(_param_value, dict): + # Configure complex parameter + if _param_value['class'] in self._class_param_config_handler: + self._class_param_config_handler[_param_value['class']]( + _param_key, + _param_value) + else: + # Configure simple parameter + self._tcl.execute( + 'ls::create {} -under $p_ -Value "{}"'.format( + _param_key, + _param_value)) + + def _configure_array_param(self, name, params): + self._tcl.execute('ls::create -Array-{} -under $p_ ;'.format(name)) + for param in params['array']: + self._tcl.execute( + 'ls::create ArrayItem -under $p_.{} -Value "{}"'.format(name, + param)) + + def _configure_test_node_param(self, name, params): + _params = self.DEFAULT_TEST_NODE + _params.update(params) + + # TCL command expects lower case 'true' or 'false' + _params['ethStatsEnabled'] = str(_params['ethStatsEnabled']).lower() + _params['uniqueVlanAddr'] = str(_params['uniqueVlanAddr']).lower() + + cmd = self.TEST_NODE_CMD.format(name, **_params) + self._tcl.execute(cmd) + + def _configure_sut_param(self, name, params): + self._tcl.execute( + 'ls::create -Sut-{} -under $p_ -Name "{}";'.format(name, + params['name'])) + + def _configure_dmf_param(self, name, params): + self._tcl.execute('ls::create -Dmf-{} -under $p_ ;'.format(name)) + + for _flow_index, _flow in enumerate(params['mainflows']): + _lib_id = self._get_library_id(_flow['library']) + self._tcl.execute( + 'ls::perform AddDmfMainflow $p_.Dmf {} "{}"'.format( + _lib_id, + _flow['name'])) + + if not params.get('instanceGroups'): + return + + _instance_group = params['instanceGroups'][_flow_index] + + # Traffic Mixer parameters handling + for _key in ['mixType', 'rate']: + if _key in _instance_group: + self._tcl.execute( + 'ls::config $p_.Dmf.InstanceGroup({}) -{} {}'.format( + _flow_index, _key, _instance_group[_key])) + + # Assignments parameters handling + for _row_id, _row in 
enumerate(_instance_group.get('rows', [])): + self._tcl.execute( + 'ls::config $p_.Dmf.InstanceGroup({}).Row({}) -Node {} ' + '-OverridePort {} -ClientPort {} -Context {} -Role {} ' + '-PreferredTransport {} -RatingGroup {} ' + '-ServiceID {}'.format( + _flow_index, _row_id, _row['node'], + _row['overridePort'], _row['clientPort'], + _row['context'], _row['role'], _row['transport'], + _row['ratingGroup'], _row['serviceId'])) + + def _configure_reservation(self, reservation): + _ts_id = self.resolve_test_server_name(reservation['tsId']) + self._tcl.execute( + 'set reservation_ [ls::create Reservation -under $test_]') + self._tcl.execute( + 'ls::config $reservation_ -TsIndex {} -TsId {} ' + '-TsName "{}"'.format(reservation['tsIndex'], + _ts_id, + reservation['tsName'])) + for _subnet in reservation['phySubnets']: + self._tcl.execute( + 'set physubnet_ [ls::create PhySubnet -under $reservation_]') + self._tcl.execute( + 'ls::config $physubnet_ -Name "{}" -Base "{}" -Mask "{}" ' + '-NumIps {}'.format(_subnet['name'], _subnet['base'], + _subnet['mask'], _subnet['numIps'])) + + def _configure_preresolved_arp(self, pre_resolved_arp): + if not pre_resolved_arp: # Pre-resolved ARP configuration not found + return + for _entry in pre_resolved_arp: + # TsGroup handle name should correspond in _configure_ts_group() + self._tcl.execute( + 'ls::create PreResolvedArpAddress -under $tss_ ' + '-StartingAddress "{StartingAddress}" ' + '-NumNodes {NumNodes}'.format(**_entry)) + + def delete_test_session(self, test_session): + raise NotImplementedError + + def _save_test_session(self): + # Call 'Validate' to set default values for missing parameters + res = self._tcl.execute('ls::perform Validate -TestSession $test_') + if res == 'Invalid': + res = self._tcl.execute('ls::get $test_ -ErrorsAndWarnings') + raise exceptions.LandslideTclException( + "Test session validation failed. 
Server response: {}".format( + res)) + else: + self._tcl.execute('ls::save $test_ -overwrite') + LOG.debug("Test session saved successfully.") + + def _get_library_id(self, library): + _library_id = self._tcl.execute( + "ls::get [ls::query LibraryInfo -systemLibraryName {}] -Id".format( + library)) + try: + int(_library_id) + return _library_id + except ValueError: + pass + + _library_id = self._tcl.execute( + "ls::get [ls::query LibraryInfo -userLibraryName {}] -Id".format( + library)) + try: + int(_library_id) + except ValueError: + LOG.error("_get_library_id: library='%s' not found.", library) + raise exceptions.LandslideTclException( + "_get_library_id: library='{}' not found.".format( + library)) + + return _library_id + + def resolve_test_server_name(self, ts_name): + return self._tcl.execute("ls::query TsId {}".format(ts_name)) + + +class LsTclHandler(object): + """Landslide TCL Handler class""" + + LS_OK = "ls_ok" + JRE_PATH = net_serv_utils.get_nsb_option('jre_path_i386') + + def __init__(self): + self.tcl_cmds = {} + self._ls = LsApi(jre_path=self.JRE_PATH) + self._ls.tcl( + "ls::config ApiOptions -NoReturnSuccessResponseString '{}'".format( + self.LS_OK)) + + def execute(self, command): + res = self._ls.tcl(command) + self.tcl_cmds[command] = res + return res diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ping.py b/yardstick/network_services/vnf_generic/vnf/tg_ping.py index a3b5afa39..5c8819119 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_ping.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_ping.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -71,7 +71,7 @@ class PingResourceHelper(ClientResourceHelper): self._queue = Queue() self._parser = PingParser(self._queue) - def run_traffic(self, traffic_profile, *args): + def run_traffic(self, traffic_profile): # drop the connection in order to force a new one self.ssh_helper.drop_connection() @@ -103,14 +103,14 @@ class PingTrafficGen(SampleVNFTrafficGen): APP_NAME = 'Ping' RUN_WAIT = 4 - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = PingSetupEnvHelper if resource_helper_type is None: resource_helper_type = PingResourceHelper - super(PingTrafficGen, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(PingTrafficGen, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) self._result = {} def _check_status(self): diff --git a/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py index 9d452213f..5da2178af 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,9 +13,7 @@ # limitations under the License. 
import logging -import multiprocessing import time -import uuid from yardstick.common import constants from yardstick.common import exceptions @@ -26,8 +24,7 @@ from yardstick.network_services.vnf_generic.vnf import base as vnf_base LOG = logging.getLogger(__name__) -class PktgenTrafficGen(vnf_base.GenericTrafficGen, - vnf_base.GenericVNFEndpoint): +class PktgenTrafficGen(vnf_base.GenericTrafficGen): """DPDK Pktgen traffic generator Website: http://pktgen-dpdk.readthedocs.io/en/latest/index.html @@ -35,15 +32,8 @@ class PktgenTrafficGen(vnf_base.GenericTrafficGen, TIMEOUT = 30 - def __init__(self, name, vnfd, task_id): - vnf_base.GenericTrafficGen.__init__(self, name, vnfd, task_id) - self.queue = multiprocessing.Queue() - self._id = uuid.uuid1().int - self._mq_producer = self._setup_mq_producer(self._id) - vnf_base.GenericVNFEndpoint.__init__(self, self._id, [task_id], - self.queue) - self._consumer = vnf_base.GenericVNFConsumer([task_id], self) - self._consumer.start_rpc_server() + def __init__(self, name, vnfd): + vnf_base.GenericTrafficGen.__init__(self, name, vnfd) self._traffic_profile = None self._node_ip = vnfd['mgmt-interface'].get('ip') self._lua_node_port = self._get_lua_node_port( @@ -71,7 +61,7 @@ class PktgenTrafficGen(vnf_base.GenericTrafficGen, def wait_for_instantiate(self): # pragma: no cover pass - def runner_method_start_iteration(self, ctxt, **kwargs): + def runner_method_start_iteration(self): # pragma: no cover LOG.debug('Start method') # NOTE(ralonsoh): 'rate' should be modified between iterations. 
The @@ -81,11 +71,6 @@ class PktgenTrafficGen(vnf_base.GenericTrafficGen, self._traffic_profile.rate(self._rate) time.sleep(4) self._traffic_profile.stop() - self._mq_producer.tg_method_iteration(1, 1, {}) - - def runner_method_stop_iteration(self, ctxt, **kwargs): # pragma: no cover - # pragma: no cover - LOG.debug('Stop method') @staticmethod def _get_lua_node_port(service_ports): diff --git a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py index d12c42ec8..65b7bac10 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,13 +29,13 @@ class ProxTrafficGen(SampleVNFTrafficGen): LUA_PARAMETER_NAME = "gen" WAIT_TIME = 1 - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): vnfd_cpy = copy.deepcopy(vnfd) - super(ProxTrafficGen, self).__init__(name, vnfd_cpy, task_id) + super(ProxTrafficGen, self).__init__(name, vnfd_cpy) self._vnf_wrapper = ProxApproxVnf( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + name, vnfd, setup_env_helper_type, resource_helper_type) self.bin_path = get_nsb_option('bin_path', '') self.name = self._vnf_wrapper.name self.ssh_helper = self._vnf_wrapper.ssh_helper diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py index 94ab06980..80812876d 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 
Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import ipaddress import logging +import six +import collections +from six import moves from yardstick.common import utils +from yardstick.common import exceptions from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper @@ -25,6 +30,606 @@ LOG = logging.getLogger(__name__) WAIT_AFTER_CFG_LOAD = 10 WAIT_FOR_TRAFFIC = 30 +WAIT_PROTOCOLS_STARTED = 420 + + +class IxiaBasicScenario(object): + """Ixia Basic scenario for flow from port to port""" + + def __init__(self, client, context_cfg, ixia_cfg): + + self.client = client + self.context_cfg = context_cfg + self.ixia_cfg = ixia_cfg + + self._uplink_vports = None + self._downlink_vports = None + + def apply_config(self): + pass + + def run_protocols(self): + pass + + def stop_protocols(self): + pass + + def create_traffic_model(self, traffic_profile): + vports = self.client.get_vports() + self._uplink_vports = vports[::2] + self._downlink_vports = vports[1::2] + self.client.create_traffic_model(self._uplink_vports, + self._downlink_vports, + traffic_profile) + + def _get_stats(self): + return self.client.get_statistics() + + def generate_samples(self, resource_helper, ports, duration): + stats = self._get_stats() + + samples = {} + # this is not DPDK port num, but this is whatever number we gave + # when we selected ports and programmed the profile + for port_num in ports: + try: + # reverse lookup port name from port_num so the stats dict is descriptive + intf = resource_helper.vnfd_helper.find_interface_by_port(port_num) + port_name = intf['name'] + avg_latency = 
stats['Store-Forward_Avg_latency_ns'][port_num] + min_latency = stats['Store-Forward_Min_latency_ns'][port_num] + max_latency = stats['Store-Forward_Max_latency_ns'][port_num] + samples[port_name] = { + 'RxThroughputBps': float(stats['Bytes_Rx'][port_num]) / duration, + 'TxThroughputBps': float(stats['Bytes_Tx'][port_num]) / duration, + 'InPackets': int(stats['Valid_Frames_Rx'][port_num]), + 'OutPackets': int(stats['Frames_Tx'][port_num]), + 'InBytes': int(stats['Bytes_Rx'][port_num]), + 'OutBytes': int(stats['Bytes_Tx'][port_num]), + 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration, + 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration, + 'LatencyAvg': utils.safe_cast(avg_latency, int, 0), + 'LatencyMin': utils.safe_cast(min_latency, int, 0), + 'LatencyMax': utils.safe_cast(max_latency, int, 0) + } + except IndexError: + pass + + return samples + + def update_tracking_options(self): + pass + + def get_tc_rfc2544_options(self): + pass + + +class IxiaL3Scenario(IxiaBasicScenario): + """Ixia scenario for L3 flow between static ip's""" + + def _add_static_ips(self): + vports = self.client.get_vports() + uplink_intf_vport = [(self.client.get_static_interface(vport), vport) + for vport in vports[::2]] + downlink_intf_vport = [(self.client.get_static_interface(vport), vport) + for vport in vports[1::2]] + + for index in range(len(uplink_intf_vport)): + intf, vport = uplink_intf_vport[index] + try: + iprange = self.ixia_cfg['flow'].get('src_ip')[index] + start_ip = utils.get_ip_range_start(iprange) + count = utils.get_ip_range_count(iprange) + self.client.add_static_ipv4(intf, vport, start_ip, count, '32') + except IndexError: + raise exceptions.IncorrectFlowOption( + option="src_ip", link="uplink_{}".format(index)) + + intf, vport = downlink_intf_vport[index] + try: + iprange = self.ixia_cfg['flow'].get('dst_ip')[index] + start_ip = utils.get_ip_range_start(iprange) + count = utils.get_ip_range_count(iprange) + 
self.client.add_static_ipv4(intf, vport, start_ip, count, '32') + except IndexError: + raise exceptions.IncorrectFlowOption( + option="dst_ip", link="downlink_{}".format(index)) + + def _add_interfaces(self): + vports = self.client.get_vports() + uplink_vports = (vport for vport in vports[::2]) + downlink_vports = (vport for vport in vports[1::2]) + + ix_node = next(node for _, node in self.context_cfg['nodes'].items() + if node['role'] == 'IxNet') + + for intf in ix_node['interfaces'].values(): + ip = intf.get('local_ip') + mac = intf.get('local_mac') + gateway = None + try: + gateway = next(route.get('gateway') + for route in ix_node.get('routing_table') + if route.get('if') == intf.get('ifname')) + except StopIteration: + LOG.debug("Gateway not provided") + + if 'uplink' in intf.get('vld_id'): + self.client.add_interface(next(uplink_vports), + ip, mac, gateway) + else: + self.client.add_interface(next(downlink_vports), + ip, mac, gateway) + + def apply_config(self): + self._add_interfaces() + self._add_static_ips() + + def create_traffic_model(self, traffic_profile): + vports = self.client.get_vports() + self._uplink_vports = vports[::2] + self._downlink_vports = vports[1::2] + + uplink_endpoints = [port + '/protocols/static' + for port in self._uplink_vports] + downlink_endpoints = [port + '/protocols/static' + for port in self._downlink_vports] + + self.client.create_ipv4_traffic_model(uplink_endpoints, + downlink_endpoints, + traffic_profile) + + +class IxiaPppoeClientScenario(object): + def __init__(self, client, context_cfg, ixia_cfg): + + self.client = client + + self._uplink_vports = None + self._downlink_vports = None + + self._access_topologies = [] + self._core_topologies = [] + + self._context_cfg = context_cfg + self._ixia_cfg = ixia_cfg + self.protocols = [] + self.device_groups = [] + + def apply_config(self): + vports = self.client.get_vports() + self._uplink_vports = vports[::2] + self._downlink_vports = vports[1::2] + self._fill_ixia_config() + 
self._apply_access_network_config() + self._apply_core_network_config() + + def create_traffic_model(self, traffic_profile): + endpoints_id_pairs = self._get_endpoints_src_dst_id_pairs( + traffic_profile.full_profile) + endpoints_obj_pairs = \ + self._get_endpoints_src_dst_obj_pairs(endpoints_id_pairs) + if endpoints_obj_pairs: + uplink_endpoints = endpoints_obj_pairs[::2] + downlink_endpoints = endpoints_obj_pairs[1::2] + else: + uplink_endpoints = self._access_topologies + downlink_endpoints = self._core_topologies + self.client.create_ipv4_traffic_model(uplink_endpoints, + downlink_endpoints, + traffic_profile) + + def run_protocols(self): + LOG.info('PPPoE Scenario - Start Protocols') + self.client.start_protocols() + utils.wait_until_true( + lambda: self.client.is_protocols_running(self.protocols), + timeout=WAIT_PROTOCOLS_STARTED, sleep=2) + + def stop_protocols(self): + LOG.info('PPPoE Scenario - Stop Protocols') + self.client.stop_protocols() + + def _get_intf_addr(self, intf): + """Retrieve interface IP address and mask + + :param intf: could be the string which represents IP address + with mask (e.g 192.168.10.2/24) or a dictionary with the host + name and the port (e.g. {'tg__0': 'xe1'}) + :return: (tuple) pair of ip address and mask + """ + if isinstance(intf, six.string_types): + ip, mask = tuple(intf.split('/')) + return ip, int(mask) + + node_name, intf_name = next(iter(intf.items())) + node = self._context_cfg["nodes"].get(node_name, {}) + interface = node.get("interfaces", {})[intf_name] + ip = interface["local_ip"] + mask = interface["netmask"] + ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), + strict=False) + return ip, ipaddr.prefixlen + + @staticmethod + def _get_endpoints_src_dst_id_pairs(flows_params): + """Get list of flows src/dst port pairs + + Create list of flows src/dst port pairs based on traffic profile + flows data. 
Each uplink/downlink pair in traffic profile represents + specific flows between the pair of ports. + + Example ('port' key represents port on which flow will be created): + + Input flows data: + uplink_0: + ipv4: + id: 1 + port: xe0 + downlink_0: + ipv4: + id: 2 + port: xe1 + uplink_1: + ipv4: + id: 3 + port: xe2 + downlink_1: + ipv4: + id: 4 + port: xe3 + + Result list: ['xe0', 'xe1', 'xe2', 'xe3'] + + Result list means that the following flows pairs will be created: + - uplink 0: port xe0 <-> port xe1 + - downlink 0: port xe1 <-> port xe0 + - uplink 1: port xe2 <-> port xe3 + - downlink 1: port xe3 <-> port xe2 + + :param flows_params: ordered dict of traffic profile flows params + :return: (list) list of flows src/dst ports + """ + if len(flows_params) % 2: + raise RuntimeError('Number of uplink/downlink pairs' + ' in traffic profile is not equal') + endpoint_pairs = [] + for flow in flows_params: + port = flows_params[flow]['ipv4'].get('port') + if port is None: + continue + endpoint_pairs.append(port) + return endpoint_pairs + + def _get_endpoints_src_dst_obj_pairs(self, endpoints_id_pairs): + """Create list of uplink/downlink device groups pairs + + Based on traffic profile options, create list of uplink/downlink + device groups pairs between which flow groups will be created: + + 1. In case uplink/downlink flows in traffic profile doesn't have + specified 'port' key, flows will be created between topologies + on corresponding access and core port. + E.g.: + Access topology on xe0: topology1 + Core topology on xe1: topology2 + Flows will be created between: + topology1 -> topology2 + topology2 -> topology1 + + 2. In case uplink/downlink flows in traffic profile have specified + 'port' key, flows will be created between device groups on this + port. 
+ E.g., for the following traffic profile + uplink_0: + port: xe0 + downlink_0: + port: xe1 + uplink_1: + port: xe0 + downlink_0: + port: xe3 + Flows will be created between: + Port xe0 (dg1) -> Port xe1 (dg1) + Port xe1 (dg1) -> Port xe0 (dg1) + Port xe0 (dg2) -> Port xe3 (dg1) + Port xe3 (dg3) -> Port xe0 (dg1) + + :param endpoints_id_pairs: (list) List of uplink/downlink flows ports + pairs + :return: (list) list of uplink/downlink device groups descriptors pairs + """ + pppoe = self._ixia_cfg['pppoe_client'] + sessions_per_port = pppoe['sessions_per_port'] + sessions_per_svlan = pppoe['sessions_per_svlan'] + svlan_count = int(sessions_per_port / sessions_per_svlan) + + uplink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['src_ip']] + downlink_ports = [p['tg__0'] for p in self._ixia_cfg['flow']['dst_ip']] + uplink_port_topology_map = zip(uplink_ports, self._access_topologies) + downlink_port_topology_map = zip(downlink_ports, self._core_topologies) + + port_to_dev_group_mapping = {} + for port, topology in uplink_port_topology_map: + topology_dgs = self.client.get_topology_device_groups(topology) + port_to_dev_group_mapping[port] = topology_dgs + for port, topology in downlink_port_topology_map: + topology_dgs = self.client.get_topology_device_groups(topology) + port_to_dev_group_mapping[port] = topology_dgs + + uplink_endpoints = endpoints_id_pairs[::2] + downlink_endpoints = endpoints_id_pairs[1::2] + + uplink_dev_groups = [] + group_up = [uplink_endpoints[i:i + svlan_count] + for i in range(0, len(uplink_endpoints), svlan_count)] + + for group in group_up: + for i, port in enumerate(group): + uplink_dev_groups.append(port_to_dev_group_mapping[port][i]) + + downlink_dev_groups = [] + for port in downlink_endpoints: + downlink_dev_groups.append(port_to_dev_group_mapping[port][0]) + + endpoint_obj_pairs = [] + [endpoint_obj_pairs.extend([up, down]) + for up, down in zip(uplink_dev_groups, downlink_dev_groups)] + + return endpoint_obj_pairs + + def 
_fill_ixia_config(self): + pppoe = self._ixia_cfg["pppoe_client"] + ipv4 = self._ixia_cfg["ipv4_client"] + + _ip = [self._get_intf_addr(intf)[0] for intf in pppoe["ip"]] + self._ixia_cfg["pppoe_client"]["ip"] = _ip + + _ip = [self._get_intf_addr(intf)[0] for intf in ipv4["gateway_ip"]] + self._ixia_cfg["ipv4_client"]["gateway_ip"] = _ip + + addrs = [self._get_intf_addr(intf) for intf in ipv4["ip"]] + _ip = [addr[0] for addr in addrs] + _prefix = [addr[1] for addr in addrs] + + self._ixia_cfg["ipv4_client"]["ip"] = _ip + self._ixia_cfg["ipv4_client"]["prefix"] = _prefix + + def _apply_access_network_config(self): + pppoe = self._ixia_cfg["pppoe_client"] + sessions_per_port = pppoe['sessions_per_port'] + sessions_per_svlan = pppoe['sessions_per_svlan'] + svlan_count = int(sessions_per_port / sessions_per_svlan) + + # add topology per uplink port (access network) + for access_tp_id, vport in enumerate(self._uplink_vports): + name = 'Topology access {}'.format(access_tp_id) + tp = self.client.add_topology(name, vport) + self._access_topologies.append(tp) + # add device group per svlan + for dg_id in range(svlan_count): + s_vlan_id = int(pppoe['s_vlan']) + dg_id + access_tp_id * svlan_count + s_vlan = ixnet_api.Vlan(vlan_id=s_vlan_id) + c_vlan = ixnet_api.Vlan(vlan_id=pppoe['c_vlan'], vlan_id_step=1) + name = 'SVLAN {}'.format(s_vlan_id) + dg = self.client.add_device_group(tp, name, sessions_per_svlan) + self.device_groups.append(dg) + # add ethernet layer to device group + ethernet = self.client.add_ethernet(dg, 'Ethernet') + self.protocols.append(ethernet) + self.client.add_vlans(ethernet, [s_vlan, c_vlan]) + # add ppp over ethernet + if 'pap_user' in pppoe: + ppp = self.client.add_pppox_client(ethernet, 'pap', + pppoe['pap_user'], + pppoe['pap_password']) + else: + ppp = self.client.add_pppox_client(ethernet, 'chap', + pppoe['chap_user'], + pppoe['chap_password']) + self.protocols.append(ppp) + + def _apply_core_network_config(self): + ipv4 = 
self._ixia_cfg["ipv4_client"] + sessions_per_port = ipv4['sessions_per_port'] + sessions_per_vlan = ipv4['sessions_per_vlan'] + vlan_count = int(sessions_per_port / sessions_per_vlan) + + # add topology per downlink port (core network) + for core_tp_id, vport in enumerate(self._downlink_vports): + name = 'Topology core {}'.format(core_tp_id) + tp = self.client.add_topology(name, vport) + self._core_topologies.append(tp) + # add device group per vlan + for dg_id in range(vlan_count): + name = 'Core port {}'.format(core_tp_id) + dg = self.client.add_device_group(tp, name, sessions_per_vlan) + self.device_groups.append(dg) + # add ethernet layer to device group + ethernet = self.client.add_ethernet(dg, 'Ethernet') + self.protocols.append(ethernet) + if 'vlan' in ipv4: + vlan_id = int(ipv4['vlan']) + dg_id + core_tp_id * vlan_count + vlan = ixnet_api.Vlan(vlan_id=vlan_id) + self.client.add_vlans(ethernet, [vlan]) + # add ipv4 layer + gw_ip = ipv4['gateway_ip'][core_tp_id] + # use gw addr to generate ip addr from the same network + ip_addr = ipaddress.IPv4Address(gw_ip) + 1 + ipv4_obj = self.client.add_ipv4(ethernet, name='ipv4', + addr=ip_addr, + addr_step='0.0.0.1', + prefix=ipv4['prefix'][core_tp_id], + gateway=gw_ip) + self.protocols.append(ipv4_obj) + if ipv4.get("bgp"): + bgp_peer_obj = self.client.add_bgp(ipv4_obj, + dut_ip=ipv4["bgp"]["dut_ip"], + local_as=ipv4["bgp"]["as_number"], + bgp_type=ipv4["bgp"].get("bgp_type")) + self.protocols.append(bgp_peer_obj) + + def update_tracking_options(self): + priority_map = { + 'raw': 'ipv4Raw0', + 'tos': {'precedence': 'ipv4Precedence0'}, + 'dscp': {'defaultPHB': 'ipv4DefaultPhb0', + 'selectorPHB': 'ipv4ClassSelectorPhb0', + 'assuredPHB': 'ipv4AssuredForwardingPhb0', + 'expeditedPHB': 'ipv4ExpeditedForwardingPhb0'} + } + + prio_trackby_key = 'ipv4Precedence0' + + try: + priority = list(self._ixia_cfg['priority'])[0] + if priority == 'raw': + prio_trackby_key = priority_map[priority] + elif priority in ['tos', 'dscp']: + 
priority_type = list(self._ixia_cfg['priority'][priority])[0] + prio_trackby_key = priority_map[priority][priority_type] + except KeyError: + pass + + tracking_options = ['flowGroup0', 'vlanVlanId0', prio_trackby_key] + self.client.set_flow_tracking(tracking_options) + + def get_tc_rfc2544_options(self): + return self._ixia_cfg.get('rfc2544') + + def _get_stats(self): + return self.client.get_pppoe_scenario_statistics() + + @staticmethod + def get_flow_id_data(stats, flow_id, key): + result = [float(flow.get(key)) for flow in stats if flow['id'] == flow_id] + return sum(result) / len(result) + + def get_priority_flows_stats(self, samples, duration): + results = {} + priorities = set([flow['IP_Priority'] for flow in samples]) + for priority in priorities: + tx_frames = sum( + [int(flow['Tx_Frames']) for flow in samples + if flow['IP_Priority'] == priority]) + rx_frames = sum( + [int(flow['Rx_Frames']) for flow in samples + if flow['IP_Priority'] == priority]) + prio_flows_num = len([flow for flow in samples + if flow['IP_Priority'] == priority]) + avg_latency_ns = sum( + [int(flow['Store-Forward_Avg_latency_ns']) for flow in samples + if flow['IP_Priority'] == priority]) / prio_flows_num + min_latency_ns = min( + [int(flow['Store-Forward_Min_latency_ns']) for flow in samples + if flow['IP_Priority'] == priority]) + max_latency_ns = max( + [int(flow['Store-Forward_Max_latency_ns']) for flow in samples + if flow['IP_Priority'] == priority]) + tx_throughput = float(tx_frames) / duration + rx_throughput = float(rx_frames) / duration + results[priority] = { + 'InPackets': rx_frames, + 'OutPackets': tx_frames, + 'RxThroughput': round(rx_throughput, 3), + 'TxThroughput': round(tx_throughput, 3), + 'LatencyAvg': utils.safe_cast(avg_latency_ns, int, 0), + 'LatencyMin': utils.safe_cast(min_latency_ns, int, 0), + 'LatencyMax': utils.safe_cast(max_latency_ns, int, 0) + } + return results + + def generate_samples(self, resource_helper, ports, duration): + + stats = 
self._get_stats() + samples = {} + ports_stats = stats['port_statistics'] + flows_stats = stats['flow_statistic'] + pppoe_subs_per_port = stats['pppox_client_per_port'] + + # Get sorted list of ixia ports names + ixia_port_names = sorted([data['port_name'] for data in ports_stats]) + + # Set 'port_id' key for ports stats items + for item in ports_stats: + port_id = item.pop('port_name').split('-')[-1].strip() + item['port_id'] = int(port_id) + + # Set 'id' key for flows stats items + for item in flows_stats: + flow_id = item.pop('Flow_Group').split('-')[1].strip() + item['id'] = int(flow_id) + + # Set 'port_id' key for pppoe subs per port stats + for item in pppoe_subs_per_port: + port_id = item.pop('subs_port').split('-')[-1].strip() + item['port_id'] = int(port_id) + + # Map traffic flows to ports + port_flow_map = collections.defaultdict(set) + for item in flows_stats: + tx_port = item.pop('Tx_Port') + tx_port_index = ixia_port_names.index(tx_port) + port_flow_map[tx_port_index].update([item['id']]) + + # Sort ports stats + ports_stats = sorted(ports_stats, key=lambda k: k['port_id']) + + # Get priority flows stats + prio_flows_stats = self.get_priority_flows_stats(flows_stats, duration) + samples['priority_stats'] = prio_flows_stats + + # this is not DPDK port num, but this is whatever number we gave + # when we selected ports and programmed the profile + for port_num in ports: + try: + # reverse lookup port name from port_num so the stats dict is descriptive + intf = resource_helper.vnfd_helper.find_interface_by_port(port_num) + port_name = intf['name'] + port_id = ports_stats[port_num]['port_id'] + port_subs_stats = \ + [port_data for port_data in pppoe_subs_per_port + if port_data.get('port_id') == port_id] + + avg_latency = \ + sum([float(self.get_flow_id_data( + flows_stats, flow, 'Store-Forward_Avg_latency_ns')) + for flow in port_flow_map[port_num]]) / len(port_flow_map[port_num]) + min_latency = \ + min([float(self.get_flow_id_data( + flows_stats, flow, 
'Store-Forward_Min_latency_ns')) + for flow in port_flow_map[port_num]]) + max_latency = \ + max([float(self.get_flow_id_data( + flows_stats, flow, 'Store-Forward_Max_latency_ns')) + for flow in port_flow_map[port_num]]) + + samples[port_name] = { + 'RxThroughputBps': float(ports_stats[port_num]['Bytes_Rx']) / duration, + 'TxThroughputBps': float(ports_stats[port_num]['Bytes_Tx']) / duration, + 'InPackets': int(ports_stats[port_num]['Valid_Frames_Rx']), + 'OutPackets': int(ports_stats[port_num]['Frames_Tx']), + 'InBytes': int(ports_stats[port_num]['Bytes_Rx']), + 'OutBytes': int(ports_stats[port_num]['Bytes_Tx']), + 'RxThroughput': float(ports_stats[port_num]['Valid_Frames_Rx']) / duration, + 'TxThroughput': float(ports_stats[port_num]['Frames_Tx']) / duration, + 'LatencyAvg': utils.safe_cast(avg_latency, int, 0), + 'LatencyMin': utils.safe_cast(min_latency, int, 0), + 'LatencyMax': utils.safe_cast(max_latency, int, 0) + } + + if port_subs_stats: + samples[port_name].update( + {'SessionsUp': int(port_subs_stats[0]['Sessions_Up']), + 'SessionsDown': int(port_subs_stats[0]['Sessions_Down']), + 'SessionsNotStarted': int(port_subs_stats[0]['Sessions_Not_Started']), + 'SessionsTotal': int(port_subs_stats[0]['Sessions_Total'])} + ) + + except IndexError: + pass + + return samples class IxiaRfc2544Helper(Rfc2544ResourceHelper): @@ -41,6 +646,12 @@ class IxiaResourceHelper(ClientResourceHelper): super(IxiaResourceHelper, self).__init__(setup_helper) self.scenario_helper = setup_helper.scenario_helper + self._ixia_scenarios = { + "IxiaBasic": IxiaBasicScenario, + "IxiaL3": IxiaL3Scenario, + "IxiaPppoeClient": IxiaPppoeClientScenario, + } + self.client = ixnet_api.IxNextgen() if rfc_helper_type is None: @@ -49,65 +660,59 @@ class IxiaResourceHelper(ClientResourceHelper): self.rfc_helper = rfc_helper_type(self.scenario_helper) self.uplink_ports = None self.downlink_ports = None + self.context_cfg = None + self._ix_scenario = None self._connect() def _connect(self, 
client=None): self.client.connect(self.vnfd_helper) - def get_stats(self, *args, **kwargs): - return self.client.get_statistics() + def setup(self): + super(IxiaResourceHelper, self).setup() + self._init_ix_scenario() def stop_collect(self): + self._ix_scenario.stop_protocols() self._terminated.value = 1 def generate_samples(self, ports, duration): - stats = self.get_stats() + return self._ix_scenario.generate_samples(self, ports, duration) - samples = {} - # this is not DPDK port num, but this is whatever number we gave - # when we selected ports and programmed the profile - for port_num in ports: - try: - # reverse lookup port name from port_num so the stats dict is descriptive - intf = self.vnfd_helper.find_interface_by_port(port_num) - port_name = intf['name'] - avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num] - min_latency = stats['Store-Forward_Min_latency_ns'][port_num] - max_latency = stats['Store-Forward_Max_latency_ns'][port_num] - samples[port_name] = { - 'rx_throughput_kps': float(stats['Rx_Rate_Kbps'][port_num]), - 'tx_throughput_kps': float(stats['Tx_Rate_Kbps'][port_num]), - 'rx_throughput_mbps': float(stats['Rx_Rate_Mbps'][port_num]), - 'tx_throughput_mbps': float(stats['Tx_Rate_Mbps'][port_num]), - 'in_packets': int(stats['Valid_Frames_Rx'][port_num]), - 'out_packets': int(stats['Frames_Tx'][port_num]), - 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration, - 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration, - 'Store-Forward_Avg_latency_ns': utils.safe_cast(avg_latency, int, 0), - 'Store-Forward_Min_latency_ns': utils.safe_cast(min_latency, int, 0), - 'Store-Forward_Max_latency_ns': utils.safe_cast(max_latency, int, 0) - } - except IndexError: - pass + def _init_ix_scenario(self): + ixia_config = self.scenario_helper.scenario_cfg.get('ixia_config', 'IxiaBasic') - return samples + if ixia_config in self._ixia_scenarios: + scenario_type = self._ixia_scenarios[ixia_config] + + self._ix_scenario = 
scenario_type(self.client, self.context_cfg, + self.scenario_helper.scenario_cfg['options']) + else: + raise RuntimeError( + "IXIA config type '{}' not supported".format(ixia_config)) - def _initialize_client(self): + def _initialize_client(self, traffic_profile): """Initialize the IXIA IxNetwork client and configure the server""" self.client.clear_config() self.client.assign_ports() - self.client.create_traffic_model() + self._ix_scenario.apply_config() + self._ix_scenario.create_traffic_model(traffic_profile) - def run_traffic(self, traffic_profile, *args): + def update_tracking_options(self): + self._ix_scenario.update_tracking_options() + + def run_traffic(self, traffic_profile): if self._terminated.value: return min_tol = self.rfc_helper.tolerance_low max_tol = self.rfc_helper.tolerance_high + precision = self.rfc_helper.tolerance_precision + resolution = self.rfc_helper.resolution default = "00:00:00:00:00:00" self._build_ports() - self._initialize_client() + traffic_profile.update_traffic_profile(self) + self._initialize_client(traffic_profile) mac = {} for port_name in self.vnfd_helper.port_pairs.all_ports: @@ -119,19 +724,23 @@ class IxiaResourceHelper(ClientResourceHelper): mac["src_mac_{}".format(port_num)] = virt_intf.get("local_mac", default) mac["dst_mac_{}".format(port_num)] = virt_intf.get("dst_mac", default) + self._ix_scenario.run_protocols() + try: while not self._terminated.value: - first_run = traffic_profile.execute_traffic( - self, self.client, mac) + first_run = traffic_profile.execute_traffic(self, self.client, + mac) self.client_started.value = 1 # pylint: disable=unnecessary-lambda utils.wait_until_true(lambda: self.client.is_traffic_stopped(), timeout=traffic_profile.config.duration * 2) + rfc2544_opts = self._ix_scenario.get_tc_rfc2544_options() samples = self.generate_samples(traffic_profile.ports, traffic_profile.config.duration) completed, samples = traffic_profile.get_drop_percentage( - samples, min_tol, max_tol, 
first_run=first_run) + samples, min_tol, max_tol, precision, resolution, + first_run=first_run, tc_rfc2544_opts=rfc2544_opts) self._queue.put(samples) if completed: @@ -140,23 +749,93 @@ class IxiaResourceHelper(ClientResourceHelper): except Exception: # pylint: disable=broad-except LOG.exception('Run Traffic terminated') + self._ix_scenario.stop_protocols() + self.client_started.value = 0 self._terminated.value = 1 - def collect_kpi(self): - self.rfc_helper.iteration.value += 1 - return super(IxiaResourceHelper, self).collect_kpi() + def run_test(self, traffic_profile, tasks_queue, results_queue, *args): # pragma: no cover + LOG.info("Ixia resource_helper run_test") + if self._terminated.value: + return + + min_tol = self.rfc_helper.tolerance_low + max_tol = self.rfc_helper.tolerance_high + precision = self.rfc_helper.tolerance_precision + resolution = self.rfc_helper.resolution + default = "00:00:00:00:00:00" + + self._build_ports() + traffic_profile.update_traffic_profile(self) + self._initialize_client(traffic_profile) + + mac = {} + for port_name in self.vnfd_helper.port_pairs.all_ports: + intf = self.vnfd_helper.find_interface(name=port_name) + virt_intf = intf["virtual-interface"] + # we only know static traffic id by reading the json + # this is used by _get_ixia_trafficrofile + port_num = self.vnfd_helper.port_num(intf) + mac["src_mac_{}".format(port_num)] = virt_intf.get("local_mac", default) + mac["dst_mac_{}".format(port_num)] = virt_intf.get("dst_mac", default) + + self._ix_scenario.run_protocols() + + try: + completed = False + self.rfc_helper.iteration.value = 0 + self.client_started.value = 1 + while completed is False and not self._terminated.value: + LOG.info("Wait for task ...") + + try: + task = tasks_queue.get(True, 5) + except moves.queue.Empty: + continue + else: + if task != 'RUN_TRAFFIC': + continue + + self.rfc_helper.iteration.value += 1 + LOG.info("Got %s task, start iteration %d", task, + self.rfc_helper.iteration.value) + first_run = 
traffic_profile.execute_traffic(self, self.client, + mac) + # pylint: disable=unnecessary-lambda + utils.wait_until_true(lambda: self.client.is_traffic_stopped(), + timeout=traffic_profile.config.duration * 2) + samples = self.generate_samples(traffic_profile.ports, + traffic_profile.config.duration) + + completed, samples = traffic_profile.get_drop_percentage( + samples, min_tol, max_tol, precision, resolution, + first_run=first_run) + self._queue.put(samples) + + if completed: + LOG.debug("IxiaResourceHelper::run_test - test completed") + results_queue.put('COMPLETE') + else: + results_queue.put('CONTINUE') + tasks_queue.task_done() + + except Exception: # pylint: disable=broad-except + LOG.exception('Run Traffic terminated') + + self._ix_scenario.stop_protocols() + self.client_started.value = 0 + LOG.debug("IxiaResourceHelper::run_test done") class IxiaTrafficGen(SampleVNFTrafficGen): APP_NAME = 'Ixia' - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if resource_helper_type is None: resource_helper_type = IxiaResourceHelper - super(IxiaTrafficGen, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(IxiaTrafficGen, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) self._ixia_traffic_gen = None self.ixia_file_name = '' self.vnf_port_pairs = [] diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py index 7ecb12478..a9c0222ac 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in 
compliance with the License. @@ -15,12 +15,14 @@ import logging import time +from six import moves from yardstick.common import utils from yardstick.network_services.vnf_generic.vnf import sample_vnf from yardstick.network_services.vnf_generic.vnf import tg_trex +from trex_stl_lib.trex_stl_exceptions import STLError -LOGGING = logging.getLogger(__name__) +LOG = logging.getLogger(__name__) class TrexRfcResourceHelper(tg_trex.TrexResourceHelper): @@ -48,7 +50,8 @@ class TrexRfcResourceHelper(tg_trex.TrexResourceHelper): completed, output = traffic_profile.get_drop_percentage( samples, self.rfc2544_helper.tolerance_low, self.rfc2544_helper.tolerance_high, - self.rfc2544_helper.correlated_traffic) + self.rfc2544_helper.correlated_traffic, + self.rfc2544_helper.resolution) self._queue.put(output) return completed @@ -58,6 +61,56 @@ class TrexRfcResourceHelper(tg_trex.TrexResourceHelper): def clear_client_stats(self, ports): self.client.clear_stats(ports=ports) + def run_test(self, traffic_profile, tasks_queue, results_queue, *args): # pragma: no cover + LOG.debug("Trex resource_helper run_test") + if self._terminated.value: + return + # if we don't do this we can hang waiting for the queue to drain + # have to do this in the subprocess + self._queue.cancel_join_thread() + try: + self._build_ports() + self.client = self._connect() + self.client.reset(ports=self.all_ports) + self.client.remove_all_streams(self.all_ports) # remove all streams + traffic_profile.register_generator(self) + + completed = False + self.rfc2544_helper.iteration.value = 0 + self.client_started.value = 1 + while completed is False and not self._terminated.value: + LOG.debug("Wait for task ...") + try: + task = tasks_queue.get(True, 5) + except moves.queue.Empty: + LOG.debug("Wait for task timeout, continue waiting...") + continue + else: + if task != 'RUN_TRAFFIC': + continue + self.rfc2544_helper.iteration.value += 1 + LOG.info("Got %s task, start iteration %d", task, + 
self.rfc2544_helper.iteration.value) + completed = self._run_traffic_once(traffic_profile) + if completed: + LOG.debug("%s::run_test - test completed", + self.__class__.__name__) + results_queue.put('COMPLETE') + else: + results_queue.put('CONTINUE') + tasks_queue.task_done() + + self.client.stop(self.all_ports) + self.client.disconnect() + self._terminated.value = 0 + except STLError: + if self._terminated.value: + LOG.debug("traffic generator is stopped") + return # return if trex/tg server is stopped. + raise + + self.client_started.value = 0 + LOG.debug("%s::run_test done", self.__class__.__name__) class TrexTrafficGenRFC(tg_trex.TrexTrafficGen): """ @@ -65,9 +118,9 @@ class TrexTrafficGenRFC(tg_trex.TrexTrafficGen): traffic for rfc2544 testcase. """ - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if resource_helper_type is None: resource_helper_type = TrexRfcResourceHelper - super(TrexTrafficGenRFC, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(TrexTrafficGenRFC, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py index 4296da84c..0cb66a714 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -179,6 +179,8 @@ class TrexResourceHelper(ClientResourceHelper): 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)), 'in_packets': int(port_stats.get('ipackets', 0)), 'out_packets': int(port_stats.get('opackets', 0)), + 'in_bytes': int(port_stats.get('ibytes', 0)), + 'out_bytes': int(port_stats.get('obytes', 0)), 'timestamp': timestamp } @@ -200,14 +202,15 @@ class TrexTrafficGen(SampleVNFTrafficGen): APP_NAME = 'TRex' - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if resource_helper_type is None: resource_helper_type = TrexResourceHelper + if setup_env_helper_type is None: setup_env_helper_type = TrexDpdkVnfSetupEnvHelper - super(TrexTrafficGen, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(TrexTrafficGen, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) def _check_status(self): return self.resource_helper.check_status() diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py b/yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py new file mode 100644 index 000000000..846304880 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/tg_trex_vpp.py @@ -0,0 +1,178 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from trex_stl_lib.trex_stl_exceptions import STLError + +from yardstick.common.utils import safe_cast +from yardstick.network_services.vnf_generic.vnf.sample_vnf import \ + Rfc2544ResourceHelper +from yardstick.network_services.vnf_generic.vnf.sample_vnf import \ + SampleVNFTrafficGen +from yardstick.network_services.vnf_generic.vnf.tg_trex import \ + TrexDpdkVnfSetupEnvHelper +from yardstick.network_services.vnf_generic.vnf.tg_trex import \ + TrexResourceHelper + +LOGGING = logging.getLogger(__name__) + + +class TrexVppResourceHelper(TrexResourceHelper): + + def __init__(self, setup_helper, rfc_helper_type=None): + super(TrexVppResourceHelper, self).__init__(setup_helper) + + if rfc_helper_type is None: + rfc_helper_type = Rfc2544ResourceHelper + + self.rfc2544_helper = rfc_helper_type(self.scenario_helper) + + self.loss = None + self.sent = None + self.latency = None + + def generate_samples(self, stats=None, ports=None, port_pg_id=None, + latency=False): + samples = {} + if stats is None: + stats = self.get_stats(ports) + for pname in (intf['name'] for intf in self.vnfd_helper.interfaces): + port_num = self.vnfd_helper.port_num(pname) + port_stats = stats.get(port_num, {}) + samples[pname] = { + 'rx_throughput_fps': float(port_stats.get('rx_pps', 0.0)), + 'tx_throughput_fps': float(port_stats.get('tx_pps', 0.0)), + 'rx_throughput_bps': float(port_stats.get('rx_bps', 0.0)), + 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)), + 'in_packets': int(port_stats.get('ipackets', 0)), + 'out_packets': int(port_stats.get('opackets', 0)), + } + + if latency: + pg_id_list = port_pg_id.get_pg_ids(port_num) + samples[pname]['latency'] = {} + for pg_id in pg_id_list: + latency_global = stats.get('latency', {}) + pg_latency = latency_global.get(pg_id, {}).get('latency') + + t_min = safe_cast(pg_latency.get("total_min", 0.0), float, + -1.0) + t_avg = safe_cast(pg_latency.get("average", 0.0), float, + -1.0) + t_max = 
safe_cast(pg_latency.get("total_max", 0.0), float, + -1.0) + + latency = { + "min_latency": t_min, + "max_latency": t_max, + "avg_latency": t_avg, + } + samples[pname]['latency'][pg_id] = latency + + return samples + + def _run_traffic_once(self, traffic_profile): + self.client_started.value = 1 + traffic_profile.execute_traffic(self) + return True + + def run_traffic(self, traffic_profile): + self._queue.cancel_join_thread() + traffic_profile.init_queue(self._queue) + super(TrexVppResourceHelper, self).run_traffic(traffic_profile) + + @staticmethod + def fmt_latency(lat_min, lat_avg, lat_max): + t_min = int(round(safe_cast(lat_min, float, -1.0))) + t_avg = int(round(safe_cast(lat_avg, float, -1.0))) + t_max = int(round(safe_cast(lat_max, float, -1.0))) + + return "/".join(str(tmp) for tmp in (t_min, t_avg, t_max)) + + def send_traffic_on_tg(self, ports, port_pg_id, duration, rate, + latency=False): + try: + # Choose rate and start traffic: + self.client.start(ports=ports, mult=rate, duration=duration) + # Block until done: + try: + self.client.wait_on_traffic(ports=ports, timeout=duration + 20) + except STLError as err: + self.client.stop(ports) + LOGGING.error("TRex stateless timeout error: %s", err) + + if self.client.get_warnings(): + for warning in self.client.get_warnings(): + LOGGING.warning(warning) + + # Read the stats after the test + stats = self.client.get_stats() + + packets_in = [] + packets_out = [] + for port in ports: + packets_in.append(stats[port]["ipackets"]) + packets_out.append(stats[port]["opackets"]) + + if latency: + self.latency = [] + pg_id_list = port_pg_id.get_pg_ids(port) + for pg_id in pg_id_list: + latency_global = stats.get('latency', {}) + pg_latency = latency_global.get(pg_id, {}).get( + 'latency') + lat = self.fmt_latency( + str(pg_latency.get("total_min")), + str(pg_latency.get("average")), + str(pg_latency.get("total_max"))) + LOGGING.info( + "latencyStream%s(usec)=%s", pg_id, lat) + self.latency.append(lat) + + self.sent = 
sum(packets_out) + total_rcvd = sum(packets_in) + self.loss = self.sent - total_rcvd + LOGGING.info("rate=%s, totalReceived=%s, totalSent=%s," + " frameLoss=%s", rate, total_rcvd, self.sent, + self.loss) + return stats + except STLError as err: + LOGGING.error("TRex stateless runtime error: %s", err) + raise RuntimeError('TRex stateless runtime error') + + +class TrexTrafficGenVpp(SampleVNFTrafficGen): + APP_NAME = 'TRex' + WAIT_TIME = 20 + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if setup_env_helper_type is None: + setup_env_helper_type = TrexDpdkVnfSetupEnvHelper + if resource_helper_type is None: + resource_helper_type = TrexVppResourceHelper + + super(TrexTrafficGenVpp, self).__init__( + name, vnfd, setup_env_helper_type, resource_helper_type) + + def _check_status(self): + return self.resource_helper.check_status() + + def _start_server(self): + super(TrexTrafficGenVpp, self)._start_server() + self.resource_helper.start() + + def wait_for_instantiate(self): + return self._wait_for_process() diff --git a/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py new file mode 100755 index 000000000..c6df9d04c --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/tg_vcmts_pktgen.py @@ -0,0 +1,215 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import time +import socket +import yaml +import os + +from yardstick.network_services.vnf_generic.vnf import sample_vnf +from yardstick.common import exceptions + + +LOG = logging.getLogger(__name__) + + +class PktgenHelper(object): + + RETRY_SECONDS = 0.5 + RETRY_COUNT = 20 + CONNECT_TIMEOUT = 5 + + def __init__(self, host, port=23000): + self.host = host + self.port = port + self.connected = False + + def _connect(self): + self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ret = True + try: + self._sock.settimeout(self.CONNECT_TIMEOUT) + self._sock.connect((self.host, self.port)) + except (socket.gaierror, socket.error, socket.timeout): + self._sock.close() + ret = False + + return ret + + def connect(self): + if self.connected: + return True + LOG.info("Connecting to pktgen instance at %s...", self.host) + for idx in range(self.RETRY_COUNT): + self.connected = self._connect() + if self.connected: + return True + LOG.debug("Connection attempt %d: Unable to connect to %s, " \ + "retrying in %d seconds", + idx, self.host, self.RETRY_SECONDS) + time.sleep(self.RETRY_SECONDS) + + LOG.error("Unable to connect to pktgen instance on %s !", + self.host) + return False + + + def send_command(self, command): + if not self.connected: + LOG.error("Pktgen socket is not connected") + return False + + try: + self._sock.sendall((command + "\n").encode()) + time.sleep(1) + except (socket.timeout, socket.error): + LOG.error("Error sending command '%s'", command) + return False + + return True + + +class VcmtsPktgenSetupEnvHelper(sample_vnf.SetupEnvHelper): + + BASE_PARAMETERS = "export LUA_PATH=/vcmts/Pktgen.lua;"\ + + "export CMK_PROC_FS=/host/proc;" + + PORTS_COUNT = 8 + + def generate_pcap_filename(self, port_cfg): + return port_cfg['traffic_type'] + "_" + port_cfg['num_subs'] \ + + "cms_" + port_cfg['num_ofdm'] + "ofdm.pcap" + + def find_port_cfg(self, ports_cfg, port_name): + for port_cfg in ports_cfg: + if port_name in port_cfg: + return 
port_cfg + return None + + def build_pktgen_parameters(self, pod_cfg): + ports_cfg = pod_cfg['ports'] + port_cfg = list() + + for i in range(self.PORTS_COUNT): + port_cfg.append(self.find_port_cfg(ports_cfg, 'port_' + str(i))) + + pktgen_parameters = self.BASE_PARAMETERS + " " \ + + " /pktgen-config/setup.sh " + pod_cfg['pktgen_id'] \ + + " " + pod_cfg['num_ports'] + + for i in range(self.PORTS_COUNT): + pktgen_parameters += " " + port_cfg[i]['net_pktgen'] + + for i in range(self.PORTS_COUNT): + pktgen_parameters += " " + self.generate_pcap_filename(port_cfg[i]) + + return pktgen_parameters + + def start_pktgen(self, pod_cfg): + self.ssh_helper.drop_connection() + cmd = self.build_pktgen_parameters(pod_cfg) + LOG.debug("Executing: '%s'", cmd) + self.ssh_helper.send_command(cmd) + LOG.info("Pktgen executed") + + def setup_vnf_environment(self): + pass + + +class VcmtsPktgen(sample_vnf.SampleVNFTrafficGen): + + TG_NAME = 'VcmtsPktgen' + APP_NAME = 'VcmtsPktgen' + RUN_WAIT = 4 + DEFAULT_RATE = 8.0 + + PKTGEN_BASE_PORT = 23000 + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if setup_env_helper_type is None: + setup_env_helper_type = VcmtsPktgenSetupEnvHelper + super(VcmtsPktgen, self).__init__( + name, vnfd, setup_env_helper_type, resource_helper_type) + + self.pktgen_address = vnfd['mgmt-interface']['ip'] + LOG.info("Pktgen container '%s', IP: %s", name, self.pktgen_address) + + def extract_pod_cfg(self, pktgen_pods_cfg, pktgen_id): + for pod_cfg in pktgen_pods_cfg: + if pod_cfg['pktgen_id'] == pktgen_id: + return pod_cfg + return None + + def instantiate(self, scenario_cfg, context_cfg): + super(VcmtsPktgen, self).instantiate(scenario_cfg, context_cfg) + self._start_server() + options = scenario_cfg.get('options', {}) + self.pktgen_rate = options.get('pktgen_rate', self.DEFAULT_RATE) + + try: + pktgen_values_filepath = options['pktgen_values'] + except KeyError: + raise KeyError("Missing pktgen_values key in scenario 
options" \ + "section of the task definition file") + + if not os.path.isfile(pktgen_values_filepath): + raise RuntimeError("The pktgen_values file path provided " \ + "does not exists") + + # The yaml_loader.py (SafeLoader) underlying regex has an issue + # with reading PCI addresses (processed as double). so the + # BaseLoader is used here. + with open(pktgen_values_filepath) as stream: + pktgen_values = yaml.load(stream, Loader=yaml.BaseLoader) + + if pktgen_values == None: + raise RuntimeError("Error reading pktgen_values file provided (" + + pktgen_values_filepath + ")") + + self.pktgen_id = int(options[self.name]['pktgen_id']) + self.resource_helper.pktgen_id = self.pktgen_id + + self.pktgen_helper = PktgenHelper(self.pktgen_address, + self.PKTGEN_BASE_PORT + self.pktgen_id) + + pktgen_pods_cfg = pktgen_values['topology']['pktgen_pods'] + + self.pod_cfg = self.extract_pod_cfg(pktgen_pods_cfg, + str(self.pktgen_id)) + + if self.pod_cfg == None: + raise KeyError("Pktgen with id " + str(self.pktgen_id) + \ + " was not found") + + self.setup_helper.start_pktgen(self.pod_cfg) + + def run_traffic(self, traffic_profile): + if not self.pktgen_helper.connect(): + raise exceptions.PktgenActionError(command="connect") + LOG.info("Connected to pktgen instance at %s", self.pktgen_address) + + commands = [] + for i in range(self.setup_helper.PORTS_COUNT): + commands.append('pktgen.set("' + str(i) + '", "rate", ' + + "%0.1f" % self.pktgen_rate + ');') + + commands.append('pktgen.start("all");') + + for command in commands: + if self.pktgen_helper.send_command(command): + LOG.debug("Command '%s' sent to pktgen", command) + LOG.info("Traffic started on %s...", self.name) + return True diff --git a/yardstick/network_services/vnf_generic/vnf/udp_replay.py b/yardstick/network_services/vnf_generic/vnf/udp_replay.py index e3fde1a79..a3b0b9fd9 100644 --- a/yardstick/network_services/vnf_generic/vnf/udp_replay.py +++ b/yardstick/network_services/vnf_generic/vnf/udp_replay.py @@ -1,4 
+1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -60,14 +60,15 @@ class UdpReplayApproxVnf(SampleVNF): PIPELINE_COMMAND = REPLAY_PIPELINE_COMMAND - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if resource_helper_type is None: resource_helper_type = UdpReplayResourceHelper + if setup_env_helper_type is None: setup_env_helper_type = UdpReplaySetupEnvHelper - super(UdpReplayApproxVnf, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(UdpReplayApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) def _build_pipeline_kwargs(self): ports = self.vnfd_helper.port_pairs.all_ports @@ -108,7 +109,7 @@ class UdpReplayApproxVnf(SampleVNF): def collect_kpi(self): def get_sum(offset): - return sum(int(i) for i in split_stats[offset::5]) + return sum(int(i) for i in split_stats[offset::6]) # we can't get KPIs if the VNF is down check_if_process_failed(self._vnf_process) @@ -116,7 +117,7 @@ class UdpReplayApproxVnf(SampleVNF): stats = self.get_stats() stats_words = stats.split() - split_stats = stats_words[stats_words.index('0'):][:number_of_ports * 5] + split_stats = stats_words[stats_words.index('arp_pkts') + 1:][:number_of_ports * 6] physical_node = ctx_base.Context.get_physical_node_from_server( self.scenario_helper.nodes[self.name]) diff --git a/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py new file mode 100755 index 000000000..0b48ef4e9 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py @@ -0,0 +1,273 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import yaml + +from influxdb import InfluxDBClient + +from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper +from yardstick.common import constants +from yardstick.common import exceptions +from yardstick.network_services.vnf_generic.vnf.base import GenericVNF +from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper +from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper +from yardstick.network_services.utils import get_nsb_option + + +LOG = logging.getLogger(__name__) + + +class InfluxDBHelper(object): + + INITIAL_VALUE = 'now() - 1m' + + def __init__(self, vcmts_influxdb_ip, vcmts_influxdb_port): + self._vcmts_influxdb_ip = vcmts_influxdb_ip + self._vcmts_influxdb_port = vcmts_influxdb_port + self._last_upstream_rx = self.INITIAL_VALUE + self._last_values_time = dict() + + def start(self): + self._read_client = InfluxDBClient(host=self._vcmts_influxdb_ip, + port=self._vcmts_influxdb_port, + database='collectd') + self._write_client = InfluxDBClient(host=constants.INFLUXDB_IP, + port=constants.INFLUXDB_PORT, + database='collectd') + + def _get_last_value_time(self, measurement): + if measurement in self._last_values_time: + return self._last_values_time[measurement] + return self.INITIAL_VALUE + + def _set_last_value_time(self, measurement, time): + self._last_values_time[measurement] = "'" + time + "'" + + def _query_measurement(self, measurement): + # 
There is a delay before influxdb flushes the data + query = "SELECT * FROM " + measurement + " WHERE time > " \ + + self._get_last_value_time(measurement) \ + + " ORDER BY time ASC;" + query_result = self._read_client.query(query) + if len(query_result.keys()) == 0: + return None + return query_result.get_points(measurement) + + def _rw_measurment(self, measurement, columns): + query_result = self._query_measurement(measurement) + if query_result == None: + return + + points_to_write = list() + for entry in query_result: + point = { + "measurement": measurement, + "tags": { + "type": entry['type'], + "host": entry['host'] + }, + "time": entry['time'], + "fields": {} + } + + for column in columns: + if column == 'value': + point["fields"][column] = float(entry[column]) + else: + point["fields"][column] = entry[column] + + points_to_write.append(point) + self._set_last_value_time(measurement, entry['time']) + + # Write the points to yardstick database + if self._write_client.write_points(points_to_write): + LOG.debug("%d new points written to '%s' measurement", + len(points_to_write), measurement) + + def copy_kpi(self): + self._rw_measurment("cpu_value", ["instance", "type_instance", "value"]) + self._rw_measurment("cpufreq_value", ["type_instance", "value"]) + self._rw_measurment("downstream_rx", ["value"]) + self._rw_measurment("downstream_tx", ["value"]) + self._rw_measurment("downstream_value", ["value"]) + self._rw_measurment("ds_per_cm_value", ["instance", "value"]) + self._rw_measurment("intel_rdt_value", ["instance", "type_instance", "value"]) + self._rw_measurment("turbostat_value", ["instance", "type_instance", "value"]) + self._rw_measurment("upstream_rx", ["value"]) + self._rw_measurment("upstream_tx", ["value"]) + self._rw_measurment("upstream_value", ["value"]) + + +class VcmtsdSetupEnvHelper(SetupEnvHelper): + + BASE_PARAMETERS = "export LD_LIBRARY_PATH=/opt/collectd/lib:;"\ + + "export CMK_PROC_FS=/host/proc;" + + def build_us_parameters(self, 
pod_cfg): + return self.BASE_PARAMETERS + " " \ + + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \ + + " --socket-id=" + pod_cfg['cpu_socket_id'] \ + + " --pool=shared" \ + + " /vcmts-config/run_upstream.sh " + pod_cfg['sg_id'] \ + + " " + pod_cfg['ds_core_type'] \ + + " " + pod_cfg['num_ofdm'] + "ofdm" \ + + " " + pod_cfg['num_subs'] + "cm" \ + + " " + pod_cfg['cm_crypto'] \ + + " " + pod_cfg['qat'] \ + + " " + pod_cfg['net_us'] \ + + " " + pod_cfg['power_mgmt'] + + def build_ds_parameters(self, pod_cfg): + return self.BASE_PARAMETERS + " " \ + + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \ + + " --socket-id=" + pod_cfg['cpu_socket_id'] \ + + " --pool=" + pod_cfg['ds_core_type'] \ + + " /vcmts-config/run_downstream.sh " + pod_cfg['sg_id'] \ + + " " + pod_cfg['ds_core_type'] \ + + " " + pod_cfg['ds_core_pool_index'] \ + + " " + pod_cfg['num_ofdm'] + "ofdm" \ + + " " + pod_cfg['num_subs'] + "cm" \ + + " " + pod_cfg['cm_crypto'] \ + + " " + pod_cfg['qat'] \ + + " " + pod_cfg['net_ds'] \ + + " " + pod_cfg['power_mgmt'] + + def build_cmd(self, stream_dir, pod_cfg): + if stream_dir == 'ds': + return self.build_ds_parameters(pod_cfg) + else: + return self.build_us_parameters(pod_cfg) + + def run_vcmtsd(self, stream_dir, pod_cfg): + cmd = self.build_cmd(stream_dir, pod_cfg) + LOG.debug("Executing %s", cmd) + self.ssh_helper.send_command(cmd) + + def setup_vnf_environment(self): + pass + + +class VcmtsVNF(GenericVNF): + + RUN_WAIT = 4 + + def __init__(self, name, vnfd): + super(VcmtsVNF, self).__init__(name, vnfd) + self.name = name + self.bin_path = get_nsb_option('bin_path', '') + self.scenario_helper = ScenarioHelper(self.name) + self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path) + + self.setup_helper = VcmtsdSetupEnvHelper(self.vnfd_helper, + self.ssh_helper, + self.scenario_helper) + + def extract_pod_cfg(self, vcmts_pods_cfg, sg_id): + for pod_cfg in vcmts_pods_cfg: + if pod_cfg['sg_id'] == sg_id: + return pod_cfg + + def instantiate(self, 
scenario_cfg, context_cfg): + self._update_collectd_options(scenario_cfg, context_cfg) + self.scenario_helper.scenario_cfg = scenario_cfg + self.context_cfg = context_cfg + + options = scenario_cfg.get('options', {}) + + try: + self.vcmts_influxdb_ip = options['vcmts_influxdb_ip'] + self.vcmts_influxdb_port = options['vcmts_influxdb_port'] + except KeyError: + raise KeyError("Missing destination InfluxDB details in scenario" \ + " section of the task definition file") + + try: + vcmtsd_values_filepath = options['vcmtsd_values'] + except KeyError: + raise KeyError("Missing vcmtsd_values key in scenario options" \ + "section of the task definition file") + + if not os.path.isfile(vcmtsd_values_filepath): + raise RuntimeError("The vcmtsd_values file path provided " \ + "does not exists") + + # The yaml_loader.py (SafeLoader) underlying regex has an issue + # with reading PCI addresses (processed as double). so the + # BaseLoader is used here. + with open(vcmtsd_values_filepath) as stream: + vcmtsd_values = yaml.load(stream, Loader=yaml.BaseLoader) + + if vcmtsd_values == None: + raise RuntimeError("Error reading vcmtsd_values file provided (" + + vcmtsd_values_filepath + ")") + + vnf_options = options.get(self.name, {}) + sg_id = str(vnf_options['sg_id']) + stream_dir = vnf_options['stream_dir'] + + try: + vcmts_pods_cfg = vcmtsd_values['topology']['vcmts_pods'] + except KeyError: + raise KeyError("Missing vcmts_pods key in the " \ + "vcmtsd_values file provided") + + pod_cfg = self.extract_pod_cfg(vcmts_pods_cfg, sg_id) + if pod_cfg == None: + raise exceptions.IncorrectConfig(error_msg="Service group " + sg_id + " not found") + + self.setup_helper.run_vcmtsd(stream_dir, pod_cfg) + + def _update_collectd_options(self, scenario_cfg, context_cfg): + scenario_options = scenario_cfg.get('options', {}) + generic_options = scenario_options.get('collectd', {}) + scenario_node_options = scenario_options.get(self.name, {})\ + .get('collectd', {}) + context_node_options = 
context_cfg.get('nodes', {})\ + .get(self.name, {}).get('collectd', {}) + + options = generic_options + self._update_options(options, scenario_node_options) + self._update_options(options, context_node_options) + + self.setup_helper.collectd_options = options + + def _update_options(self, options, additional_options): + for k, v in additional_options.items(): + if isinstance(v, dict) and k in options: + options[k].update(v) + else: + options[k] = v + + def wait_for_instantiate(self): + pass + + def terminate(self): + pass + + def scale(self, flavor=""): + pass + + def collect_kpi(self): + self.influxdb_helper.copy_kpi() + return {"n/a": "n/a"} + + def start_collect(self): + self.influxdb_helper = InfluxDBHelper(self.vcmts_influxdb_ip, + self.vcmts_influxdb_port) + self.influxdb_helper.start() + + def stop_collect(self): + pass diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py index a1523dee3..743f2d4bb 100644 --- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -52,9 +52,12 @@ class FWApproxVnf(SampleVNF): 'packets_dropped': 3, } - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = FWApproxSetupEnvHelper - super(FWApproxVnf, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(FWApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type) + + def wait_for_instantiate(self): + """Wait for VNF to initialize""" + self.wait_for_initialize() diff --git a/yardstick/network_services/vnf_generic/vnf/vims_vnf.py b/yardstick/network_services/vnf_generic/vnf/vims_vnf.py new file mode 100644 index 000000000..0e339b171 --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/vims_vnf.py @@ -0,0 +1,105 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import time + +from yardstick.network_services.vnf_generic.vnf import sample_vnf + +LOG = logging.getLogger(__name__) + + +class VimsSetupEnvHelper(sample_vnf.SetupEnvHelper): + + def setup_vnf_environment(self): + LOG.debug('VimsSetupEnvHelper:\n') + + +class VimsResourceHelper(sample_vnf.ClientResourceHelper): + pass + + +class VimsPcscfVnf(sample_vnf.SampleVNF): + + APP_NAME = "VimsPcscf" + APP_WORD = "VimsPcscf" + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if resource_helper_type is None: + resource_helper_type = VimsResourceHelper + if setup_env_helper_type is None: + setup_env_helper_type = VimsSetupEnvHelper + super(VimsPcscfVnf, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) + + def wait_for_instantiate(self): + pass + + def _run(self): + pass + + def start_collect(self): + # TODO + pass + + def collect_kpi(self): + # TODO + pass + + +class VimsHssVnf(sample_vnf.SampleVNF): + + APP_NAME = "VimsHss" + APP_WORD = "VimsHss" + CMD = "sudo /media/generate_user.sh {} {} >> /dev/null 2>&1" + + def __init__(self, name, vnfd, setup_env_helper_type=None, + resource_helper_type=None): + if resource_helper_type is None: + resource_helper_type = VimsResourceHelper + if setup_env_helper_type is None: + setup_env_helper_type = VimsSetupEnvHelper + super(VimsHssVnf, self).__init__(name, vnfd, setup_env_helper_type, + resource_helper_type) + self.start_user = 1 + self.end_user = 10000 + self.WAIT_TIME = 600 + + def instantiate(self, scenario_cfg, context_cfg): + LOG.debug("scenario_cfg=%s\n", scenario_cfg) + self.start_user = scenario_cfg.get("options", {}).get("start_user", self.start_user) + self.end_user = scenario_cfg.get("options", {}).get("end_user", self.end_user) + # TODO + # Need to check HSS services are ready before generating user accounts + # Now, adding time sleep that manually configured by user + # to wait for HSS services. 
+ # Note: for heat, waiting time is too long (~ 600s) + self.WAIT_TIME = scenario_cfg.get("options", {}).get("wait_time", self.WAIT_TIME) + time.sleep(self.WAIT_TIME) + LOG.debug("Generate user accounts from %d to %d\n", + self.start_user, self.end_user) + cmd = self.CMD.format(self.start_user, self.end_user) + self.ssh_helper.execute(cmd, None, 3600, False) + + def wait_for_instantiate(self): + pass + + def start_collect(self): + # TODO + pass + + def collect_kpi(self): + # TODO + pass diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py index b7cf8b35e..322ecd016 100644 --- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,13 +17,11 @@ from __future__ import absolute_import from __future__ import print_function -import os import logging import re import posixpath -from six.moves import configparser, zip - +from yardstick.common import utils from yardstick.common.process import check_if_process_failed from yardstick.network_services.helpers.samplevnf_helper import PortPairs from yardstick.network_services.pipeline import PipelineRules @@ -43,15 +41,6 @@ Pkts in:\\s(\\d+)\r\n\ class ConfigCreate(object): - @staticmethod - def vpe_tmq(config, index): - tm_q = 'TM{0}'.format(index) - config.add_section(tm_q) - config.set(tm_q, 'burst_read', '24') - config.set(tm_q, 'burst_write', '32') - config.set(tm_q, 'cfg', '/tmp/full_tm_profile_10G.cfg') - return config - def __init__(self, vnfd_helper, socket): super(ConfigCreate, self).__init__() self.sw_q = -1 @@ -64,141 +53,6 @@ class ConfigCreate(object): self.socket = socket self._dpdk_port_to_link_id_map = None - @property - def dpdk_port_to_link_id_map(self): - # we need interface name -> DPDK port num (PMD ID) -> LINK ID - # LINK ID -> PMD ID is governed by the port mask - # LINK instances are created implicitly based on the PORT_MASK application startup - # argument. LINK0 is the first port enabled in the PORT_MASK, port 1 is the next one, - # etc. The LINK ID is different than the DPDK PMD-level NIC port ID, which is the actual - # position in the bitmask mentioned above. For example, if bit 5 is the first bit set - # in the bitmask, then LINK0 is having the PMD ID of 5. This mechanism creates a - # contiguous LINK ID space and isolates the configuration file against changes in the - # board PCIe slots where NICs are plugged in. 
- if self._dpdk_port_to_link_id_map is None: - self._dpdk_port_to_link_id_map = {} - for link_id, port_name in enumerate(sorted(self.vnfd_helper.port_pairs.all_ports, - key=self.vnfd_helper.port_num)): - self._dpdk_port_to_link_id_map[port_name] = link_id - return self._dpdk_port_to_link_id_map - - def vpe_initialize(self, config): - config.add_section('EAL') - config.set('EAL', 'log_level', '0') - - config.add_section('PIPELINE0') - config.set('PIPELINE0', 'type', 'MASTER') - config.set('PIPELINE0', 'core', 's%sC0' % self.socket) - - config.add_section('MEMPOOL0') - config.set('MEMPOOL0', 'pool_size', '256K') - - config.add_section('MEMPOOL1') - config.set('MEMPOOL1', 'pool_size', '2M') - return config - - def vpe_rxq(self, config): - for port in self.downlink_ports: - new_section = 'RXQ{0}.0'.format(self.dpdk_port_to_link_id_map[port]) - config.add_section(new_section) - config.set(new_section, 'mempool', 'MEMPOOL1') - - return config - - def get_sink_swq(self, parser, pipeline, k, index): - sink = "" - pktq = parser.get(pipeline, k) - if "SINK" in pktq: - self.sink_q += 1 - sink = " SINK{0}".format(self.sink_q) - if "TM" in pktq: - sink = " TM{0}".format(index) - pktq = "SWQ{0}{1}".format(self.sw_q, sink) - return pktq - - def vpe_upstream(self, vnf_cfg, index=0): # pragma: no cover - # NOTE(ralonsoh): this function must be covered in UTs. 
- parser = configparser.ConfigParser() - parser.read(os.path.join(vnf_cfg, 'vpe_upstream')) - - for pipeline in parser.sections(): - for k, v in parser.items(pipeline): - if k == "pktq_in": - if "RXQ" in v: - port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]] - value = "RXQ{0}.0".format(port) - else: - value = self.get_sink_swq(parser, pipeline, k, index) - - parser.set(pipeline, k, value) - - elif k == "pktq_out": - if "TXQ" in v: - port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]] - value = "TXQ{0}.0".format(port) - else: - self.sw_q += 1 - value = self.get_sink_swq(parser, pipeline, k, index) - - parser.set(pipeline, k, value) - - new_pipeline = 'PIPELINE{0}'.format(self.n_pipeline) - if new_pipeline != pipeline: - parser._sections[new_pipeline] = parser._sections[pipeline] - parser._sections.pop(pipeline) - self.n_pipeline += 1 - return parser - - def vpe_downstream(self, vnf_cfg, index): # pragma: no cover - # NOTE(ralonsoh): this function must be covered in UTs. 
- parser = configparser.ConfigParser() - parser.read(os.path.join(vnf_cfg, 'vpe_downstream')) - for pipeline in parser.sections(): - for k, v in parser.items(pipeline): - - if k == "pktq_in": - port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]] - if "RXQ" not in v: - value = self.get_sink_swq(parser, pipeline, k, index) - elif "TM" in v: - value = "RXQ{0}.0 TM{1}".format(port, index) - else: - value = "RXQ{0}.0".format(port) - - parser.set(pipeline, k, value) - - if k == "pktq_out": - port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]] - if "TXQ" not in v: - self.sw_q += 1 - value = self.get_sink_swq(parser, pipeline, k, index) - elif "TM" in v: - value = "TXQ{0}.0 TM{1}".format(port, index) - else: - value = "TXQ{0}.0".format(port) - - parser.set(pipeline, k, value) - - new_pipeline = 'PIPELINE{0}'.format(self.n_pipeline) - if new_pipeline != pipeline: - parser._sections[new_pipeline] = parser._sections[pipeline] - parser._sections.pop(pipeline) - self.n_pipeline += 1 - return parser - - def create_vpe_config(self, vnf_cfg): - config = configparser.ConfigParser() - vpe_cfg = os.path.join("/tmp/vpe_config") - with open(vpe_cfg, 'w') as cfg_file: - config = self.vpe_initialize(config) - config = self.vpe_rxq(config) - config.write(cfg_file) - for index, _ in enumerate(self.uplink_ports): - config = self.vpe_upstream(vnf_cfg, index) - config.write(cfg_file) - config = self.vpe_downstream(vnf_cfg, index) - config = self.vpe_tmq(config, index) - config.write(cfg_file) def generate_vpe_script(self, interfaces): rules = PipelineRules(pipeline_id=1) @@ -231,16 +85,10 @@ class ConfigCreate(object): return rules.get_string() - def generate_tm_cfg(self, vnf_cfg): - vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg") - if os.path.exists(vnf_cfg): - return open(vnf_cfg).read() - class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper): APP_NAME = 'vPE_vnf' - CFG_CONFIG = "/tmp/vpe_config" CFG_SCRIPT = "/tmp/vpe_script" TM_CONFIG = 
"/tmp/full_tm_profile_10G.cfg" CORES = ['0', '1', '2', '3', '4', '5'] @@ -253,33 +101,52 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper): self.all_ports = self._port_pairs.all_ports def build_config(self): + vnf_cfg = self.scenario_helper.vnf_cfg + task_path = self.scenario_helper.task_path + action_bulk_file = vnf_cfg.get('action_bulk_file', '/tmp/action_bulk_512.txt') + full_tm_profile_file = vnf_cfg.get('full_tm_profile_file', '/tmp/full_tm_profile_10G.cfg') + config_file = vnf_cfg.get('file', '/tmp/vpe_config') + script_file = vnf_cfg.get('script_file', None) vpe_vars = { "bin_path": self.ssh_helper.bin_path, "socket": self.socket, } - self._build_vnf_ports() vpe_conf = ConfigCreate(self.vnfd_helper, self.socket) - vpe_conf.create_vpe_config(self.scenario_helper.vnf_cfg) - config_basename = posixpath.basename(self.CFG_CONFIG) - script_basename = posixpath.basename(self.CFG_SCRIPT) - tm_basename = posixpath.basename(self.TM_CONFIG) - with open(self.CFG_CONFIG) as handle: + if script_file is None: + # autogenerate vpe_script if not given + vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces) + script_file = self.CFG_SCRIPT + else: + with utils.open_relative_file(script_file, task_path) as handle: + vpe_script = handle.read() + + config_basename = posixpath.basename(config_file) + script_basename = posixpath.basename(script_file) + + with utils.open_relative_file(action_bulk_file, task_path) as handle: + action_bulk = handle.read() + + with utils.open_relative_file(full_tm_profile_file, task_path) as handle: + full_tm_profile = handle.read() + + with utils.open_relative_file(config_file, task_path) as handle: vpe_config = handle.read() + # upload the 4 config files to the target server self.ssh_helper.upload_config_file(config_basename, vpe_config.format(**vpe_vars)) - - vpe_script = vpe_conf.generate_vpe_script(self.vnfd_helper.interfaces) self.ssh_helper.upload_config_file(script_basename, vpe_script.format(**vpe_vars)) - - tm_config = 
vpe_conf.generate_tm_cfg(self.scenario_helper.vnf_cfg) - self.ssh_helper.upload_config_file(tm_basename, tm_config) + self.ssh_helper.upload_config_file(posixpath.basename(action_bulk_file), + action_bulk.format(**vpe_vars)) + self.ssh_helper.upload_config_file(posixpath.basename(full_tm_profile_file), + full_tm_profile.format(**vpe_vars)) LOG.info("Provision and start the %s", self.APP_NAME) - LOG.info(self.CFG_CONFIG) + LOG.info(config_file) LOG.info(self.CFG_SCRIPT) - self._build_pipeline_kwargs() + self._build_pipeline_kwargs(cfg_file='/tmp/' + config_basename, + script='/tmp/' + script_basename) return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs) @@ -291,12 +158,11 @@ class VpeApproxVnf(SampleVNF): COLLECT_KPI = VPE_COLLECT_KPI WAIT_TIME = 20 - def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, - resource_helper_type=None): + def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None): if setup_env_helper_type is None: setup_env_helper_type = VpeApproxSetupEnvHelper - super(VpeApproxVnf, self).__init__( - name, vnfd, task_id, setup_env_helper_type, resource_helper_type) + + super(VpeApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type) def get_stats(self, *args, **kwargs): raise NotImplementedError diff --git a/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py b/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py new file mode 100644 index 000000000..fe8e7b2ba --- /dev/null +++ b/yardstick/network_services/vnf_generic/vnf/vpp_helpers.py @@ -0,0 +1,751 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import binascii +import ipaddress +import json +import logging +import os +import re +import tempfile +import time +from collections import OrderedDict + +from yardstick.common import constants +from yardstick.common import exceptions +from yardstick.network_services.helpers.cpu import CpuSysCores +from yardstick.network_services.vnf_generic.vnf.sample_vnf import \ + DpdkVnfSetupEnvHelper + +LOG = logging.getLogger(__name__) + + +class VppConfigGenerator(object): + VPP_LOG_FILE = '/tmp/vpe.log' + + def __init__(self): + self._nodeconfig = {} + self._vpp_config = '' + + def add_config_item(self, config, value, path): + if len(path) == 1: + config[path[0]] = value + return + if path[0] not in config: + config[path[0]] = {} + elif isinstance(config[path[0]], str): + config[path[0]] = {} if config[path[0]] == '' \ + else {config[path[0]]: ''} + self.add_config_item(config[path[0]], value, path[1:]) + + def add_unix_log(self, value=None): + path = ['unix', 'log'] + if value is None: + value = self.VPP_LOG_FILE + self.add_config_item(self._nodeconfig, value, path) + + def add_unix_cli_listen(self, value='/run/vpp/cli.sock'): + path = ['unix', 'cli-listen'] + self.add_config_item(self._nodeconfig, value, path) + + def add_unix_nodaemon(self): + path = ['unix', 'nodaemon'] + self.add_config_item(self._nodeconfig, '', path) + + def add_unix_coredump(self): + path = ['unix', 'full-coredump'] + self.add_config_item(self._nodeconfig, '', path) + + def add_dpdk_dev(self, *devices): + for device in devices: + if VppConfigGenerator.pci_dev_check(device): + path = 
['dpdk', 'dev {0}'.format(device)] + self.add_config_item(self._nodeconfig, '', path) + + def add_dpdk_cryptodev(self, count, cryptodev): + for i in range(count): + cryptodev_config = 'dev {0}'.format( + re.sub(r'\d.\d$', '1.' + str(i), cryptodev)) + path = ['dpdk', cryptodev_config] + self.add_config_item(self._nodeconfig, '', path) + self.add_dpdk_uio_driver('igb_uio') + + def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count): + for _ in range(count): + cryptodev_config = 'vdev cryptodev_{0}_pmd,socket_id={1}'. \ + format(sw_pmd_type, str(socket_id)) + path = ['dpdk', cryptodev_config] + self.add_config_item(self._nodeconfig, '', path) + + def add_dpdk_dev_default_rxq(self, value): + path = ['dpdk', 'dev default', 'num-rx-queues'] + self.add_config_item(self._nodeconfig, value, path) + + def add_dpdk_dev_default_rxd(self, value): + path = ['dpdk', 'dev default', 'num-rx-desc'] + self.add_config_item(self._nodeconfig, value, path) + + def add_dpdk_dev_default_txd(self, value): + path = ['dpdk', 'dev default', 'num-tx-desc'] + self.add_config_item(self._nodeconfig, value, path) + + def add_dpdk_log_level(self, value): + path = ['dpdk', 'log-level'] + self.add_config_item(self._nodeconfig, value, path) + + def add_dpdk_socketmem(self, value): + path = ['dpdk', 'socket-mem'] + self.add_config_item(self._nodeconfig, value, path) + + def add_dpdk_num_mbufs(self, value): + path = ['dpdk', 'num-mbufs'] + self.add_config_item(self._nodeconfig, value, path) + + def add_dpdk_uio_driver(self, value=None): + path = ['dpdk', 'uio-driver'] + self.add_config_item(self._nodeconfig, value, path) + + def add_cpu_main_core(self, value): + path = ['cpu', 'main-core'] + self.add_config_item(self._nodeconfig, value, path) + + def add_cpu_corelist_workers(self, value): + path = ['cpu', 'corelist-workers'] + self.add_config_item(self._nodeconfig, value, path) + + def add_heapsize(self, value): + path = ['heapsize'] + self.add_config_item(self._nodeconfig, value, path) + + def 
add_ip6_hash_buckets(self, value): + path = ['ip6', 'hash-buckets'] + self.add_config_item(self._nodeconfig, value, path) + + def add_ip6_heap_size(self, value): + path = ['ip6', 'heap-size'] + self.add_config_item(self._nodeconfig, value, path) + + def add_ip_heap_size(self, value): + path = ['ip', 'heap-size'] + self.add_config_item(self._nodeconfig, value, path) + + def add_statseg_size(self, value): + path = ['statseg', 'size'] + self.add_config_item(self._nodeconfig, value, path) + + def add_plugin(self, state, *plugins): + for plugin in plugins: + path = ['plugins', 'plugin {0}'.format(plugin), state] + self.add_config_item(self._nodeconfig, ' ', path) + + def add_dpdk_no_multi_seg(self): + path = ['dpdk', 'no-multi-seg'] + self.add_config_item(self._nodeconfig, '', path) + + def add_dpdk_no_tx_checksum_offload(self): + path = ['dpdk', 'no-tx-checksum-offload'] + self.add_config_item(self._nodeconfig, '', path) + + def dump_config(self, obj=None, level=-1): + if obj is None: + obj = self._nodeconfig + obj = OrderedDict(sorted(obj.items())) + + indent = ' ' + if level >= 0: + self._vpp_config += '{}{{\n'.format(level * indent) + if isinstance(obj, dict): + for key, val in obj.items(): + if hasattr(val, '__iter__') and not isinstance(val, str): + self._vpp_config += '{}{}\n'.format((level + 1) * indent, + key) + self.dump_config(val, level + 1) + else: + self._vpp_config += '{}{} {}\n'.format( + (level + 1) * indent, + key, val) + if level >= 0: + self._vpp_config += '{}}}\n'.format(level * indent) + + return self._vpp_config + + @staticmethod + def pci_dev_check(pci_dev): + pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:" + "[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$") + if not pattern.match(pci_dev): + raise ValueError('PCI address {addr} is not in valid format ' + 'xxxx:xx:xx.x'.format(addr=pci_dev)) + return True + + +class VppSetupEnvHelper(DpdkVnfSetupEnvHelper): + APP_NAME = "vpp" + CFG_CONFIG = "/etc/vpp/startup.conf" + CFG_SCRIPT = "" + PIPELINE_COMMAND = "" 
+ QAT_DRIVER = "qat_dh895xcc" + VNF_TYPE = "IPSEC" + VAT_BIN_NAME = 'vpp_api_test' + + def __init__(self, vnfd_helper, ssh_helper, scenario_helper): + super(VppSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, + scenario_helper) + self.sys_cores = CpuSysCores(self.ssh_helper) + + def kill_vnf(self): + ret_code, _, _ = \ + self.ssh_helper.execute( + 'service {name} stop'.format(name=self.APP_NAME)) + if int(ret_code): + raise RuntimeError( + 'Failed to stop service {name}'.format(name=self.APP_NAME)) + + def tear_down(self): + pass + + def start_vpp_service(self): + ret_code, _, _ = \ + self.ssh_helper.execute( + 'service {name} restart'.format(name=self.APP_NAME)) + if int(ret_code): + raise RuntimeError( + 'Failed to start service {name}'.format(name=self.APP_NAME)) + + def _update_vnfd_helper(self, additional_data, iface_key=None): + for k, v in additional_data.items(): + if iface_key is None: + if isinstance(v, dict) and k in self.vnfd_helper: + self.vnfd_helper[k].update(v) + else: + self.vnfd_helper[k] = v + else: + if isinstance(v, + dict) and k in self.vnfd_helper.find_virtual_interface( + ifname=iface_key): + self.vnfd_helper.find_virtual_interface(ifname=iface_key)[ + k].update(v) + else: + self.vnfd_helper.find_virtual_interface(ifname=iface_key)[ + k] = v + + def get_value_by_interface_key(self, interface, key): + try: + return self.vnfd_helper.find_virtual_interface( + ifname=interface).get(key) + except (KeyError, ValueError): + return None + + def crypto_device_init(self, pci_addr, numvfs): + # QAT device must be re-bound to kernel driver before initialization. + self.dpdk_bind_helper.load_dpdk_driver(self.QAT_DRIVER) + + # Stop VPP to prevent deadlock. + self.kill_vnf() + + current_driver = self.get_pci_dev_driver(pci_addr.replace(':', r'\:')) + if current_driver is not None: + self.pci_driver_unbind(pci_addr) + + # Bind to kernel driver. + self.dpdk_bind_helper.bind(pci_addr, self.QAT_DRIVER.replace('qat_', '')) + + # Initialize QAT VFs. 
+ if numvfs > 0: + self.set_sriov_numvfs(pci_addr, numvfs) + + def get_sriov_numvfs(self, pf_pci_addr): + command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'. \ + format(pci=pf_pci_addr.replace(':', r'\:')) + _, stdout, _ = self.ssh_helper.execute(command) + try: + return int(stdout) + except ValueError: + LOG.debug('Reading sriov_numvfs info failed') + return 0 + + def set_sriov_numvfs(self, pf_pci_addr, numvfs=0): + command = "sh -c 'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'". \ + format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:')) + self.ssh_helper.execute(command) + + def pci_driver_unbind(self, pci_addr): + command = "sh -c 'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'". \ + format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:')) + self.ssh_helper.execute(command) + + def get_pci_dev_driver(self, pci_addr): + cmd = 'lspci -vmmks {0}'.format(pci_addr) + ret_code, stdout, _ = self.ssh_helper.execute(cmd) + if int(ret_code): + raise RuntimeError("'{0}' failed".format(cmd)) + for line in stdout.splitlines(): + if not line: + continue + name = None + value = None + try: + name, value = line.split("\t", 1) + except ValueError: + if name == "Driver:": + return None + if name == 'Driver:': + return value + return None + + def vpp_create_ipsec_tunnels(self, if1_ip_addr, if2_ip_addr, if_name, + n_tunnels, n_connections, crypto_alg, + crypto_key, integ_alg, integ_key, addrs_ip, + spi_1=10000, spi_2=20000): + mask_length = 32 + if n_connections <= n_tunnels: + count = 1 + else: + count = int(n_connections / n_tunnels) + addr_ip_i = int(ipaddress.ip_address(str(addrs_ip))) + dst_start_ip = addr_ip_i + + tmp_fd, tmp_path = tempfile.mkstemp() + + vpp_ifname = self.get_value_by_interface_key(if_name, 'vpp_name') + ckey = binascii.hexlify(crypto_key.encode()) + ikey = binascii.hexlify(integ_key.encode()) + + integ = '' + if crypto_alg.alg_name != 'aes-gcm-128': + integ = 'integ_alg {integ_alg} ' \ + 'local_integ_key {local_integ_key} ' \ 
+ 'remote_integ_key {remote_integ_key} ' \ + .format(integ_alg=integ_alg.alg_name, + local_integ_key=ikey, + remote_integ_key=ikey) + create_tunnels_cmds = 'ipsec_tunnel_if_add_del ' \ + 'local_spi {local_spi} ' \ + 'remote_spi {remote_spi} ' \ + 'crypto_alg {crypto_alg} ' \ + 'local_crypto_key {local_crypto_key} ' \ + 'remote_crypto_key {remote_crypto_key} ' \ + '{integ} ' \ + 'local_ip {local_ip} ' \ + 'remote_ip {remote_ip}\n' + start_tunnels_cmds = 'ip_add_del_route {raddr}/{mask} via {addr} ipsec{i}\n' \ + 'exec set interface unnumbered ipsec{i} use {uifc}\n' \ + 'sw_interface_set_flags ipsec{i} admin-up\n' + + with os.fdopen(tmp_fd, 'w') as tmp_file: + for i in range(0, n_tunnels): + create_tunnel = create_tunnels_cmds.format(local_spi=spi_1 + i, + remote_spi=spi_2 + i, + crypto_alg=crypto_alg.alg_name, + local_crypto_key=ckey, + remote_crypto_key=ckey, + integ=integ, + local_ip=if1_ip_addr, + remote_ip=if2_ip_addr) + tmp_file.write(create_tunnel) + self.execute_script(tmp_path, json_out=False, copy_on_execute=True) + os.remove(tmp_path) + + tmp_fd, tmp_path = tempfile.mkstemp() + + with os.fdopen(tmp_fd, 'w') as tmp_file: + for i in range(0, n_tunnels): + if count > 1: + dst_start_ip = addr_ip_i + i * count + dst_end_ip = ipaddress.ip_address(dst_start_ip + count - 1) + ips = [ipaddress.ip_address(ip) for ip in + [str(ipaddress.ip_address(dst_start_ip)), + str(dst_end_ip)]] + lowest_ip, highest_ip = min(ips), max(ips) + mask_length = self.get_prefix_length(int(lowest_ip), + int(highest_ip), + lowest_ip.max_prefixlen) + # TODO check duplicate route for some IPs + elif count == 1: + dst_start_ip = addr_ip_i + i + start_tunnel = start_tunnels_cmds.format( + raddr=str(ipaddress.ip_address(dst_start_ip)), + mask=mask_length, + addr=if2_ip_addr, + i=i, count=count, + uifc=vpp_ifname) + tmp_file.write(start_tunnel) + # TODO add route for remain IPs + + self.execute_script(tmp_path, json_out=False, copy_on_execute=True) + os.remove(tmp_path) + + def 
apply_config(self, vpp_cfg, restart_vpp=True): + vpp_config = vpp_cfg.dump_config() + ret, _, _ = \ + self.ssh_helper.execute('echo "{config}" | sudo tee {filename}'. + format(config=vpp_config, + filename=self.CFG_CONFIG)) + if ret != 0: + raise RuntimeError('Writing config file failed') + if restart_vpp: + self.start_vpp_service() + + def vpp_route_add(self, network, prefix_len, gateway=None, interface=None, + use_sw_index=True, resolve_attempts=10, + count=1, vrf=None, lookup_vrf=None, multipath=False, + weight=None, local=False): + if interface: + if use_sw_index: + int_cmd = ('sw_if_index {}'.format( + self.get_value_by_interface_key(interface, + 'vpp_sw_index'))) + else: + int_cmd = interface + else: + int_cmd = '' + + rap = 'resolve-attempts {}'.format(resolve_attempts) \ + if resolve_attempts else '' + + via = 'via {}'.format(gateway) if gateway else '' + + cnt = 'count {}'.format(count) \ + if count else '' + + vrf = 'vrf {}'.format(vrf) if vrf else '' + + lookup_vrf = 'lookup-in-vrf {}'.format( + lookup_vrf) if lookup_vrf else '' + + multipath = 'multipath' if multipath else '' + + weight = 'weight {}'.format(weight) if weight else '' + + local = 'local' if local else '' + + with VatTerminal(self.ssh_helper, json_param=False) as vat: + vat.vat_terminal_exec_cmd_from_template('add_route.vat', + network=network, + prefix_length=prefix_len, + via=via, + vrf=vrf, + interface=int_cmd, + resolve_attempts=rap, + count=cnt, + lookup_vrf=lookup_vrf, + multipath=multipath, + weight=weight, + local=local) + + def add_arp_on_dut(self, iface_key, ip_address, mac_address): + with VatTerminal(self.ssh_helper) as vat: + return vat.vat_terminal_exec_cmd_from_template( + 'add_ip_neighbor.vat', + sw_if_index=self.get_value_by_interface_key(iface_key, + 'vpp_sw_index'), + ip_address=ip_address, mac_address=mac_address) + + def set_ip(self, interface, address, prefix_length): + with VatTerminal(self.ssh_helper) as vat: + return vat.vat_terminal_exec_cmd_from_template( + 
'add_ip_address.vat', + sw_if_index=self.get_value_by_interface_key(interface, + 'vpp_sw_index'), + address=address, prefix_length=prefix_length) + + def set_interface_state(self, interface, state): + sw_if_index = self.get_value_by_interface_key(interface, + 'vpp_sw_index') + + if state == 'up': + state = 'admin-up link-up' + elif state == 'down': + state = 'admin-down link-down' + else: + raise ValueError('Unexpected interface state: {}'.format(state)) + with VatTerminal(self.ssh_helper) as vat: + return vat.vat_terminal_exec_cmd_from_template( + 'set_if_state.vat', sw_if_index=sw_if_index, state=state) + + def vpp_set_interface_mtu(self, interface, mtu=9200): + sw_if_index = self.get_value_by_interface_key(interface, + 'vpp_sw_index') + if sw_if_index: + with VatTerminal(self.ssh_helper, json_param=False) as vat: + vat.vat_terminal_exec_cmd_from_template( + "hw_interface_set_mtu.vat", sw_if_index=sw_if_index, + mtu=mtu) + + def vpp_interfaces_ready_wait(self, timeout=30): + if_ready = False + not_ready = [] + start = time.time() + while not if_ready: + out = self.vpp_get_interface_data() + if time.time() - start > timeout: + for interface in out: + if interface.get('admin_up_down') == 1: + if interface.get('link_up_down') != 1: + LOG.debug('%s link-down', + interface.get('interface_name')) + raise RuntimeError('timeout, not up {0}'.format(not_ready)) + not_ready = [] + for interface in out: + if interface.get('admin_up_down') == 1: + if interface.get('link_up_down') != 1: + not_ready.append(interface.get('interface_name')) + if not not_ready: + if_ready = True + else: + LOG.debug('Interfaces still in link-down state: %s, ' + 'waiting...', not_ready) + time.sleep(1) + + def vpp_get_interface_data(self, interface=None): + with VatTerminal(self.ssh_helper) as vat: + response = vat.vat_terminal_exec_cmd_from_template( + "interface_dump.vat") + data = response[0] + if interface is not None: + if isinstance(interface, str): + param = "interface_name" + elif 
isinstance(interface, int): + param = "sw_if_index" + else: + raise TypeError + for data_if in data: + if data_if[param] == interface: + return data_if + return dict() + return data + + def update_vpp_interface_data(self): + data = {} + interface_dump_json = self.execute_script_json_out( + "dump_interfaces.vat") + interface_list = json.loads(interface_dump_json) + for interface in self.vnfd_helper.interfaces: + if_mac = interface['virtual-interface']['local_mac'] + interface_dict = VppSetupEnvHelper.get_vpp_interface_by_mac( + interface_list, if_mac) + if not interface_dict: + LOG.debug('Interface %s not found by MAC %s', interface, + if_mac) + continue + data[interface['virtual-interface']['ifname']] = { + 'vpp_name': interface_dict["interface_name"], + 'vpp_sw_index': interface_dict["sw_if_index"] + } + for iface_key, updated_vnfd in data.items(): + self._update_vnfd_helper(updated_vnfd, iface_key) + + def iface_update_numa(self): + iface_numa = {} + for interface in self.vnfd_helper.interfaces: + cmd = "cat /sys/bus/pci/devices/{}/numa_node".format( + interface["virtual-interface"]["vpci"]) + ret, out, _ = self.ssh_helper.execute(cmd) + if ret == 0: + try: + numa_node = int(out) + if numa_node < 0: + if self.vnfd_helper["cpuinfo"][-1][3] + 1 == 1: + iface_numa[ + interface['virtual-interface']['ifname']] = { + 'numa_node': 0 + } + else: + raise ValueError + else: + iface_numa[ + interface['virtual-interface']['ifname']] = { + 'numa_node': numa_node + } + except ValueError: + LOG.debug( + 'Reading numa location failed for: %s', + interface["virtual-interface"]["vpci"]) + for iface_key, updated_vnfd in iface_numa.items(): + self._update_vnfd_helper(updated_vnfd, iface_key) + + def execute_script(self, vat_name, json_out=True, copy_on_execute=False): + if copy_on_execute: + self.ssh_helper.put_file(vat_name, vat_name) + remote_file_path = vat_name + else: + vat_path = self.ssh_helper.join_bin_path("vpp", "templates") + remote_file_path = '{0}/{1}'.format(vat_path, 
vat_name) + + cmd = "{vat_bin} {json} in {vat_path} script".format( + vat_bin=self.VAT_BIN_NAME, + json="json" if json_out is True else "", + vat_path=remote_file_path) + + try: + return self.ssh_helper.execute(cmd=cmd) + except Exception: + raise RuntimeError("VAT script execution failed: {0}".format(cmd)) + + def execute_script_json_out(self, vat_name): + vat_path = self.ssh_helper.join_bin_path("vpp", "templates") + remote_file_path = '{0}/{1}'.format(vat_path, vat_name) + + _, stdout, _ = self.execute_script(vat_name, json_out=True) + return self.cleanup_vat_json_output(stdout, vat_file=remote_file_path) + + @staticmethod + def cleanup_vat_json_output(json_output, vat_file=None): + retval = json_output + clutter = ['vat#', 'dump_interface_table error: Misc', + 'dump_interface_table:6019: JSON output supported only ' \ + 'for VPE API calls and dump_stats_table'] + if vat_file: + clutter.append("{0}(2):".format(vat_file)) + for garbage in clutter: + retval = retval.replace(garbage, '') + return retval.strip() + + @staticmethod + def _convert_mac_to_number_list(mac_address): + list_mac = [] + for num in mac_address.split(":"): + list_mac.append(int(num, 16)) + return list_mac + + @staticmethod + def get_vpp_interface_by_mac(interfaces_list, mac_address): + interface_dict = {} + list_mac_address = VppSetupEnvHelper._convert_mac_to_number_list( + mac_address) + LOG.debug("MAC address %s converted to list %s.", mac_address, + list_mac_address) + for interface in interfaces_list: + # TODO: create vat json integrity checking and move there + if "l2_address" not in interface: + raise KeyError( + "key l2_address not found in interface dict." + "Probably input list is not parsed from correct VAT " + "json output.") + if "l2_address_length" not in interface: + raise KeyError( + "key l2_address_length not found in interface " + "dict. 
Probably input list is not parsed from correct " + "VAT json output.") + mac_from_json = interface["l2_address"][:6] + if mac_from_json == list_mac_address: + if interface["l2_address_length"] != 6: + raise ValueError("l2_address_length value is not 6.") + interface_dict = interface + break + return interface_dict + + @staticmethod + def get_prefix_length(number1, number2, bits): + for i in range(bits): + if number1 >> i == number2 >> i: + return bits - i + return 0 + + +class VatTerminal(object): + + __VAT_PROMPT = ("vat# ",) + __LINUX_PROMPT = (":~# ", ":~$ ", "~]$ ", "~]# ") + + + def __init__(self, ssh_helper, json_param=True): + json_text = ' json' if json_param else '' + self.json = json_param + self.ssh_helper = ssh_helper + EXEC_RETRY = 3 + + try: + self._tty = self.ssh_helper.interactive_terminal_open() + except Exception: + raise RuntimeError("Cannot open interactive terminal") + + for _ in range(EXEC_RETRY): + try: + self.ssh_helper.interactive_terminal_exec_command( + self._tty, + 'sudo -S {0}{1}'.format(VppSetupEnvHelper.VAT_BIN_NAME, + json_text), + self.__VAT_PROMPT) + except exceptions.SSHTimeout: + continue + else: + break + + self._exec_failure = False + self.vat_stdout = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.vat_terminal_close() + + def vat_terminal_exec_cmd(self, cmd): + try: + out = self.ssh_helper.interactive_terminal_exec_command(self._tty, + cmd, + self.__VAT_PROMPT) + self.vat_stdout = out + except exceptions.SSHTimeout: + self._exec_failure = True + raise RuntimeError( + "VPP is not running on node. VAT command {0} execution failed". 
+ format(cmd)) + if self.json: + obj_start = out.find('{') + obj_end = out.rfind('}') + array_start = out.find('[') + array_end = out.rfind(']') + + if obj_start == -1 and array_start == -1: + raise RuntimeError( + "VAT command {0}: no JSON data.".format(cmd)) + + if obj_start < array_start or array_start == -1: + start = obj_start + end = obj_end + 1 + else: + start = array_start + end = array_end + 1 + out = out[start:end] + json_out = json.loads(out) + return json_out + else: + return None + + def vat_terminal_close(self): + if not self._exec_failure: + try: + self.ssh_helper.interactive_terminal_exec_command(self._tty, + 'quit', + self.__LINUX_PROMPT) + except exceptions.SSHTimeout: + raise RuntimeError("Failed to close VAT console") + try: + self.ssh_helper.interactive_terminal_close(self._tty) + except Exception: + raise RuntimeError("Cannot close interactive terminal") + + def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args): + file_path = os.path.join(constants.YARDSTICK_ROOT_PATH, + 'yardstick/resources/templates/', + vat_template_file) + with open(file_path, 'r') as template_file: + cmd_template = template_file.readlines() + ret = [] + for line_tmpl in cmd_template: + vat_cmd = line_tmpl.format(**args) + ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace('\n', ''))) + return ret diff --git a/yardstick/resources/templates/add_ip_address.vat b/yardstick/resources/templates/add_ip_address.vat new file mode 100644 index 000000000..d59480c33 --- /dev/null +++ b/yardstick/resources/templates/add_ip_address.vat @@ -0,0 +1 @@ +sw_interface_add_del_address sw_if_index {sw_if_index} {address}/{prefix_length} diff --git a/yardstick/resources/templates/add_ip_neighbor.vat b/yardstick/resources/templates/add_ip_neighbor.vat new file mode 100644 index 000000000..730e7112a --- /dev/null +++ b/yardstick/resources/templates/add_ip_neighbor.vat @@ -0,0 +1 @@ +ip_neighbor_add_del sw_if_index {sw_if_index} dst {ip_address} mac {mac_address} diff --git 
a/yardstick/resources/templates/add_route.vat b/yardstick/resources/templates/add_route.vat new file mode 100644 index 000000000..64c6a6c3b --- /dev/null +++ b/yardstick/resources/templates/add_route.vat @@ -0,0 +1 @@ +ip_add_del_route {network}/{prefix_length} {via} {vrf} {interface} {resolve_attempts} {count} {lookup_vrf} {multipath} {weight} {local}
\ No newline at end of file diff --git a/yardstick/resources/templates/del_route.vat b/yardstick/resources/templates/del_route.vat new file mode 100644 index 000000000..e7fe4bc1e --- /dev/null +++ b/yardstick/resources/templates/del_route.vat @@ -0,0 +1 @@ +ip_add_del_route {network}/{prefix_length} via {gateway} sw_if_index {sw_if_index} del
\ No newline at end of file diff --git a/yardstick/resources/templates/flush_ip_addresses.vat b/yardstick/resources/templates/flush_ip_addresses.vat new file mode 100644 index 000000000..f38fcf12c --- /dev/null +++ b/yardstick/resources/templates/flush_ip_addresses.vat @@ -0,0 +1 @@ +sw_interface_add_del_address sw_if_index {sw_if_index} del-all
\ No newline at end of file diff --git a/yardstick/resources/templates/hw_interface_set_mtu.vat b/yardstick/resources/templates/hw_interface_set_mtu.vat new file mode 100644 index 000000000..645d1a80c --- /dev/null +++ b/yardstick/resources/templates/hw_interface_set_mtu.vat @@ -0,0 +1 @@ +hw_interface_set_mtu sw_if_index {sw_if_index} mtu {mtu} diff --git a/yardstick/resources/templates/interface_dump.vat b/yardstick/resources/templates/interface_dump.vat new file mode 100644 index 000000000..850c348f6 --- /dev/null +++ b/yardstick/resources/templates/interface_dump.vat @@ -0,0 +1 @@ +sw_interface_dump diff --git a/yardstick/resources/templates/set_if_state.vat b/yardstick/resources/templates/set_if_state.vat new file mode 100644 index 000000000..e2c2d4b29 --- /dev/null +++ b/yardstick/resources/templates/set_if_state.vat @@ -0,0 +1 @@ +sw_interface_set_flags sw_if_index {sw_if_index} {state} diff --git a/yardstick/ssh.py b/yardstick/ssh.py index 8bdc32c7c..6bc6010f7 100644 --- a/yardstick/ssh.py +++ b/yardstick/ssh.py @@ -80,6 +80,7 @@ from yardstick.common import exceptions from yardstick.common.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map from yardstick.network_services.utils import provision_tool +LOG = logging.getLogger(__name__) def convert_key_to_str(key): if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)): @@ -89,14 +90,6 @@ def convert_key_to_str(key): return k.getvalue() -# class SSHError(Exception): -# pass -# -# -# class SSHTimeout(SSHError): -# pass - - class SSH(object): """Represent ssh connection.""" @@ -345,6 +338,7 @@ class SSH(object): details = fmt % {"cmd": cmd, "status": exit_status} if stderr_data: details += " Last stderr data: '%s'." 
% stderr_data + LOG.critical("PROX ERROR: %s", details) raise exceptions.SSHError(error_msg=details) return exit_status @@ -456,6 +450,86 @@ class SSH(object): with client.open_sftp() as sftp: sftp.getfo(remotepath, file_obj) + def interactive_terminal_open(self, time_out=45): + """Open interactive terminal on a SSH channel. + + :param time_out: Timeout in seconds. + :returns: SSH channel with opened terminal. + + .. warning:: Interruptingcow is used here, and it uses + signal(SIGALRM) to let the operating system interrupt program + execution. This has the following limitations: Python signal + handlers only apply to the main thread, so you cannot use this + from other threads. You must not use this in a program that + uses SIGALRM itself (this includes certain profilers) + """ + chan = self._get_client().get_transport().open_session() + chan.get_pty() + chan.invoke_shell() + chan.settimeout(int(time_out)) + chan.set_combine_stderr(True) + + buf = '' + while not buf.endswith((":~# ", ":~$ ", "~]$ ", "~]# ")): + try: + chunk = chan.recv(10 * 1024 * 1024) + if not chunk: + break + buf += chunk + if chan.exit_status_ready(): + self.log.error('Channel exit status ready') + break + except socket.timeout: + raise exceptions.SSHTimeout(error_msg='Socket timeout: %s' % buf) + return chan + + def interactive_terminal_exec_command(self, chan, cmd, prompt): + """Execute command on interactive terminal. + + interactive_terminal_open() method has to be called first! + + :param chan: SSH channel with opened terminal. + :param cmd: Command to be executed. + :param prompt: Command prompt, sequence of characters used to + indicate readiness to accept commands. + :returns: Command output. + + .. warning:: Interruptingcow is used here, and it uses + signal(SIGALRM) to let the operating system interrupt program + execution. This has the following limitations: Python signal + handlers only apply to the main thread, so you cannot use this + from other threads. 
You must not use this in a program that + uses SIGALRM itself (this includes certain profilers) + """ + chan.sendall('{c}\n'.format(c=cmd)) + buf = '' + while not buf.endswith(prompt): + try: + chunk = chan.recv(10 * 1024 * 1024) + if not chunk: + break + buf += chunk + if chan.exit_status_ready(): + self.log.error('Channel exit status ready') + break + except socket.timeout: + message = ("Socket timeout during execution of command: " + "%(cmd)s\nBuffer content:\n%(buf)s" % {"cmd": cmd, + "buf": buf}) + raise exceptions.SSHTimeout(error_msg=message) + tmp = buf.replace(cmd.replace('\n', ''), '') + for item in prompt: + tmp.replace(item, '') + return tmp + + @staticmethod + def interactive_terminal_close(chan): + """Close interactive terminal SSH channel. + + :param: chan: SSH channel to be closed. + """ + chan.close() + class AutoConnectSSH(SSH): diff --git a/yardstick/tests/functional/network_services/vnf_generic/vnf/__init__.py b/yardstick/tests/functional/benchmark/core/__init__.py index e69de29bb..e69de29bb 100644 --- a/yardstick/tests/functional/network_services/vnf_generic/vnf/__init__.py +++ b/yardstick/tests/functional/benchmark/core/__init__.py diff --git a/yardstick/tests/functional/benchmark/core/test_report.py b/yardstick/tests/functional/benchmark/core/test_report.py new file mode 100644 index 000000000..832d3b3e1 --- /dev/null +++ b/yardstick/tests/functional/benchmark/core/test_report.py @@ -0,0 +1,314 @@ +############################################################################## +# Copyright (c) 2018-2019 Intel Corporation. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +import ast +import tempfile +import unittest + +import mock +from six.moves import configparser + +from yardstick.benchmark import core +from yardstick.benchmark.core import report +from yardstick.cmd.commands import change_osloobj_to_paras + + +GOOD_YAML_NAME = 'fake_name' +GOOD_TASK_ID = "9cbe74b6-df09-4535-8bdc-dc3a43b8a4e2" +GOOD_DB_FIELDKEYS = [ + {u'fieldKey': u'metric1', u'fieldType': u'integer'}, + {u'fieldKey': u'metric4', u'fieldType': u'integer'}, + {u'fieldKey': u'metric2', u'fieldType': u'integer'}, + {u'fieldKey': u'metric3', u'fieldType': u'integer'}, +] +GOOD_DB_METRICS = [ + {u'time': u'2018-08-20T16:49:26.372662016Z', + u'metric1': 1, u'metric2': 0, u'metric3': 8, u'metric4': 5}, + {u'time': u'2018-08-20T16:49:27.374208000Z', + u'metric1': 1, u'metric2': 1, u'metric3': 5, u'metric4': 4}, + {u'time': u'2018-08-20T16:49:28.375742976Z', + u'metric1': 2, u'metric2': 2, u'metric3': 3, u'metric4': 3}, + {u'time': u'2018-08-20T16:49:29.377299968Z', + u'metric1': 3, u'metric2': 3, u'metric3': 2, u'metric4': 2}, + {u'time': u'2018-08-20T16:49:30.378252032Z', + u'metric1': 5, u'metric2': 4, u'metric3': 1, u'metric4': 1}, + {u'time': u'2018-08-20T16:49:30.379359421Z', + u'metric1': 8, u'metric2': 5, u'metric3': 1, u'metric4': 0}, +] +GOOD_DB_BARO_METRICS = [ + {u'value': 324050, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-08-20T16:49:27.383698038Z', + u'type_instance': u'user', u'type': u'cpu'}, + { + u'value': 193798, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T16:49:27.383712594Z', + u'type_instance': u'system', u'type': u'cpu'}, + { + u'value': 324051, u'instance': u'0', u'host': u'myhostname', + u'time': 
u'2018-08-20T16:49:28.383696624Z', + u'type_instance': u'user', u'type': u'cpu'}, + { + u'value': 193800, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-08-20T16:49:28.383713481Z', + u'type_instance': u'system', u'type': u'cpu'}, + { + u'value': 324054, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-08-20T16:49:29.3836966789Z', + u'type_instance': u'user', u'type': u'cpu'}, + { + u'value': 193801, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-08-20T16:49:29.383716296Z', + u'type_instance': u'system', u'type': u'cpu'} +] +TIMESTAMP_START = '2018-08-20T16:49:26.372662016Z' +TIMESTAMP_END = '2018-08-20T16:49:30.379359421Z' + +yardstick_config = """ +[DEFAULT] +dispatcher = influxdb +""" + + +def my_query(query_sql, db=None): + get_fieldkeys_cmd = 'show field keys' + get_metrics_cmd = 'select * from' + get_start_time_cmd = 'ORDER ASC limit 1' + get_end_time_cmd = 'ORDER DESC limit 1' + if db: + if get_start_time_cmd in query_sql: + return TIMESTAMP_START + elif get_end_time_cmd in query_sql: + return TIMESTAMP_END + else: + return GOOD_DB_BARO_METRICS + elif get_fieldkeys_cmd in query_sql: + return GOOD_DB_FIELDKEYS + elif get_metrics_cmd in query_sql: + return GOOD_DB_METRICS + return [] + + +class ReportTestCase(unittest.TestCase): + + @mock.patch.object(report.influx, 'query', new=my_query) + @mock.patch.object(configparser.ConfigParser, + 'read', side_effect=mock.mock_open(read_data=yardstick_config)) + def test_report_generate_nsb_simple(self, *args): + tmpfile = tempfile.NamedTemporaryFile(delete=True) + + args = core.Param({"task_id": [GOOD_TASK_ID], "yaml_name": [GOOD_YAML_NAME]}) + params = change_osloobj_to_paras(args) + + with mock.patch.object(report.consts, 'DEFAULT_HTML_FILE', tmpfile.name): + report.Report().generate_nsb(params) + + data_act = None + time_act = None + keys_act = None + tree_act = None + with open(tmpfile.name) as f: + for l in f.readlines(): + if "var report_data = {" in l: + data_act = 
ast.literal_eval(l.strip()[18:-1]) + elif "var report_time = [" in l: + time_act = ast.literal_eval(l.strip()[18:-1]) + elif "var report_keys = [" in l: + keys_act = ast.literal_eval(l.strip()[18:-1]) + elif "var report_tree = [" in l: + tree_act = ast.literal_eval(l.strip()[18:-1]) + data_exp = { + 'metric1': [ + {'x': '16:49:26.372662', 'y': 1}, + {'x': '16:49:27.374208', 'y': 1}, + {'x': '16:49:28.375742', 'y': 2}, + {'x': '16:49:29.377299', 'y': 3}, + {'x': '16:49:30.378252', 'y': 5}, + {'x': '16:49:30.379359', 'y': 8}], + 'metric2': [ + {'x': '16:49:26.372662', 'y': 0}, + {'x': '16:49:27.374208', 'y': 1}, + {'x': '16:49:28.375742', 'y': 2}, + {'x': '16:49:29.377299', 'y': 3}, + {'x': '16:49:30.378252', 'y': 4}, + {'x': '16:49:30.379359', 'y': 5}], + 'metric3': [ + {'x': '16:49:26.372662', 'y': 8}, + {'x': '16:49:27.374208', 'y': 5}, + {'x': '16:49:28.375742', 'y': 3}, + {'x': '16:49:29.377299', 'y': 2}, + {'x': '16:49:30.378252', 'y': 1}, + {'x': '16:49:30.379359', 'y': 1}], + 'metric4': [ + {'x': '16:49:26.372662', 'y': 5}, + {'x': '16:49:27.374208', 'y': 4}, + {'x': '16:49:28.375742', 'y': 3}, + {'x': '16:49:29.377299', 'y': 2}, + {'x': '16:49:30.378252', 'y': 1}, + {'x': '16:49:30.379359', 'y': 0}], + 'myhostname.cpu_value.cpu.system.0': [ + {'x': '16:49:27.3837', 'y': 193798}, + {'x': '16:49:28.3837', 'y': 193800}, + {'x': '16:49:29.3837', 'y': 193801}], + 'myhostname.cpu_value.cpu.user.0': [ + {'x': '16:49:27.3836', 'y': 324050}, + {'x': '16:49:28.3836', 'y': 324051}, + {'x': '16:49:29.3836', 'y': 324054}], + 'myhostname.cpufreq_value.cpu.system.0': [ + {'x': '16:49:27.3837', 'y': 193798}, + {'x': '16:49:28.3837', 'y': 193800}, + {'x': '16:49:29.3837', 'y': 193801}], + 'myhostname.cpufreq_value.cpu.user.0': [ + {'x': '16:49:27.3836', 'y': 324050}, + {'x': '16:49:28.3836', 'y': 324051}, + {'x': '16:49:29.3836', 'y': 324054}], + 'myhostname.intel_pmu_value.cpu.system.0': [ + {'x': '16:49:27.3837', 'y': 193798}, + {'x': '16:49:28.3837', 'y': 193800}, + {'x': 
'16:49:29.3837', 'y': 193801}], + 'myhostname.intel_pmu_value.cpu.user.0': [ + {'x': '16:49:27.3836', 'y': 324050}, + {'x': '16:49:28.3836', 'y': 324051}, + {'x': '16:49:29.3836', 'y': 324054}], + 'myhostname.virt_value.cpu.system.0': [ + {'x': '16:49:27.3837', 'y': 193798}, + {'x': '16:49:28.3837', 'y': 193800}, + {'x': '16:49:29.3837', 'y': 193801}], + 'myhostname.virt_value.cpu.user.0': [ + {'x': '16:49:27.3836', 'y': 324050}, + {'x': '16:49:28.3836', 'y': 324051}, + {'x': '16:49:29.3836', 'y': 324054}], + 'myhostname.memory_value.cpu.system.0': [ + {'x': '16:49:27.3837', 'y': 193798}, + {'x': '16:49:28.3837', 'y': 193800}, + {'x': '16:49:29.3837', 'y': 193801}], + 'myhostname.memory_value.cpu.user.0': [ + {'x': '16:49:27.3836', 'y': 324050}, + {'x': '16:49:28.3836', 'y': 324051}, + {'x': '16:49:29.3836', 'y': 324054}] + } + time_exp = [ + '16:49:26.372662', '16:49:27.374208', '16:49:27.3836', + '16:49:27.3837', '16:49:28.375742', '16:49:28.3836', + '16:49:28.3837', '16:49:29.377299', '16:49:29.3836', + '16:49:29.3837', '16:49:30.378252', '16:49:30.379359', + ] + keys_exp = sorted([ + 'metric1', 'metric2', 'metric3', 'metric4', + 'myhostname.cpu_value.cpu.system.0', + 'myhostname.cpu_value.cpu.user.0', + 'myhostname.cpufreq_value.cpu.system.0', + 'myhostname.cpufreq_value.cpu.user.0', + 'myhostname.intel_pmu_value.cpu.system.0', + 'myhostname.intel_pmu_value.cpu.user.0', + 'myhostname.virt_value.cpu.system.0', + 'myhostname.virt_value.cpu.user.0', + 'myhostname.memory_value.cpu.system.0', + 'myhostname.memory_value.cpu.user.0', + ]) + tree_exp = [ + {'parent': '#', 'text': 'metric1', 'id': 'metric1'}, + {'parent': '#', 'text': 'metric2', 'id': 'metric2'}, + {'parent': '#', 'text': 'metric3', 'id': 'metric3'}, + {'parent': '#', 'text': 'metric4', 'id': 'metric4'}, + {'id': 'myhostname', 'parent': '#', 'text': 'myhostname'}, + {'id': 'myhostname.cpu_value', + 'parent': 'myhostname', + 'text': 'cpu_value'}, + {'id': 'myhostname.cpu_value.cpu', + 'parent': 
'myhostname.cpu_value', + 'text': 'cpu'}, + {'id': 'myhostname.cpu_value.cpu.system', + 'parent': 'myhostname.cpu_value.cpu', + 'text': 'system'}, + {'id': 'myhostname.cpu_value.cpu.system.0', + 'parent': 'myhostname.cpu_value.cpu.system', + 'text': '0'}, + {'id': 'myhostname.cpu_value.cpu.user', + 'parent': 'myhostname.cpu_value.cpu', + 'text': 'user'}, + {'id': 'myhostname.cpu_value.cpu.user.0', + 'parent': 'myhostname.cpu_value.cpu.user', + 'text': '0'}, + {'id': 'myhostname.cpufreq_value', + 'parent': 'myhostname', + 'text': 'cpufreq_value'}, + {'id': 'myhostname.cpufreq_value.cpu', + 'parent': 'myhostname.cpufreq_value', + 'text': 'cpu'}, + {'id': 'myhostname.cpufreq_value.cpu.system', + 'parent': 'myhostname.cpufreq_value.cpu', + 'text': 'system'}, + {'id': 'myhostname.cpufreq_value.cpu.system.0', + 'parent': 'myhostname.cpufreq_value.cpu.system', + 'text': '0'}, + {'id': 'myhostname.cpufreq_value.cpu.user', + 'parent': 'myhostname.cpufreq_value.cpu', + 'text': 'user'}, + {'id': 'myhostname.cpufreq_value.cpu.user.0', + 'parent': 'myhostname.cpufreq_value.cpu.user', + 'text': '0'}, + {'id': 'myhostname.intel_pmu_value', + 'parent': 'myhostname', + 'text': 'intel_pmu_value'}, + {'id': 'myhostname.intel_pmu_value.cpu', + 'parent': 'myhostname.intel_pmu_value', + 'text': 'cpu'}, + {'id': 'myhostname.intel_pmu_value.cpu.system', + 'parent': 'myhostname.intel_pmu_value.cpu', + 'text': 'system'}, + {'id': 'myhostname.intel_pmu_value.cpu.system.0', + 'parent': 'myhostname.intel_pmu_value.cpu.system', + 'text': '0'}, + {'id': 'myhostname.intel_pmu_value.cpu.user', + 'parent': 'myhostname.intel_pmu_value.cpu', + 'text': 'user'}, + {'id': 'myhostname.intel_pmu_value.cpu.user.0', + 'parent': 'myhostname.intel_pmu_value.cpu.user', + 'text': '0'}, + {'id': 'myhostname.memory_value', + 'parent': 'myhostname', + 'text': 'memory_value'}, + {'id': 'myhostname.memory_value.cpu', + 'parent': 'myhostname.memory_value', + 'text': 'cpu'}, + {'id': 
'myhostname.memory_value.cpu.system', + 'parent': 'myhostname.memory_value.cpu', + 'text': 'system'}, + {'id': 'myhostname.memory_value.cpu.system.0', + 'parent': 'myhostname.memory_value.cpu.system', + 'text': '0'}, + {'id': 'myhostname.memory_value.cpu.user', + 'parent': 'myhostname.memory_value.cpu', + 'text': 'user'}, + {'id': 'myhostname.memory_value.cpu.user.0', + 'parent': 'myhostname.memory_value.cpu.user', + 'text': '0'}, + {'id': 'myhostname.virt_value', 'parent': 'myhostname', + 'text': 'virt_value'}, + {'id': 'myhostname.virt_value.cpu', + 'parent': 'myhostname.virt_value', + 'text': 'cpu'}, + {'id': 'myhostname.virt_value.cpu.system', + 'parent': 'myhostname.virt_value.cpu', + 'text': 'system'}, + {'id': 'myhostname.virt_value.cpu.system.0', + 'parent': 'myhostname.virt_value.cpu.system', + 'text': '0'}, + {'id': 'myhostname.virt_value.cpu.user', + 'parent': 'myhostname.virt_value.cpu', + 'text': 'user'}, + {'id': 'myhostname.virt_value.cpu.user.0', + 'parent': 'myhostname.virt_value.cpu.user', + 'text': '0'} + ] + + self.assertEqual(data_exp, data_act) + self.assertEqual(time_exp, time_act) + self.assertEqual(keys_exp, keys_act) + self.assertEqual(tree_exp, tree_act) diff --git a/yardstick/tests/functional/common/test_packages.py b/yardstick/tests/functional/common/test_packages.py index 5dead4e55..e15f72898 100644 --- a/yardstick/tests/functional/common/test_packages.py +++ b/yardstick/tests/functional/common/test_packages.py @@ -15,6 +15,7 @@ import os from os import path import re +import unittest from yardstick.common import packages from yardstick.common import utils @@ -39,16 +40,21 @@ class PipPackagesTestCase(base.BaseFunctionalTestCase): utils.execute_command('sudo rm -rf %s' % self.TMP_FOLDER) def _remove_package(self, package): - os.system('%s pip uninstall %s -y' % (self.PYTHONPATH, package)) + os.system('%s python -m pip uninstall %s -y' % + (self.PYTHONPATH, package)) def _list_packages(self): pip_list_regex = re.compile( 
r"(?P<name>[\w\.-]+) \((?P<version>[\w\d_\.\-]+),*.*\)") + pip_list_regex_18 = re.compile( + r"(?P<name>[\w\.-]+)[\s]+(?P<version>[\w\d_\.\-]+),*.*") pkg_dict = {} - pkgs = utils.execute_command('pip list', + pkgs = utils.execute_command('python -m pip list', env={'PYTHONPATH': self.TMP_FOLDER}) for line in pkgs: match = pip_list_regex.match(line) + if not match: + match = pip_list_regex_18.match(line) if match and match.group('name'): pkg_dict[match.group('name')] = match.group('version') return pkg_dict @@ -64,6 +70,7 @@ class PipPackagesTestCase(base.BaseFunctionalTestCase): self.assertEqual(0, packages.pip_install(package_dir, self.TMP_FOLDER)) self.assertTrue(package_name in self._list_packages()) + @unittest.skip("see https://github.com/pypa/pip/issues/3889") def test_install_from_pip_package(self): dirname = path.dirname(__file__) package_path = (dirname + @@ -84,11 +91,10 @@ class PipPackagesTestCase(base.BaseFunctionalTestCase): # NOTE (ralonsoh): from requirements.txt file. The best way to test # this function is to parse requirements.txt and test-requirements.txt # and check all packages. - pkgs_ref = {'Babel': '2.3.4', - 'SQLAlchemy': '1.1.12', - 'influxdb': '4.1.1', - 'netifaces': '0.10.6', - 'unicodecsv': '0.14.1'} + pkgs_ref = {'Babel': '2.6.0', + 'SQLAlchemy': '1.2.18', + 'influxdb': '5.1.0', + 'netifaces': '0.10.9'} pkgs = packages.pip_list() for name, version in (pkgs_ref.items()): self.assertEqual(version, pkgs[name]) diff --git a/yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py b/yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py deleted file mode 100644 index e57f8f51c..000000000 --- a/yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2018 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import multiprocessing -import time -import uuid - -import mock - -from yardstick.common import messaging -from yardstick.common.messaging import payloads -from yardstick.common.messaging import producer -from yardstick.network_services.vnf_generic.vnf import base as vnf_base -from yardstick.tests.functional import base as ft_base - - -class _TrafficGenMQConsumer(vnf_base.GenericTrafficGen, - vnf_base.GenericVNFEndpoint): - - def __init__(self, name, vnfd, task_id): - vnf_base.GenericTrafficGen.__init__(self, name, vnfd, task_id) - self.queue = multiprocessing.Queue() - self._id = uuid.uuid1().int - vnf_base.GenericVNFEndpoint.__init__(self, self._id, [task_id], - self.queue) - self._consumer = vnf_base.GenericVNFConsumer([task_id], self) - self._consumer.start_rpc_server() - - def run_traffic(self, *args): - pass - - def terminate(self): - pass - - def collect_kpi(self): - pass - - def instantiate(self, *args): - pass - - def scale(self, flavor=''): - pass - - def runner_method_start_iteration(self, ctxt, **kwargs): - if ctxt['id'] in self._ctx_ids: - self._queue.put( - {'action': messaging.RUNNER_METHOD_START_ITERATION, - 'payload': payloads.RunnerPayload.dict_to_obj(kwargs)}) - - def runner_method_stop_iteration(self, ctxt, **kwargs): - if ctxt['id'] in self._ctx_ids: - self._queue.put( - {'action': messaging.RUNNER_METHOD_STOP_ITERATION, - 'payload': payloads.RunnerPayload.dict_to_obj(kwargs)}) - - -class _DummyProducer(producer.MessagingProducer): - pass - - -class GenericVNFMQConsumerTestCase(ft_base.BaseFunctionalTestCase): - - def 
test_fistro(self): - vnfd = {'benchmark': {'kpi': mock.ANY}, - 'vdu': [{'external-interface': 'ext_int'}] - } - task_id = uuid.uuid1().int - tg_obj = _TrafficGenMQConsumer('name_tg', vnfd, task_id) - producer = _DummyProducer(messaging.TOPIC_RUNNER, task_id) - - num_messages = 10 - for i in range(num_messages): - pload = payloads.RunnerPayload(version=10, data=i) - for method in (messaging.RUNNER_METHOD_START_ITERATION, - messaging.RUNNER_METHOD_STOP_ITERATION): - producer.send_message(method, pload) - - time.sleep(0.5) # Let consumers attend the calls - output = [] - while not tg_obj.queue.empty(): - data = tg_obj.queue.get(True, 1) - data_dict = {'action': data['action'], - 'payload': data['payload'].obj_to_dict()} - output.append(data_dict) - - self.assertEqual(num_messages * 2, len(output)) - for i in range(num_messages): - pload = payloads.RunnerPayload(version=10, data=i).obj_to_dict() - for method in (messaging.RUNNER_METHOD_START_ITERATION, - messaging.RUNNER_METHOD_STOP_ITERATION): - reg = {'action': method, 'payload': pload} - self.assertIn(reg, output) diff --git a/yardstick/tests/unit/apiserver/utils/test_influx.py b/yardstick/tests/unit/apiserver/utils/test_influx.py index 6021d35df..3a97ff292 100644 --- a/yardstick/tests/unit/apiserver/utils/test_influx.py +++ b/yardstick/tests/unit/apiserver/utils/test_influx.py @@ -1,5 +1,6 @@ ############################################################################## # Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. +# Copyright (c) 2019 Intel Corporation. # # All rights reserved. 
This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 @@ -29,7 +30,7 @@ class GetDataDbClientTestCase(base.BaseUnitTestCase): self.assertEqual('fake_client', influx.get_data_db_client()) _mock_parser.read.assert_called_once_with(constants.CONF_FILE) - mock_get_client.assert_called_once_with(_mock_parser) + mock_get_client.assert_called_once_with(_mock_parser, None) @mock.patch.object(influx.logger, 'error') @mock.patch.object(influx, '_get_influxdb_client', @@ -46,7 +47,7 @@ class GetDataDbClientTestCase(base.BaseUnitTestCase): influx.get_data_db_client() _mock_parser.read.assert_called_once_with(constants.CONF_FILE) - mock_get_client.assert_called_once_with(_mock_parser) + mock_get_client.assert_called_once_with(_mock_parser, None) class GetIpTestCase(base.BaseUnitTestCase): diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py index 98d2b1836..e76a3ca27 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py @@ -17,6 +17,7 @@ import os import uuid import mock +import netaddr import unittest from xml.etree import ElementTree @@ -54,7 +55,7 @@ class ModelLibvirtTestCase(unittest.TestCase): numa_cpus=0 - 10, socket=1, threads=1, vm_image="/var/lib/libvirt/images/yardstick-nsb-image.img", - cpuset=2 - 10, cputune='') + cpuset=2 - 10, cputune='', machine='pc') def setUp(self): self.pci_address_str = '0001:04:03.2' @@ -123,7 +124,7 @@ class ModelLibvirtTestCase(unittest.TestCase): def test_add_ovs_interfaces(self): xml_input = copy.deepcopy(XML_SAMPLE) xml_output = model.Libvirt.add_ovs_interface( - '/usr/local', 0, self.pci_address_str, self.mac, xml_input) + '/usr/local', 0, self.pci_address_str, self.mac, xml_input, 4) root = ElementTree.fromstring(xml_output) et_out = ElementTree.ElementTree(element=root) @@ -292,6 +293,7 
@@ class ModelLibvirtTestCase(unittest.TestCase): hostname = root.find('name').text mac = "00:11:22:33:44:55" ip = "{0}/{1}".format(node.get('ip'), node.get('netmask')) + ip = "{0}/{1}".format(node.get('ip'), netaddr.IPNetwork(ip).prefixlen) model.StandaloneContextHelper.check_update_key(self.mock_ssh, node, hostname, id_name, cdrom_img, mac) mock_gen_cdrom_image.assert_called_once_with(self.mock_ssh, cdrom_img, hostname, @@ -350,7 +352,8 @@ class ModelLibvirtTestCase(unittest.TestCase): xml_ref = model.VM_TEMPLATE.format(vm_name='vm_name', random_uuid=_uuid, mac_addr=mac, memory='1024', vcpu='8', cpu='4', numa_cpus='0-7', socket='3', threads='2', - vm_image='qemu_image', cpuset='4,5', cputune='cool') + vm_image='qemu_image', cpuset='4,5', cputune='cool', + machine='pc-i440fx-xenial') xml_ref = model.Libvirt.add_cdrom(cdrom_img, xml_ref) self.assertEqual(xml_out, xml_ref) mock_get_mac_address.assert_called_once_with(0x00) diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py index 6cc8b11f3..413bb68b7 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py @@ -24,6 +24,7 @@ from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts.standalone import model from yardstick.benchmark.contexts.standalone import ovs_dpdk from yardstick.common import exceptions +from yardstick.common import utils as common_utils from yardstick.network_services import utils @@ -159,6 +160,13 @@ class OvsDpdkContextTestCase(unittest.TestCase): } self.ovs_dpdk.wait_for_vswitchd = 0 self.assertIsNone(self.ovs_dpdk.setup_ovs_bridge_add_flows()) + self.ovs_dpdk.ovs_properties.update( + {'dpdk_pmd-rxq-affinity': {'0': "0:1"}}) + self.ovs_dpdk.ovs_properties.update( + {'vhost_pmd-rxq-affinity': {'0': "0:1"}}) + self.NETWORKS['private_0'].update({'port_num': '0'}) + 
self.NETWORKS['public_0'].update({'port_num': '1'}) + self.ovs_dpdk.setup_ovs_bridge_add_flows() @mock.patch("yardstick.ssh.SSH") def test_cleanup_ovs_dpdk_env(self, mock_ssh): @@ -171,11 +179,9 @@ class OvsDpdkContextTestCase(unittest.TestCase): self.ovs_dpdk.wait_for_vswitchd = 0 self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env()) - @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages') @mock.patch.object(utils, 'get_nsb_option') @mock.patch.object(model.OvsDeploy, 'ovs_deploy') - def test_check_ovs_dpdk_env(self, mock_ovs_deploy, mock_get_nsb_option, - mock_check_hugepages): + def test_check_ovs_dpdk_env(self, mock_ovs_deploy, mock_get_nsb_option): self.ovs_dpdk.connection = mock.Mock() self.ovs_dpdk.connection.execute = mock.Mock( return_value=(1, 0, 0)) @@ -189,11 +195,9 @@ class OvsDpdkContextTestCase(unittest.TestCase): self.ovs_dpdk.check_ovs_dpdk_env() mock_ovs_deploy.assert_called_once() - mock_check_hugepages.assert_called_once() mock_get_nsb_option.assert_called_once_with('bin_path') - @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages') - def test_check_ovs_dpdk_env_wrong_version(self, mock_check_hugepages): + def test_check_ovs_dpdk_env_wrong_version(self): self.ovs_dpdk.connection = mock.Mock() self.ovs_dpdk.connection.execute = mock.Mock( return_value=(1, 0, 0)) @@ -206,7 +210,6 @@ class OvsDpdkContextTestCase(unittest.TestCase): with self.assertRaises(exceptions.OVSUnsupportedVersion): self.ovs_dpdk.check_ovs_dpdk_env() - mock_check_hugepages.assert_called_once() @mock.patch('yardstick.ssh.SSH') def test_deploy(self, *args): @@ -389,15 +392,20 @@ class OvsDpdkContextTestCase(unittest.TestCase): self.ovs_dpdk._enable_interfaces(0, ["private_0"], 'test') mock_add_ovs_interface.assert_called_once_with( 'fake_path', 0, self.NETWORKS['private_0']['vpci'], - self.NETWORKS['private_0']['mac'], 'test') + self.NETWORKS['private_0']['mac'], 'test', 1) + @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages') + 
@mock.patch.object(common_utils, 'setup_hugepages') @mock.patch.object(model.StandaloneContextHelper, 'check_update_key') @mock.patch.object(model.Libvirt, 'write_file') @mock.patch.object(model.Libvirt, 'build_vm_xml') @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete') @mock.patch.object(model.Libvirt, 'virsh_create_vm') - def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists, mock_build_xml, - mock_write_file, mock_check_update_key): + def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists, + mock_build_xml, mock_write_file, + mock_check_update_key, + mock_setup_hugepages, + mock__check_hugepages): self.ovs_dpdk.vm_deploy = True self.ovs_dpdk.connection = mock.Mock() self.ovs_dpdk.vm_names = ['vm-0', 'vm-1'] @@ -413,7 +421,7 @@ class OvsDpdkContextTestCase(unittest.TestCase): } self.ovs_dpdk.networks = self.NETWORKS self.ovs_dpdk.host_mgmt = {} - self.ovs_dpdk.flavor = {} + self.ovs_dpdk.vm_flavor = {'ram': '1024'} self.ovs_dpdk.file_path = '/var/lib/libvirt/images/cdrom-0.img' self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="") self.ovs_dpdk._name_task_id = 'fake_name' @@ -429,6 +437,9 @@ class OvsDpdkContextTestCase(unittest.TestCase): self.assertEqual([vnf_instance_2], self.ovs_dpdk.setup_ovs_dpdk_context()) + mock_setup_hugepages.assert_called_once_with(self.ovs_dpdk.connection, + (1024 + 4096) * 1024) # ram + dpdk_socket0_mem + dpdk_socket1_mem + mock__check_hugepages.assert_called_once() mock_create_vm.assert_called_once_with( self.ovs_dpdk.connection, '/tmp/vm_ovs_0.xml') mock_check_if_exists.assert_called_once_with( diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py index 316aca72a..0809a983a 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py @@ -22,6 +22,7 @@ from yardstick.benchmark import 
contexts from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts.standalone import model from yardstick.benchmark.contexts.standalone import sriov +from yardstick.common import utils class SriovContextTestCase(unittest.TestCase): @@ -276,13 +277,15 @@ class SriovContextTestCase(unittest.TestCase): mock_add_sriov.assert_called_once_with( '0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test') + @mock.patch.object(utils, 'setup_hugepages') @mock.patch.object(model.StandaloneContextHelper, 'check_update_key') @mock.patch.object(model.Libvirt, 'build_vm_xml') @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete') @mock.patch.object(model.Libvirt, 'write_file') @mock.patch.object(model.Libvirt, 'virsh_create_vm') - def test_setup_sriov_context(self, mock_create_vm, mock_write_file, mock_check, - mock_build_vm_xml, mock_check_update_key): + def test_setup_sriov_context(self, mock_create_vm, mock_write_file, + mock_check, mock_build_vm_xml, + mock_check_update_key, mock_setup_hugepages): self.sriov.servers = { 'vnf_0': { 'network_ports': { @@ -295,7 +298,7 @@ class SriovContextTestCase(unittest.TestCase): connection = mock.Mock() self.sriov.connection = connection self.sriov.host_mgmt = {'ip': '1.2.3.4'} - self.sriov.vm_flavor = 'flavor' + self.sriov.vm_flavor = {'ram': '1024'} self.sriov.networks = 'networks' self.sriov.configure_nics_for_sriov = mock.Mock() self.sriov._name_task_id = 'fake_name' @@ -314,15 +317,16 @@ class SriovContextTestCase(unittest.TestCase): mock_vnf_node.generate_vnf_instance = mock.Mock( return_value='node_1') nodes_out = self.sriov.setup_sriov_context() + mock_setup_hugepages.assert_called_once_with(connection, 1024*1024) mock_check_update_key.assert_called_once_with(connection, 'node_1', vm_name, self.sriov._name_task_id, cdrom_img, mac) self.assertEqual(['node_2'], nodes_out) mock_vnf_node.generate_vnf_instance.assert_called_once_with( - 'flavor', 'networks', '1.2.3.4', 'vnf_0', + self.sriov.vm_flavor, 
'networks', '1.2.3.4', 'vnf_0', self.sriov.servers['vnf_0'], '00:00:00:00:00:01') mock_build_vm_xml.assert_called_once_with( - connection, 'flavor', vm_name, 0, cdrom_img) + connection, self.sriov.vm_flavor, vm_name, 0, cdrom_img) mock_create_vm.assert_called_once_with(connection, cfg) mock_check.assert_called_once_with(vm_name, connection) mock_write_file.assert_called_once_with(cfg, 'out_xml') diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py index 3ccae44c7..96946cded 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_heat.py +++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py @@ -13,6 +13,7 @@ import os import mock import unittest +import collections from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts import heat @@ -81,6 +82,7 @@ class HeatContextTestCase(unittest.TestCase): self.assertIsNone(self.test_context.template_file) self.assertIsNone(self.test_context.heat_parameters) self.assertIsNone(self.test_context.key_filename) + self.assertTrue(self.test_context.yardstick_gen_key_file) @mock.patch.object(yaml_loader, 'read_yaml_file') @mock.patch('yardstick.benchmark.contexts.heat.PlacementGroup') @@ -173,6 +175,23 @@ class HeatContextTestCase(unittest.TestCase): self.assertTrue(self.test_context._flags.no_setup) self.assertTrue(self.test_context._flags.no_teardown) + def test_init_key_filename(self): + attrs = {'name': 'foo', + 'file': 'pod.yaml', + 'task_id': '1234567890', + 'server_groups': {}, + 'networks': {}, + 'servers': {}, + 'heat_template': "/root/clearwater.yaml", + 'key_filename': '/etc/yardstick/yardstick.pem'} + + with mock.patch.object(openstack_utils, 'get_shade_client'), \ + mock.patch.object(openstack_utils, 'get_shade_operator_client'): + self.test_context.init(attrs) + + self.assertIsNotNone(self.test_context.key_filename) + self.assertFalse(self.test_context.yardstick_gen_key_file) + 
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate') def test__add_resources_to_template_no_servers(self, mock_template): self.test_context._name = 'ctx' @@ -373,6 +392,25 @@ class HeatContextTestCase(unittest.TestCase): self.assertTrue(mock_manager.mock_calls.index(mock_call_gen_keys) < mock_manager.mock_calls.index(mock_call_add_resources)) + @mock.patch.object(heat, 'HeatTemplate') + @mock.patch.object(ssh.SSH, 'gen_keys') + @mock.patch.object(heat.HeatContext, '_create_new_stack') + def test_deploy_with_key_filename_provided(self, mock_create_new_stack, + mock_gen_keys, *args): + self.test_context._name = 'foo' + self.test_context._task_id = '1234567890' + self.test_context._name_task_id = '{}-{}'.format( + self.test_context._name, self.test_context._task_id[:8]) + self.test_context.template_file = '/bar/baz/some-heat-file' + self.test_context.heat_parameters = {'image': 'cirros'} + self.test_context.yardstick_gen_key_file = False + self.test_context.key_filename = '/etc/yardstick/yardstick.pem' + self.test_context.get_neutron_info = mock.MagicMock() + self.test_context.deploy() + + mock_create_new_stack.assert_called() + mock_gen_keys.assert_not_called() + def test_check_for_context(self): pass # check that the context exists @@ -705,6 +743,50 @@ class HeatContextTestCase(unittest.TestCase): result = self.test_context._get_server(attr_name) self.assertIsNone(result) + @mock.patch("yardstick.benchmark.contexts.heat.pkg_resources") + def test__get_server_found_dict_found_interfaces_dict(self, *args): + """ + Use HeatContext._get_server to get a server that matches + based on a dictionary input. 
+ """ + self.test_context._name = 'bar' + self.test_context._task_id = '1234567890' + self.test_context._name_task_id = '{}-{}'.format( + self.test_context._name, self.test_context._task_id[:8]) + self.test_context._user = 'bot' + self.test_context.stack = mock.Mock() + self.test_context.stack.outputs = { + 'private_ip': '10.0.0.1', + 'public_ip': '127.0.0.1', + 'local_mac_addr': '64:00:6a:18:0f:d6', + 'private_netmask': '255.255.255.0', + 'private_net_name': 'private_network', + 'private_net_gateway': '127.0.0.254' + } + + attr_name = { + 'name': 'foo.bar-12345678', + 'private_ip_attr': 'private_ip', + 'public_ip_attr': 'public_ip', + 'interfaces': { + 'data_net': { + 'local_ip': 'private_ip', + 'local_mac': 'local_mac_addr', + 'netmask': 'private_netmask', + 'network': 'private_net_name', + 'gateway_ip': 'private_net_gateway' + } + } + } + self.test_context.key_uuid = 'foo-42' + result = self.test_context._get_server(attr_name) + self.assertIsInstance(result['interfaces'], collections.Mapping) + for key in attr_name.get("interfaces").keys(): + self.assertEqual(result['interfaces'][key]['local_ip'], '10.0.0.1') + self.assertEqual(result['interfaces'][key]['local_mac'], '64:00:6a:18:0f:d6') + self.assertEqual(result['interfaces'][key]['netmask'], '255.255.255.0') + self.assertEqual(result['interfaces'][key]['gateway_ip'], '127.0.0.254') + # TODO: Split this into more granular tests def test__get_network(self): network1 = mock.MagicMock() diff --git a/yardstick/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py index 524302f92..89fb1e90a 100644 --- a/yardstick/tests/unit/benchmark/core/test_report.py +++ b/yardstick/tests/unit/benchmark/core/test_report.py @@ -1,5 +1,6 @@ ############################################################################## # Copyright (c) 2017 Rajesh Kudaka. +# Copyright (c) 2018-2019 Intel Corporation. # # All rights reserved. 
This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 @@ -7,30 +8,155 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for yardstick.benchmark.core.report - -from __future__ import print_function - -from __future__ import absolute_import - +import mock +import six import unittest import uuid -try: - from unittest import mock -except ImportError: - import mock - +from api.utils import influx from yardstick.benchmark.core import report from yardstick.cmd.commands import change_osloobj_to_paras -FAKE_YAML_NAME = 'fake_name' -FAKE_TASK_ID = str(uuid.uuid4()) -FAKE_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}] -FAKE_TIME = '0000-00-00T00:00:00.000000Z' -FAKE_DB_TASK = [{'fake_key': 0.000, 'time': FAKE_TIME}] -FAKE_TIMESTAMP = ['fake_time'] -DUMMY_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa' +GOOD_YAML_NAME = 'fake_name' +GOOD_TASK_ID = str(uuid.uuid4()) +GOOD_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}] +GOOD_DB_METRICS = [{ + 'fake_key': 1.234, + 'time': '0000-00-00T12:34:56.789012Z', + }] +GOOD_TIMESTAMP = ['12:34:56.789012'] +BAD_YAML_NAME = 'F@KE_NAME' +BAD_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa' +MORE_DB_FIELDKEYS = [ + {'fieldKey': 'fake_key'}, + {'fieldKey': 'str_str'}, + {'fieldKey': u'str_unicode'}, + {u'fieldKey': 'unicode_str'}, + {u'fieldKey': u'unicode_unicode'}, + ] +MORE_DB_METRICS = [{ + 'fake_key': None, + 'time': '0000-00-00T00:00:00.000000Z', + }, { + 'fake_key': 123, + 'time': '0000-00-00T00:00:01.000000Z', + }, { + 'fake_key': 4.56, + 'time': '0000-00-00T00:00:02.000000Z', + }, { + 'fake_key': 9876543210987654321, + 'time': '0000-00-00T00:00:03.000000Z', + }, { + 'fake_key': 'str_str value', + 'time': '0000-00-00T00:00:04.000000Z', + }, { + 'fake_key': u'str_unicode value', + 'time': '0000-00-00T00:00:05.000000Z', + }, { + u'fake_key': 'unicode_str value', + 'time': '0000-00-00T00:00:06.000000Z', + 
}, { + u'fake_key': u'unicode_unicode value', + 'time': '0000-00-00T00:00:07.000000Z', + }, { + 'fake_key': '7.89', + 'time': '0000-00-00T00:00:08.000000Z', + }, { + 'fake_key': '1011', + 'time': '0000-00-00T00:00:09.000000Z', + }, { + 'fake_key': '9876543210123456789', + 'time': '0000-00-00T00:00:10.000000Z', + }] +MORE_TIMESTAMP = ['00:00:%02d.000000' % n for n in range(len(MORE_DB_METRICS))] +MORE_EMPTY_DATA = [None] * len(MORE_DB_METRICS) +MORE_EXPECTED_TABLE_VALS = { + 'Timestamp': MORE_TIMESTAMP, + 'fake_key': [ + None, + 123, + 4.56, + 9876543210987654321 if six.PY3 else 9.876543210987655e+18, + None, + None, + None, + None, + 7.89, + 1011, + 9876543210123456789 if six.PY3 else 9.876543210123457e+18, + ], + 'str_str': MORE_EMPTY_DATA, + 'str_unicode': MORE_EMPTY_DATA, + 'unicode_str': MORE_EMPTY_DATA, + 'unicode_unicode': MORE_EMPTY_DATA, + } +MORE_EXPECTED_DATASETS = [{ + 'label': key, + 'data': MORE_EXPECTED_TABLE_VALS[key], + } + for key in map(str, [field['fieldKey'] for field in MORE_DB_FIELDKEYS]) + ] + + +class JSTreeTestCase(unittest.TestCase): + + def setUp(self): + self.jstree = report.JSTree() + + def test__create_node(self): + _id = "tg__0.DropPackets" + + expected_data = [ + {"id": "tg__0", "text": "tg__0", "parent": "#"}, + {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"} + ] + self.jstree._create_node(_id) + + self.assertEqual(self.jstree._created_nodes, ['#', 'tg__0', 'tg__0.DropPackets']) + self.assertEqual(self.jstree.jstree_data, expected_data) + + def test_format_for_jstree(self): + data = [ + 'tg__0.DropPackets', + 'tg__0.LatencyAvg.5', 'tg__0.LatencyAvg.6', + 'tg__0.LatencyMax.5', 'tg__0.LatencyMax.6', + 'tg__0.RxThroughput', 'tg__0.TxThroughput', + 'tg__1.DropPackets', + 'tg__1.LatencyAvg.5', 'tg__1.LatencyAvg.6', + 'tg__1.LatencyMax.5', 'tg__1.LatencyMax.6', + 'tg__1.RxThroughput', 'tg__1.TxThroughput', + 'vnf__0.curr_packets_in', 'vnf__0.packets_dropped', 'vnf__0.packets_fwd', + ] + + expected_output = [ + {"id": 
"tg__0", "text": "tg__0", "parent": "#"}, + {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"}, + {"id": "tg__0.LatencyAvg", "text": "LatencyAvg", "parent": "tg__0"}, + {"id": "tg__0.LatencyAvg.5", "text": "5", "parent": "tg__0.LatencyAvg"}, + {"id": "tg__0.LatencyAvg.6", "text": "6", "parent": "tg__0.LatencyAvg"}, + {"id": "tg__0.LatencyMax", "text": "LatencyMax", "parent": "tg__0"}, + {"id": "tg__0.LatencyMax.5", "text": "5", "parent": "tg__0.LatencyMax"}, + {"id": "tg__0.LatencyMax.6", "text": "6", "parent": "tg__0.LatencyMax"}, + {"id": "tg__0.RxThroughput", "text": "RxThroughput", "parent": "tg__0"}, + {"id": "tg__0.TxThroughput", "text": "TxThroughput", "parent": "tg__0"}, + {"id": "tg__1", "text": "tg__1", "parent": "#"}, + {"id": "tg__1.DropPackets", "text": "DropPackets", "parent": "tg__1"}, + {"id": "tg__1.LatencyAvg", "text": "LatencyAvg", "parent": "tg__1"}, + {"id": "tg__1.LatencyAvg.5", "text": "5", "parent": "tg__1.LatencyAvg"}, + {"id": "tg__1.LatencyAvg.6", "text": "6", "parent": "tg__1.LatencyAvg"}, + {"id": "tg__1.LatencyMax", "text": "LatencyMax", "parent": "tg__1"}, + {"id": "tg__1.LatencyMax.5", "text": "5", "parent": "tg__1.LatencyMax"}, + {"id": "tg__1.LatencyMax.6", "text": "6", "parent": "tg__1.LatencyMax"}, + {"id": "tg__1.RxThroughput", "text": "RxThroughput", "parent": "tg__1"}, + {"id": "tg__1.TxThroughput", "text": "TxThroughput", "parent": "tg__1"}, + {"id": "vnf__0", "text": "vnf__0", "parent": "#"}, + {"id": "vnf__0.curr_packets_in", "text": "curr_packets_in", "parent": "vnf__0"}, + {"id": "vnf__0.packets_dropped", "text": "packets_dropped", "parent": "vnf__0"}, + {"id": "vnf__0.packets_fwd", "text": "packets_fwd", "parent": "vnf__0"}, + ] + + result = self.jstree.format_for_jstree(data) + self.assertEqual(expected_output, result) class ReportTestCase(unittest.TestCase): @@ -38,37 +164,421 @@ class ReportTestCase(unittest.TestCase): def setUp(self): super(ReportTestCase, self).setUp() self.param = 
change_osloobj_to_paras({}) - self.param.yaml_name = [FAKE_YAML_NAME] - self.param.task_id = [FAKE_TASK_ID] + self.param.yaml_name = [GOOD_YAML_NAME] + self.param.task_id = [GOOD_TASK_ID] self.rep = report.Report() - @mock.patch.object(report.Report, '_get_tasks') + def test___init__(self): + self.assertEqual([], self.rep.Timestamp) + self.assertEqual("", self.rep.yaml_name) + self.assertEqual("", self.rep.task_id) + + def test__validate(self): + self.rep._validate(GOOD_YAML_NAME, GOOD_TASK_ID) + self.assertEqual(GOOD_YAML_NAME, self.rep.yaml_name) + self.assertEqual(GOOD_TASK_ID, str(self.rep.task_id)) + + def test__validate_invalid_yaml_name(self): + with six.assertRaisesRegex(self, ValueError, "yaml*"): + self.rep._validate(BAD_YAML_NAME, GOOD_TASK_ID) + + def test__validate_invalid_task_id(self): + with six.assertRaisesRegex(self, ValueError, "task*"): + self.rep._validate(GOOD_YAML_NAME, BAD_TASK_ID) + + @mock.patch.object(influx, 'query') + def test__get_fieldkeys(self, mock_query): + mock_query.return_value = GOOD_DB_FIELDKEYS + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + self.assertEqual(GOOD_DB_FIELDKEYS, self.rep._get_fieldkeys()) + + @mock.patch.object(influx, 'query') + def test__get_fieldkeys_nodbclient(self, mock_query): + mock_query.side_effect = RuntimeError + self.assertRaises(RuntimeError, self.rep._get_fieldkeys) + + @mock.patch.object(influx, 'query') + def test__get_fieldkeys_testcase_not_found(self, mock_query): + mock_query.return_value = [] + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + six.assertRaisesRegex(self, KeyError, "Test case", self.rep._get_fieldkeys) + + @mock.patch.object(influx, 'query') + def test__get_metrics(self, mock_query): + mock_query.return_value = GOOD_DB_METRICS + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + self.assertEqual(GOOD_DB_METRICS, self.rep._get_metrics()) + + @mock.patch.object(influx, 'query') + def 
test__get_metrics_task_not_found(self, mock_query): + mock_query.return_value = [] + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + six.assertRaisesRegex(self, KeyError, "Task ID", self.rep._get_metrics) + + @mock.patch.object(influx, 'query') + def test__get_task_start_time(self, mock_query): + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + mock_query.return_value = [{ + u'free.memory0.used': u'9789088', + u'free.memory0.available': u'22192984', + u'free.memory0.shared': u'219152', + u'time': u'2019-01-22T16:20:14.568075776Z', + }] + expected = "2019-01-22T16:20:14.568075776Z" + + self.assertEqual( + expected, + self.rep._get_task_start_time() + ) + + def test__get_task_start_time_task_not_found(self): + pass + + @mock.patch.object(influx, 'query') + def test__get_task_end_time(self, mock_query): + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + # TODO(elfoley): write this test! + mock_query.return_value = [{ + + }] + + @mock.patch.object(influx, 'query') + def test__get_baro_metrics(self, mock_query): + self.rep.yaml_name = GOOD_YAML_NAME + self.rep.task_id = GOOD_TASK_ID + self.rep._get_task_start_time = mock.Mock(return_value=0) + self.rep._get_task_end_time = mock.Mock(return_value=0) + + influx_return_values = ([{ + u'value': 324050, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:25.383698038Z', + u'type_instance': u'user', u'type': u'cpu', + }, { + u'value': 193798, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:25.383712594Z', + u'type_instance': u'system', u'type': u'cpu', + }, { + u'value': 324051, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:35.383696624Z', + u'type_instance': u'user', u'type': u'cpu', + }, { + u'value': 193800, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:35.383713481Z', + u'type_instance': u'system', u'type': u'cpu', + }, { + u'value': 324054, u'instance': 
u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:45.3836966789Z', + u'type_instance': u'user', u'type': u'cpu', + }, { + u'value': 193801, u'instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:45.383716296Z', + u'type_instance': u'system', u'type': u'cpu', + }], + [{ + u'value': 3598453000, u'host': u'myhostname', + u'time': u'2018-12-19T14:11:25.383698038Z', + u'type_instance': u'0', u'type': u'cpufreq', + }, { + u'value': 3530250000, u'type_instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:35.383712594Z', u'type': u'cpufreq', + }, { + u'value': 3600281000, u'type_instance': u'0', u'host': u'myhostname', + u'time': u'2018-12-19T14:11:45.383696624Z', u'type': u'cpufreq', + }], + ) + + def ret_vals(vals): + for x in vals: + yield x + while True: + yield [] + + mock_query.side_effect = ret_vals(influx_return_values) + + BARO_EXPECTED_METRICS = { + 'Timestamp': [ + '14:11:25.3836', '14:11:25.3837', + '14:11:35.3836', '14:11:35.3837', + '14:11:45.3836', '14:11:45.3837'], + 'myhostname.cpu_value.cpu.user.0': { + '14:11:25.3836': 324050, + '14:11:35.3836': 324051, + '14:11:45.3836': 324054, + }, + 'myhostname.cpu_value.cpu.system.0': { + '14:11:25.3837': 193798, + '14:11:35.3837': 193800, + '14:11:45.3837': 193801, + }, + 'myhostname.cpufreq_value.cpufreq.0': { + '14:11:25.3836': 3598453000, + '14:11:35.3837': 3530250000, + '14:11:45.3836': 3600281000, + } + } + self.assertEqual( + BARO_EXPECTED_METRICS, + self.rep._get_baro_metrics() + ) + + def test__get_timestamps(self): + + metrics = MORE_DB_METRICS + self.assertEqual( + MORE_TIMESTAMP, + self.rep._get_timestamps(metrics) + ) + + def test__format_datasets(self): + metric_name = "free.memory0.used" + metrics = [{ + u'free.memory1.free': u'1958664', + u'free.memory0.used': u'9789560', + }, { + u'free.memory1.free': u'1958228', + u'free.memory0.used': u'9789790', + }, { + u'free.memory1.free': u'1956156', + u'free.memory0.used': u'9791092', + }, { + 
u'free.memory1.free': u'1956280', + u'free.memory0.used': u'9790796', + }] + self.assertEqual( + [9789560, 9789790, 9791092, 9790796,], + self.rep._format_datasets(metric_name, metrics) + ) + + def test__format_datasets_val_none(self): + metric_name = "free.memory0.used" + metrics = [{ + u'free.memory1.free': u'1958664', + u'free.memory0.used': 9876543109876543210, + }, { + u'free.memory1.free': u'1958228', + }, { + u'free.memory1.free': u'1956156', + u'free.memory0.used': u'9791092', + }, { + u'free.memory1.free': u'1956280', + u'free.memory0.used': u'9790796', + }] + + exp0 = 9876543109876543210 if six.PY3 else 9.876543109876543e+18 + self.assertEqual( + [exp0, None, 9791092, 9790796], + self.rep._format_datasets(metric_name, metrics) + ) + + def test__format_datasets_val_incompatible(self): + metric_name = "free.memory0.used" + metrics = [{ + u'free.memory0.used': "some incompatible value", + }, { + }] + self.assertEqual( + [None, None], + self.rep._format_datasets(metric_name, metrics) + ) + + def test__combine_times(self): + yard_times = [ + '00:00:00.000000', + '00:00:01.000000', + '00:00:02.000000', + '00:00:06.000000', + '00:00:08.000000', + '00:00:09.000000', + ] + baro_times = [ + '00:00:01.000000', + '00:00:03.000000', + '00:00:04.000000', + '00:00:05.000000', + '00:00:07.000000', + '00:00:10.000000', + ] + expected_combo = [ + '00:00:00.000000', + '00:00:01.000000', + '00:00:02.000000', + '00:00:03.000000', + '00:00:04.000000', + '00:00:05.000000', + '00:00:06.000000', + '00:00:07.000000', + '00:00:08.000000', + '00:00:09.000000', + '00:00:10.000000', + ] + + actual_combo = self.rep._combine_times(yard_times, baro_times) + self.assertEqual(len(expected_combo), len(actual_combo)) + + self.assertEqual( + expected_combo, + actual_combo, + ) + + def test__combine_times_2(self): + time1 = ['14:11:25.383698', '14:11:25.383712', '14:11:35.383696',] + time2 = [ + '16:20:14.568075', '16:20:24.575083', + '16:20:34.580989', '16:20:44.586801', ] + time_exp = [ + 
'14:11:25.383698', '14:11:25.383712', '14:11:35.383696', + '16:20:14.568075', '16:20:24.575083', '16:20:34.580989', + '16:20:44.586801', + ] + self.assertEqual(time_exp, self.rep._combine_times(time1, time2)) + + def test__combine_metrics(self): + BARO_METRICS = { + 'myhostname.cpu_value.cpu.user.0': { + '14:11:25.3836': 324050, '14:11:35.3836': 324051, + '14:11:45.3836': 324054, + }, + 'myhostname.cpu_value.cpu.system.0': { + '14:11:25.3837': 193798, '14:11:35.3837': 193800, + '14:11:45.3837': 193801, + } + } + BARO_TIMES = [ + '14:11:25.3836', '14:11:25.3837', '14:11:35.3836', + '14:11:35.3837', '14:11:45.3836', '14:11:45.3837', + ] + YARD_METRICS = { + 'free.memory9.free': { + '16:20:14.5680': 1958244, '16:20:24.5750': 1955964, + '16:20:34.5809': 1956040, '16:20:44.5868': 1956428, + }, + 'free.memory7.used': { + '16:20:14.5680': 9789068, '16:20:24.5750': 9791284, + '16:20:34.5809': 9791228, '16:20:44.5868': 9790692, + }, + 'free.memory2.total':{ + '16:20:14.5680': 32671288, '16:20:24.5750': 32671288, + '16:20:34.5809': 32671288, '16:20:44.5868': 32671288, + }, + 'free.memory7.free': { + '16:20:14.5680': 1958368, '16:20:24.5750': 1956104, + '16:20:34.5809': 1956040, '16:20:44.5868': 1956552, + }, + 'free.memory1.used': { + '16:20:14.5680': 9788872, '16:20:24.5750': 9789212, + '16:20:34.5809': 9791168, '16:20:44.5868': 9790996, + }, + } + YARD_TIMES = [ + '16:20:14.5680', '16:20:24.5750', + '16:20:34.5809', '16:20:44.5868', + ] + + expected_output = { + 'myhostname.cpu_value.cpu.user.0': [{ + 'x': '14:11:25.3836', 'y': 324050, }, { + 'x': '14:11:35.3836', 'y': 324051, }, { + 'x': '14:11:45.3836', 'y': 324054, }], + 'myhostname.cpu_value.cpu.system.0' : [{ + 'x': '14:11:25.3837', 'y': 193798, }, { + 'x': '14:11:35.3837', 'y': 193800, }, { + 'x': '14:11:45.3837', 'y': 193801, }], + 'free.memory9.free': [{ + 'x': '16:20:14.5680', 'y': 1958244, }, { + 'x': '16:20:24.5750', 'y': 1955964, }, { + 'x': '16:20:34.5809', 'y': 1956040, }, { + 'x': '16:20:44.5868', 'y': 
1956428, }], + 'free.memory7.used': [{ + 'x': '16:20:14.5680', 'y': 9789068, }, { + 'x': '16:20:24.5750', 'y': 9791284, }, { + 'x': '16:20:34.5809', 'y': 9791228, }, { + 'x': '16:20:44.5868', 'y': 9790692, }], + 'free.memory2.total': [{ + 'x': '16:20:14.5680', 'y': 32671288, }, { + 'x': '16:20:24.5750', 'y': 32671288, }, { + 'x': '16:20:34.5809', 'y': 32671288, }, { + 'x': '16:20:44.5868', 'y': 32671288, }], + 'free.memory7.free': [{ + 'x': '16:20:14.5680', 'y': 1958368, }, { + 'x': '16:20:24.5750', 'y': 1956104, }, { + 'x': '16:20:34.5809', 'y': 1956040, }, { + 'x': '16:20:44.5868', 'y': 1956552, }], + 'free.memory1.used': [{ + 'x': '16:20:14.5680', 'y': 9788872, }, { + 'x': '16:20:24.5750', 'y': 9789212, }, { + 'x': '16:20:34.5809', 'y': 9791168, }, { + 'x': '16:20:44.5868', 'y': 9790996, }], + } + + actual_output, _, _ = self.rep._combine_metrics( + BARO_METRICS, BARO_TIMES, YARD_METRICS, YARD_TIMES + ) + self.assertEquals( + sorted(expected_output.keys()), + sorted(actual_output.keys()) + ) + + self.assertEquals( + expected_output, + actual_output, + ) + + @mock.patch.object(report.Report, '_get_metrics') + @mock.patch.object(report.Report, '_get_fieldkeys') + def test__generate_common(self, mock_keys, mock_metrics): + mock_metrics.return_value = MORE_DB_METRICS + mock_keys.return_value = MORE_DB_FIELDKEYS + datasets, table_vals = self.rep._generate_common(self.param) + self.assertEqual(MORE_EXPECTED_DATASETS, datasets) + self.assertEqual(MORE_EXPECTED_TABLE_VALS, table_vals) + + @mock.patch.object(report.Report, '_get_metrics') @mock.patch.object(report.Report, '_get_fieldkeys') @mock.patch.object(report.Report, '_validate') - def test_generate_success(self, mock_valid, mock_keys, mock_tasks): - mock_tasks.return_value = FAKE_DB_TASK - mock_keys.return_value = FAKE_DB_FIELDKEYS + def test_generate(self, mock_valid, mock_keys, mock_metrics): + mock_metrics.return_value = GOOD_DB_METRICS + mock_keys.return_value = GOOD_DB_FIELDKEYS self.rep.generate(self.param) 
- mock_valid.assert_called_once_with(FAKE_YAML_NAME, FAKE_TASK_ID) - mock_tasks.assert_called_once_with() + mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID) + mock_metrics.assert_called_once_with() mock_keys.assert_called_once_with() + self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp) - # pylint: disable=deprecated-method - def test_invalid_yaml_name(self): - self.assertRaisesRegexp(ValueError, "yaml*", self.rep._validate, - 'F@KE_NAME', FAKE_TASK_ID) + @mock.patch.object(report.Report, '_get_baro_metrics') + @mock.patch.object(report.Report, '_get_metrics') + @mock.patch.object(report.Report, '_get_fieldkeys') + @mock.patch.object(report.Report, '_validate') + def test_generate_nsb( + self, mock_valid, mock_keys, mock_metrics, mock_baro_metrics): - # pylint: disable=deprecated-method - def test_invalid_task_id(self): - self.assertRaisesRegexp(ValueError, "task*", self.rep._validate, - FAKE_YAML_NAME, DUMMY_TASK_ID) + mock_metrics.return_value = GOOD_DB_METRICS + mock_keys.return_value = GOOD_DB_FIELDKEYS + BARO_METRICS = { + # TODO: is timestamp needed here? 
+ 'Timestamp': [ + '14:11:25.383698', '14:11:25.383712', '14:11:35.383696', + '14:11:35.383713', '14:11:45.383700', '14:11:45.383716'], + 'myhostname.cpu_value.cpu.user.0': { + '14:11:25.383698': 324050, + '14:11:35.383696': 324051, + '14:11:45.383700': 324054, + }, + 'myhostname.cpu_value.cpu.system.0': { + '14:11:25.383712': 193798, + '14:11:35.383713': 193800, + '14:11:45.383716': 193801, + } + } + mock_baro_metrics.return_value = BARO_METRICS - @mock.patch('api.utils.influx.query') - def test_task_not_found(self, mock_query): - mock_query.return_value = [] - self.rep.yaml_name = FAKE_YAML_NAME - self.rep.task_id = FAKE_TASK_ID - # pylint: disable=deprecated-method - self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_fieldkeys) - self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks) - # pylint: enable=deprecated-method + self.rep.generate_nsb(self.param) + mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID) + mock_metrics.assert_called_once_with() + mock_keys.assert_called_once_with() + self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp) diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py index e1414c2ae..0f09b3e59 100644 --- a/yardstick/tests/unit/benchmark/core/test_task.py +++ b/yardstick/tests/unit/benchmark/core/test_task.py @@ -18,6 +18,7 @@ import six from six.moves import builtins import unittest import uuid +import collections from yardstick.benchmark.contexts import base from yardstick.benchmark.contexts import dummy @@ -30,6 +31,14 @@ from yardstick.common import utils class TaskTestCase(unittest.TestCase): + def setUp(self): + self._mock_log = mock.patch.object(task, 'LOG') + self.mock_log = self._mock_log.start() + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_log.stop() + @mock.patch.object(base, 'Context') def test_parse_nodes_with_context_same_context(self, mock_context): scenario_cfg = { @@ -487,6 +496,42 @@ key2: 
self.parser._change_node_names(scenario, [my_context]) self.assertIsNone(scenario['options']['server_name']) + def test__change_node_names_target_map(self): + ctx_attrs = { + 'name': 'demo', + 'task_id': '1234567890' + } + my_context = dummy.DummyContext() + self.addCleanup(self._remove_contexts) + my_context.init(ctx_attrs) + scenario = copy.deepcopy(self.scenario) + scenario['nodes'] = { + 'tg__0': { + 'name': 'tg__0.demo', + 'public_ip_attr': "1.1.1.1", + }, + 'vnf__0': { + 'name': 'vnf__0.demo', + 'public_ip_attr': "2.2.2.2", + } + } + self.parser._change_node_names(scenario, [my_context]) + for target in scenario['nodes'].values(): + self.assertIsInstance(target, collections.Mapping) + + def test__change_node_names_not_target_map(self): + ctx_attrs = { + 'name': 'demo', + 'task_id': '1234567890' + } + my_context = dummy.DummyContext() + self.addCleanup(self._remove_contexts) + my_context.init(ctx_attrs) + scenario = copy.deepcopy(self.scenario) + self.parser._change_node_names(scenario, [my_context]) + for target in scenario['nodes'].values(): + self.assertNotIsInstance(target, collections.Mapping) + def test__parse_tasks(self): task_obj = task.Task() _uuid = uuid.uuid4() diff --git a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py index 7b1e1e976..35d935cd5 100644 --- a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py +++ b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py @@ -14,16 +14,26 @@ import os import time from yardstick.benchmark.runners import arithmetic +from yardstick.common import exceptions as y_exc class ArithmeticRunnerTest(unittest.TestCase): class MyMethod(object): - def __init__(self): + SLA_VALIDATION_ERROR_SIDE_EFFECT = 1 + BROAD_EXCEPTION_SIDE_EFFECT = 2 + + def __init__(self, side_effect=0): self.count = 101 + self.side_effect = side_effect def __call__(self, data): self.count += 1 data['my_key'] = self.count + if self.side_effect == 
self.SLA_VALIDATION_ERROR_SIDE_EFFECT: + raise y_exc.SLAValidationError(case_name='My Case', + error_msg='my error message') + elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT: + raise y_exc.YardstickException return self.count def setUp(self): @@ -218,3 +228,219 @@ class ArithmeticRunnerTest(unittest.TestCase): self.assertEqual(result['sequence'], count) self.assertGreater(result['timestamp'], timestamp) timestamp = result['timestamp'] + + def test__worker_process_except_sla_validation_error_no_sla_cfg(self): + self.benchmark.my_method = mock.Mock( + side_effect=y_exc.SLAValidationError) + + arithmetic._worker_process(mock.Mock(), self.benchmark_cls, + 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.call_count, 8) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 128, 'size': 2000}) + + def test__worker_process_output_on_sla_validation_error_no_sla_cfg(self): + self.benchmark.my_method = self.MyMethod( + side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT) + + queue = multiprocessing.Queue() + output_queue = multiprocessing.Queue() + timestamp = time.time() + arithmetic._worker_process(queue, self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, + multiprocessing.Event(), output_queue) + time.sleep(0.01) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.count, 109) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 128, 'size': 2000}) + count = 0 + while not queue.empty(): + count += 1 + result = queue.get() + self.assertEqual(result['errors'], '') + self.assertEqual(result['data'], {'my_key': count + 101}) + self.assertEqual(result['sequence'], count) + self.assertGreater(result['timestamp'], timestamp) + timestamp = result['timestamp'] + self.assertEqual(count, 8) + self.assertTrue(output_queue.empty()) + + def 
test__worker_process_except_sla_validation_error_sla_cfg_monitor(self): + self.scenario_cfg['sla'] = {'action': 'monitor'} + self.benchmark.my_method = mock.Mock( + side_effect=y_exc.SLAValidationError) + + arithmetic._worker_process(mock.Mock(), self.benchmark_cls, + 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.call_count, 8) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 128, 'size': 2000}) + + def test__worker_process_output_sla_validation_error_sla_cfg_monitor(self): + self.scenario_cfg['sla'] = {'action': 'monitor'} + self.benchmark.my_method = self.MyMethod( + side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT) + + queue = multiprocessing.Queue() + output_queue = multiprocessing.Queue() + timestamp = time.time() + arithmetic._worker_process(queue, self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, + multiprocessing.Event(), output_queue) + time.sleep(0.01) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.count, 109) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 128, 'size': 2000}) + count = 0 + while not queue.empty(): + count += 1 + result = queue.get() + self.assertEqual(result['errors'], + ('My Case SLA validation failed. 
' + 'Error: my error message',)) + self.assertEqual(result['data'], {'my_key': count + 101}) + self.assertEqual(result['sequence'], count) + self.assertGreater(result['timestamp'], timestamp) + timestamp = result['timestamp'] + self.assertEqual(count, 8) + self.assertTrue(output_queue.empty()) + + def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self): + self.scenario_cfg['sla'] = {'action': 'assert'} + self.benchmark.my_method = mock.Mock( + side_effect=y_exc.SLAValidationError) + + with self.assertRaises(y_exc.SLAValidationError): + arithmetic._worker_process(mock.Mock(), self.benchmark_cls, + 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {}) + self.benchmark.my_method.assert_called_once() + self.benchmark.setup.assert_called_once() + self.benchmark.teardown.assert_not_called() + + def test__worker_process_output_sla_validation_error_sla_cfg_assert(self): + self.scenario_cfg['sla'] = {'action': 'assert'} + self.benchmark.my_method = self.MyMethod( + side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT) + + queue = multiprocessing.Queue() + output_queue = multiprocessing.Queue() + with self.assertRaisesRegexp( + y_exc.SLAValidationError, + 'My Case SLA validation failed. 
Error: my error message'): + arithmetic._worker_process(queue, self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, + multiprocessing.Event(), output_queue) + time.sleep(0.01) + + self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {}) + self.benchmark.setup.assert_called_once() + self.assertEqual(self.benchmark.my_method.count, 102) + self.benchmark.teardown.assert_not_called() + self.assertTrue(queue.empty()) + self.assertTrue(output_queue.empty()) + + def test__worker_process_broad_exception_no_sla_cfg_early_exit(self): + self.benchmark.my_method = mock.Mock( + side_effect=y_exc.YardstickException) + + arithmetic._worker_process(mock.Mock(), self.benchmark_cls, + 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.benchmark.my_method.assert_called_once() + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 64, 'size': 500}) + + def test__worker_process_output_on_broad_exception_no_sla_cfg(self): + self.benchmark.my_method = self.MyMethod( + side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT) + + queue = multiprocessing.Queue() + output_queue = multiprocessing.Queue() + timestamp = time.time() + arithmetic._worker_process(queue, self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, + multiprocessing.Event(), output_queue) + time.sleep(0.01) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.count, 102) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 64, 'size': 500}) + self.assertEqual(queue.qsize(), 1) + result = queue.get() + self.assertGreater(result['timestamp'], timestamp) + self.assertEqual(result['data'], {'my_key': 102}) + self.assertRegexpMatches( + result['errors'], + 'YardstickException: An unknown exception occurred.') + self.assertEqual(result['sequence'], 1) + self.assertTrue(output_queue.empty()) + + def 
test__worker_process_broad_exception_sla_cfg_not_none(self): + self.scenario_cfg['sla'] = {'action': 'some action'} + self.benchmark.my_method = mock.Mock( + side_effect=y_exc.YardstickException) + + arithmetic._worker_process(mock.Mock(), self.benchmark_cls, + 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.call_count, 8) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 128, 'size': 2000}) + + def test__worker_process_output_on_broad_exception_sla_cfg_not_none(self): + self.scenario_cfg['sla'] = {'action': 'some action'} + self.benchmark.my_method = self.MyMethod( + side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT) + + queue = multiprocessing.Queue() + output_queue = multiprocessing.Queue() + timestamp = time.time() + arithmetic._worker_process(queue, self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, + multiprocessing.Event(), output_queue) + time.sleep(0.01) + + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.count, 109) + self.assertDictEqual(self.scenario_cfg['options'], + {'stride': 128, 'size': 2000}) + self.assertTrue(output_queue.empty()) + count = 0 + while not queue.empty(): + count += 1 + result = queue.get() + self.assertGreater(result['timestamp'], timestamp) + self.assertEqual(result['data'], {'my_key': count + 101}) + self.assertRegexpMatches( + result['errors'], + 'YardstickException: An unknown exception occurred.') + self.assertEqual(result['sequence'], count) + + def test__worker_process_benchmark_teardown_on_broad_exception(self): + self.benchmark.teardown = mock.Mock( + side_effect=y_exc.YardstickException) + + with self.assertRaises(SystemExit) as raised: + arithmetic._worker_process(mock.Mock(), self.benchmark_cls, + 'my_method', self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + 
self.assertEqual(raised.exception.code, 1) + self._assert_defaults__worker_process_run_setup_and_teardown() + self.assertEqual(self.benchmark.my_method.call_count, 8) diff --git a/yardstick/tests/unit/benchmark/runner/test_base.py b/yardstick/tests/unit/benchmark/runner/test_base.py index 49ba1efe4..07d6f1843 100644 --- a/yardstick/tests/unit/benchmark/runner/test_base.py +++ b/yardstick/tests/unit/benchmark/runner/test_base.py @@ -8,17 +8,12 @@ ############################################################################## import time -import uuid import mock -from oslo_config import cfg -import oslo_messaging import subprocess from yardstick.benchmark.runners import base as runner_base from yardstick.benchmark.runners import iteration -from yardstick.common import messaging -from yardstick.common.messaging import payloads from yardstick.tests.unit import base as ut_base @@ -48,6 +43,29 @@ class ActionTestCase(ut_base.BaseUnitTestCase): runner_base._periodic_action(0, 'echo', mock.Mock()) +class ScenarioOutputTestCase(ut_base.BaseUnitTestCase): + + def setUp(self): + self.output_queue = mock.Mock() + self.scenario_output = runner_base.ScenarioOutput(self.output_queue, + sequence=1) + + @mock.patch.object(time, 'time') + def test_push(self, mock_time): + mock_time.return_value = 2 + data = {"value1": 1} + self.scenario_output.push(data) + self.output_queue.put.assert_called_once_with({'timestamp': 2, + 'sequence': 1, + 'data': data}, True, 10) + + def test_push_no_timestamp(self): + self.scenario_output["value1"] = 1 + self.scenario_output.push(None, False) + self.output_queue.put.assert_called_once_with({'sequence': 1, + 'value1': 1}, True, 10) + + class RunnerTestCase(ut_base.BaseUnitTestCase): def setUp(self): @@ -99,54 +117,3 @@ class RunnerTestCase(ut_base.BaseUnitTestCase): with self.assertRaises(NotImplementedError): runner._run_benchmark(mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()) - - -class RunnerProducerTestCase(ut_base.BaseUnitTestCase): - - 
@mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(cfg, 'CONF') - def test__init(self, mock_config, mock_transport, mock_rpcclient, - mock_target): - _id = uuid.uuid1().int - runner_producer = runner_base.RunnerProducer(_id) - mock_transport.assert_called_once_with( - mock_config, url='rabbit://yardstick:yardstick@localhost:5672/') - mock_target.assert_called_once_with(topic=messaging.TOPIC_RUNNER, - fanout=True, - server=messaging.SERVER) - mock_rpcclient.assert_called_once_with('rpc_transport', 'rpc_target') - self.assertEqual(_id, runner_producer._id) - self.assertEqual(messaging.TOPIC_RUNNER, runner_producer._topic) - - @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(payloads, 'RunnerPayload', return_value='runner_pload') - def test_start_iteration(self, mock_runner_payload, *args): - runner_producer = runner_base.RunnerProducer(uuid.uuid1().int) - with mock.patch.object(runner_producer, - 'send_message') as mock_message: - runner_producer.start_iteration(version=10) - - mock_message.assert_called_once_with( - messaging.RUNNER_METHOD_START_ITERATION, 'runner_pload') - mock_runner_payload.assert_called_once_with(version=10, data={}) - - @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(payloads, 'RunnerPayload', return_value='runner_pload') - def test_stop_iteration(self, mock_runner_payload, *args): - runner_producer = runner_base.RunnerProducer(uuid.uuid1().int) - with mock.patch.object(runner_producer, - 'send_message') 
as mock_message: - runner_producer.stop_iteration(version=15) - - mock_message.assert_called_once_with( - messaging.RUNNER_METHOD_STOP_ITERATION, 'runner_pload') - mock_runner_payload.assert_called_once_with(version=15, data={}) diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py index d4801ef2c..fa47e96bf 100644 --- a/yardstick/tests/unit/benchmark/runner/test_duration.py +++ b/yardstick/tests/unit/benchmark/runner/test_duration.py @@ -97,9 +97,9 @@ class DurationRunnerTest(unittest.TestCase): multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2) - self.assertGreater(self.benchmark.my_method.call_count, 2) - self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2) + self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 0) + self.assertGreater(self.benchmark.my_method.call_count, 0) + self.assertGreater(self.benchmark.post_run_wait_time.call_count, 0) def test__worker_process_called_without_cfg(self): scenario_cfg = {'runner': {}} @@ -140,9 +140,9 @@ class DurationRunnerTest(unittest.TestCase): time.sleep(0.1) self._assert_defaults__worker_run_setup_and_teardown() - self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2) - self.assertGreater(self.benchmark.my_method.count, 103) - self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2) + self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 0) + self.assertGreater(self.benchmark.my_method.count, 1) + self.assertGreater(self.benchmark.post_run_wait_time.call_count, 0) count = 101 while not output_queue.empty(): @@ -181,9 +181,9 @@ class DurationRunnerTest(unittest.TestCase): time.sleep(0.1) self._assert_defaults__worker_run_setup_and_teardown() - self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2) - self.assertGreater(self.benchmark.my_method.count, 103) - 
self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2) + self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 0) + self.assertGreater(self.benchmark.my_method.count, 1) + self.assertGreater(self.benchmark.post_run_wait_time.call_count, 0) count = 0 while not queue.empty(): diff --git a/yardstick/tests/unit/benchmark/runner/test_iteration.py b/yardstick/tests/unit/benchmark/runner/test_iteration.py new file mode 100644 index 000000000..783b236f5 --- /dev/null +++ b/yardstick/tests/unit/benchmark/runner/test_iteration.py @@ -0,0 +1,45 @@ +############################################################################## +# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +import mock +import unittest +import multiprocessing +from yardstick.benchmark.runners import iteration +from yardstick.common import exceptions as y_exc + + +class IterationRunnerTest(unittest.TestCase): + def setUp(self): + self.scenario_cfg = { + 'runner': {'interval': 0, "duration": 0}, + 'type': 'some_type' + } + + self.benchmark = mock.Mock() + self.benchmark_cls = mock.Mock(return_value=self.benchmark) + + def _assert_defaults__worker_run_setup_and_teardown(self): + self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {}) + self.benchmark.setup.assert_called_once() + + def _assert_defaults__worker_run_one_iteration(self): + self.benchmark.pre_run_wait_time.assert_called_once_with(0) + self.benchmark.my_method.assert_called_once_with({}) + + def test__worker_process_broad_exception(self): + self.benchmark.my_method = mock.Mock( + side_effect=y_exc.YardstickException) + + with self.assertRaises(Exception): + 
iteration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method', + self.scenario_cfg, {}, + multiprocessing.Event(), mock.Mock()) + + self._assert_defaults__worker_run_one_iteration() + self._assert_defaults__worker_run_setup_and_teardown() diff --git a/yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py b/yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py deleted file mode 100644 index 10d14a8a0..000000000 --- a/yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2018 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import multiprocessing -import time -import os -import uuid - -import mock - -from yardstick.benchmark.runners import iteration_ipc -from yardstick.common import messaging -from yardstick.common.messaging import payloads -from yardstick.tests.unit import base as ut_base - - -class RunnerIterationIPCEndpointTestCase(ut_base.BaseUnitTestCase): - - def setUp(self): - self._id = uuid.uuid1().int - self._ctx_ids = [uuid.uuid1().int, uuid.uuid1().int] - self._queue = multiprocessing.Queue() - self.runner = iteration_ipc.RunnerIterationIPCEndpoint( - self._id, self._ctx_ids, self._queue) - self._kwargs = {'version': 1, 'iteration': 10, 'kpi': {}} - self._pload_dict = payloads.TrafficGeneratorPayload.dict_to_obj( - self._kwargs).obj_to_dict() - - def test_tg_method_started(self): - self._queue.empty() - ctxt = {'id': self._ctx_ids[0]} - self.runner.tg_method_started(ctxt, **self._kwargs) - time.sleep(0.2) - - output = [] - while not self._queue.empty(): - output.append(self._queue.get(True, 1)) - - self.assertEqual(1, len(output)) - self.assertEqual(self._ctx_ids[0], output[0]['id']) - self.assertEqual(messaging.TG_METHOD_STARTED, output[0]['action']) - self.assertEqual(self._pload_dict, output[0]['payload'].obj_to_dict()) - - def test_tg_method_finished(self): - self._queue.empty() - ctxt = {'id': self._ctx_ids[0]} - self.runner.tg_method_finished(ctxt, **self._kwargs) - time.sleep(0.2) - - output = [] - while not self._queue.empty(): - output.append(self._queue.get(True, 1)) - - self.assertEqual(1, len(output)) - self.assertEqual(self._ctx_ids[0], output[0]['id']) - self.assertEqual(messaging.TG_METHOD_FINISHED, output[0]['action']) - self.assertEqual(self._pload_dict, output[0]['payload'].obj_to_dict()) - - def test_tg_method_iteration(self): - self._queue.empty() - ctxt = {'id': self._ctx_ids[0]} - self.runner.tg_method_iteration(ctxt, **self._kwargs) - time.sleep(0.2) - - output = [] - while not self._queue.empty(): - output.append(self._queue.get(True, 1)) - - 
self.assertEqual(1, len(output)) - self.assertEqual(self._ctx_ids[0], output[0]['id']) - self.assertEqual(messaging.TG_METHOD_ITERATION, output[0]['action']) - self.assertEqual(self._pload_dict, output[0]['payload'].obj_to_dict()) - - -class RunnerIterationIPCConsumerTestCase(ut_base.BaseUnitTestCase): - - def setUp(self): - self._id = uuid.uuid1().int - self._ctx_ids = [uuid.uuid1().int, uuid.uuid1().int] - self.consumer = iteration_ipc.RunnerIterationIPCConsumer( - self._id, self._ctx_ids) - self.consumer._queue = mock.Mock() - - def test__init(self): - self.assertEqual({self._ctx_ids[0]: [], self._ctx_ids[1]: []}, - self.consumer._kpi_per_id) - - def test_is_all_kpis_received_in_iteration(self): - payload = payloads.TrafficGeneratorPayload( - version=1, iteration=1, kpi={}) - msg1 = {'action': messaging.TG_METHOD_ITERATION, - 'id': self._ctx_ids[0], 'payload': payload} - msg2 = {'action': messaging.TG_METHOD_ITERATION, - 'id': self._ctx_ids[1], 'payload': payload} - self.consumer.iteration_index = 1 - - self.consumer._queue.empty.side_effect = [False, True] - self.consumer._queue.get.return_value = msg1 - self.assertFalse(self.consumer.is_all_kpis_received_in_iteration()) - - self.consumer._queue.empty.side_effect = [False, True] - self.consumer._queue.get.return_value = msg2 - self.assertTrue(self.consumer.is_all_kpis_received_in_iteration()) - - -class IterationIPCRunnerTestCase(ut_base.BaseUnitTestCase): - - @mock.patch.object(iteration_ipc, '_worker_process') - @mock.patch.object(os, 'getpid', return_value=12345678) - @mock.patch.object(multiprocessing, 'Process', return_value=mock.Mock()) - def test__run_benchmark(self, mock_process, mock_getpid, mock_worker): - method = 'method' - scenario_cfg = {'type': 'scenario_type'} - context_cfg = 'context_cfg' - name = '%s-%s-%s' % ('IterationIPC', 'scenario_type', 12345678) - runner = iteration_ipc.IterationIPCRunner(mock.ANY) - mock_getpid.reset_mock() - - runner._run_benchmark('class', method, scenario_cfg, 
context_cfg) - mock_process.assert_called_once_with( - name=name, - target=mock_worker, - args=(runner.result_queue, 'class', method, scenario_cfg, - context_cfg, runner.aborted, runner.output_queue)) - mock_getpid.assert_called_once() diff --git a/yardstick/tests/unit/benchmark/runner/test_proxduration.py b/yardstick/tests/unit/benchmark/runner/test_proxduration.py index 3299c5b05..056195fd3 100644 --- a/yardstick/tests/unit/benchmark/runner/test_proxduration.py +++ b/yardstick/tests/unit/benchmark/runner/test_proxduration.py @@ -97,7 +97,7 @@ class ProxDurationRunnerTest(unittest.TestCase): {}, multiprocessing.Event(), mock.Mock()) self._assert_defaults__worker_run_setup_and_teardown() - self.assertGreater(self.benchmark.my_method.call_count, 2) + self.assertGreater(self.benchmark.my_method.call_count, 0) def test__worker_process_called_without_cfg(self): scenario_cfg = {'runner': {}} diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py index 0f68753fd..35455a49c 100644 --- a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py +++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py @@ -7,10 +7,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for -# yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal - -from __future__ import absolute_import import mock import unittest @@ -18,33 +14,44 @@ from yardstick.benchmark.scenarios.availability.attacker import \ attacker_baremetal -# pylint: disable=unused-argument -# disable this for now because I keep forgetting mock patch arg ordering +class ExecuteShellTestCase(unittest.TestCase): + def setUp(self): + self._mock_subprocess = mock.patch.object(attacker_baremetal, + 'subprocess') + self.mock_subprocess = 
self._mock_subprocess.start() -@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess') -class ExecuteShellTestCase(unittest.TestCase): + self.addCleanup(self._stop_mocks) - def test__fun_execute_shell_command_successful(self, mock_subprocess): - cmd = "env" - mock_subprocess.check_output.return_value = (0, 'unittest') - exitcode, _ = attacker_baremetal._execute_shell_command(cmd) + def _stop_mocks(self): + self._mock_subprocess.stop() + + def test__execute_shell_command_successful(self): + self.mock_subprocess.check_output.return_value = (0, 'unittest') + exitcode, _ = attacker_baremetal._execute_shell_command("env") self.assertEqual(exitcode, 0) - @mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.LOG') - def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log, mock_subprocess): - cmd = "env" - mock_subprocess.check_output.side_effect = RuntimeError - exitcode, _ = attacker_baremetal._execute_shell_command(cmd) + @mock.patch.object(attacker_baremetal, 'LOG') + def test__execute_shell_command_fail_cmd_exception(self, mock_log): + self.mock_subprocess.check_output.side_effect = RuntimeError + exitcode, _ = attacker_baremetal._execute_shell_command("env") self.assertEqual(exitcode, -1) mock_log.error.assert_called_once() -@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess') -@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh') class AttackerBaremetalTestCase(unittest.TestCase): def setUp(self): + self._mock_ssh = mock.patch.object(attacker_baremetal, 'ssh') + self.mock_ssh = self._mock_ssh.start() + self._mock_subprocess = mock.patch.object(attacker_baremetal, + 'subprocess') + self.mock_subprocess = self._mock_subprocess.start() + self.addCleanup(self._stop_mocks) + + self.mock_ssh.SSH.from_node().execute.return_value = ( + 0, "running", '') + host = { "ipmi_ip": "10.20.0.5", "ipmi_user": "root", @@ 
-59,26 +66,26 @@ class AttackerBaremetalTestCase(unittest.TestCase): 'host': 'node1', } - def test__attacker_baremetal_all_successful(self, mock_ssh, mock_subprocess): - mock_ssh.SSH.from_node().execute.return_value = (0, "running", '') - ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, - self.context) + self.ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, + self.context) - ins.setup() - ins.inject_fault() - ins.recover() + def _stop_mocks(self): + self._mock_ssh.stop() + self._mock_subprocess.stop() - def test__attacker_baremetal_check_failuer(self, mock_ssh, mock_subprocess): - mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '') - ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, - self.context) - ins.setup() + def test__attacker_baremetal_all_successful(self): + self.ins.setup() + self.ins.inject_fault() + self.ins.recover() - def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess): + def test__attacker_baremetal_check_failure(self): + self.mock_ssh.SSH.from_node().execute.return_value = ( + 0, "error check", '') + self.ins.setup() + def test__attacker_baremetal_recover_successful(self): self.attacker_cfg["jump_host"] = 'node1' self.context["node1"]["password"] = "123456" - mock_ssh.SSH.from_node().execute.return_value = (0, "running", '') ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context) diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py index e9c680257..dc3a4b99a 100644 --- a/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py +++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py @@ -63,3 +63,20 @@ class MultiMonitorServiceTestCase(unittest.TestCase): ins.start_monitor() ins.wait_monitor() ins.verify_SLA() + + def test__monitor_multi_no_sla(self, mock_open, mock_ssh): + monitor_cfg = { + 
'monitor_type': 'general-monitor', + 'monitor_number': 3, + 'key': 'service-status', + 'monitor_key': 'service-status', + 'host': 'node1', + 'monitor_time': 0.1, + 'parameter': {'serviceName': 'haproxy'} + } + ins = monitor_multi.MultiMonitor( + monitor_cfg, self.context, {"nova-api": 10}) + mock_ssh.SSH.from_node().execute.return_value = (0, "running", '') + ins.start_monitor() + ins.wait_monitor() + self.assertTrue(ins.verify_SLA()) diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py index a6d2ca398..8c73bf221 100644 --- a/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py +++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py @@ -55,3 +55,19 @@ class MonitorProcessTestCase(unittest.TestCase): ins.monitor_func() ins._result = {"outage_time": 10} ins.verify_SLA() + + def test__monitor_process_no_sla(self, mock_ssh): + + monitor_cfg = { + 'monitor_type': 'process', + 'process_name': 'nova-api', + 'host': "node1", + 'monitor_time': 1, + } + ins = monitor_process.MonitorProcess(monitor_cfg, self.context, {"nova-api": 10}) + + mock_ssh.SSH.from_node().execute.return_value = (0, "0", '') + ins.setup() + ins.monitor_func() + ins._result = {"outage_time": 10} + self.assertTrue(ins.verify_SLA()) diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py index c4ac347f4..ba63e5f9e 100644 --- a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py +++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py @@ -6,11 +6,6 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench - -from __future__ 
import absolute_import - import unittest import mock @@ -18,13 +13,9 @@ from oslo_serialization import jsonutils from yardstick.benchmark.scenarios.compute import lmbench from yardstick.common import exceptions as y_exc +from yardstick import ssh -# pylint: disable=unused-argument -# disable this for now because I keep forgetting mock patch arg ordering - - -@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh') class LmbenchTestCase(unittest.TestCase): def setUp(self): @@ -38,16 +29,23 @@ class LmbenchTestCase(unittest.TestCase): self.result = {} - def test_successful_setup(self, mock_ssh): + self._mock_ssh = mock.patch.object(ssh, 'SSH') + self.mock_ssh = self._mock_ssh.start() + self.addCleanup(self._stop_mocks) + + def _stop_mocks(self): + self._mock_ssh.stop() + + def test_successful_setup(self): l = lmbench.Lmbench({}, self.ctx) - mock_ssh.SSH.from_node().execute.return_value = (0, '', '') + self.mock_ssh.from_node().execute.return_value = (0, '', '') l.setup() self.assertIsNotNone(l.client) self.assertTrue(l.setup_done) - def test_unsuccessful_unknown_type_run(self, mock_ssh): + def test_unsuccessful_unknown_type_run(self): options = { "test_type": "foo" @@ -58,7 +56,7 @@ class LmbenchTestCase(unittest.TestCase): self.assertRaises(RuntimeError, l.run, self.result) - def test_successful_latency_run_no_sla(self, mock_ssh): + def test_successful_latency_run_no_sla(self): options = { "test_type": "latency", @@ -69,12 +67,12 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = '[{"latency": 4.944, "size": 0.00049}]' - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') l.run(self.result) expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049} self.assertEqual(self.result, expected_result) - def test_successful_bandwidth_run_no_sla(self, mock_ssh): + def test_successful_bandwidth_run_no_sla(self): 
options = { "test_type": "bandwidth", @@ -86,12 +84,12 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}' - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') l.run(self.result) expected_result = jsonutils.loads(sample_output) self.assertEqual(self.result, expected_result) - def test_successful_latency_run_sla(self, mock_ssh): + def test_successful_latency_run_sla(self): options = { "test_type": "latency", @@ -105,12 +103,12 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = '[{"latency": 4.944, "size": 0.00049}]' - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') l.run(self.result) expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049} self.assertEqual(self.result, expected_result) - def test_successful_bandwidth_run_sla(self, mock_ssh): + def test_successful_bandwidth_run_sla(self): options = { "test_type": "bandwidth", @@ -125,12 +123,12 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}' - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') l.run(self.result) expected_result = jsonutils.loads(sample_output) self.assertEqual(self.result, expected_result) - def test_unsuccessful_latency_run_sla(self, mock_ssh): + def test_unsuccessful_latency_run_sla(self): options = { "test_type": "latency", @@ -144,10 +142,10 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = '[{"latency": 37.5, "size": 0.00049}]' - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + 
self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') self.assertRaises(y_exc.SLAValidationError, l.run, self.result) - def test_unsuccessful_bandwidth_run_sla(self, mock_ssh): + def test_unsuccessful_bandwidth_run_sla(self): options = { "test_type": "bandwidth", @@ -162,10 +160,10 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}' - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') self.assertRaises(y_exc.SLAValidationError, l.run, self.result) - def test_successful_latency_for_cache_run_sla(self, mock_ssh): + def test_successful_latency_for_cache_run_sla(self): options = { "test_type": "latency_for_cache", @@ -179,16 +177,16 @@ class LmbenchTestCase(unittest.TestCase): l = lmbench.Lmbench(args, self.ctx) sample_output = "{\"L1cache\": 1.6}" - mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '') + self.mock_ssh.from_node().execute.return_value = (0, sample_output, '') l.run(self.result) expected_result = jsonutils.loads(sample_output) self.assertEqual(self.result, expected_result) - def test_unsuccessful_script_error(self, mock_ssh): + def test_unsuccessful_script_error(self): options = {"test_type": "bandwidth"} args = {"options": options} l = lmbench.Lmbench(args, self.ctx) - mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR') + self.mock_ssh.from_node().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, l.run, self.result) diff --git a/yardstick/tests/unit/benchmark/scenarios/energy/__init__.py b/yardstick/tests/unit/benchmark/scenarios/energy/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/yardstick/tests/unit/benchmark/scenarios/energy/__init__.py diff --git a/yardstick/tests/unit/benchmark/scenarios/energy/energy_sample_chassis_output.txt 
b/yardstick/tests/unit/benchmark/scenarios/energy/energy_sample_chassis_output.txt new file mode 100644 index 000000000..9b3afd1fb --- /dev/null +++ b/yardstick/tests/unit/benchmark/scenarios/energy/energy_sample_chassis_output.txt @@ -0,0 +1,14 @@ +{ + "@odata.id": "/redfish/v1/Chassis", + "Name": "ChassisCollection", + "@odata.context": "/redfish/v1/$metadata#ChassisCollection.ChassisCollection", + "Members": [ + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ], + "@odata.type": "#ChassisCollection.ChassisCollection", + "@odata.etag": "\"af5a94479815eb5f87fe91ea08fde0ac\"", + "Members@odata.count": 1, + "Description": "A collection of Chassis resource instances." +} diff --git a/yardstick/tests/unit/benchmark/scenarios/energy/energy_sample_power_metrics.txt b/yardstick/tests/unit/benchmark/scenarios/energy/energy_sample_power_metrics.txt new file mode 100644 index 000000000..343ed3667 --- /dev/null +++ b/yardstick/tests/unit/benchmark/scenarios/energy/energy_sample_power_metrics.txt @@ -0,0 +1,300 @@ +{ + "PowerControl@odata.count": 1, + "@odata.id": "/redfish/v1/Chassis/1/Power", + "Redundancy@odata.count": 1, + "@odata.context": "/redfish/v1/$metadata#Power.Power", + "Voltages": [ + { + "MaxReadingRange": 14.28, + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Systems/1" + }, + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/Voltages/0", + "Status": { + "State": "Enabled" + }, + "SensorNumber": 140, + "Name": "SysBrd 12V", + "PhysicalContext": "VoltageRegulator", + "LowerThresholdCritical": 10.81, + "RelatedItem@odata.count": 2, + "MemberId": "0", + "MinReadingRange": null, + "ReadingVolts": 12.15, + "UpperThresholdCritical": 13.22 + }, + { + "MaxReadingRange": 3.95, + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Systems/1" + }, + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/Voltages/1", + "Status": { + "State": "Enabled" + }, + "SensorNumber": 141, + "Name": 
"SysBrd 3.3V", + "PhysicalContext": "VoltageRegulator", + "LowerThresholdCritical": 2.98, + "RelatedItem@odata.count": 2, + "MemberId": "1", + "MinReadingRange": null, + "UpperThresholdCritical": 3.63, + "ReadingVolts": 3.36 + }, + { + "MaxReadingRange": 5.97, + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Systems/1" + }, + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/Voltages/2", + "Status": { + "State": "Enabled" + }, + "SensorNumber": 142, + "Name": "SysBrd 5V", + "PhysicalContext": "VoltageRegulator", + "LowerThresholdCritical": 4.49, + "RelatedItem@odata.count": 2, + "MemberId": "2", + "MinReadingRange": null, + "UpperThresholdCritical": 5.5, + "ReadingVolts": 5.03 + }, + { + "MaxReadingRange": 3.32, + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Systems/1" + }, + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/Voltages/3", + "Status": { + "State": "Enabled" + }, + "SensorNumber": 3, + "Name": "CMOS Battery", + "PhysicalContext": "VoltageRegulator", + "LowerThresholdCritical": 2.25, + "RelatedItem@odata.count": 2, + "MemberId": "3", + "MinReadingRange": null, + "LowerThresholdNonCritical": 2.39, + "ReadingVolts": 3.12 + } + ], + "Voltages@odata.count": 4, + "Redundancy": [ + { + "@odata.id": "/redfish/v1/Chassis/1/Power#/Redundancy/0", + "Status": { + "State": "Enabled", + "Health": "OK" + }, + "Name": "PSU Redundancy", + "MinNumNeeded": 2, + "Oem": { + "Lenovo": { + "NonRedundantAvailablePower": 1100, + "@odata.type": "#LenovoRedundancy.v1_0_0.LenovoRedundancyProperties", + "PowerRedundancySettings": { + "EstimatedUsage": "58.55%", + "MaxPowerLimitWatts": 1100, + "PowerFailureLimit": 0, + "PowerRedundancyPolicy": "RedundantWithThrottling" + } + } + }, + "RedundancyEnabled": true, + "RedundancySet": [ + { + "@odata.id": "/redfish/v1/Chassis/1/Power#/PowerSupplies/0" + }, + { + "@odata.id": "/redfish/v1/Chassis/1/Power#/PowerSupplies/1" + } + ], + 
"RedundancySet@odata.count": 2, + "MaxNumSupported": 2, + "Mode": "N+m", + "MemberId": "0" + } + ], + "Description": "Power Consumption and Power Limiting", + "Name": "Power", + "PowerSupplies@odata.count": 2, + "Oem": { + "Lenovo": { + "@odata.type": "#LenovoPower.v1_0_0.Capabilities", + "LocalPowerControlEnabled": true, + "PowerOnPermissionEnabled": true, + "PowerRestorePolicy": "Restore", + "WakeOnLANEnabled": true + } + }, + "@odata.type": "#Power.v1_5_1.Power", + "Id": "Power", + "@odata.etag": "\"ad85a1403e07a433386e9907d00565cc\"", + "PowerControl": [ + { + "PowerAllocatedWatts": 1100, + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/PowerControl/0", + "Status": { + "HealthRollup": "Warning", + "State": "Enabled" + }, + "PowerLimit": { + "LimitException": "NoAction", + "LimitInWatts": null + }, + "Name": "Server Power Control", + "Oem": { + "Lenovo": { + "PowerUtilization": { + "MaxLimitInWatts": 1100, + "EnablePowerCapping": false, + "LimitMode": "AC", + "EnablePowerCapping@Redfish.Deprecated": "The property is deprecated. 
Please use LimitInWatts instead.", + "CapacityMinAC": 617, + "MinLimitInWatts": 0, + "GuaranteedInWatts": 617, + "CapacityMinDC": 578, + "CapacityMaxDC": 749, + "CapacityMaxAC": 802 + }, + "HistoryPowerMetric": { + "@odata.id": "/redfish/v1/Chassis/1/Power/PowerControl/0/Oem/Lenovo/HistoryPowerMetric" + }, + "@odata.type": "#LenovoPower.v1_0_0.PowerControl" + } + }, + "PowerAvailableWatts": 0, + "PowerMetrics": { + "IntervalInMin": 60, + "AverageConsumedWatts": 314.716675, + "MinConsumedWatts": 311, + "MaxConsumedWatts": 318 + }, + "RelatedItem@odata.count": 1, + "MemberId": "0", + "PowerRequestedWatts": 802, + "PowerConsumedWatts": 344, + "PowerCapacityWatts": 1100 + } + ], + "PowerSupplies": [ + { + "SerialNumber": "A4DB8BP11WJ", + "InputRanges": [ + { + "InputType": null, + "OutputWattage": null, + "MinimumVoltage": null, + "MaximumVoltage": null + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/PowerSupplies/0", + "RelatedItem@odata.count": 1, + "MemberId": "0", + "PartNumber": "SP57A02023", + "FirmwareVersion": "4.52", + "Status": { + "State": "Enabled", + "Health": "Warning" + }, + "LineInputVoltage": null, + "Name": "PSU1", + "PowerSupplyType": "Unknown", + "LastPowerOutputWatts": 316, + "Oem": { + "Lenovo": { + "Location": { + "InfoFormat": "Slot X", + "Info": "Slot 1" + }, + "HistoryPowerSupplyMetric": { + "@odata.id": "/redfish/v1/Chassis/1/Power/PowerSupplies/0/Oem/Lenovo/HistoryPowerSupplyMetric" + }, + "@odata.type": "#LenovoPower.v1_0_0.PowerSupply" + } + }, + "PowerCapacityWatts": null, + "Manufacturer": "ACBE", + "LineInputVoltageType": "Unknown", + "Model": "LENOVO-SP57A02023", + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ] + }, + { + "SerialNumber": "A4DB8BP12J7", + "InputRanges": [ + { + "InputType": "AC", + "OutputWattage": 1100, + "MinimumVoltage": 200, + "MaximumVoltage": 240 + } + ], + "@odata.id": "/redfish/v1/Chassis/1/Power#/PowerSupplies/1", + "RelatedItem@odata.count": 1, + "MemberId": "1", + "PartNumber": 
"SP57A02023", + "FirmwareVersion": "4.52", + "Status": { + "State": "Enabled", + "Health": "OK" + }, + "LineInputVoltage": 220, + "Name": "PSU2", + "PowerSupplyType": "AC", + "LastPowerOutputWatts": 316, + "Oem": { + "Lenovo": { + "Location": { + "InfoFormat": "Slot X", + "Info": "Slot 2" + }, + "HistoryPowerSupplyMetric": { + "@odata.id": "/redfish/v1/Chassis/1/Power/PowerSupplies/1/Oem/Lenovo/HistoryPowerSupplyMetric" + }, + "@odata.type": "#LenovoPower.v1_0_0.PowerSupply" + } + }, + "PowerCapacityWatts": 1100, + "Manufacturer": "ACBE", + "LineInputVoltageType": "ACMidLine", + "Model": "LENOVO-SP57A02023", + "RelatedItem": [ + { + "@odata.id": "/redfish/v1/Chassis/1" + } + ] + } + ] +} diff --git a/yardstick/tests/unit/benchmark/scenarios/energy/test_energy.py b/yardstick/tests/unit/benchmark/scenarios/energy/test_energy.py new file mode 100644 index 000000000..98daefeb7 --- /dev/null +++ b/yardstick/tests/unit/benchmark/scenarios/energy/test_energy.py @@ -0,0 +1,182 @@ +############################################################################## +# Copyright (c) 2019 Lenovo Group Limited Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.energy.energy.Energy + +from __future__ import absolute_import +import unittest +import mock +import os +from yardstick.benchmark.scenarios.energy import energy + + +class EnergyTestCase(unittest.TestCase): + + def setUp(self): + self.ctx = { + 'target': { + 'ip': '172.16.0.137', + 'user': 'root', + 'password': 'passw0rd', + 'redfish_ip': '10.229.17.105', + 'redfish_user': 'USERID', + 'redfish_pwd': "PASSW0RD", + } + } + self.result = {} + + @mock.patch('yardstick.benchmark.scenarios.' 
+ 'energy.energy.Energy._send_request') + def test_setup_response_success(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + p.setup() + self.assertTrue(p.get_response) + self.assertTrue(p.setup_done) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_setup_response_failed(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 404 + p.setup() + self.assertFalse(p.get_response) + self.assertTrue(p.setup_done) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_load_chassis_list_success(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + expect_result = self._read_file("energy_sample_chassis_output.txt") + expect_result = str(expect_result) + expect_result = expect_result.replace("'", '"') + mock_send_request.return_value.status_code = 200 + mock_send_request.return_value.text = expect_result + self.result = p.load_chassis_list() + self.assertEqual(self.result, ["/redfish/v1/Chassis/1"]) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_load_chassis_response_fail(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 404 + self.result = p.load_chassis_list() + self.assertEqual(self.result, []) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_load_chassis_wrongtype_response(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + expect_result = {} + mock_send_request.return_value.text = expect_result + self.result = p.load_chassis_list() + self.assertEqual(self.result, []) + + @mock.patch('yardstick.benchmark.scenarios.' 
+ 'energy.energy.Energy._send_request') + def test_load_chassis_inproper_key(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + expect_result = '{"some_key": "some_value"}' + mock_send_request.return_value.text = expect_result + self.result = p.load_chassis_list() + self.assertEqual(self.result, []) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_energy_getpower_success(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + expect_result = self._read_file("energy_sample_power_metrics.txt") + expect_result = str(expect_result) + expect_result = expect_result.replace("'", '"') + mock_send_request.return_value.status_code = 200 + mock_send_request.return_value.text = expect_result + self.result = p.get_power("/redfish/v1/Chassis/1") + self.assertEqual(self.result, 344) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_energy_getpower_response_fail(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 404 + self.result = p.get_power("/redfish/v1/Chassis/1") + self.assertEqual(self.result, -1) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_energy_getpower_wrongtype_response(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + expect_result = {} + mock_send_request.return_value.text = expect_result + self.result = p.get_power("/redfish/v1/Chassis/1") + self.assertEqual(self.result, -1) + + @mock.patch('yardstick.benchmark.scenarios.' 
+ 'energy.energy.Energy._send_request') + def test_energy_getpower_inproper_key(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + expect_result = '{"some_key": "some_value"}' + mock_send_request.return_value.text = expect_result + self.result = p.get_power("/redfish/v1/Chassis/1") + self.assertEqual(self.result, -1) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_run_success(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + chassis_list = mock.Mock(return_value=["/redfish/v1/Chassis/1"]) + p.load_chassis_list = chassis_list + power = mock.Mock(return_value=344) + p.get_power = power + p.run(self.result) + self.assertEqual(self.result, {"power": 344}) + + @mock.patch('yardstick.benchmark.scenarios.' + 'energy.energy.Energy._send_request') + def test_run_no_response(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 404 + chassis_list = mock.Mock(return_value=["/redfish/v1/Chassis/1"]) + p.load_chassis_list = chassis_list + p.run(self.result) + self.assertEqual(self.result, {"power": -1}) + + @mock.patch('yardstick.benchmark.scenarios.' 
+ 'energy.energy.Energy._send_request') + def test_run_wrong_chassis(self, mock_send_request): + args = {} + p = energy.Energy(args, self.ctx) + mock_send_request.return_value.status_code = 200 + chassis_list = mock.Mock(return_value=[]) + p.load_chassis_list = chassis_list + p.run(self.result) + self.assertEqual(self.result, {"power": -1}) + + def _read_file(self, filename): + curr_path = os.path.dirname(os.path.abspath(__file__)) + output = os.path.join(curr_path, filename) + with open(output) as f: + sample_output = f.read() + return sample_output diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py index 559e0599e..944202658 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py @@ -91,3 +91,17 @@ class PingTestCase(unittest.TestCase): mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, p.run, result) + + @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh') + def test_ping_unsuccessful_no_sla(self, mock_ssh): + + args = { + 'options': {'packetsize': 200}, + 'target': 'ares.demo' + } + result = {} + + p = ping.Ping(args, self.ctx) + + mock_ssh.SSH.from_node().execute.return_value = (0, '', '') + self.assertRaises(y_exc.SLAValidationError, p.run, result) diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py index 6bf2f2c2f..cf9a26a76 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py +++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License. @@ -159,7 +159,7 @@ TRAFFIC_PROFILE = { class TestNetworkServiceTestCase(unittest.TestCase): def setUp(self): - self.tg__1 = { + self.tg__0 = { 'name': 'trafficgen_1.yardstick', 'ip': '10.10.10.11', 'role': 'TrafficGen', @@ -185,7 +185,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): }, } - self.vnf__1 = { + self.vnf__0 = { 'name': 'vnf.yardstick', 'ip': '10.10.10.12', 'host': '10.223.197.164', @@ -242,8 +242,8 @@ class TestNetworkServiceTestCase(unittest.TestCase): self.context_cfg = { 'nodes': { - 'tg__1': self.tg__1, - 'vnf__1': self.vnf__1, + 'tg__0': self.tg__0, + 'vnf__0': self.vnf__0, }, 'networks': { GenericVNF.UPLINK: { @@ -270,7 +270,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): ], 'type': 'ELAN', 'id': GenericVNF.UPLINK, - 'name': 'tg__1 to vnf__1 link 1' + 'name': 'tg__0 to vnf__0 link 1' } self.vld1 = { @@ -288,7 +288,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): ], 'type': 'ELAN', 'id': GenericVNF.DOWNLINK, - 'name': 'vnf__1 to tg__1 link 2' + 'name': 'vnf__0 to tg__0 link 2' } self.topology = { @@ -300,12 +300,12 @@ class TestNetworkServiceTestCase(unittest.TestCase): { 'member-vnf-index': '1', 'VNF model': 'tg_trex_tpl.yaml', - 'vnfd-id-ref': 'tg__1', + 'vnfd-id-ref': 'tg__0', }, { 'member-vnf-index': '2', 'VNF model': 'tg_trex_tpl.yaml', - 'vnfd-id-ref': 'vnf__1', + 'vnfd-id-ref': 'vnf__0', }, ], 'vld': [self.vld0, self.vld1], @@ -325,6 +325,8 @@ class TestNetworkServiceTestCase(unittest.TestCase): }, }, 'options': { + 'simulated_users': {'uplink': [1, 2]}, + 'page_object': {'uplink': [1, 2]}, 'framesize': {'64B': 100} }, 'runner': { @@ -341,8 +343,8 @@ class TestNetworkServiceTestCase(unittest.TestCase): }, 'nodes': { 'tg__2': 'trafficgen_2.yardstick', - 'tg__1': 'trafficgen_1.yardstick', - 'vnf__1': 'vnf.yardstick', + 'tg__0': 'trafficgen_1.yardstick', + 'vnf__0': 'vnf.yardstick', }, } @@ -409,12 +411,12 @@ class TestNetworkServiceTestCase(unittest.TestCase): 'flow': { 'src_ip': [ { - 'tg__1': 'xe0', + 
'tg__0': 'xe0', }, ], 'dst_ip': [ { - 'tg__1': 'xe1', + 'tg__0': 'xe1', }, ], 'public_ip': ['1.1.1.1'], @@ -444,11 +446,10 @@ class TestNetworkServiceTestCase(unittest.TestCase): self.assertIn('found in', exc_str) def test_load_vnf_models_invalid(self): - self.context_cfg["nodes"]['tg__1']['VNF model'] = \ + self.context_cfg["nodes"]['tg__0']['VNF model'] = \ self._get_file_abspath("tg_trex_tpl.yaml") - self.context_cfg["nodes"]['vnf__1']['VNF model'] = \ + self.context_cfg["nodes"]['vnf__0']['VNF model'] = \ self._get_file_abspath("tg_trex_tpl.yaml") - self.context_cfg['task_id'] = 'fake_task_id' vnf = mock.Mock(autospec=GenericVNF) self.s.get_vnf_impl = mock.Mock(return_value=vnf) @@ -468,13 +469,13 @@ class TestNetworkServiceTestCase(unittest.TestCase): nodes = self.context_cfg["nodes"] self.assertEqual('../../vnf_descriptors/tg_rfc2544_tpl.yaml', - nodes['tg__1']['VNF model']) + nodes['tg__0']['VNF model']) self.assertEqual('../../vnf_descriptors/vpe_vnf.yaml', - nodes['vnf__1']['VNF model']) + nodes['vnf__0']['VNF model']) def test_map_topology_to_infrastructure_insufficient_nodes(self): cfg = deepcopy(self.context_cfg) - del cfg['nodes']['vnf__1'] + del cfg['nodes']['vnf__0'] cfg_patch = mock.patch.object(self.s, 'context_cfg', cfg) with cfg_patch: @@ -488,10 +489,10 @@ class TestNetworkServiceTestCase(unittest.TestCase): cfg = deepcopy(self.s.context_cfg) # delete all, we don't know which will come first - del cfg['nodes']['vnf__1']['interfaces']['xe0']['local_mac'] - del cfg['nodes']['vnf__1']['interfaces']['xe1']['local_mac'] - del cfg['nodes']['tg__1']['interfaces']['xe0']['local_mac'] - del cfg['nodes']['tg__1']['interfaces']['xe1']['local_mac'] + del cfg['nodes']['vnf__0']['interfaces']['xe0']['local_mac'] + del cfg['nodes']['vnf__0']['interfaces']['xe1']['local_mac'] + del cfg['nodes']['tg__0']['interfaces']['xe0']['local_mac'] + del cfg['nodes']['tg__0']['interfaces']['xe1']['local_mac'] config_patch = mock.patch.object(self.s, 'context_cfg', cfg) 
with config_patch: @@ -506,7 +507,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): ssh.from_node.return_value = ssh_mock # purge an important key from the data structure - for interface in self.tg__1['interfaces'].values(): + for interface in self.tg__0['interfaces'].values(): del interface['local_mac'] with self.assertRaises(exceptions.IncorrectConfig) as raised: @@ -515,7 +516,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): self.assertIn('not found', str(raised.exception)) # restore local_mac - for index, interface in enumerate(self.tg__1['interfaces'].values()): + for index, interface in enumerate(self.tg__0['interfaces'].values()): interface['local_mac'] = '00:00:00:00:00:{:2x}'.format(index) # make a connection point ref with 3 points @@ -566,7 +567,6 @@ class TestNetworkServiceTestCase(unittest.TestCase): tgen.verify_traffic = lambda x: verified_dict tgen.terminate = mock.Mock(return_value=True) tgen.name = "tgen__1" - tgen.run_traffic.return_value = 'tg_id' vnf = mock.Mock(autospec=GenericVNF) vnf.runs_traffic = False vnf.terminate = mock.Mock(return_value=True) @@ -579,6 +579,7 @@ class TestNetworkServiceTestCase(unittest.TestCase): self.s.load_vnf_models = mock.Mock(return_value=self.s.vnfs) self.s._fill_traffic_profile = \ mock.Mock(return_value=TRAFFIC_PROFILE) + self.assertIsNone(self.s.setup()) def test_setup_exception(self): with mock.patch("yardstick.ssh.SSH") as ssh: @@ -620,16 +621,38 @@ class TestNetworkServiceTestCase(unittest.TestCase): with self.assertRaises(IOError): self.s._get_traffic_profile() + def test__key_list_to_dict(self): + result = self.s._key_list_to_dict("uplink", {"uplink": [1, 2]}) + self.assertEqual({"uplink_0": 1, "uplink_1": 2}, result) + + def test__get_simulated_users(self): + result = self.s._get_simulated_users() + self.assertEqual({'simulated_users': {'uplink_0': 1, 'uplink_1': 2}}, + result) + + def test__get_page_object(self): + result = self.s._get_page_object() + self.assertEqual({'page_object': 
{'uplink_0': 1, 'uplink_1': 2}}, + result) + def test___get_traffic_imix_exception(self): with mock.patch.dict(self.scenario_cfg["traffic_options"], {'imix': ''}): self.assertEqual({'imix': {'64B': 100}}, self.s._get_traffic_imix()) + def test__get_ip_priority(self): + with mock.patch.dict(self.scenario_cfg["options"], + {'priority': {'raw': '0x01'}}): + self.assertEqual({'raw': '0x01'}, self.s._get_ip_priority()) + + def test__get_ip_priority_exception(self): + self.assertEqual({}, self.s._get_ip_priority()) + @mock.patch.object(base.TrafficProfile, 'get') @mock.patch.object(vnfdgen, 'generate_vnfd') def test__fill_traffic_profile(self, mock_generate, mock_tprofile_get): fake_tprofile = mock.Mock() - fake_vnfd = mock.Mock() + fake_vnfd = mock.MagicMock() with mock.patch.object(self.s, '_get_traffic_profile', return_value=fake_tprofile) as mock_get_tp: mock_generate.return_value = fake_vnfd @@ -641,11 +664,32 @@ class TestNetworkServiceTestCase(unittest.TestCase): 'extra_args': {'arg1': 'value1', 'arg2': 'value2'}, 'flow': {'flow': {}}, 'imix': {'imix': {'64B': 100}}, + 'priority': {}, 'uplink': {}, - 'duration': 30} + 'duration': 30, + 'simulated_users': { + 'simulated_users': {'uplink_0': 1, 'uplink_1': 2}}, + 'page_object': { + 'page_object': {'uplink_0': 1, 'uplink_1': 2}},} ) mock_tprofile_get.assert_called_once_with(fake_vnfd) + @mock.patch.object(base.TrafficProfile, 'get') + @mock.patch.object(vnfdgen, 'generate_vnfd') + def test__fill_traffic_profile2(self, mock_generate, mock_tprofile_get): + fake_tprofile = mock.Mock() + fake_vnfd = {} + with mock.patch.object(self.s, '_get_traffic_profile', + return_value=fake_tprofile) as mock_get_tp: + mock_generate.return_value = fake_vnfd + + self.s.scenario_cfg["options"] = {"traffic_config": {"duration": 99899}} + self.s._fill_traffic_profile() + mock_get_tp.assert_called_once() + self.assertIn("traffic_profile", fake_vnfd) + self.assertIn("duration", fake_vnfd["traffic_profile"]) + self.assertEqual(99899, 
fake_vnfd["traffic_profile"]["duration"]) + @mock.patch.object(utils, 'open_relative_file') def test__get_topology(self, mock_open_path): self.s.scenario_cfg['topology'] = 'fake_topology' @@ -669,9 +713,6 @@ class TestNetworkServiceTestCase(unittest.TestCase): ) self.assertEqual(self.s.topology, 'fake_nsd') - def test_get_mq_ids(self): - self.assertEqual(self.s._mq_ids, self.s.get_mq_ids()) - def test_teardown(self): vnf = mock.Mock(autospec=GenericVNF) vnf.terminate = mock.Mock(return_value=True) @@ -695,3 +736,138 @@ class TestNetworkServiceTestCase(unittest.TestCase): mock.Mock(return_value=True) with self.assertRaises(RuntimeError): self.s.teardown() + + +class TestNetworkServiceRFC2544TestCase(TestNetworkServiceTestCase): + + def setUp(self): + super(TestNetworkServiceRFC2544TestCase, self).setUp() + self.s = vnf_generic.NetworkServiceRFC2544(self.scenario_cfg, + self.context_cfg) + + def test_run(self): + tgen = mock.Mock(autospec=GenericTrafficGen) + tgen.traffic_finished = True + verified_dict = {"verified": True} + tgen.verify_traffic = lambda x: verified_dict + tgen.name = "tgen__1" + tgen.wait_on_trafic.return_value = 'COMPLETE' + vnf = mock.Mock(autospec=GenericVNF) + vnf.runs_traffic = False + self.s.vnfs = [tgen, vnf] + self.s.traffic_profile = mock.Mock() + self.s._fill_traffic_profile = mock.Mock() + self.s.collector = mock.Mock(autospec=Collector) + self.s.collector.get_kpi = mock.Mock( + return_value={tgen.name: verified_dict}) + result = mock.Mock() + self.s.run(result) + self.s._fill_traffic_profile.assert_called_once() + result.push.assert_called_once() + + def test_setup(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + ssh_mock.execute = \ + mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, "")) + ssh.from_node.return_value = ssh_mock + + tgen = mock.Mock(autospec=GenericTrafficGen) + tgen.traffic_finished = True + verified_dict = {"verified": True} + tgen.verify_traffic = lambda x: 
verified_dict + tgen.terminate = mock.Mock(return_value=True) + tgen.name = "tgen__1" + tgen.run_traffic.return_value = 'tg_id' + vnf = mock.Mock(autospec=GenericVNF) + vnf.runs_traffic = False + vnf.terminate = mock.Mock(return_value=True) + self.s.vnfs = [tgen, vnf] + self.s.traffic_profile = mock.Mock() + self.s.collector = mock.Mock(autospec=Collector) + self.s.collector.get_kpi = \ + mock.Mock(return_value={tgen.name: verified_dict}) + self.s.map_topology_to_infrastructure = mock.Mock(return_value=0) + self.s.load_vnf_models = mock.Mock(return_value=self.s.vnfs) + self.s.setup() + + def test_setup_exception(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + ssh_mock.execute = \ + mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, "")) + ssh.from_node.return_value = ssh_mock + + tgen = mock.Mock(autospec=GenericTrafficGen) + tgen.traffic_finished = True + verified_dict = {"verified": True} + tgen.verify_traffic = lambda x: verified_dict + tgen.terminate = mock.Mock(return_value=True) + tgen.name = "tgen__1" + vnf = mock.Mock(autospec=GenericVNF) + vnf.runs_traffic = False + vnf.instantiate.side_effect = RuntimeError( + "error during instantiate") + vnf.terminate = mock.Mock(return_value=True) + self.s.vnfs = [tgen, vnf] + self.s.traffic_profile = mock.Mock() + self.s.collector = mock.Mock(autospec=Collector) + self.s.collector.get_kpi = \ + mock.Mock(return_value={tgen.name: verified_dict}) + self.s.map_topology_to_infrastructure = mock.Mock(return_value=0) + self.s.load_vnf_models = mock.Mock(return_value=self.s.vnfs) + self.s._fill_traffic_profile = \ + mock.Mock(return_value=TRAFFIC_PROFILE) + with self.assertRaises(RuntimeError): + self.s.setup() + +class TestNetworkServiceRFC3511TestCase(TestNetworkServiceTestCase): + + def setUp(self): + super(TestNetworkServiceRFC3511TestCase, self).setUp() + self.s = vnf_generic.NetworkServiceRFC3511(self.scenario_cfg, + self.context_cfg) + + def test_run(self): + tgen 
= mock.Mock(autospec=GenericTrafficGen) + tgen.traffic_finished = True + verified_dict = {"verified": True} + tgen.verify_traffic = lambda x: verified_dict + tgen.name = "tgen__1" + vnf = mock.Mock(autospec=GenericVNF) + vnf.runs_traffic = False + self.s.vnfs = [tgen, vnf] + self.s.traffic_profile = mock.Mock() + self.s._fill_traffic_profile = mock.Mock() + self.s.collector = mock.Mock(autospec=Collector) + self.s.collector.get_kpi = mock.Mock() + result = mock.Mock() + self.s.run(result) + self.s._fill_traffic_profile.assert_called_once() + result.push.assert_called_once() + + def test_setup(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + ssh_mock.execute = \ + mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, "")) + ssh.from_node.return_value = ssh_mock + + tgen = mock.Mock(autospec=GenericTrafficGen) + tgen.traffic_finished = True + verified_dict = {"verified": True} + tgen.verify_traffic = lambda x: verified_dict + tgen.terminate = mock.Mock(return_value=True) + tgen.name = "tgen__1" + tgen.run_traffic.return_value = 'tg_id' + vnf = mock.Mock(autospec=GenericVNF) + vnf.runs_traffic = False + vnf.terminate = mock.Mock(return_value=True) + self.s.vnfs = [tgen, vnf] + self.s.traffic_profile = mock.Mock() + self.s.collector = mock.Mock(autospec=Collector) + self.s.collector.get_kpi = \ + mock.Mock(return_value={tgen.name: verified_dict}) + self.s.map_topology_to_infrastructure = mock.Mock(return_value=0) + self.s.load_vnf_models = mock.Mock(return_value=self.s.vnfs) + self.s.setup() diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml b/yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml index 1ac6c1f89..aaf84bb5e 100644 --- a/yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml +++ b/yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 
2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,31 +20,31 @@ nsd:nsd-catalog: description: scenario with VPE,L3fwd and VNF constituent-vnfd: - member-vnf-index: '1' - vnfd-id-ref: tg__1 - VNF model: ../../vnf_descriptors/tg_rfc2544_tpl.yaml #tg_vpe_upstream.yaml #VPE VNF + vnfd-id-ref: tg__0 + VNF model: ../../vnf_descriptors/tg_rfc2544_tpl.yaml #tg_trex_tpl.yaml #TREX - member-vnf-index: '2' - vnfd-id-ref: vnf__1 - VNF model: ../../vnf_descriptors/vpe_vnf.yaml #tg_l3fwd.yaml #tg_trex_tpl.yaml #TREX + vnfd-id-ref: vnf__0 + VNF model: ../../vnf_descriptors/vpe_vnf.yaml #VPE VNF vld: - id: uplink - name: tg__1 to vnf__1 link 1 + name: tg__0 to vnf__0 link 1 type: ELAN vnfd-connection-point-ref: - member-vnf-index-ref: '1' vnfd-connection-point-ref: xe0 - vnfd-id-ref: tg__1 #TREX + vnfd-id-ref: tg__0 - member-vnf-index-ref: '2' vnfd-connection-point-ref: xe0 - vnfd-id-ref: vnf__1 #VNF + vnfd-id-ref: vnf__0 - id: downlink - name: vnf__1 to tg__1 link 2 + name: vnf__0 to tg__0 link 2 type: ELAN vnfd-connection-point-ref: - member-vnf-index-ref: '2' vnfd-connection-point-ref: xe1 - vnfd-id-ref: vnf__1 #L3fwd + vnfd-id-ref: vnf__0 - member-vnf-index-ref: '1' vnfd-connection-point-ref: xe1 - vnfd-id-ref: tg__1 #VPE VNF + vnfd-id-ref: tg__0 diff --git a/yardstick/tests/unit/common/test_packages.py b/yardstick/tests/unit/common/test_packages.py index ba59a3015..09d76fe44 100644 --- a/yardstick/tests/unit/common/test_packages.py +++ b/yardstick/tests/unit/common/test_packages.py @@ -13,8 +13,8 @@ # limitations under the License. 
import mock -from pip import exceptions as pip_exceptions -from pip.operations import freeze +from pip._internal import exceptions as pip_exceptions +from pip._internal.operations import freeze import unittest from yardstick.common import packages diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py index 3cf6c4d05..8fed5ecf1 100644 --- a/yardstick/tests/unit/common/test_utils.py +++ b/yardstick/tests/unit/common/test_utils.py @@ -1135,6 +1135,15 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase): for addr in addr_list: self.assertRaises(Exception, utils.make_ipv4_address, addr) + def test_get_ip_range_count(self): + iprange = "192.168.0.1-192.168.0.25" + count = utils.get_ip_range_count(iprange) + self.assertEqual(count, 24) + + def test_get_ip_range_start(self): + iprange = "192.168.0.1-192.168.0.25" + start = utils.get_ip_range_start(iprange) + self.assertEqual(start, "192.168.0.1") def test_safe_ip_address(self): addr_list = self.GOOD_IP_V4_ADDRESS_STR_LIST @@ -1407,3 +1416,48 @@ class SafeCaseTestCase(unittest.TestCase): def test_default_value(self): self.assertEqual(0, utils.safe_cast('', 'int', 0)) + + +class SetupHugepagesTestCase(unittest.TestCase): + + @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'5\n')) + @mock.patch.object(utils, 'read_meminfo', + return_value={'Hugepagesize': '1024'}) + def test_setup_hugepages(self, mock_meminfo, *args): + ssh = mock.Mock() + ssh.execute = mock.Mock() + hp_size_kb, hp_number, hp_number_set = utils.setup_hugepages(ssh, 10 * 1024) + mock_meminfo.assert_called_once_with(ssh) + ssh.execute.assert_called_once_with( + 'echo 10 | sudo tee /proc/sys/vm/nr_hugepages') + self.assertEqual(hp_size_kb, 1024) + self.assertEqual(hp_number, 10) + self.assertEqual(hp_number_set, 5) + + +class GetOSSampleInfoTestCase(unittest.TestCase): + + def test_get_os_version(self, *args): + ssh = mock.Mock() + ssh.execute.return_value = (0, "18.04", "") + 
utils.get_os_version(ssh) + ssh.execute.assert_called_once_with("cat /etc/lsb-release") + + def test_get_kernel_version(self, *args): + ssh = mock.Mock() + ssh.execute.return_value = (0, "Linux", "") + utils.get_kernel_version(ssh) + ssh.execute.assert_called_once_with("uname -a") + + def test_get_sample_vnf_info(self, *args): + json_out = """ + {"UDP_Replay": { + "branch_commit": "47123bfc1b3c0d0b01884aebbce1a3e09ad7ddb0", + "md5": "4577702f6d6848380bd912232a1b9ca5", + "path_vnf": "/opt/nsb_bin/UDP_Replay" + } + }""" + json_file = '/opt/nsb_bin/yardstick_sample_vnf.json' + ssh = mock.Mock() + ssh.execute.return_value = (0, json_out, "") + utils.get_sample_vnf_info(ssh, json_file) diff --git a/yardstick/tests/unit/network_services/helpers/test_cpu.py b/yardstick/tests/unit/network_services/helpers/test_cpu.py index 871fbf8c9..a1c0826fb 100644 --- a/yardstick/tests/unit/network_services/helpers/test_cpu.py +++ b/yardstick/tests/unit/network_services/helpers/test_cpu.py @@ -119,3 +119,97 @@ class TestCpuSysCores(unittest.TestCase): vnf_cfg = {'lb_config': 'SW', 'lb_count': 1, 'worker_config': '1C/1T', 'worker_threads': 1} self.assertEqual(-1, cpu_topo.validate_cpu_cfg(vnf_cfg)) + + def test_get_cpu_layout(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + ssh_mock.execute = \ + mock.Mock( + return_value=(1, "# CPU,Core,Socket,Node,,L1d,L1i,L2,L3\n'" + "0,0,0,0,,0,0,0,0\n" + "1,1,0,0,,1,1,1,0\n", "")) + ssh_mock.put = \ + mock.Mock(return_value=(1, "", "")) + cpu_topo = CpuSysCores(ssh_mock) + subprocess.check_output = mock.Mock(return_value=0) + self.assertEqual({'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]}, + cpu_topo.get_cpu_layout()) + + def test__str2int(self): + self.assertEqual(1, CpuSysCores._str2int("1")) + + def test__str2int_error(self): + self.assertEqual(0, CpuSysCores._str2int("err")) + + def test_smt_enabled(self): + self.assertEqual(False, CpuSysCores.smt_enabled( + {'cpuinfo': 
[[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]})) + + def test_is_smt_enabled(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + self.assertEqual(False, cpu_topo.is_smt_enabled()) + + def test_cpu_list_per_node(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + self.assertEqual([0, 1], cpu_topo.cpu_list_per_node(0, False)) + + def test_cpu_list_per_node_error(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'err': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + with self.assertRaises(RuntimeError) as raised: + cpu_topo.cpu_list_per_node(0, False) + self.assertIn('Node cpuinfo not available.', str(raised.exception)) + + def test_cpu_list_per_node_smt_error(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + with self.assertRaises(RuntimeError) as raised: + cpu_topo.cpu_list_per_node(0, True) + self.assertIn('SMT is not enabled.', str(raised.exception)) + + def test_cpu_slice_of_list_per_node(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + self.assertEqual([1], + cpu_topo.cpu_slice_of_list_per_node(0, 1, 0, + False)) + + def test_cpu_slice_of_list_per_node_error(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = 
mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + with self.assertRaises(RuntimeError) as raised: + cpu_topo.cpu_slice_of_list_per_node(1, 1, 1, False) + self.assertIn('cpu_cnt + skip_cnt > length(cpu list).', + str(raised.exception)) + + def test_cpu_list_per_node_str(self): + with mock.patch("yardstick.ssh.SSH") as ssh: + ssh_mock = mock.Mock(autospec=ssh.SSH) + cpu_topo = CpuSysCores(ssh_mock) + cpu_topo.cpuinfo = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 1, 1, 1, 0]]} + self.assertEqual("1", + cpu_topo.cpu_list_per_node_str(0, 1, 1, ',', + False)) diff --git a/yardstick/tests/unit/network_services/helpers/vpp_helpers/__init__.py b/yardstick/tests/unit/network_services/helpers/vpp_helpers/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/yardstick/tests/unit/network_services/helpers/vpp_helpers/__init__.py diff --git a/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_multiple_loss_ratio_search.py b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_multiple_loss_ratio_search.py new file mode 100644 index 000000000..d3145546a --- /dev/null +++ b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_multiple_loss_ratio_search.py @@ -0,0 +1,2164 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import mock + +from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \ + MultipleLossRatioSearch +from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \ + NdrPdrResult +from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \ + ReceiveRateInterval +from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \ + ReceiveRateMeasurement +from yardstick.network_services.traffic_profile.rfc2544 import PortPgIDMap + + +class TestMultipleLossRatioSearch(unittest.TestCase): + + def test___init__(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(True, algorithm.latency) + self.assertEqual(64, algorithm.pkt_size) + self.assertEqual(30, algorithm.final_trial_duration) + self.assertEqual(0.005, algorithm.final_relative_width) + self.assertEqual(2, algorithm.number_of_intermediate_phases) + self.assertEqual(1, algorithm.initial_trial_duration) + self.assertEqual(720, algorithm.timeout) + self.assertEqual(1, algorithm.doublings) + + def test_double_relative_width(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(0.00997, algorithm.double_relative_width(0.005)) + + def test_double_step_down(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(99003.0, algorithm.double_step_down(0.005, 100000)) + + def test_expand_down(self): + algorithm = 
MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(99003.0, algorithm.expand_down(0.005, 1, 100000)) + + def test_double_step_up(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(101007.0401907013, + algorithm.double_step_up(0.005, 100000)) + + def test_expand_up(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(101007.0401907013, + algorithm.expand_up(0.005, 1, 100000)) + + def test_half_relative_width(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(0.0025031328369998773, + algorithm.half_relative_width(0.005)) + + def test_half_step_up(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + self.assertEqual(100250.94142341711, + algorithm.half_step_up(0.005, 100000)) + + def test_init_generator(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + 
self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), + mock.Mock(), mock.Mock())) + self.assertEqual(ports, algorithm.ports) + self.assertEqual(port_pg_id, algorithm.port_pg_id) + + def test_collect_kpi(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + algorithm.init_generator(ports, port_pg_id, mock.Mock, mock.Mock, + mock.Mock()) + self.assertIsNone(algorithm.collect_kpi({}, 100000)) + + def test_narrow_down_ndr_and_pdr(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, 'ndrpdr') as \ + mock_ndrpdr: + ndr_measured_low = ReceiveRateMeasurement(10, 13880000, 13879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(10, 14880000, 14879927, + 0) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(10, 11880000, 11879927, + 0) + pdr_measured_high = ReceiveRateMeasurement(10, 12880000, 12879927, + 0) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + 
pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock_ndrpdr.return_value = MultipleLossRatioSearch.ProgressState( + starting_result, 2, 30, 0.005, 0.0, + 4857361, 4977343) + self.assertEqual( + {'Result_NDR_LOWER': {'bandwidth_total_Gbps': 0.9327310944, + 'rate_total_pps': 1387992.7}, + 'Result_NDR_UPPER': { + 'bandwidth_total_Gbps': 0.9999310943999999, + 'rate_total_pps': 1487992.7}, + 'Result_NDR_packets_lost': {'packet_loss_ratio': 0.0, + 'packets_lost': 0.0}, + 'Result_PDR_LOWER': { + 'bandwidth_total_Gbps': 0.7983310943999999, + 'rate_total_pps': 1187992.7}, + 'Result_PDR_UPPER': {'bandwidth_total_Gbps': 0.8655310944, + 'rate_total_pps': 1287992.7}, + 'Result_PDR_packets_lost': {'packet_loss_ratio': 0.0, + 'packets_lost': 0.0}, + 'Result_stream0_NDR_LOWER': {'avg_latency': 3081.0, + 'max_latency': 3962.0, + 'min_latency': 1000.0}, + 'Result_stream0_PDR_LOWER': {'avg_latency': 3081.0, + 'max_latency': 3962.0, + 'min_latency': 1000.0}, + 'Result_stream1_NDR_LOWER': {'avg_latency': 3149.0, + 'max_latency': 3730.0, + 'min_latency': 500.0}, + 'Result_stream1_PDR_LOWER': {'avg_latency': 3149.0, + 'max_latency': 3730.0, + 'min_latency': 500.0}}, + algorithm.narrow_down_ndr_and_pdr(12880000, 15880000, 0.0)) + + def test__measure_and_update_state(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + starting_interval = ReceiveRateInterval(measured_low, measured_high) + starting_result = NdrPdrResult(starting_interval, starting_interval) + 
previous_state = MultipleLossRatioSearch.ProgressState(starting_result, + 2, 30, 0.005, + 0.0, 4857361, + 4977343) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure: + mock_measure.return_value = ReceiveRateMeasurement(1, + 4626121.09635, + 4626100, 13074) + state = algorithm._measure_and_update_state(previous_state, + 4626121.09635) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(1, state.result.ndr_interval.measured_low.duration) + self.assertEqual(4626121.09635, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(4626100, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(13074, + state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(4613026, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(4626100, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(13074.0, + state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(4613026.0, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.00283, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(1, state.result.ndr_interval.measured_high.duration) + self.assertEqual(4857361, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(4857339, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(84965, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(4772374, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(4857339, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(84965.0, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(4772374.0, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.01749, + 
state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(1, state.result.pdr_interval.measured_low.duration) + self.assertEqual(4626121.09635, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(4626100, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(13074, + state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(4613026, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(4626100, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(13074.0, + state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(4613026.0, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.00283, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(1, state.result.pdr_interval.measured_high.duration) + self.assertEqual(4857361, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(4857339, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(84965, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(4772374, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(4857339, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(84965.0, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(4772374.0, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.01749, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(2, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.005, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(4857361, state.minimum_transmit_rate) + self.assertEqual(4977343, state.maximum_transmit_rate) + + def test_new_interval(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + 
number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 3972540.4108, 21758482, 0) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.0) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(3972540.4108, result.measured_low.target_tr) + self.assertEqual(21758482, result.measured_low.transmit_count) + self.assertEqual(0, result.measured_low.loss_count) + self.assertEqual(21758482, result.measured_low.receive_count) + self.assertEqual(21758482, result.measured_low.transmit_rate) + self.assertEqual(0.0, result.measured_low.loss_rate) + self.assertEqual(21758482.0, result.measured_low.receive_rate) + self.assertEqual(0.0, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4857361, result.measured_high.target_tr) + self.assertEqual(4857339, result.measured_high.transmit_count) + self.assertEqual(84965, result.measured_high.loss_count) + self.assertEqual(4772374, result.measured_high.receive_count) + self.assertEqual(4857339, result.measured_high.transmit_rate) + self.assertEqual(84965.0, result.measured_high.loss_rate) + self.assertEqual(4772374.0, result.measured_high.receive_rate) + self.assertEqual(0.01749, result.measured_high.loss_fraction) + + def test_new_interval_zero(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 4977343, 21758482, 0) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = 
ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.0) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4857361.0, result.measured_low.target_tr) + self.assertEqual(4857339, result.measured_low.transmit_count) + self.assertEqual(84965, result.measured_low.loss_count) + self.assertEqual(4772374, result.measured_low.receive_count) + self.assertEqual(4857339.0, result.measured_low.transmit_rate) + self.assertEqual(84965.0, result.measured_low.loss_rate) + self.assertEqual(4772374.0, result.measured_low.receive_rate) + self.assertEqual(0.01749, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4977343.0, result.measured_high.target_tr) + self.assertEqual(21758482, result.measured_high.transmit_count) + self.assertEqual(0, result.measured_high.loss_count) + self.assertEqual(21758482, result.measured_high.receive_count) + self.assertEqual(21758482.0, result.measured_high.transmit_rate) + self.assertEqual(0.0, result.measured_high.loss_rate) + self.assertEqual(21758482.0, result.measured_high.receive_rate) + self.assertEqual(0.0, result.measured_high.loss_fraction) + + def test_new_interval_one(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 5000000, 2175848, 0) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.0) + self.assertIsInstance(result, 
ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4857361.0, result.measured_low.target_tr) + self.assertEqual(4857339, result.measured_low.transmit_count) + self.assertEqual(84965, result.measured_low.loss_count) + self.assertEqual(4772374, result.measured_low.receive_count) + self.assertEqual(4857339.0, result.measured_low.transmit_rate) + self.assertEqual(84965.0, result.measured_low.loss_rate) + self.assertEqual(4772374.0, result.measured_low.receive_rate) + self.assertEqual(0.01749, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4977343.0, result.measured_high.target_tr) + self.assertEqual(4977320, result.measured_high.transmit_count) + self.assertEqual(119959, result.measured_high.loss_count) + self.assertEqual(4857361, result.measured_high.receive_count) + self.assertEqual(4977320.0, result.measured_high.transmit_rate) + self.assertEqual(119959.0, result.measured_high.loss_rate) + self.assertEqual(4857361.0, result.measured_high.receive_rate) + self.assertEqual(0.0241, result.measured_high.loss_fraction) + + def test_new_interval_valid_1st(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 4000000, 2175848, 0) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.5) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4857361.0, result.measured_low.target_tr) + self.assertEqual(4857339, result.measured_low.transmit_count) + self.assertEqual(84965, 
result.measured_low.loss_count) + self.assertEqual(4772374, result.measured_low.receive_count) + self.assertEqual(4857339.0, result.measured_low.transmit_rate) + self.assertEqual(84965.0, result.measured_low.loss_rate) + self.assertEqual(4772374.0, result.measured_low.receive_rate) + self.assertEqual(0.01749, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4977343.0, result.measured_high.target_tr) + self.assertEqual(4977320, result.measured_high.transmit_count) + self.assertEqual(119959, result.measured_high.loss_count) + self.assertEqual(4857361, result.measured_high.receive_count) + self.assertEqual(4977320.0, result.measured_high.transmit_rate) + self.assertEqual(119959.0, result.measured_high.loss_rate) + self.assertEqual(4857361.0, result.measured_high.receive_rate) + self.assertEqual(0.0241, result.measured_high.loss_fraction) + + def test_new_interval_valid_1st_loss(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 4000000, 2175848, 1000000) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.02) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4000000.0, result.measured_low.target_tr) + self.assertEqual(2175848, result.measured_low.transmit_count) + self.assertEqual(1000000, result.measured_low.loss_count) + self.assertEqual(1175848, result.measured_low.receive_count) + self.assertEqual(2175848.0, result.measured_low.transmit_rate) + self.assertEqual(1000000.0, 
result.measured_low.loss_rate) + self.assertEqual(1175848.0, result.measured_low.receive_rate) + self.assertEqual(0.45959, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4977343.0, result.measured_high.target_tr) + self.assertEqual(4977320, result.measured_high.transmit_count) + self.assertEqual(119959, result.measured_high.loss_count) + self.assertEqual(4857361, result.measured_high.receive_count) + self.assertEqual(4977320.0, result.measured_high.transmit_rate) + self.assertEqual(119959.0, result.measured_high.loss_rate) + self.assertEqual(4857361.0, result.measured_high.receive_rate) + self.assertEqual(0.0241, result.measured_high.loss_fraction) + + def test_new_interval_valid_2nd(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 5000000, 2175848, 0) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.5) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4977343.0, result.measured_low.target_tr) + self.assertEqual(4977320, result.measured_low.transmit_count) + self.assertEqual(119959, result.measured_low.loss_count) + self.assertEqual(4857361, result.measured_low.receive_count) + self.assertEqual(4977320.0, result.measured_low.transmit_rate) + self.assertEqual(119959.0, result.measured_low.loss_rate) + self.assertEqual(4857361.0, result.measured_low.receive_rate) + self.assertEqual(0.0241, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + 
self.assertEqual(5000000.0, result.measured_high.target_tr) + self.assertEqual(2175848, result.measured_high.transmit_count) + self.assertEqual(0, result.measured_high.loss_count) + self.assertEqual(2175848, result.measured_high.receive_count) + self.assertEqual(2175848.0, result.measured_high.transmit_rate) + self.assertEqual(0.0, result.measured_high.loss_rate) + self.assertEqual(2175848.0, result.measured_high.receive_rate) + self.assertEqual(0.0, result.measured_high.loss_fraction) + + def test_new_interval_valid_3rd(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 4867361, 2175848, 0) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.5) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4867361.0, result.measured_low.target_tr) + self.assertEqual(2175848, result.measured_low.transmit_count) + self.assertEqual(0, result.measured_low.loss_count) + self.assertEqual(2175848, result.measured_low.receive_count) + self.assertEqual(2175848.0, result.measured_low.transmit_rate) + self.assertEqual(0.0, result.measured_low.loss_rate) + self.assertEqual(2175848.0, result.measured_low.receive_rate) + self.assertEqual(0.0, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4977343.0, result.measured_high.target_tr) + self.assertEqual(4977320, result.measured_high.transmit_count) + self.assertEqual(119959, result.measured_high.loss_count) + self.assertEqual(4857361, result.measured_high.receive_count) 
+ self.assertEqual(4977320.0, result.measured_high.transmit_rate) + self.assertEqual(119959.0, result.measured_high.loss_rate) + self.assertEqual(4857361.0, result.measured_high.receive_rate) + self.assertEqual(0.0241, result.measured_high.loss_fraction) + + def test_new_interval_valid_3rd_loss(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + measured = ReceiveRateMeasurement(1, 4867361, 2175848, 1000000) + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + result = algorithm._new_interval(receive_rate_interval, measured, 0.2) + self.assertIsInstance(result, ReceiveRateInterval) + self.assertEqual(1, result.measured_low.duration) + self.assertEqual(4857361.0, result.measured_low.target_tr) + self.assertEqual(4857339, result.measured_low.transmit_count) + self.assertEqual(84965, result.measured_low.loss_count) + self.assertEqual(4772374, result.measured_low.receive_count) + self.assertEqual(4857339.0, result.measured_low.transmit_rate) + self.assertEqual(84965.0, result.measured_low.loss_rate) + self.assertEqual(4772374.0, result.measured_low.receive_rate) + self.assertEqual(0.01749, result.measured_low.loss_fraction) + self.assertEqual(1, result.measured_high.duration) + self.assertEqual(4867361.0, result.measured_high.target_tr) + self.assertEqual(2175848, result.measured_high.transmit_count) + self.assertEqual(1000000, result.measured_high.loss_count) + self.assertEqual(1175848, result.measured_high.receive_count) + self.assertEqual(2175848.0, result.measured_high.transmit_rate) + self.assertEqual(1000000.0, result.measured_high.loss_rate) + self.assertEqual(1175848.0, result.measured_high.receive_rate) + 
self.assertEqual(0.45959, result.measured_high.loss_fraction) + + def test_ndrpdr(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure: + measured_low = ReceiveRateMeasurement(30, 14880000, 14879927, 0) + measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, 0) + measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + starting_interval = ReceiveRateInterval(measured_low, + measured_high) + starting_result = NdrPdrResult(starting_interval, + starting_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + 
state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879927, + 
state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.005, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_ndr_rel_width(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + measured_low = ReceiveRateMeasurement(30, 880000, 879927, 0) + measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, 0) + measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + starting_interval = ReceiveRateInterval(measured_low, + measured_high) + ending_interval = ReceiveRateInterval(measured_high, measured_high) + starting_result = NdrPdrResult(starting_interval, + starting_interval) + ending_result = NdrPdrResult(ending_interval, ending_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + 
MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.005, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(14880000, + 
state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.005, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_pdr_rel_width(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + 
port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 14880000, 14879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 0) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 880000, 879927, 0) + pdr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 0) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(ndr_interval, ndr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.005, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(14879927, + 
state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + 
state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.005, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_ndr_lo_duration(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + measured_low = ReceiveRateMeasurement(30, 14880000, 14879927, 0) + measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, 100) + measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + starting_interval = ReceiveRateInterval(measured_low, + measured_high) + starting_result = NdrPdrResult(starting_interval, + starting_interval) + mock_measure.return_value = 
ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(starting_result, -1, 30, + 0.005, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 50, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879827, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495994.23333, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(1e-05, + state.result.ndr_interval.measured_high.loss_fraction) + 
self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879827, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495994.23333, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(1e-05, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.005, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_ndr_hi_duration(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + 
initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + measured_low = ReceiveRateMeasurement(60, 14880000, 14879927, 0) + measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, 100) + measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + starting_interval = ReceiveRateInterval(measured_low, + measured_high) + starting_result = NdrPdrResult(starting_interval, + starting_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(starting_result, -1, 30, + 0.005, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 50, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(60.0, state.result.ndr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(247998.78333, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(247998.78333, + 
state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879827, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495994.23333, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(1e-05, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(60.0, state.result.pdr_interval.measured_low.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(247998.78333, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(247998.78333, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879827, + 
state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495994.23333, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(1e-05, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.005, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_error(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=0) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure: + measured_low = ReceiveRateMeasurement(30, 14880000, 14879927, 0) + measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, 0) + measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + starting_interval = ReceiveRateInterval(measured_low, + measured_high) + starting_result = NdrPdrResult(starting_interval, + starting_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 14880000, + 14880000) + with self.assertRaises(RuntimeError) as raised: + algorithm.ndrpdr(previous_state) + + self.assertIn('Optimized search takes too long.', + str(raised.exception)) + + def 
test_ndrpdr_update_state_ndr_hi(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + pdr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 0) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(pdr_interval, pdr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + 
self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, 
state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_update_state_ndr_hi_duration(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 0) + ndr_measured_high = 
ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + pdr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 0) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(pdr_interval, pdr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 50, 0.005, 0.0, 4880000, + 10880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + 
self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(0, state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(0.0, + 
state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_update_state_ndr_lo(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 100000) + ndr_measured_high = ReceiveRateMeasurement(30, 12880000, 12879927, + 100000) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 100000) + pdr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 100000) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(pdr_interval, pdr_interval) + 
mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 100000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(12779927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(425997.56667, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.00776, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14779927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(492664.23333, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.00672, + 
state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(12779927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(425997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.00776, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14779927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(492664.23333, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.00672, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_update_state_pdr_lo(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + 
final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 100000) + pdr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 100000) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(pdr_interval, pdr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 100000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(12880000.0, + 
state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(12779927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(425997.56667, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.00776, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14779927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(492664.23333, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.00672, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(12779927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_low.loss_rate) + 
self.assertEqual(425997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.00776, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14779927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(492664.23333, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.00672, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_update_state_pdr_lo_duration(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(30, 12880000, 
12879927, + 0) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 100000) + pdr_measured_high = ReceiveRateMeasurement(30, 14880000, 14879927, + 100000) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(pdr_interval, pdr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 50, 0.005, 0.0, 14880000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(12779927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(425997.56667, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.00776, + state.result.ndr_interval.measured_low.loss_fraction) + 
self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(14880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(14779927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(492664.23333, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.00672, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(12880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(12779927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(425997.56667, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.00776, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(14880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(14879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(14779927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(495997.56667, + 
state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(492664.23333, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.00672, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_update_state_pdr_hi(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(30, 12880000, 12879927, + 100000) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + pdr_measured_high = ReceiveRateMeasurement(30, 13880000, 14879927, + 0) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, 
pdr_interval) + ending_result = NdrPdrResult(ndr_interval, ndr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 30, 0.005, 0.0, 100000, + 14880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, state.result.ndr_interval.measured_low.duration) + self.assertEqual(10880000.0, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(10879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, + state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(10879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(362664.23333, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(362664.23333, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(12880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(12779927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(425997.56667, + 
state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.00776, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(10880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(10879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, + state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(10879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(362664.23333, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(362664.23333, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(12880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(12779927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(425997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.00776, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_ndrpdr_update_state_pdr_hi_duration(self): + algorithm = 
MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock(), mock.Mock, + mock.Mock())) + with mock.patch.object(algorithm, 'measure') as \ + mock_measure, \ + mock.patch.object(algorithm, '_measure_and_update_state') as \ + mock__measure_and_update_state: + ndr_measured_low = ReceiveRateMeasurement(30, 10880000, 10879927, + 0) + ndr_measured_high = ReceiveRateMeasurement(30, 12880000, 12879927, + 100000) + ndr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_low = ReceiveRateMeasurement(30, 12880000, 12879927, + 0) + pdr_measured_high = ReceiveRateMeasurement(30, 13880000, 14879927, + 0) + pdr_measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + pdr_measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + ndr_interval = ReceiveRateInterval(ndr_measured_low, + ndr_measured_high) + pdr_interval = ReceiveRateInterval(pdr_measured_low, + pdr_measured_high) + starting_result = NdrPdrResult(ndr_interval, pdr_interval) + ending_result = NdrPdrResult(ndr_interval, ndr_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock__measure_and_update_state.return_value = \ + MultipleLossRatioSearch.ProgressState(ending_result, -1, 30, + 0.2, 0.0, 14880000, + 14880000) + previous_state = MultipleLossRatioSearch.ProgressState( + starting_result, -1, 50, 0.005, 0.0, 100000, + 10880000) + state = algorithm.ndrpdr(previous_state) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertIsInstance(state, MultipleLossRatioSearch.ProgressState) + self.assertEqual(30, 
state.result.ndr_interval.measured_low.duration) + self.assertEqual(10880000.0, + state.result.ndr_interval.measured_low.target_tr) + self.assertEqual(10879927, + state.result.ndr_interval.measured_low.transmit_count) + self.assertEqual(0, + state.result.ndr_interval.measured_low.loss_count) + self.assertEqual(10879927, + state.result.ndr_interval.measured_low.receive_count) + self.assertEqual(362664.23333, + state.result.ndr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_rate) + self.assertEqual(362664.23333, + state.result.ndr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.ndr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.ndr_interval.measured_high.duration) + self.assertEqual(12880000.0, + state.result.ndr_interval.measured_high.target_tr) + self.assertEqual(12879927, + state.result.ndr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.ndr_interval.measured_high.loss_count) + self.assertEqual(12779927, + state.result.ndr_interval.measured_high.receive_count) + self.assertEqual(429330.9, + state.result.ndr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.ndr_interval.measured_high.loss_rate) + self.assertEqual(425997.56667, + state.result.ndr_interval.measured_high.receive_rate) + self.assertEqual(0.00776, + state.result.ndr_interval.measured_high.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_low.duration) + self.assertEqual(10880000.0, + state.result.pdr_interval.measured_low.target_tr) + self.assertEqual(10879927, + state.result.pdr_interval.measured_low.transmit_count) + self.assertEqual(0, + state.result.pdr_interval.measured_low.loss_count) + self.assertEqual(10879927, + state.result.pdr_interval.measured_low.receive_count) + self.assertEqual(362664.23333, + state.result.pdr_interval.measured_low.transmit_rate) + self.assertEqual(0.0, + 
state.result.pdr_interval.measured_low.loss_rate) + self.assertEqual(362664.23333, + state.result.pdr_interval.measured_low.receive_rate) + self.assertEqual(0.0, + state.result.pdr_interval.measured_low.loss_fraction) + self.assertEqual(30, state.result.pdr_interval.measured_high.duration) + self.assertEqual(12880000, + state.result.pdr_interval.measured_high.target_tr) + self.assertEqual(12879927, + state.result.pdr_interval.measured_high.transmit_count) + self.assertEqual(100000, + state.result.pdr_interval.measured_high.loss_count) + self.assertEqual(12779927, + state.result.pdr_interval.measured_high.receive_count) + self.assertEqual(429330.9, + state.result.pdr_interval.measured_high.transmit_rate) + self.assertEqual(3333.33333, + state.result.pdr_interval.measured_high.loss_rate) + self.assertEqual(425997.56667, + state.result.pdr_interval.measured_high.receive_rate) + self.assertEqual(0.00776, + state.result.pdr_interval.measured_high.loss_fraction) + self.assertEqual(-1, state.phases) + self.assertEqual(30, state.duration) + self.assertEqual(0.2, state.width_goal) + self.assertEqual(0.0, state.packet_loss_ratio) + self.assertEqual(14880000, state.minimum_transmit_rate) + self.assertEqual(14880000, state.maximum_transmit_rate) + + def test_measure(self): + measurer = mock.MagicMock() + measurer.sent = 102563094 + measurer.loss = 30502 + algorithm = MultipleLossRatioSearch(measurer=measurer, latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.MagicMock(), + mock.Mock, mock.Mock())) + measurement = algorithm.measure(30, 3418770.3425, True) + self.assertIsInstance(measurement, ReceiveRateMeasurement) + self.assertEqual(30, measurement.duration) + self.assertEqual(3418770.3425, 
measurement.target_tr) + self.assertEqual(102563094, measurement.transmit_count) + self.assertEqual(30502, measurement.loss_count) + self.assertEqual(102532592, measurement.receive_count) + self.assertEqual(3418769.8, measurement.transmit_rate) + self.assertEqual(1016.73333, measurement.loss_rate) + self.assertEqual(3417753.06667, measurement.receive_rate) + self.assertEqual(0.0003, measurement.loss_fraction) + + def test_perform_additional_measurements_based_on_ndrpdr_result(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + ports = [0, 1] + port_pg_id = PortPgIDMap() + port_pg_id.add_port(0) + port_pg_id.add_port(1) + self.assertIsNone( + algorithm.init_generator(ports, port_pg_id, mock.Mock, mock.Mock, + mock.Mock())) + result = mock.MagicMock() + result.ndr_interval.measured_low.target_tr.return_result = 100000 + self.assertIsNone( + algorithm.perform_additional_measurements_based_on_ndrpdr_result( + result)) + + def test_display_single_bound(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + result_samples = {} + self.assertIsNone( + algorithm.display_single_bound(result_samples, 'NDR_LOWER', + 4857361, 64, + ['20/849/1069', '40/69/183'])) + self.assertEqual( + {'Result_NDR_LOWER': {'bandwidth_total_Gbps': 3.264146592, + 'rate_total_pps': 4857361.0}, + 'Result_stream0_NDR_LOWER': {'avg_latency': 849.0, + 'max_latency': 1069.0, + 'min_latency': 20.0}, + 'Result_stream1_NDR_LOWER': {'avg_latency': 69.0, + 'max_latency': 183.0, + 'min_latency': 40.0}}, + result_samples) + + def test_check_ndrpdr_interval_validity(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + 
pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + result_samples = {} + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 0) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 0) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertEqual('Minimal rate loss fraction 0.0 reach target 0.0', + algorithm.check_ndrpdr_interval_validity( + result_samples, 'NDR_LOWER', + receive_rate_interval)) + self.assertEqual( + {'Result_NDR_LOWER_packets_lost': {'packet_loss_ratio': 0.0, + 'packets_lost': 0.0}}, + result_samples) + + def test_check_ndrpdr_interval_validity_fail(self): + algorithm = MultipleLossRatioSearch(measurer=mock.Mock(), latency=True, + pkt_size=64, + final_trial_duration=30, + final_relative_width=0.005, + number_of_intermediate_phases=2, + initial_trial_duration=1, + timeout=720) + result_samples = {} + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertEqual( + 'Minimal rate loss fraction 0.01749 does not reach target 0.005\n84965 packets lost.', + algorithm.check_ndrpdr_interval_validity(result_samples, + 'NDR_LOWER', + receive_rate_interval, + 0.005)) + self.assertEqual({'Result_NDR_LOWER_packets_lost': { + 'packet_loss_ratio': 0.01749, + 'packets_lost': 84965.0}}, result_samples) diff --git a/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_ndr_pdr_result.py b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_ndr_pdr_result.py new file mode 100644 index 000000000..ea9c39a03 --- /dev/null +++ b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_ndr_pdr_result.py @@ -0,0 +1,91 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import mock + +from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \ + NdrPdrResult +from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \ + ReceiveRateInterval +from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \ + ReceiveRateMeasurement + + +class TestNdrPdrResult(unittest.TestCase): + + def test___init__(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + starting_interval = ReceiveRateInterval(measured_low, measured_high) + ndrpdr_result = NdrPdrResult(starting_interval, starting_interval) + self.assertIsInstance(ndrpdr_result.ndr_interval, ReceiveRateInterval) + self.assertIsInstance(ndrpdr_result.pdr_interval, ReceiveRateInterval) + + def test___init__ndr_error(self): + starting_interval = mock.MagicMock() + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + end_interval = ReceiveRateInterval(measured_low, measured_high) + with self.assertRaises(TypeError) as raised: + NdrPdrResult(starting_interval, end_interval) + self.assertIn('ndr_interval, is not a ReceiveRateInterval: ', + str(raised.exception)) + + def test___init__pdr_error(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + starting_interval = 
ReceiveRateInterval(measured_low, measured_high) + end_interval = mock.MagicMock() + with self.assertRaises(TypeError) as raised: + NdrPdrResult(starting_interval, end_interval) + self.assertIn('pdr_interval, is not a ReceiveRateInterval: ', + str(raised.exception)) + + def test_width_in_goals(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + starting_interval = ReceiveRateInterval(measured_low, measured_high) + ndrpdr_result = NdrPdrResult(starting_interval, starting_interval) + self.assertEqual('ndr 4.86887; pdr 4.86887', + ndrpdr_result.width_in_goals(0.005)) + + def test___str__(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + starting_interval = ReceiveRateInterval(measured_low, measured_high) + ndrpdr_result = NdrPdrResult(starting_interval, starting_interval) + self.assertEqual( + 'NDR=[d=1.0,Tr=4857361.0,Df=0.01749;d=1.0,Tr=4977343.0,Df=0.0241);' + 'PDR=[d=1.0,Tr=4857361.0,Df=0.01749;d=1.0,Tr=4977343.0,Df=0.0241)', + ndrpdr_result.__str__()) + + def test___repr__(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + starting_interval = ReceiveRateInterval(measured_low, measured_high) + ndrpdr_result = NdrPdrResult(starting_interval, starting_interval) + self.assertEqual( + 'NdrPdrResult(ndr_interval=ReceiveRateInterval(measured_low=' \ + 'ReceiveRateMeasurement(duration=1.0,target_tr=4857361.0,' \ + 'transmit_count=4857339,loss_count=84965),measured_high=' \ + 'ReceiveRateMeasurement(duration=1.0,target_tr=4977343.0,' \ + 'transmit_count=4977320,loss_count=119959)),pdr_interval=' \ + 'ReceiveRateInterval(measured_low=ReceiveRateMeasurement' \ + '(duration=1.0,target_tr=4857361.0,transmit_count=4857339,' \ + 'loss_count=84965),measured_high=ReceiveRateMeasurement' \ + 
'(duration=1.0,target_tr=4977343.0,transmit_count=4977320,' \ + 'loss_count=119959)))', + ndrpdr_result.__repr__()) diff --git a/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_receive_rate_interval.py b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_receive_rate_interval.py new file mode 100644 index 000000000..bbf241613 --- /dev/null +++ b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_receive_rate_interval.py @@ -0,0 +1,100 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import mock + +from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \ + ReceiveRateInterval +from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \ + ReceiveRateMeasurement + + +class TestReceiveRateInterval(unittest.TestCase): + + def test__init__(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertIsInstance(receive_rate_interval.measured_low, + ReceiveRateMeasurement) + self.assertIsInstance(receive_rate_interval.measured_high, + ReceiveRateMeasurement) + + def test__init__measured_low_error(self): + measured_low = mock.MagicMock() + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + with self.assertRaises(TypeError) as raised: + ReceiveRateInterval(measured_low, measured_high) + self.assertIn('measured_low is not a ReceiveRateMeasurement: ', + str(raised.exception)) + + def test__init__measured_high_error(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = mock.MagicMock() + with self.assertRaises(TypeError) as raised: + ReceiveRateInterval(measured_low, measured_high) + self.assertIn('measured_high is not a ReceiveRateMeasurement: ', + str(raised.exception)) + + def test_sort(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertIsNone(receive_rate_interval.sort()) + self.assertEqual(119982.0, receive_rate_interval.abs_tr_width) + self.assertEqual(0.02411, + receive_rate_interval.rel_tr_width) + + def test_sort_swap(self): + measured_low = ReceiveRateMeasurement(1, 14857361, 14857339, 184965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + 
receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertIsNone(receive_rate_interval.sort()) + self.assertEqual(9880018.0, receive_rate_interval.abs_tr_width) + self.assertEqual(0.66499, + receive_rate_interval.rel_tr_width) + + def test_width_in_goals(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertEqual(4.86887, + receive_rate_interval.width_in_goals(0.005)) + + def test___str__(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertEqual( + '[d=1.0,Tr=4857361.0,Df=0.01749;d=1.0,Tr=4977343.0,Df=0.0241)', + receive_rate_interval.__str__()) + + def test___repr__(self): + measured_low = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + measured_high = ReceiveRateMeasurement(1, 4977343, 4977320, 119959) + receive_rate_interval = ReceiveRateInterval(measured_low, + measured_high) + self.assertEqual('ReceiveRateInterval(measured_low=' \ + 'ReceiveRateMeasurement(duration=1.0,target_tr=4857361.0,' \ + 'transmit_count=4857339,loss_count=84965),measured_high=' \ + 'ReceiveRateMeasurement(duration=1.0,target_tr=4977343.0,' \ + 'transmit_count=4977320,loss_count=119959))', + receive_rate_interval.__repr__()) diff --git a/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_receive_rate_measurement.py b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_receive_rate_measurement.py new file mode 100644 index 000000000..d4e2d7920 --- /dev/null +++ b/yardstick/tests/unit/network_services/helpers/vpp_helpers/test_receive_rate_measurement.py @@ -0,0 +1,44 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \ + ReceiveRateMeasurement + + +class TestReceiveRateMeasurement(unittest.TestCase): + + def test__init__(self): + measured = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + self.assertEqual(1, measured.duration) + self.assertEqual(4857361, measured.target_tr) + self.assertEqual(4857339, measured.transmit_count) + self.assertEqual(84965, measured.loss_count) + self.assertEqual(4772374, measured.receive_count) + self.assertEqual(4857339, measured.transmit_rate) + self.assertEqual(84965.0, measured.loss_rate) + self.assertEqual(4772374.0, measured.receive_rate) + self.assertEqual(0.01749, measured.loss_fraction) + + def test___str__(self): + measured = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + self.assertEqual('d=1.0,Tr=4857361.0,Df=0.01749', + measured.__str__()) + + def test___repr__(self): + measured = ReceiveRateMeasurement(1, 4857361, 4857339, 84965) + self.assertEqual('ReceiveRateMeasurement(duration=1.0,' \ + 'target_tr=4857361.0,transmit_count=4857339,loss_count=84965)', + measured.__repr__()) diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py index 92ceeae83..a20592dc7 100644 --- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py +++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py @@ -1,4 +1,4 @@ -# 
Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,16 +15,54 @@ import mock import IxNetwork import unittest +import re from copy import deepcopy +from collections import OrderedDict from yardstick.common import exceptions from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api +from yardstick.network_services.traffic_profile import ixia_rfc2544 UPLINK = 'uplink' DOWNLINK = 'downlink' +TRAFFIC_PROFILE = { + 'uplink_0': { + 'ipv4': { + 'outer_l2': { + 'framesize': { + '128B': '0', + '1518B': '0', + '64B': '0', + '373b': '0', + '256B': '0', + '1400B': '0', + '570B': '0'}}, + 'id': 1}}, + 'description': 'Traffic profile to run RFC2544 latency', + 'name': 'rfc2544', + 'schema': 'isb:traffic_profile:0.1', + 'traffic_profile': { + 'injection_time': None, + 'enable_latency': True, + 'frame_rate': '100%', + 'traffic_type': 'IXIARFC2544Profile'}, + 'downlink_0': { + 'ipv4': { + 'outer_l2': { + 'framesize': { + '128B': '0', + '1518B': '0', + '64B': '0', + '373b': '0', + '256B': '0', + '1400B': '0', + '570B': '0'}}, + 'id': 2}}} + + TRAFFIC_PARAMETERS = { UPLINK: { 'id': 1, @@ -46,7 +84,8 @@ TRAFFIC_PARAMETERS = { 'dstip': '152.16.40.20', 'srcip': '152.16.100.20', 'dstmask': 24, - 'srcmask': 24 + 'srcmask': 24, + 'priority': {'raw': '0x01'} }, 'outer_l4': { 'seed': 1, @@ -78,7 +117,8 @@ TRAFFIC_PARAMETERS = { 'dstip': '2001::10', 'srcip': '2021::10', 'dstmask': 64, - 'srcmask': 64 + 'srcmask': 64, + 'priority': {'raw': '0x01'} }, 'outer_l4': { 'seed': 1, @@ -101,6 +141,12 @@ class TestIxNextgen(unittest.TestCase): self.ixnet.getRoot.return_value = 'my_root' self.ixnet_gen = ixnet_api.IxNextgen() self.ixnet_gen._ixnet = self.ixnet + self._mock_log = mock.patch.object(ixnet_api.log, 'info') + self.mock_log = self._mock_log.start() + self.addCleanup(self._stop_mocks) + + def _stop_mocks(self): + 
self.mock_log.stop() def test_get_config(self): tg_cfg = { @@ -186,6 +232,202 @@ class TestIxNextgen(unittest.TestCase): self.assertIn([64, 64, 75], output) self.assertIn([512, 512, 25], output) + def test_add_topology(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.add_topology('topology 1', 'vports') + self.ixnet_gen.ixnet.add.assert_called_once_with('my_root', 'topology') + self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with( + 'obj', '-name', 'topology 1', '-vports', 'vports') + self.ixnet_gen.ixnet.commit.assert_called_once() + + def test_add_device_group(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.add_device_group('topology', 'device group 1', '1') + self.ixnet_gen.ixnet.add.assert_called_once_with('topology', + 'deviceGroup') + self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with( + 'obj', '-name', 'device group 1', '-multiplier', '1') + self.ixnet_gen.ixnet.commit.assert_called_once() + + def test_add_ethernet(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.add_ethernet('device_group', 'ethernet 1') + self.ixnet_gen.ixnet.add.assert_called_once_with('device_group', + 'ethernet') + self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with( + 'obj', '-name', 'ethernet 1') + self.ixnet_gen.ixnet.commit.assert_called_once() + + def test_add_vlans_single(self): + obj = 'ethernet' + self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.ixnet_gen.ixnet.getList.return_value = ['vlan1', 'vlan2'] + vlan1 = ixnet_api.Vlan(vlan_id=100, tp_id='ethertype88a8', prio=2) + vlan2 = ixnet_api.Vlan(vlan_id=101, tp_id='ethertype88a8', prio=3) + self.ixnet_gen.add_vlans(obj, [vlan1, vlan2]) + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('ethernet', + '-vlanCount', 2) + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('attr/singleValue', + '-value', 100) + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('attr/singleValue', + '-value', 101) + 
self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('attr/singleValue', + '-value', 2) + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('attr/singleValue', + '-value', 3) + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', 'ethertype88a8') + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test_add_vlans_increment(self): + obj = 'ethernet' + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.ixnet_gen.ixnet.getList.return_value = ['vlan1'] + vlan = ixnet_api.Vlan(vlan_id=100, vlan_id_step=1, prio=3, prio_step=2) + self.ixnet_gen.add_vlans(obj, [vlan]) + self.ixnet.setMultiAttribute.assert_any_call('obj', '-start', 100, + '-step', 1, + '-direction', 'increment') + self.ixnet.setMultiAttribute.assert_any_call('obj', '-start', 3, + '-step', 2, + '-direction', 'increment') + + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test_add_vlans_invalid(self): + vlans = [] + self.assertRaises(RuntimeError, self.ixnet_gen.add_vlans, 'obj', vlans) + + def test_add_ipv4(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.add_ipv4('ethernet 1', name='ipv4 1') + self.ixnet_gen.ixnet.add.assert_called_once_with('ethernet 1', 'ipv4') + self.ixnet_gen.ixnet.setAttribute.assert_called_once_with('obj', + '-name', + 'ipv4 1') + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test_add_ipv4_single(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.ixnet_gen.add_ipv4('ethernet 1', name='ipv4 1', addr='100.1.1.100', + prefix='24', gateway='100.1.1.200') + self.ixnet_gen.ixnet.add.assert_called_once_with('ethernet 1', 'ipv4') + self.ixnet_gen.ixnet.setAttribute.assert_called_once_with('obj', + '-name', + 'ipv4 1') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', '100.1.1.100') + 
self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', '24') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', '100.1.1.200') + + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test_add_ipv4_counter(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.ixnet_gen.add_ipv4('ethernet 1', name='ipv4 1', + addr='100.1.1.100', + addr_step='1', + addr_direction='increment', + prefix='24', + gateway='100.1.1.200', + gw_step='1', + gw_direction='increment') + self.ixnet_gen.ixnet.add.assert_any_call('ethernet 1', 'ipv4') + self.ixnet_gen.ixnet.setAttribute.assert_called_once_with('obj', + '-name', + 'ipv4 1') + self.ixnet_gen.ixnet.add.assert_any_call('attr', 'counter') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('obj', '-start', + '100.1.1.100', + '-step', '1', + '-direction', + 'increment') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', '24') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call('obj', '-start', + '100.1.1.200', + '-step', '1', + '-direction', + 'increment') + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test_add_pppox_client(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.ixnet_gen.add_pppox_client('ethernet 1', 'pap', 'user', 'pwd') + self.ixnet_gen.ixnet.add.assert_called_once_with('ethernet 1', + 'pppoxclient') + + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', 'pap') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', 'user') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'attr/singleValue', '-value', 'pwd') + + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test_add_pppox_client_invalid_auth(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + 
self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.assertRaises(NotImplementedError, self.ixnet_gen.add_pppox_client, + 'ethernet 1', 'invalid_auth', 'user', 'pwd') + + self.ixnet_gen.ixnet.setMultiAttribute.assert_not_called() + + def test_add_bgp(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.ixnet.getAttribute.return_value = 'attr' + self.ixnet_gen.add_bgp(ipv4='ipv4 1', + dut_ip='10.0.0.1', + local_as=65000, + bgp_type='external') + self.ixnet_gen.ixnet.add.assert_called_once_with('ipv4 1', 'bgpIpv4Peer') + self.ixnet_gen.ixnet.setAttribute.assert_any_call( + 'attr/singleValue', '-value', '10.0.0.1') + self.ixnet_gen.ixnet.setAttribute.assert_any_call( + 'attr/singleValue', '-value', 65000) + self.ixnet_gen.ixnet.setAttribute.assert_any_call( + 'attr/singleValue', '-value', 'external') + + def test_add_interface(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.add_interface(vport='vport', + ip='10.0.0.2', + mac='00:00:00:00:00:00', + gateway='10.0.0.1') + self.ixnet_gen.ixnet.add.assert_any_call('vport', 'interface') + self.ixnet_gen.ixnet.add.assert_any_call('obj', 'ipv4') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'obj/ethernet', '-macAddress', '00:00:00:00:00:00') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'obj', '-ip', '10.0.0.2') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'obj', '-gateway', '10.0.0.1') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'obj', '-enabled', 'true') + + def test_add_static_ipv4(self): + self.ixnet_gen.ixnet.add.return_value = 'obj' + self.ixnet_gen.add_static_ipv4(iface='iface', + vport='vport', + start_ip='10.0.0.0', + count='100', + mask='32') + self.ixnet_gen.ixnet.add.assert_called_once_with( + 'vport/protocols/static', 'ip') + self.ixnet_gen.ixnet.setMultiAttribute.assert_any_call( + 'obj', '-protocolInterface', 'iface', + '-ipStart', '10.0.0.0', + '-count', '100', + '-mask', '32', + '-enabled', 'true') 
+ @mock.patch.object(IxNetwork, 'IxNet') def test_connect(self, mock_ixnet): mock_ixnet.return_value = self.ixnet @@ -239,8 +481,8 @@ class TestIxNextgen(unittest.TestCase): self.ixnet_gen._cfg = config self.assertIsNone(self.ixnet_gen.assign_ports()) - self.assertEqual(self.ixnet.execute.call_count, 2) - self.assertEqual(self.ixnet.commit.call_count, 4) + self.assertEqual(self.ixnet.execute.call_count, 1) + self.assertEqual(self.ixnet.commit.call_count, 3) self.assertEqual(self.ixnet.getAttribute.call_count, 2) @mock.patch.object(ixnet_api, 'log') @@ -273,17 +515,24 @@ class TestIxNextgen(unittest.TestCase): '-trackBy', 'trafficGroupId0') def test__create_flow_groups(self): + uplink_endpoints = ['up_endp1', 'up_endp2'] + downlink_endpoints = ['down_endp1', 'down_endp2'] self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']] - self.ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2'] - self.ixnet_gen._create_flow_groups() + self.ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2', 'endp3', + 'endp4'] + self.ixnet_gen._create_flow_groups(uplink_endpoints, downlink_endpoints) self.ixnet_gen.ixnet.add.assert_has_calls([ mock.call('traffic_item', 'endpointSet'), mock.call('traffic_item', 'endpointSet')]) self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([ - mock.call('endp1', '-name', '1', '-sources', ['1/protocols'], - '-destinations', ['2/protocols']), - mock.call('endp2', '-name', '2', '-sources', ['2/protocols'], - '-destinations', ['1/protocols'])]) + mock.call('endp1', '-name', '1', '-sources', ['up_endp1'], + '-destinations', ['down_endp1']), + mock.call('endp2', '-name', '2', '-sources', ['down_endp1'], + '-destinations', ['up_endp1']), + mock.call('endp3', '-name', '3', '-sources', ['up_endp2'], + '-destinations', ['down_endp2']), + mock.call('endp4', '-name', '4', '-sources', ['down_endp2'], + '-destinations', ['up_endp2'])]) def test__append_protocol_to_stack(self): @@ -293,12 +542,17 @@ class TestIxNextgen(unittest.TestCase): 
'my_root/traffic/protocolTemplate:"my_protocol"') def test__setup_config_elements(self): + # the config parsed from some_file + yaml_data = {'traffic_profile': {} + } + traffic_profile = ixia_rfc2544.IXIARFC2544Profile(yaml_data) + traffic_profile.params = TRAFFIC_PROFILE self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['cfg_element']] with mock.patch.object(self.ixnet_gen, '_append_procotol_to_stack') as \ mock_append_proto: - self.ixnet_gen._setup_config_elements() - mock_append_proto.assert_has_calls([ + self.ixnet_gen._setup_config_elements(traffic_profile=traffic_profile) + mock_append_proto.assert_has_calls([ mock.call(ixnet_api.PROTO_UDP, 'cfg_element/stack:"ethernet-1"'), mock.call(ixnet_api.PROTO_IPV4, 'cfg_element/stack:"ethernet-1"')]) self.ixnet_gen.ixnet.setAttribute.assert_has_calls([ @@ -313,12 +567,88 @@ class TestIxNextgen(unittest.TestCase): def test_create_traffic_model(self, mock__setup_config_elements, mock__create_flow_groups, mock__create_traffic_item): - - self.ixnet_gen.create_traffic_model() - mock__create_traffic_item.assert_called_once() - mock__create_flow_groups.assert_called_once() + # the config parsed from some_file + yaml_data = {'traffic_profile': {}} + traffic_profile = ixia_rfc2544.IXIARFC2544Profile(yaml_data) + uplink_ports = ['port1', 'port3'] + downlink_ports = ['port2', 'port4'] + uplink_endpoints = ['port1/protocols', 'port3/protocols'] + downlink_endpoints = ['port2/protocols', 'port4/protocols'] + self.ixnet_gen.create_traffic_model(uplink_ports, downlink_ports, + traffic_profile=traffic_profile) + mock__create_traffic_item.assert_called_once_with('raw') + mock__create_flow_groups.assert_called_once_with(uplink_endpoints, + downlink_endpoints) mock__setup_config_elements.assert_called_once() + @mock.patch.object(ixnet_api.IxNextgen, '_create_traffic_item') + @mock.patch.object(ixnet_api.IxNextgen, '_create_flow_groups') + @mock.patch.object(ixnet_api.IxNextgen, '_setup_config_elements') + def 
test_create_ipv4_traffic_model(self, mock__setup_config_elements, + mock__create_flow_groups, + mock__create_traffic_item): + uplink_topologies = ['up1', 'up3'] + downlink_topologies = ['down2', 'down4'] + traffic_profile = 'fake_profile' + self.ixnet_gen.create_ipv4_traffic_model(uplink_topologies, + downlink_topologies, + traffic_profile) + mock__create_traffic_item.assert_called_once_with('ipv4') + mock__create_flow_groups.assert_called_once_with(uplink_topologies, + downlink_topologies) + mock__setup_config_elements.assert_called_once_with( + traffic_profile='fake_profile', add_default_proto=False) + + def test_flows_settings(self): + cfg = {'uplink_0': { + 'ipv4': { + 'outer_l2': { + 'framesize': { + '128B': '0', + '1518B': '0', + '64B': '0', + '373b': '0', + '256B': '0', + '1400B': '0', + '570B': '0'}}, + 'id': 1}}} + + expected = [ + {'ipv4': { + 'id': 1, + 'outer_l2': { + 'framesize': { + '1518B': '0', + '1400B': '0', + '128B': '0', + '64B': '0', + '256B': '0', + '373b': '0', + '570B': '0'}}}}] + + self.assertEqual(expected, self.ixnet_gen._flows_settings(cfg=cfg)) + + def test_is_qinq(self): + flow_data = {'ipv4': { + 'outer_l2': {}, + 'id': 1}} + self.assertEqual(False, self.ixnet_gen.is_qinq(flow_data=flow_data)) + + flow_data = {'ipv4': { + 'outer_l2': { + 'QinQ': { + 'C-VLAN': { + 'priority': 0, + 'cfi': 0, + 'id': 512}, + 'S-VLAN': { + 'priority': 0, + 'cfi': 0, + 'id': 128}}, + }, + 'id': 1}} + self.assertEqual(True, self.ixnet_gen.is_qinq(flow_data=flow_data)) + def test__update_frame_mac(self): with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item') as \ mock_get_field: @@ -393,15 +723,107 @@ class TestIxNextgen(unittest.TestCase): self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 40) def test_get_statistics(self): - port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"' - flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"' with mock.patch.object(self.ixnet_gen, '_build_stats_map') as \ mock_build_stats: 
self.ixnet_gen.get_statistics() mock_build_stats.assert_has_calls([ - mock.call(port_statistics, self.ixnet_gen.PORT_STATS_NAME_MAP), - mock.call(flow_statistics, self.ixnet_gen.LATENCY_NAME_MAP)]) + mock.call(self.ixnet_gen.PORT_STATISTICS, + self.ixnet_gen.PORT_STATS_NAME_MAP), + mock.call(self.ixnet_gen.FLOW_STATISTICS, + self.ixnet_gen.LATENCY_NAME_MAP)]) + + def test_set_flow_tracking(self): + self.ixnet_gen._ixnet.getList.return_value = ['traffic_item'] + self.ixnet_gen.set_flow_tracking(track_by=['vlanVlanId0']) + self.ixnet_gen.ixnet.setAttribute.assert_called_once_with( + 'traffic_item/tracking', '-trackBy', ['vlanVlanId0']) + self.assertEqual(self.ixnet.commit.call_count, 1) + + def test__set_egress_flow_tracking(self): + self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'], + ['encapsulation']] + self.ixnet_gen._set_egress_flow_tracking(encapsulation='Ethernet', + offset='IPv4 TOS Precedence') + self.ixnet_gen.ixnet.setAttribute.assert_any_call( + 'traffic_item', '-egressEnabled', True) + self.ixnet_gen.ixnet.setAttribute.assert_any_call( + 'encapsulation', '-encapsulation', 'Ethernet') + self.ixnet_gen.ixnet.setAttribute.assert_any_call( + 'encapsulation', '-offset', 'IPv4 TOS Precedence') + self.assertEqual(self.ixnet.commit.call_count, 2) + + def test__get_view_page_stats(self): + expected_stats = [ + {'header1': 'row1_1', 'header2': 'row1_2'}, + {'header1': 'row2_1', 'header2': 'row2_2'} + ] + self.ixnet_gen._ixnet.getAttribute.side_effect = [ + ['header1', 'header2'], + [ + [['row1_1', 'row1_2']], + [['row2_1', 'row2_2']] + ] + ] + stats = self.ixnet_gen._get_view_page_stats('view_obj') + self.assertListEqual(stats, expected_stats) + + @mock.patch.object(ixnet_api.IxNextgen, '_get_view_page_stats') + def test_get_pppoe_scenario_statistics(self, mock_get_view): + + pattern = re.compile('Flow 2') + + expected_stats = { + 'port_statistics': [{ + 'port_1': 'port_stat1', + 'port_2': 'port_stat2' + }], + 'flow_statistic': [{ + 'flow_1': 
'flow_stat1', + 'flow_2': 'flow_stat2' + }], + 'pppox_client_per_port': [{ + 'sub_1': 'sub_stat1', + 'sub_2': 'sub_stat2' + }] + } + + pppoe_scenario_stats = OrderedDict([ + ('port_statistics', 'view_obj'), + ('flow_statistic', 'view_obj'), + ('pppox_client_per_port', 'view_obj') + ]) + + pppoe_scenario_stats_map = { + 'port_statistics': {'port_1': 'Port 1', + 'port_2': 'Port 2'}, + 'flow_statistic': {'flow_1': 'Flow 1', + 'flow_2': pattern}, + 'pppox_client_per_port': {'sub_1': 'Sub 1', + 'sub_2': 'Sub 2'} + } + + # All stats keys + port_stats = [{'Port 1': 'port_stat1', + 'Port 2': 'port_stat2', + 'Port 3': 'port_stat3'}] + flows_stats = [{'Flow 1': 'flow_stat1', + 'Flow 2': 'flow_stat2', + 'Flow 3': 'flow_stat3'}] + pppoe_sub_stats = [{'Sub 1': 'sub_stat1', + 'Sub 2': 'sub_stat2', + 'Sub 3': 'sub_stat3'}] + + mock_get_view.side_effect = [port_stats, flows_stats, pppoe_sub_stats] + self.ixnet_gen._ixnet.getAttribute.return_value = '1' + + with mock.patch.multiple(ixnet_api.IxNextgen, + PPPOE_SCENARIO_STATS=pppoe_scenario_stats, + PPPOE_SCENARIO_STATS_MAP=pppoe_scenario_stats_map): + stats = self.ixnet_gen.get_pppoe_scenario_statistics() + self.assertDictEqual(stats, expected_stats) + self.assertEqual(self.ixnet_gen.ixnet.getAttribute.call_count, 6) + self.ixnet_gen.ixnet.setAttribute.assert_not_called() def test__update_ipv4_address(self): with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item', @@ -413,6 +835,78 @@ class TestIxNextgen(unittest.TestCase): '-randomMask', '0.0.0.63', '-valueType', 'random', '-countValue', 25) + def test__update_ipv4_priority_raw(self): + priority = {'raw': '0x01'} + self.ixnet_gen._set_priority_field = mock.Mock() + with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item', + return_value='field_desc'): + self.ixnet_gen._update_ipv4_priority('field_desc', priority) + + self.ixnet_gen._set_priority_field.assert_called_once_with( + 'field_desc', priority['raw']) + + def test__update_ipv4_priority_dscp(self): + 
priority = {'dscp': {'defaultPHB': [0, 1, 2, 3]}} + self.ixnet_gen._set_priority_field = mock.Mock() + with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item', + return_value='field_desc'): + self.ixnet_gen._update_ipv4_priority('field_desc', priority) + + self.ixnet_gen._set_priority_field.assert_called_once_with( + 'field_desc', priority['dscp']['defaultPHB']) + + def test__update_ipv4_priority_tos(self): + priority = {'tos': {'precedence': [0, 4, 7]}} + self.ixnet_gen._set_priority_field = mock.Mock() + with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item', + return_value='field_desc'): + self.ixnet_gen._update_ipv4_priority('field_desc', priority) + + self.ixnet_gen._set_priority_field.assert_called_once_with( + 'field_desc', priority['tos']['precedence']) + + def test__update_ipv4_priority_wrong_priority_type(self): + priority = {'test': [0, 4, 7]} + self.ixnet_gen._set_priority_field = mock.Mock() + with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item', + return_value='field_desc'): + self.ixnet_gen._update_ipv4_priority('field_desc', priority) + + self.ixnet_gen._set_priority_field.assert_not_called() + + def test__update_ipv4_priority_not_supported_dscp_class(self): + priority = {'dscp': {'testPHB': [0, 4, 7]}} + self.ixnet_gen._set_priority_field = mock.Mock() + self.ixnet_gen._get_field_in_stack_item = mock.Mock() + self.ixnet_gen._update_ipv4_priority('field_desc', priority) + self.ixnet_gen._set_priority_field.assert_not_called() + self.ixnet_gen._get_field_in_stack_item.assert_not_called() + + def test__update_ipv4_priority_not_supported_tos_field(self): + priority = {'tos': {'test': [0, 4, 7]}} + self.ixnet_gen._set_priority_field = mock.Mock() + self.ixnet_gen._get_field_in_stack_item = mock.Mock() + self.ixnet_gen._update_ipv4_priority('field_desc', priority) + self.ixnet_gen._set_priority_field.assert_not_called() + self.ixnet_gen._get_field_in_stack_item.assert_not_called() + + def 
test__set_priority_field_list_value(self): + value = [1, 4, 7] + self.ixnet_gen._set_priority_field('field_desc', value) + self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with( + 'field_desc', + '-valueList', [1, 4, 7], + '-activeFieldChoice', 'true', + '-valueType', 'valueList') + + def test__set_priority_field_single_value(self): + value = 7 + self.ixnet_gen._set_priority_field('field_desc', value) + self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with( + 'field_desc', + '-activeFieldChoice', 'true', + '-singleValue', '7') + def test__update_udp_port(self): with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item', return_value='field_desc'): @@ -433,10 +927,13 @@ class TestIxNextgen(unittest.TestCase): mock_update_add, \ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \ mock.patch.object(self.ixnet_gen, - '_get_config_element_by_flow_group_name', return_value='celm'): + '_get_config_element_by_flow_group_name', return_value='celm'), \ + mock.patch.object(self.ixnet_gen, '_update_ipv4_priority') as \ + mock_update_priority: self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS) self.assertEqual(4, len(mock_update_add.mock_calls)) + self.assertEqual(2, len(mock_update_priority.mock_calls)) def test_update_ip_packet_exception_no_config_element(self): with mock.patch.object(self.ixnet_gen, @@ -469,6 +966,9 @@ class TestIxNextgen(unittest.TestCase): 'outer_l3': { 'proto': 'unsupported', }, + 'outer_l4': { + 'seed': 1 + } }, } with mock.patch.object(self.ixnet_gen, @@ -524,3 +1024,34 @@ class TestIxNextgen(unittest.TestCase): self.assertIsNone(result) self.ixnet.getList.assert_called_once() self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count) + + def test__get_protocol_status(self): + self.ixnet.getAttribute.return_value = ['up'] + self.ixnet_gen._get_protocol_status('ipv4') + self.ixnet.getAttribute.assert_called_once_with('ipv4', + '-sessionStatus') + + @mock.patch.object(ixnet_api.IxNextgen, '_get_protocol_status') + def 
test_is_protocols_running(self, mock_ixnextgen_get_protocol_status): + mock_ixnextgen_get_protocol_status.return_value = ['up', 'up'] + result = self.ixnet_gen.is_protocols_running(['ethernet', 'ipv4']) + self.assertTrue(result) + + @mock.patch.object(ixnet_api.IxNextgen, '_get_protocol_status') + def test_is_protocols_stopped(self, mock_ixnextgen_get_protocol_status): + mock_ixnextgen_get_protocol_status.return_value = ['down', 'down'] + result = self.ixnet_gen.is_protocols_running(['ethernet', 'ipv4']) + self.assertFalse(result) + + def test_start_protocols(self): + self.ixnet_gen.start_protocols() + self.ixnet.execute.assert_called_once_with('startAllProtocols') + + def test_stop_protocols(self): + self.ixnet_gen.stop_protocols() + self.ixnet.execute.assert_called_once_with('stopAllProtocols') + + def test_get_vports(self): + self.ixnet_gen._ixnet.getRoot.return_value = 'root' + self.ixnet_gen.get_vports() + self.ixnet.getList.assert_called_once_with('root', 'vport') diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py index 0dc3e0579..d9244e31b 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_base.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py @@ -95,18 +95,18 @@ class TrafficProfileConfigTestCase(unittest.TestCase): def test__parse_rate(self): tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}} tp_config_obj = base.TrafficProfileConfig(tp_config) - self.assertEqual((100.0, 'fps'), tp_config_obj._parse_rate('100 ')) - self.assertEqual((200.5, 'fps'), tp_config_obj._parse_rate('200.5')) - self.assertEqual((300.8, 'fps'), tp_config_obj._parse_rate('300.8fps')) + self.assertEqual((100.0, 'fps'), tp_config_obj.parse_rate('100 ')) + self.assertEqual((200.5, 'fps'), tp_config_obj.parse_rate('200.5')) + self.assertEqual((300.8, 'fps'), tp_config_obj.parse_rate('300.8fps')) self.assertEqual((400.2, 'fps'), - 
tp_config_obj._parse_rate('400.2 fps')) - self.assertEqual((500.3, '%'), tp_config_obj._parse_rate('500.3%')) - self.assertEqual((600.1, '%'), tp_config_obj._parse_rate('600.1 %')) + tp_config_obj.parse_rate('400.2 fps')) + self.assertEqual((500.3, '%'), tp_config_obj.parse_rate('500.3%')) + self.assertEqual((600.1, '%'), tp_config_obj.parse_rate('600.1 %')) def test__parse_rate_exception(self): tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}} tp_config_obj = base.TrafficProfileConfig(tp_config) with self.assertRaises(exceptions.TrafficProfileRate): - tp_config_obj._parse_rate('100Fps') + tp_config_obj.parse_rate('100Fps') with self.assertRaises(exceptions.TrafficProfileRate): - tp_config_obj._parse_rate('100 kbps') + tp_config_obj.parse_rate('100 kbps') diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py index 1adab48bc..996360c2e 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,7 +17,6 @@ import mock from oslo_serialization import jsonutils -from yardstick.common import exceptions from yardstick.network_services.traffic_profile import http_ixload from yardstick.network_services.traffic_profile.http_ixload import \ join_non_strings, validate_non_string_sequence @@ -249,16 +248,20 @@ class TestIxLoadTrafficGen(unittest.TestCase): ixload = http_ixload.IXLOADHttpTest( jsonutils.dump_as_bytes(self.test_input)) - ixload.links_param = {"uplink_0": {"ip": {}}} + ixload.links_param = {"uplink_0": {"ip": {}, + "http_client": {}}} ixload.test = mock.Mock() ixload.test.communityList = community_list ixload.update_network_param = mock.Mock() + ixload.update_http_client_param = mock.Mock() ixload.update_config() ixload.update_network_param.assert_called_once_with(net_taraffic_0, {}) + ixload.update_http_client_param.assert_called_once_with(net_taraffic_0, + {}) def test_update_network_mac_address(self): ethernet = mock.MagicMock() @@ -284,7 +287,7 @@ class TestIxLoadTrafficGen(unittest.TestCase): net_traffic.network.getL1Plugin.return_value = Exception - with self.assertRaises(exceptions.InvalidRxfFile): + with self.assertRaises(http_ixload.InvalidRxfFile): ixload.update_network_mac_address(net_traffic, "auto") def test_update_network_address(self): @@ -308,7 +311,7 @@ class TestIxLoadTrafficGen(unittest.TestCase): net_traffic.network.getL1Plugin.return_value = Exception - with self.assertRaises(exceptions.InvalidRxfFile): + with self.assertRaises(http_ixload.InvalidRxfFile): ixload.update_network_address(net_traffic, "address", "gateway", "prefix") @@ -338,6 +341,57 @@ class TestIxLoadTrafficGen(unittest.TestCase): net_traffic, "mac") + def test_update_http_client_param(self): + net_traffic = mock.Mock() + + ixload = http_ixload.IXLOADHttpTest( + jsonutils.dump_as_bytes(self.test_input)) + + ixload.update_page_size = mock.Mock() + ixload.update_user_count = mock.Mock() + + param = {"page_object": "page_object", + "simulated_users": 
"simulated_users"} + + ixload.update_http_client_param(net_traffic, param) + + ixload.update_page_size.assert_called_once_with(net_traffic, + "page_object") + ixload.update_user_count.assert_called_once_with(net_traffic, + "simulated_users") + + def test_update_page_size(self): + activity = mock.MagicMock() + net_traffic = mock.Mock() + + ixload = http_ixload.IXLOADHttpTest( + jsonutils.dump_as_bytes(self.test_input)) + + net_traffic.activityList = [activity] + ix_http_command = activity.agent.actionList[0] + ixload.update_page_size(net_traffic, "page_object") + ix_http_command.config.assert_called_once_with( + pageObject="page_object") + + net_traffic.activityList = [] + with self.assertRaises(http_ixload.InvalidRxfFile): + ixload.update_page_size(net_traffic, "page_object") + + def test_update_user_count(self): + activity = mock.MagicMock() + net_traffic = mock.Mock() + + ixload = http_ixload.IXLOADHttpTest( + jsonutils.dump_as_bytes(self.test_input)) + + net_traffic.activityList = [activity] + ixload.update_user_count(net_traffic, 123) + activity.config.assert_called_once_with(userObjectiveValue=123) + + net_traffic.activityList = [] + with self.assertRaises(http_ixload.InvalidRxfFile): + ixload.update_user_count(net_traffic, 123) + @mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad') @mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils') def test_start_http_test(self, *args): diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py index 0759ecebd..ddd1828ae 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # 
you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ import copy import mock import unittest +import collections from yardstick.network_services.traffic_profile import ixia_rfc2544 from yardstick.network_services.traffic_profile import trex_traffic_profile @@ -486,7 +487,9 @@ class TestIXIARFC2544Profile(unittest.TestCase): result = r_f_c2544_profile._get_ixia_traffic_profile({}) self.assertDictEqual(result, expected) - def test__ixia_traffic_generate(self): + @mock.patch.object(ixia_rfc2544.IXIARFC2544Profile, + '_update_traffic_tracking_options') + def test__ixia_traffic_generate(self, mock_upd_tracking_opts): traffic_generator = mock.Mock( autospec=trex_traffic_profile.TrexProfile) traffic_generator.networks = { @@ -501,21 +504,69 @@ class TestIXIARFC2544Profile(unittest.TestCase): r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile( self.TRAFFIC_PROFILE) r_f_c2544_profile.rate = 100 - result = r_f_c2544_profile._ixia_traffic_generate(traffic, ixia_obj) + result = r_f_c2544_profile._ixia_traffic_generate(traffic, ixia_obj, + traffic_generator) self.assertIsNone(result) + mock_upd_tracking_opts.assert_called_once_with(traffic_generator) + + def test__update_traffic_tracking_options(self): + mock_traffic_gen = mock.Mock() + rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile._update_traffic_tracking_options(mock_traffic_gen) + mock_traffic_gen.update_tracking_options.assert_called_once() + + def test__get_framesize(self): + traffic_profile = { + 'uplink_0': {'ipv4': {'outer_l2': {'framesize': {'64B': 100}}}}, + 'downlink_0': {'ipv4': {'outer_l2': {'framesize': {'64B': 100}}}}, + 'uplink_1': {'ipv4': {'outer_l2': {'framesize': {'64B': 100}}}}, + 'downlink_1': {'ipv4': {'outer_l2': {'framesize': {'64B': 100}}}} + } + rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile.params = traffic_profile + result = rfc2544_profile._get_framesize() + self.assertEqual(result, 
'64B') + + def test__get_framesize_IMIX_traffic(self): + traffic_profile = { + 'uplink_0': {'ipv4': {'outer_l2': {'framesize': {'64B': 50, + '128B': 50}}}}, + 'downlink_0': {'ipv4': {'outer_l2': {'framesize': {'64B': 50, + '128B': 50}}}}, + 'uplink_1': {'ipv4': {'outer_l2': {'framesize': {'64B': 50, + '128B': 50}}}}, + 'downlink_1': {'ipv4': {'outer_l2': {'framesize': {'64B': 50, + '128B': 50}}}} + } + rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile.params = traffic_profile + result = rfc2544_profile._get_framesize() + self.assertEqual(result, 'IMIX') + + def test__get_framesize_zero_pkt_size_weight(self): + traffic_profile = { + 'uplink_0': {'ipv4': {'outer_l2': {'framesize': {'64B': 0}}}}, + 'downlink_0': {'ipv4': {'outer_l2': {'framesize': {'64B': 0}}}}, + 'uplink_1': {'ipv4': {'outer_l2': {'framesize': {'64B': 0}}}}, + 'downlink_1': {'ipv4': {'outer_l2': {'framesize': {'64B': 0}}}} + } + rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile.params = traffic_profile + result = rfc2544_profile._get_framesize() + self.assertEqual(result, '') def test_execute_traffic_first_run(self): rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) rfc2544_profile.first_run = True rfc2544_profile.rate = 50 + traffic_gen = mock.Mock() + traffic_gen.rfc_helper.iteration.value = 0 with mock.patch.object(rfc2544_profile, '_get_ixia_traffic_profile') \ as mock_get_tp, \ mock.patch.object(rfc2544_profile, '_ixia_traffic_generate') \ - as mock_tgenerate, \ - mock.patch.object(rfc2544_profile, 'update_traffic_profile') \ - as mock_update_tp: + as mock_tgenerate: mock_get_tp.return_value = 'fake_tprofile' - output = rfc2544_profile.execute_traffic(mock.ANY, + output = rfc2544_profile.execute_traffic(traffic_gen, ixia_obj=mock.ANY) self.assertTrue(output) @@ -524,20 +575,21 @@ class TestIXIARFC2544Profile(unittest.TestCase): self.assertEqual(0, rfc2544_profile.min_rate) 
mock_get_tp.assert_called_once() mock_tgenerate.assert_called_once() - mock_update_tp.assert_called_once() def test_execute_traffic_not_first_run(self): rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) rfc2544_profile.first_run = False rfc2544_profile.max_rate = 70 rfc2544_profile.min_rate = 0 + traffic_gen = mock.Mock() + traffic_gen.rfc_helper.iteration.value = 0 with mock.patch.object(rfc2544_profile, '_get_ixia_traffic_profile') \ as mock_get_tp, \ mock.patch.object(rfc2544_profile, '_ixia_traffic_generate') \ as mock_tgenerate: mock_get_tp.return_value = 'fake_tprofile' rfc2544_profile.full_profile = mock.ANY - output = rfc2544_profile.execute_traffic(mock.ANY, + output = rfc2544_profile.execute_traffic(traffic_gen, ixia_obj=mock.ANY) self.assertFalse(output) @@ -575,42 +627,53 @@ class TestIXIARFC2544Profile(unittest.TestCase): def test_get_drop_percentage_completed(self): samples = {'iface_name_1': - {'in_packets': 1000, 'out_packets': 1000, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25}, + {'InPackets': 1000, 'OutPackets': 1000, + 'InBytes': 64000, 'OutBytes': 64000, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, 'iface_name_2': - {'in_packets': 1005, 'out_packets': 1007, - 'Store-Forward_Avg_latency_ns': 23, - 'Store-Forward_Min_latency_ns': 13, - 'Store-Forward_Max_latency_ns': 28} + {'InPackets': 1005, 'OutPackets': 1007, + 'InBytes': 64320, 'OutBytes': 64448, + 'LatencyAvg': 23, + 'LatencyMin': 13, + 'LatencyMax': 28} } rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) - completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1) + rfc2544_profile.rate = 100.0 + rfc2544_profile._get_next_rate = mock.Mock(return_value=100.0) + rfc2544_profile._get_framesize = mock.Mock(return_value='64B') + completed, samples = rfc2544_profile.get_drop_percentage( + samples, 0, 1, 4, 0.1) self.assertTrue(completed) 
self.assertEqual(66.9, samples['TxThroughput']) self.assertEqual(66.833, samples['RxThroughput']) self.assertEqual(0.099651, samples['DropPercentage']) - self.assertEqual(21.5, samples['latency_ns_avg']) - self.assertEqual(14.0, samples['latency_ns_min']) - self.assertEqual(26.5, samples['latency_ns_max']) + self.assertEqual(21.5, samples['LatencyAvg']) + self.assertEqual(13.0, samples['LatencyMin']) + self.assertEqual(28.0, samples['LatencyMax']) + self.assertEqual(100.0, samples['Rate']) + self.assertEqual('64B', samples['PktSize']) def test_get_drop_percentage_over_drop_percentage(self): samples = {'iface_name_1': - {'in_packets': 1000, 'out_packets': 1000, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25}, + {'InPackets': 1000, 'OutPackets': 1000, + 'InBytes': 64000, 'OutBytes': 64000, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, 'iface_name_2': - {'in_packets': 1005, 'out_packets': 1007, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25} + {'InPackets': 1005, 'OutPackets': 1007, + 'InBytes': 64320, 'OutBytes': 64448, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25} } rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) rfc2544_profile.rate = 1000 + rfc2544_profile._get_next_rate = mock.Mock(return_value=50.0) completed, samples = rfc2544_profile.get_drop_percentage( - samples, 0, 0.05) + samples, 0, 0.05, 4, 0.1) self.assertFalse(completed) self.assertEqual(66.9, samples['TxThroughput']) self.assertEqual(66.833, samples['RxThroughput']) @@ -619,20 +682,23 @@ class TestIXIARFC2544Profile(unittest.TestCase): def test_get_drop_percentage_under_drop_percentage(self): samples = {'iface_name_1': - {'in_packets': 1000, 'out_packets': 1000, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25}, + {'InPackets': 1000, 'OutPackets': 1000, + 
'InBytes': 64000, 'OutBytes': 64000, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, 'iface_name_2': - {'in_packets': 1005, 'out_packets': 1007, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25} + {'InPackets': 1005, 'OutPackets': 1007, + 'InBytes': 64320, 'OutBytes': 64448, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25} } rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) rfc2544_profile.rate = 1000 + rfc2544_profile._get_next_rate = mock.Mock(return_value=50.0) completed, samples = rfc2544_profile.get_drop_percentage( - samples, 0.2, 1) + samples, 0.2, 1, 4, 0.1) self.assertFalse(completed) self.assertEqual(66.9, samples['TxThroughput']) self.assertEqual(66.833, samples['RxThroughput']) @@ -642,20 +708,23 @@ class TestIXIARFC2544Profile(unittest.TestCase): @mock.patch.object(ixia_rfc2544.LOG, 'info') def test_get_drop_percentage_not_flow(self, *args): samples = {'iface_name_1': - {'in_packets': 1000, 'out_packets': 0, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25}, + {'InPackets': 1000, 'OutPackets': 0, + 'InBytes': 64000, 'OutBytes': 0, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, 'iface_name_2': - {'in_packets': 1005, 'out_packets': 0, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25} + {'InPackets': 1005, 'OutPackets': 0, + 'InBytes': 64320, 'OutBytes': 0, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25} } rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) rfc2544_profile.rate = 1000 + rfc2544_profile._get_next_rate = mock.Mock(return_value=50.0) completed, samples = rfc2544_profile.get_drop_percentage( - samples, 0.2, 1) + samples, 0.2, 1, 4, 0.1) self.assertFalse(completed) self.assertEqual(0.0, samples['TxThroughput']) self.assertEqual(66.833, samples['RxThroughput']) @@ 
-664,21 +733,292 @@ class TestIXIARFC2544Profile(unittest.TestCase): def test_get_drop_percentage_first_run(self): samples = {'iface_name_1': - {'in_packets': 1000, 'out_packets': 1000, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25}, + {'InPackets': 1000, 'OutPackets': 1000, + 'InBytes': 64000, 'OutBytes': 64000, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, 'iface_name_2': - {'in_packets': 1005, 'out_packets': 1007, - 'Store-Forward_Avg_latency_ns': 20, - 'Store-Forward_Min_latency_ns': 15, - 'Store-Forward_Max_latency_ns': 25} + {'InPackets': 1005, 'OutPackets': 1007, + 'InBytes': 64320, 'OutBytes': 64448, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25} } rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile._get_next_rate = mock.Mock(return_value=50.0) completed, samples = rfc2544_profile.get_drop_percentage( - samples, 0, 1, first_run=True) + samples, 0, 1, 4, 0.1, first_run=True) self.assertTrue(completed) self.assertEqual(66.9, samples['TxThroughput']) self.assertEqual(66.833, samples['RxThroughput']) self.assertEqual(0.099651, samples['DropPercentage']) self.assertEqual(33.45, rfc2544_profile.rate) + + def test_get_drop_percentage_resolution(self): + rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile._get_next_rate = mock.Mock(return_value=0.1) + samples = {'iface_name_1': + {'InPackets': 1000, 'OutPackets': 1000, + 'InBytes': 64000, 'OutBytes': 64000, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, + 'iface_name_2': + {'InPackets': 1005, 'OutPackets': 1007, + 'InBytes': 64320, 'OutBytes': 64448, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25} + } + rfc2544_profile.rate = 0.19 + completed, _ = rfc2544_profile.get_drop_percentage( + samples, 0, 0.05, 4, 0.1) + self.assertTrue(completed) + + samples = {'iface_name_1': + {'InPackets': 1000, 'OutPackets': 1000, + 
'InBytes': 64000, 'OutBytes': 64000, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25}, + 'iface_name_2': + {'InPackets': 1005, 'OutPackets': 1007, + 'InBytes': 64320, 'OutBytes': 64448, + 'LatencyAvg': 20, + 'LatencyMin': 15, + 'LatencyMax': 25} + } + rfc2544_profile.rate = 0.5 + completed, _ = rfc2544_profile.get_drop_percentage( + samples, 0, 0.05, 4, 0.1) + self.assertFalse(completed) + + +class TestIXIARFC2544PppoeScenarioProfile(unittest.TestCase): + + TRAFFIC_PROFILE = { + "schema": "nsb:traffic_profile:0.1", + "name": "fixed", + "description": "Fixed traffic profile to run UDP traffic", + "traffic_profile": { + "traffic_type": "FixedTraffic", + "frame_rate": 100}, + 'uplink_0': {'ipv4': {'port': 'xe0', 'id': 1}}, + 'downlink_0': {'ipv4': {'port': 'xe2', 'id': 2}}, + 'uplink_1': {'ipv4': {'port': 'xe1', 'id': 3}}, + 'downlink_1': {'ipv4': {'port': 'xe2', 'id': 4}} + } + + def setUp(self): + self.ixia_tp = ixia_rfc2544.IXIARFC2544PppoeScenarioProfile( + self.TRAFFIC_PROFILE) + self.ixia_tp.rate = 100.0 + self.ixia_tp._get_next_rate = mock.Mock(return_value=50.0) + self.ixia_tp._get_framesize = mock.Mock(return_value='64B') + + def test___init__(self): + self.assertIsInstance(self.ixia_tp.full_profile, + collections.OrderedDict) + + def test__get_flow_groups_params(self): + expected_tp = collections.OrderedDict([ + ('uplink_0', {'ipv4': {'id': 1, 'port': 'xe0'}}), + ('downlink_0', {'ipv4': {'id': 2, 'port': 'xe2'}}), + ('uplink_1', {'ipv4': {'id': 3, 'port': 'xe1'}}), + ('downlink_1', {'ipv4': {'id': 4, 'port': 'xe2'}})]) + + self.ixia_tp._get_flow_groups_params() + self.assertDictEqual(self.ixia_tp.full_profile, expected_tp) + + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_flow_groups_params') + def test_update_traffic_profile(self, mock_get_flow_groups_params): + networks = { + 'uplink_0': 'data1', + 'downlink_0': 'data2', + 'uplink_1': 'data3', + 'downlink_1': 'data4' + } + ports = ['xe0', 'xe1', 'xe2', 'xe3'] + 
mock_traffic_gen = mock.Mock() + mock_traffic_gen.networks = networks + mock_traffic_gen.vnfd_helper.port_num.side_effect = ports + self.ixia_tp.update_traffic_profile(mock_traffic_gen) + mock_get_flow_groups_params.assert_called_once() + self.assertEqual(self.ixia_tp.ports, ports) + + def test__get_prio_flows_drop_percentage(self): + + input_stats = { + '0': { + 'InPackets': 50, + 'OutPackets': 100, + 'Store-Forward_Avg_latency_ns': 10, + 'Store-Forward_Min_latency_ns': 10, + 'Store-Forward_Max_latency_ns': 10}} + + result = self.ixia_tp._get_prio_flows_drop_percentage(input_stats) + self.assertIsNotNone(result['0'].get('DropPercentage')) + self.assertEqual(result['0'].get('DropPercentage'), 50.0) + + def test__get_prio_flows_drop_percentage_traffic_not_flowing(self): + input_stats = { + '0': { + 'InPackets': 0, + 'OutPackets': 0, + 'Store-Forward_Avg_latency_ns': 0, + 'Store-Forward_Min_latency_ns': 0, + 'Store-Forward_Max_latency_ns': 0}} + + result = self.ixia_tp._get_prio_flows_drop_percentage(input_stats) + self.assertIsNotNone(result['0'].get('DropPercentage')) + self.assertEqual(result['0'].get('DropPercentage'), 100) + + def test__get_summary_pppoe_subs_counters(self): + input_stats = { + 'xe0': { + 'OutPackets': 100, + 'SessionsUp': 4, + 'SessionsDown': 0, + 'SessionsNotStarted': 0, + 'SessionsTotal': 4}, + 'xe1': { + 'OutPackets': 100, + 'SessionsUp': 4, + 'SessionsDown': 0, + 'SessionsNotStarted': 0, + 'SessionsTotal': 4} + } + + expected_stats = { + 'SessionsUp': 8, + 'SessionsDown': 0, + 'SessionsNotStarted': 0, + 'SessionsTotal': 8 + } + + res = self.ixia_tp._get_summary_pppoe_subs_counters(input_stats) + self.assertDictEqual(res, expected_stats) + + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_prio_flows_drop_percentage') + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_summary_pppoe_subs_counters') + def test_get_drop_percentage(self, mock_get_pppoe_subs, + mock_sum_prio_drop_rate): + samples = 
{ + 'priority_stats': { + '0': { + 'InPackets': 100, + 'OutPackets': 100, + 'InBytes': 6400, + 'OutBytes': 6400, + 'LatencyAvg': 10, + 'LatencyMin': 10, + 'LatencyMax': 10}}, + 'xe0': { + 'InPackets': 100, + 'OutPackets': 100, + 'InBytes': 6400, + 'OutBytes': 6400, + 'LatencyAvg': 10, + 'LatencyMin': 10, + 'LatencyMax': 10}} + + mock_get_pppoe_subs.return_value = {'SessionsUp': 1} + mock_sum_prio_drop_rate.return_value = {'0': {'DropPercentage': 0.0}} + + self.ixia_tp._get_framesize = mock.Mock(return_value='64B') + status, res = self.ixia_tp.get_drop_percentage( + samples, tol_min=0.0, tolerance=0.0001, precision=0, + resolution=0.1, first_run=True) + self.assertIsNotNone(res.get('DropPercentage')) + self.assertIsNotNone(res.get('Priority')) + self.assertIsNotNone(res.get('SessionsUp')) + self.assertEqual(res['DropPercentage'], 0.0) + self.assertEqual(res['Rate'], 100.0) + self.assertEqual(res['PktSize'], '64B') + self.assertTrue(status) + mock_sum_prio_drop_rate.assert_called_once() + mock_get_pppoe_subs.assert_called_once() + + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_prio_flows_drop_percentage') + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_summary_pppoe_subs_counters') + def test_get_drop_percentage_failed_status(self, mock_get_pppoe_subs, + mock_sum_prio_drop_rate): + samples = { + 'priority_stats': { + '0': { + 'InPackets': 90, + 'OutPackets': 100, + 'InBytes': 5760, + 'OutBytes': 6400, + 'LatencyAvg': 10, + 'LatencyMin': 10, + 'LatencyMax': 10}}, + 'xe0': { + 'InPackets': 90, + 'OutPackets': 100, + 'InBytes': 5760, + 'OutBytes': 6400, + 'LatencyAvg': 10, + 'LatencyMin': 10, + 'LatencyMax': 10}} + + mock_get_pppoe_subs.return_value = {'SessionsUp': 1} + mock_sum_prio_drop_rate.return_value = {'0': {'DropPercentage': 0.0}} + + status, res = self.ixia_tp.get_drop_percentage( + samples, tol_min=0.0, tolerance=0.0001, precision=0, + resolution=0.1, first_run=True) + 
self.assertIsNotNone(res.get('DropPercentage')) + self.assertIsNotNone(res.get('Priority')) + self.assertIsNotNone(res.get('SessionsUp')) + self.assertEqual(res['DropPercentage'], 10.0) + self.assertFalse(status) + mock_sum_prio_drop_rate.assert_called_once() + mock_get_pppoe_subs.assert_called_once() + + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_prio_flows_drop_percentage') + @mock.patch.object(ixia_rfc2544.IXIARFC2544PppoeScenarioProfile, + '_get_summary_pppoe_subs_counters') + def test_get_drop_percentage_priority_flow_check(self, mock_get_pppoe_subs, + mock_sum_prio_drop_rate): + samples = { + 'priority_stats': { + '0': { + 'InPackets': 100, + 'OutPackets': 100, + 'InBytes': 6400, + 'OutBytes': 6400, + 'LatencyAvg': 10, + 'LatencyMin': 10, + 'LatencyMax': 10}}, + 'xe0': { + 'InPackets': 90, + 'OutPackets': 100, + 'InBytes': 5760, + 'OutBytes': 6400, + 'LatencyAvg': 10, + 'LatencyMin': 10, + 'LatencyMax': 10 + }} + + mock_get_pppoe_subs.return_value = {'SessionsUp': 1} + mock_sum_prio_drop_rate.return_value = {'0': {'DropPercentage': 0.0}} + + tc_rfc2544_opts = {'priority': '0', + 'allowed_drop_rate': '0.0001 - 0.0001'} + status, res = self.ixia_tp.get_drop_percentage( + samples, tol_min=15.0000, tolerance=15.0001, precision=0, + resolution=0.1, first_run=True, tc_rfc2544_opts=tc_rfc2544_opts) + self.assertIsNotNone(res.get('DropPercentage')) + self.assertIsNotNone(res.get('Priority')) + self.assertIsNotNone(res.get('SessionsUp')) + self.assertTrue(status) + mock_sum_prio_drop_rate.assert_called_once() + mock_get_pppoe_subs.assert_called_once() diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_landslide_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_landslide_profile.py new file mode 100644 index 000000000..afd550029 --- /dev/null +++ b/yardstick/tests/unit/network_services/traffic_profile/test_landslide_profile.py @@ -0,0 +1,136 @@ +# Copyright (c) 2018 Intel Corporation +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import unittest + +from yardstick.network_services.traffic_profile import landslide_profile + +TP_CONFIG = { + 'schema': "nsb:traffic_profile:0.1", + 'name': 'LandslideProfile', + 'description': 'Spirent Landslide traffic profile (Data Message Flow)', + 'traffic_profile': { + 'traffic_type': 'LandslideProfile' + }, + 'dmf_config': { + 'dmf': { + 'library': 'test', + 'name': 'Fireball UDP', + 'description': "Basic data flow using UDP/IP (Fireball DMF)", + 'keywords': 'UDP ', + 'dataProtocol': 'fb_udp', + 'burstCount': 1, + 'clientPort': { + 'clientPort': 2002, + 'isClientPortRange': 'false' + }, + 'serverPort': 2003, + 'connection': { + 'initiatingSide': 'Client', + 'disconnectSide': 'Client', + 'underlyingProtocol': 'none', + 'persistentConnection': 'false' + }, + 'protocolId': 0, + 'persistentConnection': 'false', + 'transactionRate': 8.0, + 'transactions': { + 'totalTransactions': 0, + 'retries': 0, + 'dataResponseTime': 60000, + 'packetSize': 64 + }, + 'segment': { + 'segmentSize': 64000, + 'maxSegmentSize': 0 + }, + 'size': { + 'sizeDistribution': 'Fixed', + 'sizeDeviation': 10 + }, + 'interval': { + 'intervalDistribution': 'Fixed', + 'intervalDeviation': 10 + }, + 'ipHeader': { + 'typeOfService': 0, + 'timeToLive': 64 + }, + 'tcpConnection': { + 'force3Way': 'false', + 'fixedRetryTime': 0, + 'maxPacketsToForceAck': 0 + }, + 'tcp': { + 'windowSize': 32768, + 'windowScaling': -1, + 'disableFinAckWait': 
'false' + }, + 'disconnectType': 'FIN', + 'slowStart': 'false', + 'connectOnly': 'false', + 'vtag': { + 'VTagMask': '0x0', + 'VTagValue': '0x0' + }, + 'sctpPayloadProtocolId': 0, + 'billingIncludeSyn': 'true', + 'billingIncludeSubflow': 'true', + 'billingRecordPerTransaction': 'false', + 'tcpPush': 'false', + 'hostDataExpansionRatio': 1 + } + } +} +DMF_OPTIONS = { + 'dmf': { + 'transactionRate': 5, + 'packetSize': 512, + 'burstCount': 1 + } +} + + +class TestLandslideProfile(unittest.TestCase): + + def test___init__(self): + ls_traffic_profile = landslide_profile.LandslideProfile(TP_CONFIG) + self.assertListEqual([TP_CONFIG["dmf_config"]], + ls_traffic_profile.dmf_config) + + def test___init__config_not_a_dict(self): + _tp_config = copy.deepcopy(TP_CONFIG) + _tp_config['dmf_config'] = [_tp_config['dmf_config']] + ls_traffic_profile = landslide_profile.LandslideProfile(_tp_config) + self.assertListEqual(_tp_config['dmf_config'], + ls_traffic_profile.dmf_config) + + def test_execute(self): + ls_traffic_profile = landslide_profile.LandslideProfile(TP_CONFIG) + self.assertIsNone(ls_traffic_profile.execute(None)) + + def test_update_dmf_options_dict(self): + ls_traffic_profile = landslide_profile.LandslideProfile(TP_CONFIG) + ls_traffic_profile.update_dmf(DMF_OPTIONS) + self.assertDictContainsSubset(DMF_OPTIONS['dmf'], + ls_traffic_profile.dmf_config[0]) + + def test_update_dmf_options_list(self): + ls_traffic_profile = landslide_profile.LandslideProfile(TP_CONFIG) + _dmf_options = copy.deepcopy(DMF_OPTIONS) + _dmf_options['dmf'] = [_dmf_options['dmf']] + ls_traffic_profile.update_dmf(_dmf_options) + self.assertTrue(all([x in ls_traffic_profile.dmf_config[0] + for x in DMF_OPTIONS['dmf']])) diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py index c09903377..f17656328 100644 --- 
a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py @@ -21,6 +21,8 @@ from yardstick.network_services.traffic_profile import prox_binsearch class TestProxBinSearchProfile(unittest.TestCase): + THEOR_MAX_THROUGHPUT = 0.00012340000000000002 + def setUp(self): self._mock_log_info = mock.patch.object(prox_binsearch.LOG, 'info') self.mock_log_info = self._mock_log_info.start() @@ -38,6 +40,12 @@ class TestProxBinSearchProfile(unittest.TestCase): return fail_tuple, {} return success_tuple, {} + def side_effect_func(arg1, arg2): + if arg1 == "confirmation": + return arg2 + else: + return {} + tp_config = { 'traffic_profile': { 'packet_sizes': [200], @@ -51,11 +59,13 @@ class TestProxBinSearchProfile(unittest.TestCase): fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4) traffic_generator = mock.MagicMock() - attrs1 = {'get.return_value' : 10} + attrs1 = {'get.return_value': 10} traffic_generator.scenario_helper.all_options.configure_mock(**attrs1) - attrs2 = {'__getitem__.return_value' : 10, 'get.return_value': 10} + attrs2 = {'__getitem__.return_value': 10, 'get.return_value': 10} + attrs3 = {'get.side_effect': side_effect_func} traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2) + traffic_generator.scenario_helper.scenario_cfg["options"].configure_mock(**attrs3) profile_helper = mock.MagicMock() profile_helper.run_test = target @@ -68,11 +78,11 @@ class TestProxBinSearchProfile(unittest.TestCase): self.assertEqual(round(profile.current_lower, 2), 74.69) self.assertEqual(round(profile.current_upper, 2), 76.09) - self.assertEqual(len(runs), 77) + self.assertEqual(len(runs), 7) # Result Samples inc theor_max result_tuple = {'Actual_throughput': 5e-07, - 'theor_max_throughput': 7.5e-07, + 'theor_max_throughput': self.THEOR_MAX_THROUGHPUT, 'PktSize': 200, 'Status': 'Result'} @@ -88,7 +98,7 @@ class 
TestProxBinSearchProfile(unittest.TestCase): "PktSize": 200, "RxThroughput": 7.5e-07, "Throughput": 7.5e-07, - "TxThroughput": 0.00012340000000000002, + "TxThroughput": self.THEOR_MAX_THROUGHPUT, "Status": 'Success'} calls = profile.queue.put(success_result_tuple) @@ -121,6 +131,12 @@ class TestProxBinSearchProfile(unittest.TestCase): return fail_tuple, {} return success_tuple, {} + def side_effect_func(arg1, _): + if arg1 == "confirmation": + return 2 + else: + return {} + tp_config = { 'traffic_profile': { 'packet_sizes': [200], @@ -138,7 +154,10 @@ class TestProxBinSearchProfile(unittest.TestCase): traffic_generator.scenario_helper.all_options.configure_mock(**attrs1) attrs2 = {'__getitem__.return_value': 0, 'get.return_value': 0} + attrs3 = {'get.side_effect': side_effect_func} + traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2) + traffic_generator.scenario_helper.scenario_cfg["options"].configure_mock(**attrs3) profile_helper = mock.MagicMock() profile_helper.run_test = target @@ -150,7 +169,7 @@ class TestProxBinSearchProfile(unittest.TestCase): profile.execute_traffic(traffic_generator) self.assertEqual(round(profile.current_lower, 2), 24.06) self.assertEqual(round(profile.current_upper, 2), 25.47) - self.assertEqual(len(runs), 7) + self.assertEqual(len(runs), 21) def test_execute_3(self): def target(*args, **_): @@ -186,8 +205,6 @@ class TestProxBinSearchProfile(unittest.TestCase): profile.lower_bound = 99.0 profile.execute_traffic(traffic_generator) - - # Result Samples result_tuple = {'Actual_throughput': 0, 'theor_max_throughput': 0, "Status": 'Result', "Next_Step": ''} profile.queue.put.assert_called_with(result_tuple) @@ -207,6 +224,7 @@ class TestProxBinSearchProfile(unittest.TestCase): raise RuntimeError(' '.join([str(args), str(runs)])) if args[2] > 75.0: return fail_tuple, {} + return success_tuple, {} tp_config = { @@ -226,6 +244,7 @@ class TestProxBinSearchProfile(unittest.TestCase): 
traffic_generator.scenario_helper.all_options.configure_mock(**attrs1) attrs2 = {'__getitem__.return_value': 0, 'get.return_value': 0} + traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2) profile_helper = mock.MagicMock() @@ -242,7 +261,7 @@ class TestProxBinSearchProfile(unittest.TestCase): # Result Samples inc theor_max result_tuple = {'Actual_throughput': 5e-07, - 'theor_max_throughput': 7.5e-07, + 'theor_max_throughput': self.THEOR_MAX_THROUGHPUT, 'PktSize': 200, "Status": 'Result'} @@ -258,7 +277,7 @@ class TestProxBinSearchProfile(unittest.TestCase): "PktSize": 200, "RxThroughput": 7.5e-07, "Throughput": 7.5e-07, - "TxThroughput": 0.00012340000000000002, + "TxThroughput": self.THEOR_MAX_THROUGHPUT, "Status": 'Success'} calls = profile.queue.put(success_result_tuple) diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py new file mode 100644 index 000000000..1d9eb0887 --- /dev/null +++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_irq.py @@ -0,0 +1,57 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import time + +import unittest +import mock + +from yardstick.network_services.traffic_profile import prox_irq + + +class TestProxIrqProfile(unittest.TestCase): + + def setUp(self): + self._mock_log_info = mock.patch.object(prox_irq.LOG, 'info') + self.mock_log_info = self._mock_log_info.start() + self.addCleanup(self._stop_mocks) + + def _stop_mocks(self): + self._mock_log_info.stop() + + @mock.patch.object(time, 'sleep') + def test_execute_1(self, *args): + tp_config = { + 'traffic_profile': { + }, + } + + traffic_generator = mock.MagicMock() + attrs1 = {'get.return_value' : 10} + traffic_generator.scenario_helper.all_options.configure_mock(**attrs1) + + attrs2 = {'__getitem__.return_value' : 10, 'get.return_value': 10} + traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2) + + profile_helper = mock.MagicMock() + + profile = prox_irq.ProxIrqProfile(tp_config) + profile.init(mock.MagicMock()) + profile._profile_helper = profile_helper + + profile.execute_traffic(traffic_generator) + profile.run_test() + is_ended_flag = profile.is_ended() + + self.assertFalse(is_ended_flag) + self.assertEqual(profile.lower_bound, 10.0) diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py index cf31cc27c..1593a0835 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import time import unittest import mock @@ -78,7 +79,8 @@ class TestProxProfile(unittest.TestCase): profile.init(queue) self.assertIs(profile.queue, queue) - def test_execute_traffic(self): + @mock.patch.object(time, 'sleep') + def test_execute_traffic(self, *args): packet_sizes = [ 10, 100, @@ -100,13 +102,13 @@ class TestProxProfile(unittest.TestCase): profile = ProxProfile(tp_config) - self.assertFalse(profile.done) + self.assertFalse(profile.done.is_set()) for _ in packet_sizes: with self.assertRaises(NotImplementedError): profile.execute_traffic(traffic_generator) self.assertIsNone(profile.execute_traffic(traffic_generator)) - self.assertTrue(profile.done) + self.assertTrue(profile.done.is_set()) def test_bounds_iterator(self): tp_config = { diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py index b8fbc6344..febcfe5da 100644 --- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py +++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py @@ -141,25 +141,25 @@ class TestRFC2544Profile(base.BaseUnitTestCase): port_pg_id, True) mock_stl_profile.assert_called_once_with(['stream1']) - def test__create_imix_data_mode_DIB(self): + def test__create_imix_data_mode_DIP(self): rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE) data = {'64B': 50, '128B': 50} self.assertEqual( {'64': 50.0, '128': 50.0}, rfc2544_profile._create_imix_data( - data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) + data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) data = {'64B': 1, '128b': 3} self.assertEqual( {'64': 25.0, '128': 75.0}, rfc2544_profile._create_imix_data( - data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) + data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) data = {} self.assertEqual( {}, rfc2544_profile._create_imix_data( - data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) + data, 
weight_mode=constants.DISTRIBUTION_IN_PACKETS)) - def test__create_imix_data_mode_DIP(self): + def test__create_imix_data_mode_DIB(self): rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE) data = {'64B': 25, '128B': 25, '512B': 25, '1518B': 25} byte_total = 64 * 25 + 128 * 25 + 512 * 25 + 1518 * 25 @@ -169,17 +169,17 @@ class TestRFC2544Profile(base.BaseUnitTestCase): '512': 512 * 25.0 * 100 / byte_total, '1518': 1518 * 25.0 * 100/ byte_total}, rfc2544_profile._create_imix_data( - data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) + data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) data = {} self.assertEqual( {}, rfc2544_profile._create_imix_data( - data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) + data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) data = {'64B': 100} self.assertEqual( {'64': 100.0}, rfc2544_profile._create_imix_data( - data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) + data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) def test__create_vm(self): packet = {'outer_l2': 'l2_definition'} @@ -248,41 +248,55 @@ class TestRFC2544Profile(base.BaseUnitTestCase): mock.call(percentage=float(25 * 35) / 100), mock.call(percentage=float(75 * 35) / 100)], any_order=True) - def test_get_drop_percentage(self): + @mock.patch.object(rfc2544.RFC2544Profile, '_get_framesize') + def test_get_drop_percentage(self, mock_get_framesize): rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE) + rfc2544_profile.iteration = 1 + mock_get_framesize.return_value = '64B' + samples = [ - {'xe1': {'tx_throughput_fps': 110, - 'rx_throughput_fps': 101, - 'out_packets': 2100, + {'xe1': {'out_packets': 2100, 'in_packets': 2010, + 'out_bytes': 134400, + 'in_bytes': 128640, 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 1)}, - 'xe2': {'tx_throughput_fps': 210, - 'rx_throughput_fps': 201, - 'out_packets': 4100, + 'xe2': {'out_packets': 4100, 'in_packets': 4010, + 'out_bytes': 262400, + 'in_bytes': 256640, 'timestamp': datetime.datetime(2000, 1, 
1, 1, 1, 1, 1)}}, - {'xe1': {'tx_throughput_fps': 156, - 'rx_throughput_fps': 108, - 'out_packets': 2110, + {'xe1': {'out_packets': 2110, 'in_packets': 2040, + 'out_bytes': 135040, + 'in_bytes': 130560, 'latency': 'Latency1', 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 31)}, - 'xe2': {'tx_throughput_fps': 253, - 'rx_throughput_fps': 215, - 'out_packets': 4150, + 'xe2': {'out_packets': 4150, 'in_packets': 4010, + 'out_bytes': 265600, + 'in_bytes': 256640, 'latency': 'Latency2', 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 31)}} ] completed, output = rfc2544_profile.get_drop_percentage( - samples, 0, 0, False) - expected = {'DropPercentage': 50.0, - 'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'}, + samples, 0, 0, False, 0.1) + expected = {'xe1': {'OutPackets': 10, + 'InPackets': 30, + 'OutBytes': 640, + 'InBytes': 1920}, + 'xe2': {'OutPackets': 50, + 'InPackets': 0, + 'OutBytes': 3200, + 'InBytes': 0}, + 'DropPercentage': 50.0, 'RxThroughput': 1000000.0, 'TxThroughput': 2000000.0, - 'CurrentDropPercentage': 50.0, + 'RxThroughputBps': 64000000.0, + 'TxThroughputBps': 128000000.0, 'Rate': 100.0, - 'Throughput': 1000000.0} + 'Iteration': 1, + 'PktSize': '64B', + 'Status': 'Failure'} self.assertEqual(expected, output) self.assertFalse(completed) diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_sip.py b/yardstick/tests/unit/network_services/traffic_profile/test_sip.py new file mode 100644 index 000000000..bf26ee44d --- /dev/null +++ b/yardstick/tests/unit/network_services/traffic_profile/test_sip.py @@ -0,0 +1,51 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import mock + +from yardstick.network_services.traffic_profile import sip + + +class TestSipProfile(unittest.TestCase): + + TRAFFIC_PROFILE = { + "schema": "nsb:traffic_profile:0.1", + "name": "sip", + "description": "Traffic profile to run sip", + "traffic_profile": { + "traffic_type": "SipProfile", + "frame_rate": 100, # pps + "duration": 10, + "enable_latency": False}} + + def setUp(self): + self.sip_profile = sip.SipProfile(self.TRAFFIC_PROFILE) + + def test___init__(self): + self.assertIsNone(self.sip_profile.generator) + + def test_execute_traffic(self): + self.sip_profile.generator = None + mock_traffic_generator = mock.Mock() + self.sip_profile.execute_traffic(mock_traffic_generator) + self.assertIsNotNone(self.sip_profile.generator) + + def test_is_ended_true(self): + self.sip_profile.generator = mock.Mock(return_value=True) + self.assertTrue(self.sip_profile.is_ended()) + + def test_is_ended_false(self): + self.sip_profile.generator = None + self.assertFalse(self.sip_profile.is_ended()) diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_vpp_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_vpp_rfc2544.py new file mode 100644 index 000000000..8ad17b547 --- /dev/null +++ b/yardstick/tests/unit/network_services/traffic_profile/test_vpp_rfc2544.py @@ -0,0 +1,890 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +from trex_stl_lib import trex_stl_client +from trex_stl_lib import trex_stl_packet_builder_scapy +from trex_stl_lib import trex_stl_streams + +from yardstick.common import constants +from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \ + MultipleLossRatioSearch +from yardstick.network_services.helpers.vpp_helpers.ndr_pdr_result import \ + NdrPdrResult +from yardstick.network_services.helpers.vpp_helpers.receive_rate_interval import \ + ReceiveRateInterval +from yardstick.network_services.helpers.vpp_helpers.receive_rate_measurement import \ + ReceiveRateMeasurement +from yardstick.network_services.traffic_profile import base as tp_base +from yardstick.network_services.traffic_profile import rfc2544, vpp_rfc2544 +from yardstick.network_services.traffic_profile.rfc2544 import PortPgIDMap +from yardstick.tests.unit import base + + +class TestVppRFC2544Profile(base.BaseUnitTestCase): + TRAFFIC_PROFILE = { + "schema": "isb:traffic_profile:0.1", + "name": "fixed", + "description": "Fixed traffic profile to run UDP traffic", + "traffic_profile": { + "traffic_type": "FixedTraffic", + "duration": 30, + "enable_latency": True, + "frame_rate": 100, + "intermediate_phases": 2, + "lower_bound": 1.0, + "step_interval": 0.5, + "test_precision": 0.1, + "upper_bound": 100.0}} + + TRAFFIC_PROFILE_MAX_RATE = { + "schema": "isb:traffic_profile:0.1", + "name": "fixed", + "description": "Fixed traffic profile to run UDP traffic", + "traffic_profile": { + "traffic_type": "FixedTraffic", + "duration": 30, + "enable_latency": True, 
+ "frame_rate": 10000, + "intermediate_phases": 2, + "lower_bound": 1.0, + "step_interval": 0.5, + "test_precision": 0.1, + "upper_bound": 100.0}} + + PROFILE = { + "description": "Traffic profile to run RFC2544 latency", + "downlink_0": { + "ipv4": { + "id": 2, + "outer_l2": { + "framesize": { + "1024B": "0", + "1280B": "0", + "128B": "0", + "1400B": "0", + "1500B": "0", + "1518B": "0", + "256B": "0", + "373b": "0", + "512B": "0", + "570B": "0", + "64B": "100" + } + }, + "outer_l3v4": { + "count": "1", + "dstip4": "10.0.0.0-10.0.0.100", + "proto": 61, + "srcip4": "20.0.0.0-20.0.0.100" + } + } + }, + "name": "rfc2544", + "schema": "nsb:traffic_profile:0.1", + "traffic_profile": { + "duration": 30, + "enable_latency": True, + "frame_rate": 100, + "intermediate_phases": 2, + "lower_bound": 1.0, + "step_interval": 0.5, + "test_precision": 0.1, + "traffic_type": "VppRFC2544Profile", + "upper_bound": 100.0 + }, + "uplink": { + "ipv4": { + "id": 1, + "outer_l2": { + "framesize": { + "1024B": "0", + "1280B": "0", + "128B": "0", + "1400B": "0", + "1500B": "0", + "1518B": "0", + "256B": "0", + "373B": "0", + "512B": "0", + "570B": "0", + "64B": "100" + } + }, + "outer_l3v4": { + "count": "10", + "dstip4": "20.0.0.0-20.0.0.100", + "proto": 61, + "srcip4": "10.0.0.0-10.0.0.100" + } + } + } + } + + def test___init__(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + self.assertEqual(vpp_rfc2544_profile.max_rate, + vpp_rfc2544_profile.rate) + self.assertEqual(0, vpp_rfc2544_profile.min_rate) + self.assertEqual(2, vpp_rfc2544_profile.number_of_intermediate_phases) + self.assertEqual(30, vpp_rfc2544_profile.duration) + self.assertEqual(0.1, vpp_rfc2544_profile.precision) + self.assertEqual(1.0, vpp_rfc2544_profile.lower_bound) + self.assertEqual(100.0, vpp_rfc2544_profile.upper_bound) + self.assertEqual(0.5, vpp_rfc2544_profile.step_interval) + self.assertEqual(True, vpp_rfc2544_profile.enable_latency) + + def test_init_traffic_params(self): + 
vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + mock_generator = mock.MagicMock() + mock_generator.rfc2544_helper.latency = True + mock_generator.rfc2544_helper.tolerance_low = 0.0 + mock_generator.rfc2544_helper.tolerance_high = 0.005 + mock_generator.scenario_helper.all_options = { + "vpp_config": { + "max_rate": 14880000 + } + } + vpp_rfc2544_profile.init_traffic_params(mock_generator) + self.assertEqual(0.0, vpp_rfc2544_profile.tolerance_low) + self.assertEqual(0.005, vpp_rfc2544_profile.tolerance_high) + self.assertEqual(14880000, vpp_rfc2544_profile.max_rate) + self.assertEqual(True, vpp_rfc2544_profile.enable_latency) + + def test_calculate_frame_size(self): + imix = {'40B': 7, '576B': 4, '1500B': 1} + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + self.assertEqual((4084 / 12, 12), + vpp_rfc2544_profile.calculate_frame_size(imix)) + + def test_calculate_frame_size_empty(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + self.assertEqual((64, 100), + vpp_rfc2544_profile.calculate_frame_size(None)) + + def test_calculate_frame_size_error(self): + imix = {'40B': -7, '576B': 4, '1500B': 1} + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + self.assertEqual((64, 100), + vpp_rfc2544_profile.calculate_frame_size(imix)) + + def test__gen_payload(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + self.assertIsNotNone(vpp_rfc2544_profile._gen_payload(4)) + + def test_register_generator(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + mock_generator = mock.MagicMock() + mock_generator.rfc2544_helper.latency = True + mock_generator.rfc2544_helper.tolerance_low = 0.0 + mock_generator.rfc2544_helper.tolerance_high = 0.005 + mock_generator.scenario_helper.all_options = { + "vpp_config": { + "max_rate": 14880000 + } + } + 
vpp_rfc2544_profile.register_generator(mock_generator) + self.assertEqual(0.0, vpp_rfc2544_profile.tolerance_low) + self.assertEqual(0.005, vpp_rfc2544_profile.tolerance_high) + self.assertEqual(14880000, vpp_rfc2544_profile.max_rate) + self.assertEqual(True, vpp_rfc2544_profile.enable_latency) + + def test_stop_traffic(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + mock_generator = mock.Mock() + vpp_rfc2544_profile.stop_traffic(traffic_generator=mock_generator) + mock_generator.client.stop.assert_called_once() + mock_generator.client.reset.assert_called_once() + mock_generator.client.remove_all_streams.assert_called_once() + + def test_execute_traffic(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.init_queue(mock.MagicMock()) + vpp_rfc2544_profile.params = { + 'downlink_0': 'profile1', + 'uplink_0': 'profile2'} + mock_generator = mock.MagicMock() + mock_generator.networks = { + 'downlink_0': ['xe0', 'xe1'], + 'uplink_0': ['xe2', 'xe3'], + 'uplink_1': ['xe2', 'xe3']} + mock_generator.port_num.side_effect = [10, 20, 30, 40] + mock_generator.rfc2544_helper.correlated_traffic = False + + with mock.patch.object(vpp_rfc2544_profile, 'create_profile') as \ + mock_create_profile: + vpp_rfc2544_profile.execute_traffic( + traffic_generator=mock_generator) + mock_create_profile.assert_has_calls([ + mock.call('profile1', 10), + mock.call('profile1', 20), + mock.call('profile2', 30), + mock.call('profile2', 40)]) + mock_generator.client.add_streams.assert_has_calls([ + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[30]), + mock.call(mock.ANY, ports=[40])]) + + def test_execute_traffic_correlated_traffic(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.init_queue(mock.MagicMock()) + vpp_rfc2544_profile.params = { + 'downlink_0': 'profile1', + 'uplink_0': 'profile2'} + 
mock_generator = mock.MagicMock() + mock_generator.networks = { + 'downlink_0': ['xe0', 'xe1'], + 'uplink_0': ['xe2', 'xe3']} + mock_generator.port_num.side_effect = [10, 20, 30, 40] + mock_generator.rfc2544_helper.correlated_traffic = True + + with mock.patch.object(vpp_rfc2544_profile, 'create_profile') as \ + mock_create_profile: + vpp_rfc2544_profile.execute_traffic( + traffic_generator=mock_generator) + mock_create_profile.assert_has_calls([ + mock.call('profile2', 10), + mock.call('profile2', 20)]) + mock_generator.client.add_streams.assert_has_calls([ + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20])]) + + def test_execute_traffic_max_rate(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE_MAX_RATE) + vpp_rfc2544_profile.init_queue(mock.MagicMock()) + vpp_rfc2544_profile.pkt_size = 64 + vpp_rfc2544_profile.params = { + 'downlink_0': 'profile1', + 'uplink_0': 'profile2'} + mock_generator = mock.MagicMock() + mock_generator.networks = { + 'downlink_0': ['xe0', 'xe1'], + 'uplink_0': ['xe2', 'xe3']} + mock_generator.port_num.side_effect = [10, 20, 30, 40] + mock_generator.rfc2544_helper.correlated_traffic = False + + with 
mock.patch.object(vpp_rfc2544_profile, 'create_profile') as \ + mock_create_profile: + vpp_rfc2544_profile.execute_traffic( + traffic_generator=mock_generator) + mock_create_profile.assert_has_calls([ + mock.call('profile1', 10), + mock.call('profile1', 20), + mock.call('profile2', 30), + mock.call('profile2', 40)]) + mock_generator.client.add_streams.assert_has_calls([ + mock.call(mock.ANY, ports=[10]), + mock.call(mock.ANY, ports=[20]), + mock.call(mock.ANY, ports=[30]), + mock.call(mock.ANY, ports=[40])]) + + @mock.patch.object(trex_stl_streams, 'STLProfile') + def test_create_profile(self, mock_stl_profile): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + port = mock.ANY + profile_data = {'packetid_1': {'outer_l2': {'framesize': 'imix_info'}}} + with mock.patch.object(vpp_rfc2544_profile, 'calculate_frame_size') as \ + mock_calculate_frame_size, \ + mock.patch.object(vpp_rfc2544_profile, '_create_imix_data') as \ + mock_create_imix, \ + mock.patch.object(vpp_rfc2544_profile, '_create_vm') as \ + mock_create_vm, \ + mock.patch.object(vpp_rfc2544_profile, + '_create_single_stream') as \ + mock_create_single_stream: + mock_calculate_frame_size.return_value = 64, 100 + mock_create_imix.return_value = 'imix_data' + mock_create_single_stream.return_value = ['stream1'] + vpp_rfc2544_profile.create_profile(profile_data, port) + + mock_create_imix.assert_called_once_with('imix_info') + mock_create_vm.assert_called_once_with( + {'outer_l2': {'framesize': 'imix_info'}}) + mock_create_single_stream.assert_called_once_with(port, 'imix_data', + 100) + mock_stl_profile.assert_called_once_with(['stream1']) + + @mock.patch.object(trex_stl_streams, 'STLProfile') + def test_create_profile_max_rate(self, mock_stl_profile): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE_MAX_RATE) + port = mock.ANY + profile_data = {'packetid_1': {'outer_l2': {'framesize': 'imix_info'}}} + with mock.patch.object(vpp_rfc2544_profile, 
'calculate_frame_size') as \ + mock_calculate_frame_size, \ + mock.patch.object(vpp_rfc2544_profile, '_create_imix_data') as \ + mock_create_imix, \ + mock.patch.object(vpp_rfc2544_profile, '_create_vm') as \ + mock_create_vm, \ + mock.patch.object(vpp_rfc2544_profile, + '_create_single_stream') as \ + mock_create_single_stream: + mock_calculate_frame_size.return_value = 64, 100 + mock_create_imix.return_value = 'imix_data' + mock_create_single_stream.return_value = ['stream1'] + vpp_rfc2544_profile.create_profile(profile_data, port) + + mock_create_imix.assert_called_once_with('imix_info', 'mode_DIP') + mock_create_vm.assert_called_once_with( + {'outer_l2': {'framesize': 'imix_info'}}) + mock_create_single_stream.assert_called_once_with(port, 'imix_data', + 100) + mock_stl_profile.assert_called_once_with(['stream1']) + + def test__create_imix_data_mode_DIP(self): + rfc2544_profile = vpp_rfc2544.VppRFC2544Profile(self.TRAFFIC_PROFILE) + data = {'64B': 50, '128B': 50} + self.assertEqual( + {'64': 50.0, '128': 50.0}, + rfc2544_profile._create_imix_data( + data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) + data = {'64B': 1, '128b': 3} + self.assertEqual( + {'64': 25.0, '128': 75.0}, + rfc2544_profile._create_imix_data( + data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) + data = {} + self.assertEqual( + {}, + rfc2544_profile._create_imix_data( + data, weight_mode=constants.DISTRIBUTION_IN_PACKETS)) + + def test__create_imix_data_mode_DIB(self): + rfc2544_profile = vpp_rfc2544.VppRFC2544Profile(self.TRAFFIC_PROFILE) + data = {'64B': 25, '128B': 25, '512B': 25, '1518B': 25} + byte_total = 64 * 25 + 128 * 25 + 512 * 25 + 1518 * 25 + self.assertEqual( + {'64': 64 * 25.0 * 100 / byte_total, + '128': 128 * 25.0 * 100 / byte_total, + '512': 512 * 25.0 * 100 / byte_total, + '1518': 1518 * 25.0 * 100 / byte_total}, + rfc2544_profile._create_imix_data( + data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) + data = {} + self.assertEqual( + {}, + 
rfc2544_profile._create_imix_data( + data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) + data = {'64B': 100} + self.assertEqual( + {'64': 100.0}, + rfc2544_profile._create_imix_data( + data, weight_mode=constants.DISTRIBUTION_IN_BYTES)) + + def test__create_vm(self): + packet = {'outer_l2': 'l2_definition'} + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + with mock.patch.object(vpp_rfc2544_profile, '_set_outer_l2_fields') as \ + mock_l2_fileds: + vpp_rfc2544_profile._create_vm(packet) + mock_l2_fileds.assert_called_once_with('l2_definition') + + @mock.patch.object(trex_stl_packet_builder_scapy, 'STLPktBuilder', + return_value='packet') + def test__create_single_packet(self, mock_pktbuilder): + size = 128 + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.ether_packet = mock.MagicMock() + vpp_rfc2544_profile.ip_packet = mock.MagicMock() + vpp_rfc2544_profile.udp_packet = mock.MagicMock() + vpp_rfc2544_profile.trex_vm = 'trex_vm' + # base_pkt = ( + # vpp_rfc2544_profile.ether_packet / vpp_rfc2544_profile.ip_packet / + # vpp_rfc2544_profile.udp_packet) + # pad = (size - len(base_pkt)) * 'x' + output = vpp_rfc2544_profile._create_single_packet(size=size) + self.assertEqual(mock_pktbuilder.call_count, 2) + # mock_pktbuilder.assert_called_once_with(pkt=base_pkt / pad, + # vm='trex_vm') + self.assertEqual(output, ('packet', 'packet')) + + def test__set_outer_l3v4_fields(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + outer_l3v4 = self.PROFILE[ + tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l3v4'] + outer_l3v4['proto'] = 'tcp' + self.assertIsNone( + vpp_rfc2544_profile._set_outer_l3v4_fields(outer_l3v4)) + + @mock.patch.object(trex_stl_streams, 'STLFlowLatencyStats') + @mock.patch.object(trex_stl_streams, 'STLTXCont') + @mock.patch.object(trex_stl_client, 'STLStream') + def test__create_single_stream(self, mock_stream, mock_txcont, + mock_latency): + 
imix_data = {'64': 25, '512': 75} + mock_stream.side_effect = ['stream1', 'stream2', 'stream3', 'stream4'] + mock_txcont.side_effect = ['txcont1', 'txcont2', 'txcont3', 'txcont4'] + mock_latency.side_effect = ['latency1', 'latency2'] + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.port_pg_id = rfc2544.PortPgIDMap() + vpp_rfc2544_profile.port_pg_id.add_port(10) + with mock.patch.object(vpp_rfc2544_profile, '_create_single_packet') as \ + mock_create_single_packet: + mock_create_single_packet.return_value = 64, 100 + output = vpp_rfc2544_profile._create_single_stream(10, imix_data, + 100, 0.0) + self.assertEqual(['stream1', 'stream2', 'stream3', 'stream4'], output) + mock_latency.assert_has_calls([ + mock.call(pg_id=1), mock.call(pg_id=2)]) + mock_txcont.assert_has_calls([ + mock.call(percentage=25 * 100 / 100), + mock.call(percentage=75 * 100 / 100)], any_order=True) + + @mock.patch.object(trex_stl_streams, 'STLFlowLatencyStats') + @mock.patch.object(trex_stl_streams, 'STLTXCont') + @mock.patch.object(trex_stl_client, 'STLStream') + def test__create_single_stream_max_rate(self, mock_stream, mock_txcont, + mock_latency): + imix_data = {'64': 25, '512': 75} + mock_stream.side_effect = ['stream1', 'stream2', 'stream3', 'stream4'] + mock_txcont.side_effect = ['txcont1', 'txcont2', 'txcont3', 'txcont4'] + mock_latency.side_effect = ['latency1', 'latency2'] + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE_MAX_RATE) + vpp_rfc2544_profile.pkt_size = 64 + vpp_rfc2544_profile.port_pg_id = rfc2544.PortPgIDMap() + vpp_rfc2544_profile.port_pg_id.add_port(1) + with mock.patch.object(vpp_rfc2544_profile, '_create_single_packet') as \ + mock_create_single_packet: + mock_create_single_packet.return_value = 64, 100 + output = vpp_rfc2544_profile._create_single_stream(1, imix_data, + 100, 0.0) + self.assertEqual(['stream1', 'stream2', 'stream3', 'stream4'], output) + mock_latency.assert_has_calls([ + 
mock.call(pg_id=1), mock.call(pg_id=2)]) + mock_txcont.assert_has_calls([ + mock.call(pps=int(25 * 100 / 100)), + mock.call(pps=int(75 * 100 / 100))], any_order=True) + + @mock.patch.object(trex_stl_streams, 'STLFlowLatencyStats') + @mock.patch.object(trex_stl_streams, 'STLTXCont') + @mock.patch.object(trex_stl_client, 'STLStream') + def test__create_single_stream_mlr_search(self, mock_stream, mock_txcont, + mock_latency): + imix_data = {'64': 25, '512': 75} + mock_stream.side_effect = ['stream1', 'stream2', 'stream3', 'stream4'] + mock_txcont.side_effect = ['txcont1', 'txcont2', 'txcont3', 'txcont4'] + mock_latency.side_effect = ['latency1', 'latency2'] + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.max_rate = 14880000 + vpp_rfc2544_profile.port_pg_id = rfc2544.PortPgIDMap() + vpp_rfc2544_profile.port_pg_id.add_port(10) + with mock.patch.object(vpp_rfc2544_profile, '_create_single_packet') as \ + mock_create_single_packet: + mock_create_single_packet.return_value = 64, 100 + output = vpp_rfc2544_profile._create_single_stream(10, imix_data, + 100, 0.0) + self.assertEqual(['stream1', 'stream2', 'stream3', 'stream4'], output) + mock_latency.assert_has_calls([ + mock.call(pg_id=1), mock.call(pg_id=2)]) + mock_txcont.assert_has_calls([ + mock.call(pps=25 * 100 / 100), + mock.call(pps=75 * 100 / 100)], any_order=True) + + def test_binary_search_with_optimized(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.pkt_size = 64 + vpp_rfc2544_profile.init_queue(mock.MagicMock()) + mock_generator = mock.MagicMock() + mock_generator.vnfd_helper.interfaces = [ + {"name": "xe0"}, {"name": "xe0"} + ] + + vpp_rfc2544_profile.ports = [0, 1] + vpp_rfc2544_profile.port_pg_id = PortPgIDMap() + vpp_rfc2544_profile.port_pg_id.add_port(0) + vpp_rfc2544_profile.port_pg_id.add_port(1) + vpp_rfc2544_profile.profiles = mock.MagicMock() + vpp_rfc2544_profile.test_data = 
mock.MagicMock() + vpp_rfc2544_profile.queue = mock.MagicMock() + + with mock.patch.object(MultipleLossRatioSearch, 'measure') as \ + mock_measure, \ + mock.patch.object(MultipleLossRatioSearch, 'ndrpdr') as \ + mock_ndrpdr: + measured_low = ReceiveRateMeasurement(1, 14880000, 14879927, 0) + measured_high = ReceiveRateMeasurement(1, 14880000, 14879927, 0) + measured_low.latency = ['1000/3081/3962', '500/3149/3730'] + measured_high.latency = ['1000/3081/3962', '500/3149/3730'] + starting_interval = ReceiveRateInterval(measured_low, + measured_high) + starting_result = NdrPdrResult(starting_interval, + starting_interval) + mock_measure.return_value = ReceiveRateMeasurement(1, 14880000, + 14879927, 0) + mock_ndrpdr.return_value = MultipleLossRatioSearch.ProgressState( + starting_result, 2, 30, 0.005, 0.0, + 4857361, 4977343) + + result_samples = vpp_rfc2544_profile.binary_search_with_optimized( + traffic_generator=mock_generator, duration=30, + timeout=720, + test_data={}) + + expected = {'Result_NDR_LOWER': {'bandwidth_total_Gbps': 9.999310944, + 'rate_total_pps': 14879927.0}, + 'Result_NDR_UPPER': {'bandwidth_total_Gbps': 9.999310944, + 'rate_total_pps': 14879927.0}, + 'Result_NDR_packets_lost': {'packet_loss_ratio': 0.0, + 'packets_lost': 0.0}, + 'Result_PDR_LOWER': {'bandwidth_total_Gbps': 9.999310944, + 'rate_total_pps': 14879927.0}, + 'Result_PDR_UPPER': {'bandwidth_total_Gbps': 9.999310944, + 'rate_total_pps': 14879927.0}, + 'Result_PDR_packets_lost': {'packet_loss_ratio': 0.0, + 'packets_lost': 0.0}, + 'Result_stream0_NDR_LOWER': {'avg_latency': 3081.0, + 'max_latency': 3962.0, + 'min_latency': 1000.0}, + 'Result_stream0_PDR_LOWER': {'avg_latency': 3081.0, + 'max_latency': 3962.0, + 'min_latency': 1000.0}, + 'Result_stream1_NDR_LOWER': {'avg_latency': 3149.0, + 'max_latency': 3730.0, + 'min_latency': 500.0}, + 'Result_stream1_PDR_LOWER': {'avg_latency': 3149.0, + 'max_latency': 3730.0, + 'min_latency': 500.0}} + self.assertEqual(expected, result_samples) + + 
def test_binary_search(self): + vpp_rfc2544_profile = vpp_rfc2544.VppRFC2544Profile( + self.TRAFFIC_PROFILE) + vpp_rfc2544_profile.pkt_size = 64 + vpp_rfc2544_profile.init_queue(mock.MagicMock()) + mock_generator = mock.MagicMock() + mock_generator.vnfd_helper.interfaces = [ + {"name": "xe0"}, {"name": "xe1"} + ] + stats = { + "0": { + "ibytes": 55549120, + "ierrors": 0, + "ipackets": 867955, + "obytes": 55549696, + "oerrors": 0, + "opackets": 867964, + "rx_bps": 104339032.0, + "rx_bps_L1": 136944984.0, + "rx_pps": 203787.2, + "rx_util": 1.36944984, + "tx_bps": 134126008.0, + "tx_bps_L1": 176040392.0, + "tx_pps": 261964.9, + "tx_util": 1.7604039200000001 + }, + "1": { + "ibytes": 55549696, + "ierrors": 0, + "ipackets": 867964, + "obytes": 55549120, + "oerrors": 0, + "opackets": 867955, + "rx_bps": 134119648.0, + "rx_bps_L1": 176032032.0, + "rx_pps": 261952.4, + "rx_util": 1.76032032, + "tx_bps": 104338192.0, + "tx_bps_L1": 136943872.0, + "tx_pps": 203785.5, + "tx_util": 1.36943872 + }, + "flow_stats": { + "1": { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 6400, + "1": 0, + "total": 6400 + }, + "rx_pkts": { + "0": 100, + "1": 0, + "total": 100 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 0, + "1": 6400, + "total": 6400 + }, + "tx_pkts": { + "0": 0, + "1": 100, + "total": 100 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + "2": { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 0, + "1": 6464, + "total": 6464 + }, + "rx_pkts": { + "0": 0, + "1": 101, + "total": 101 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, 
+ "tx_bytes": { + "0": 6464, + "1": 0, + "total": 6464 + }, + "tx_pkts": { + "0": 101, + "1": 0, + "total": 101 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + "global": { + "rx_err": { + "0": 0, + "1": 0 + }, + "tx_err": { + "0": 0, + "1": 0 + } + } + }, + "global": { + "bw_per_core": 45.6, + "cpu_util": 0.1494, + "queue_full": 0, + "rx_bps": 238458672.0, + "rx_cpu_util": 4.751e-05, + "rx_drop_bps": 0.0, + "rx_pps": 465739.6, + "tx_bps": 238464208.0, + "tx_pps": 465750.4 + }, + "latency": { + "1": { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 63.375, + "histogram": { + "20": 1, + "30": 18, + "40": 12, + "50": 10, + "60": 12, + "70": 11, + "80": 6, + "90": 10, + "100": 20 + }, + "jitter": 23, + "last_max": 122, + "total_max": 123, + "total_min": 20 + } + }, + "2": { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 74, + "histogram": { + "60": 20, + "70": 10, + "80": 3, + "90": 4, + "100": 64 + }, + "jitter": 6, + "last_max": 83, + "total_max": 135, + "total_min": 60 + } + }, + "global": { + "bad_hdr": 0, + "old_flow": 0 + } + }, + "total": { + "ibytes": 111098816, + "ierrors": 0, + "ipackets": 1735919, + "obytes": 111098816, + "oerrors": 0, + "opackets": 1735919, + "rx_bps": 238458680.0, + "rx_bps_L1": 312977016.0, + "rx_pps": 465739.6, + "rx_util": 3.1297701599999996, + "tx_bps": 238464200.0, + "tx_bps_L1": 312984264.0, + "tx_pps": 465750.4, + "tx_util": 3.12984264 + } + } + samples = { + "xe0": { + "in_packets": 867955, + "latency": { + "2": { + "avg_latency": 74.0, + "max_latency": 135.0, + "min_latency": 60.0 + } + }, + "out_packets": 867964, + "rx_throughput_bps": 104339032.0, + "rx_throughput_fps": 203787.2, + "tx_throughput_bps": 134126008.0, + "tx_throughput_fps": 261964.9 + }, + "xe1": { + "in_packets": 867964, + "latency": { + "1": { + "avg_latency": 63.375, 
+ "max_latency": 123.0, + "min_latency": 20.0 + } + }, + "out_packets": 867955, + "rx_throughput_bps": 134119648.0, + "rx_throughput_fps": 261952.4, + "tx_throughput_bps": 104338192.0, + "tx_throughput_fps": 203785.5 + } + } + + mock_generator.loss = 0 + mock_generator.sent = 2169700 + mock_generator.send_traffic_on_tg = mock.Mock(return_value=stats) + mock_generator.generate_samples = mock.Mock(return_value=samples) + + result_samples = vpp_rfc2544_profile.binary_search( + traffic_generator=mock_generator, duration=30, + tolerance_value=0.005, + test_data={}) + + expected = {'Result_theor_max_throughput': 134126008.0, + 'xe0': {'Result_Actual_throughput': 104339032.0}, + 'xe1': {'Result_Actual_throughput': 134119648.0}} + self.assertEqual(expected, result_samples) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml b/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml index fb1be35c1..09c22ad9e 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ scenarios: traffic_profile: "../../traffic_profiles/ipv4_throughput_vpe.yaml" topology: vpe_vnf_topology.yaml nodes: - tg__1: trafficgen_1.yardstick - vnf__1: vnf.yardstick + tg__0: trafficgen_0.yardstick + vnf__0: vnf_0.yardstick tc_options: rfc2544: allowed_drop_rate: 0.8 - 1 diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py index 69a5fb484..12bb42f20 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -237,7 +237,7 @@ class TestAclApproxVnf(unittest.TestCase): def test___init__(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) self.assertIsNone(acl_approx_vnf._vnf_process) @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") @@ -247,7 +247,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {acl_approx_vnf.name: "mock"} } @@ -270,7 +270,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.q_in = mock.MagicMock() acl_approx_vnf.q_out = mock.MagicMock() acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -282,7 +282,7 @@ class 
TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.q_in = mock.MagicMock() acl_approx_vnf.q_out = mock.MagicMock() acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -303,7 +303,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf._build_config = mock.MagicMock() acl_approx_vnf.queue_wrapper = mock.MagicMock() acl_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg @@ -323,7 +323,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf.deploy_helper = mock.MagicMock() acl_approx_vnf.resource_helper = mock.MagicMock() acl_approx_vnf._build_config = mock.MagicMock() @@ -341,7 +341,7 @@ class TestAclApproxVnf(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id') + acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd) acl_approx_vnf._vnf_process = mock.MagicMock() acl_approx_vnf._vnf_process.terminate = mock.Mock() acl_approx_vnf.used_drivers = {"01:01.0": "i40e", diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_agnostic_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_agnostic_vnf.py new file mode 100644 index 000000000..7c7fe5955 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_agnostic_vnf.py @@ -0,0 +1,68 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from yardstick.network_services.vnf_generic.vnf import agnostic_vnf + +NAME = 'vnf__0' + +VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [{ + 'id': 'AgnosticVnf', # NSB python class mapping + 'name': 'AgnosticVnf', + 'short-name': 'AgnosticVnf', + 'description': 'AgnosticVnf', + 'mgmt-interface': { + 'vdu-id': 'vepcvnf-baremetal', + 'user': 'user', + 'password': 'password', + 'ip': 'ip' + }, + 'vdu': [{ + 'id': 'vepcvnf-baremetal', + 'name': 'vepc-vnf-baremetal', + 'description': 'vAgnosticVnf workload', + 'external-interface': []}], + 'benchmark': { + 'kpi': []}}]}} + + +class TestAgnosticVnf(unittest.TestCase): + + def setUp(self): + self.vnfd = VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.agnostic_vnf = agnostic_vnf.AgnosticVnf(NAME, self.vnfd) + + def test_instantiate(self): + self.assertIsNone(self.agnostic_vnf.instantiate({}, {})) + + def test_wait_for_instantiate(self): + self.assertIsNone(self.agnostic_vnf.wait_for_instantiate()) + + def test_terminate(self): + self.assertIsNone(self.agnostic_vnf.terminate()) + + def test_scale(self): + self.assertIsNone(self.agnostic_vnf.scale()) + + def test_collect_kpi(self): + self.assertIsNone(self.agnostic_vnf.collect_kpi()) + + def test_start_collect(self): + self.assertIsNone(self.agnostic_vnf.start_collect()) + + def test_stop_collect(self): + self.assertIsNone(self.agnostic_vnf.stop_collect()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py index 
2ea13a5e0..1a72e042b 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,15 +14,10 @@ import multiprocessing import os -import uuid import mock -from oslo_config import cfg -import oslo_messaging import unittest -from yardstick.common import messaging -from yardstick.common.messaging import payloads from yardstick.network_services.vnf_generic.vnf import base from yardstick.ssh import SSH from yardstick.tests.unit import base as ut_base @@ -145,24 +140,6 @@ VNFD = { } -class _DummyGenericTrafficGen(base.GenericTrafficGen): # pragma: no cover - - def run_traffic(self, *args): - pass - - def terminate(self): - pass - - def collect_kpi(self): - pass - - def instantiate(self, *args): - pass - - def scale(self, flavor=''): - pass - - class FileAbsPath(object): def __init__(self, module_file): super(FileAbsPath, self).__init__() @@ -235,8 +212,7 @@ class TestGenericVNF(ut_base.BaseUnitTestCase): """Make sure that the abstract class cannot be instantiated""" with self.assertRaises(TypeError) as exc: # pylint: disable=abstract-class-instantiated - base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0], - 'task_id') + base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0]) msg = ("Can't instantiate abstract class GenericVNF with abstract " "methods collect_kpi, instantiate, scale, start_collect, " @@ -253,96 +229,8 @@ class GenericTrafficGenTestCase(ut_base.BaseUnitTestCase): name = 'vnf1' with self.assertRaises(TypeError) as exc: # pylint: disable=abstract-class-instantiated - base.GenericTrafficGen(name, vnfd, 'task_id') + base.GenericTrafficGen(name, vnfd) msg = ("Can't instantiate abstract class GenericTrafficGen 
with " "abstract methods collect_kpi, instantiate, run_traffic, " "scale, terminate") self.assertEqual(msg, str(exc.exception)) - - def test_get_mq_producer_id(self): - vnfd = {'benchmark': {'kpi': mock.ANY}, - 'vdu': [{'external-interface': 'ext_int'}] - } - tg = _DummyGenericTrafficGen('name', vnfd, 'task_id') - tg._mq_producer = mock.Mock() - tg._mq_producer.id = 'fake_id' - self.assertEqual('fake_id', tg.get_mq_producer_id()) - - -class TrafficGeneratorProducerTestCase(ut_base.BaseUnitTestCase): - - @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(cfg, 'CONF') - def test__init(self, mock_config, mock_transport, mock_rpcclient, - mock_target): - _id = uuid.uuid1().int - tg_producer = base.TrafficGeneratorProducer(_id) - mock_transport.assert_called_once_with( - mock_config, url='rabbit://yardstick:yardstick@localhost:5672/') - mock_target.assert_called_once_with(topic=messaging.TOPIC_TG, - fanout=True, - server=messaging.SERVER) - mock_rpcclient.assert_called_once_with('rpc_transport', 'rpc_target') - self.assertEqual(_id, tg_producer._id) - self.assertEqual(messaging.TOPIC_TG, tg_producer._topic) - - @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(payloads, 'TrafficGeneratorPayload', - return_value='tg_pload') - def test_tg_method_started(self, mock_tg_payload, *args): - tg_producer = base.TrafficGeneratorProducer(uuid.uuid1().int) - with mock.patch.object(tg_producer, 'send_message') as mock_message: - tg_producer.tg_method_started(version=10) - - mock_message.assert_called_once_with(messaging.TG_METHOD_STARTED, - 'tg_pload') - mock_tg_payload.assert_called_once_with(version=10, iteration=0, - 
kpi={}) - - @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(payloads, 'TrafficGeneratorPayload', - return_value='tg_pload') - def test_tg_method_finished(self, mock_tg_payload, *args): - tg_producer = base.TrafficGeneratorProducer(uuid.uuid1().int) - with mock.patch.object(tg_producer, 'send_message') as mock_message: - tg_producer.tg_method_finished(version=20) - - mock_message.assert_called_once_with(messaging.TG_METHOD_FINISHED, - 'tg_pload') - mock_tg_payload.assert_called_once_with(version=20, iteration=0, - kpi={}) - - @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target') - @mock.patch.object(oslo_messaging, 'RPCClient') - @mock.patch.object(oslo_messaging, 'get_rpc_transport', - return_value='rpc_transport') - @mock.patch.object(payloads, 'TrafficGeneratorPayload', - return_value='tg_pload') - def test_tg_method_iteration(self, mock_tg_payload, *args): - tg_producer = base.TrafficGeneratorProducer(uuid.uuid1().int) - with mock.patch.object(tg_producer, 'send_message') as mock_message: - tg_producer.tg_method_iteration(100, version=30, kpi={'k': 'v'}) - - mock_message.assert_called_once_with(messaging.TG_METHOD_ITERATION, - 'tg_pload') - mock_tg_payload.assert_called_once_with(version=30, iteration=100, - kpi={'k': 'v'}) - - -class GenericVNFConsumerTestCase(ut_base.BaseUnitTestCase): - - def test__init(self): - endpoints = 'endpoint_1' - _ids = [uuid.uuid1().int] - gvnf_consumer = base.GenericVNFConsumer(_ids, endpoints) - self.assertEqual(_ids, gvnf_consumer._ids) - self.assertEqual([endpoints], gvnf_consumer._endpoints) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py index 32f5b758d..d0672dcfd 100644 --- 
a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -318,14 +318,14 @@ class TestCgnaptApproxVnf(unittest.TestCase): def test___init__(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) self.assertIsNone(cgnapt_approx_vnf._vnf_process) @mock.patch.object(process, 'check_if_process_failed') @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') def test_collect_kpi(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) cgnapt_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {cgnapt_approx_vnf.name: "mock"} } @@ -349,7 +349,7 @@ class TestCgnaptApproxVnf(unittest.TestCase): @mock.patch.object(time, 'sleep') def test_vnf_execute_command(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) cgnapt_approx_vnf.q_in = mock.Mock() cgnapt_approx_vnf.q_out = mock.Mock() cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -357,7 +357,7 @@ class TestCgnaptApproxVnf(unittest.TestCase): def test_get_stats(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) with mock.patch.object(cgnapt_approx_vnf, 'vnf_execute') as mock_exec: 
mock_exec.return_value = 'output' self.assertEqual('output', cgnapt_approx_vnf.get_stats()) @@ -366,7 +366,7 @@ class TestCgnaptApproxVnf(unittest.TestCase): def test_run_vcgnapt(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) cgnapt_approx_vnf.ssh_helper = mock.Mock() cgnapt_approx_vnf.setup_helper = mock.Mock() with mock.patch.object(cgnapt_approx_vnf, '_build_config'), \ @@ -379,7 +379,7 @@ class TestCgnaptApproxVnf(unittest.TestCase): @mock.patch.object(ctx_base.Context, 'get_context_from_server') def test_instantiate(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) cgnapt_approx_vnf.deploy_helper = mock.MagicMock() cgnapt_approx_vnf.resource_helper = mock.MagicMock() cgnapt_approx_vnf._build_config = mock.MagicMock() @@ -396,7 +396,7 @@ class TestCgnaptApproxVnf(unittest.TestCase): def test__vnf_up_post(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] self.scenario_cfg['options'][name]['napt'] = 'static' - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) cgnapt_approx_vnf.vnf_execute = mock.Mock() cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg with mock.patch.object(cgnapt_approx_vnf, 'setup_helper') as \ @@ -407,6 +407,6 @@ class TestCgnaptApproxVnf(unittest.TestCase): def test__vnf_up_post_short(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id') + cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd) cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg cgnapt_approx_vnf._vnf_up_post() diff --git 
a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_epc_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_epc_vnf.py new file mode 100644 index 000000000..b1bef2e39 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_epc_vnf.py @@ -0,0 +1,92 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import unittest + +from yardstick.network_services.vnf_generic.vnf import epc_vnf + +NAME = 'vnf__0' + +VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [{ + 'id': 'EPCVnf', # NSB python class mapping + 'name': 'EPCVnf', + 'short-name': 'EPCVnf', + 'description': 'EPCVnf', + 'mgmt-interface': { + 'vdu-id': 'vepcvnf-baremetal', + 'user': 'user', # Value filled by vnfdgen + 'password': 'password', # Value filled by vnfdgen + 'ip': 'ip' # Value filled by vnfdgen + }, + 'vdu': [{ + 'id': 'vepcvnf-baremetal', + 'name': 'vepc-vnf-baremetal', + 'description': 'vEPCVnf workload', + 'external-interface': []}], + 'benchmark': { + 'kpi': []}}]}} + + +class TestEPCVnf(unittest.TestCase): + + def setUp(self): + self.vnfd = VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.epc_vnf = epc_vnf.EPCVnf(NAME, self.vnfd) + + def test___init__(self, *args): + _epc_vnf = epc_vnf.EPCVnf(NAME, self.vnfd) + for x in {'user', 'password', 'ip'}: + self.assertEqual(self.vnfd['mgmt-interface'][x], + _epc_vnf.vnfd_helper.mgmt_interface[x]) + self.assertEqual(NAME, _epc_vnf.name) + self.assertEqual([], 
_epc_vnf.kpi) + self.assertEqual({}, _epc_vnf.config) + self.assertFalse(_epc_vnf.runs_traffic) + + def test___init__missing_ip(self, *args): + _vnfd = copy.deepcopy(self.vnfd) + _vnfd['mgmt-interface'].pop('ip') + _epc_vnf = epc_vnf.EPCVnf(NAME, _vnfd) + for x in {'user', 'password'}: + self.assertEqual(_vnfd['mgmt-interface'][x], + _epc_vnf.vnfd_helper.mgmt_interface[x]) + self.assertNotIn('ip', _epc_vnf.vnfd_helper.mgmt_interface) + self.assertEqual(NAME, _epc_vnf.name) + self.assertEqual([], _epc_vnf.kpi) + self.assertEqual({}, _epc_vnf.config) + self.assertFalse(_epc_vnf.runs_traffic) + + def test_instantiate(self): + self.assertIsNone(self.epc_vnf.instantiate({}, {})) + + def test_wait_for_instantiate(self): + self.assertIsNone(self.epc_vnf.wait_for_instantiate()) + + def test_terminate(self): + self.assertIsNone(self.epc_vnf.terminate()) + + def test_scale(self): + self.assertIsNone(self.epc_vnf.scale()) + + def test_collect_kpi(self): + self.assertIsNone(self.epc_vnf.collect_kpi()) + + def test_start_collect(self): + self.assertIsNone(self.epc_vnf.start_collect()) + + def test_stop_collect(self): + self.assertIsNone(self.epc_vnf.stop_collect()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_ipsec_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_ipsec_vnf.py new file mode 100644 index 000000000..00dc4a5d1 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_ipsec_vnf.py @@ -0,0 +1,2151 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from multiprocessing import Process + +import mock + +from yardstick.benchmark.contexts import base as ctx_base +from yardstick.common import utils +from yardstick.network_services.helpers import cpu +from yardstick.network_services.nfvi.resource import ResourceProfile +from yardstick.network_services.vnf_generic.vnf import ipsec_vnf, vpp_helpers +from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper +from yardstick.network_services.vnf_generic.vnf.ipsec_vnf import CryptoAlg, \ + IntegAlg, VipsecApproxSetupEnvHelper +from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import \ + mock_ssh + +SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' + +NAME = 'vnf__1' + + +class TestCryptoAlg(unittest.TestCase): + + def test__init__(self): + encr_alg = CryptoAlg.AES_GCM_128 + self.assertEqual('aes-gcm-128', encr_alg.alg_name) + self.assertEqual('AES-GCM', encr_alg.scapy_name) + self.assertEqual(20, encr_alg.key_len) + + +class TestIntegAlg(unittest.TestCase): + + def test__init__(self): + auth_alg = IntegAlg.AES_GCM_128 + self.assertEqual('aes-gcm-128', auth_alg.alg_name) + self.assertEqual('AES-GCM', auth_alg.scapy_name) + self.assertEqual(20, auth_alg.key_len) + + +@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process") +class TestVipsecApproxVnf(unittest.TestCase): + VNFD = {'vnfd:vnfd-catalog': + {'vnfd': + [{ + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "connection-point": [ + { + "name": "xe0", + "type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "VPP IPsec", + "id": "VipsecApproxVnf", + "mgmt-interface": { + "ip": "10.10.10.101", + "password": "r00t", + "user": "root", + "vdu-id": "ipsecvnf-baremetal" + }, + "name": "IpsecVnf", + "short-name": "IpsecVnf", + "vdu": [ + { + 
"description": "VPP Ipsec", + "external-interface": [ + { + "name": "xe0", + "virtual-interface": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:ff:06.0" + }, + "vnfd-connection-point-ref": "xe0" + }, + { + "name": "xe1", + "virtual-interface": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe1", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + "local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:ff:07.0" + }, + "vnfd-connection-point-ref": "xe1" + } + ], + "id": "ipsecvnf-baremetal", + "name": "ipsecvnf-baremetal", + "routing_table": [] + } + ] + } + ]}} + + VNFD_ERROR = {'vnfd:vnfd-catalog': + {'vnfd': + [{ + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "connection-point": [ + { + "name": "xe0", + 
"type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "VPP IPsec", + "id": "VipsecApproxVnf", + "mgmt-interface": { + "ip": "10.10.10.101", + "password": "r00t", + "user": "root", + "vdu-id": "ipsecvnf-baremetal" + }, + "name": "IpsecVnf", + "short-name": "IpsecVnf", + "vdu": [ + { + "description": "VPP Ipsec", + "external-interface": [ + { + "name": "xe0", + "virtual-interface": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "peer_name": "tg__0", + "vld_id": "uplink_1", + "vpci": "0000:ff:06.0" + }, + "vnfd-connection-point-ref": "xe0" + }, + { + "name": "xe1", + "virtual-interface": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + "local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:ff:07.0" + }, + 
"vnfd-connection-point-ref": "xe1" + } + ], + "id": "ipsecvnf-baremetal", + "name": "ipsecvnf-baremetal", + "routing_table": [] + } + ] + } + ]}} + + scenario_cfg = { + "nodes": { + "tg__0": "trafficgen.yardstick-5486cc2f", + "vnf__0": "vnf0.yardstick-5486cc2f", + "vnf__1": "vnf1.yardstick-5486cc2f" + }, + "options": { + "flow": { + "count": 1, + "dst_ip": [ + "20.0.0.0-20.0.0.100" + ], + "src_ip": [ + "10.0.0.0-10.0.0.100" + ] + }, + "framesize": { + "downlink": { + "64B": 100 + }, + "uplink": { + "64B": 100 + } + }, + "rfc2544": { + "allowed_drop_rate": "0.0 - 0.005" + }, + "tg__0": { + "collectd": { + "interval": 1 + }, + "queues_per_port": 7 + }, + "traffic_type": 4, + "vnf__0": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vnf__1": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vpp_config": { + "crypto_algorithms": "aes-gcm", + "tunnel": 1 + } + }, + "runner": { + "duration": 500, + "interval": 10, + "object": + "yardstick.benchmark.scenarios.networking.vnf_generic.NetworkServiceTestCase", + "output_config": { + "DEFAULT": { + "debug": "False", + "dispatcher": [ + "influxdb" + ] + }, + "dispatcher_file": { + "debug": "False", + "dispatcher": "influxdb", + "file_path": "/tmp/yardstick.out" + }, + "dispatcher_http": { + "debug": "False", + "dispatcher": "influxdb", + "target": "http://127.0.0.1:8000/results", + "timeout": "20" + }, + "dispatcher_influxdb": { + "db_name": "yardstick", + "debug": "False", + "dispatcher": "influxdb", + "password": "r00t", + "target": "http://192.168.100.3:8086", + "timeout": "20", + "username": "root" + }, + "nsb": { + "bin_path": "/opt/nsb_bin", + "debug": "False", + "dispatcher": "influxdb", + "trex_client_lib": "/opt/nsb_bin/trex_client/stl", + "trex_path": "/opt/nsb_bin/trex/scripts" + } + }, + 
"runner_id": 1105, + "type": "Duration" + }, + "task_id": "5486cc2f-d4d3-4feb-b0df-5e0bcd584c9e", + "task_path": "samples/vnf_samples/nsut/ipsec", + "tc": "tc_baremetal_rfc2544_ipv4_1flow_sw_aesgcm_4cores_64B_trex", + "topology": "vpp-tg-topology-2.yaml", + "traffic_profile": "../../traffic_profiles/ipv4_throughput_latency_vpp.yaml", + "type": "NSPerf" + } + + context_cfg = { + "networks": {}, + "nodes": { + "tg__0": { + "VNF model": "../../vnf_descriptors/tg_vpp_tpl.yaml", + "ctx_type": "Node", + "interfaces": { + "xe0": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:00:06.0" + }, + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "xe1": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "192.168.101.2", + "dst_mac": "90:e2:ba:7c:41:a9", + "ifname": "xe1", + "local_ip": "192.168.101.1", + "local_mac": "90:e2:ba:7c:30:e9", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "192.168.101.1", + "dst_mac": "90:e2:ba:7c:30:e9", + "ifname": "xe0", + "local_ip": "192.168.101.2", + "local_mac": "90:e2:ba:7c:41:a9", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "tg__0", + "vld_id": "downlink_0", + "vpci": "0000:00:06.0" + }, + "peer_name": "vnf__1", + 
"vld_id": "downlink_0", + "vpci": "0000:81:00.1" + } + }, + "ip": "10.10.10.10", + "member-vnf-index": "1", + "name": "trafficgen.yardstick-5486cc2f", + "password": "r00t", + "port": 22, + "role": "TrafficGen", + "user": "root", + "username": "root", + "vnfd-id-ref": "tg__0" + }, + "vnf__0": { + "VNF model": "../../vnf_descriptors/vpp_vnfd.yaml", + "ctx_type": "Node", + "interfaces": { + "xe0": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:00:06.0" + }, + "xe1": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe1", + "peer_intf": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + "local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + } + }, + "ip": "10.10.10.101", + "member-vnf-index": "2", + "name": "vnf0.yardstick-5486cc2f", + 
"password": "r00t", + "port": 22, + "role": "VirtualNetworkFunction", + "user": "root", + "username": "root", + "vnfd-id-ref": "vnf__0" + }, + "vnf__1": { + "VNF model": "../../vnf_descriptors/vpp_vnfd.yaml", + "ctx_type": "Node", + "interfaces": { + "xe0": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.101.1", + "dst_mac": "90:e2:ba:7c:30:e9", + "ifname": "xe0", + "local_ip": "192.168.101.2", + "local_mac": "90:e2:ba:7c:41:a9", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_intf": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "192.168.101.2", + "dst_mac": "90:e2:ba:7c:41:a9", + "ifname": "xe1", + "local_ip": "192.168.101.1", + "local_mac": "90:e2:ba:7c:30:e9", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__1", + "vld_id": "downlink_0", + "vpci": "0000:81:00.1" + }, + "peer_name": "tg__0", + "vld_id": "downlink_0", + "vpci": "0000:00:06.0" + }, + "xe1": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + "local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_intf": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe1", + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + } + }, + "ip": "10.10.10.102", + "member-vnf-index": "3", + "name": "vnf1.yardstick-5486cc2f", + "password": "r00t", + "port": 22, + "role": "VirtualNetworkFunction", + "user": "root", + "username": "root", + "vnfd-id-ref": "vnf__1" + } + 
} + } + + def test___init__(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + self.assertIsNone(vipsec_vnf._vnf_process) + + @mock.patch(SSH_HELPER) + def test__run(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + vipsec_vnf._build_config = mock.MagicMock() + vipsec_vnf.setup_helper.kill_vnf = mock.MagicMock() + vipsec_vnf.setup_helper.create_ipsec_tunnels = mock.MagicMock() + vipsec_vnf.queue_wrapper = mock.MagicMock() + vipsec_vnf.scenario_helper.scenario_cfg = self.scenario_cfg + vipsec_vnf.vnf_cfg = {'lb_config': 'SW', + 'lb_count': 1, + 'worker_config': '1C/1T', + 'worker_threads': 1} + vipsec_vnf.all_options = {'traffic_type': '4', + 'topology': 'nsb_test_case.yaml'} + vipsec_vnf._run() + # vipsec_vnf.setup_helper.ssh_helper.execute.assert_called_once() + + @mock.patch(SSH_HELPER) + def test_wait_for_instantiate(self, ssh, *args): + mock_ssh(ssh) + + mock_process = mock.Mock(autospec=Process) + mock_process.is_alive.return_value = True + mock_process.exitcode = 432 + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + vipsec_vnf.resource_helper.resource = mock.MagicMock() + vipsec_vnf.setup_helper = mock.MagicMock() + vipsec_vnf.setup_helper.check_status.return_value = True + vipsec_vnf._vnf_process = mock_process + vipsec_vnf.WAIT_TIME = 0 + self.assertEqual(vipsec_vnf.wait_for_instantiate(), 432) + + @mock.patch(SSH_HELPER) + def test_wait_for_instantiate_crash(self, ssh, *args): + mock_ssh(ssh) + + mock_process = mock.Mock(autospec=Process) + mock_process.is_alive.return_value = False + mock_process.exitcode = 432 + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + vipsec_vnf.resource_helper.resource = mock.MagicMock() + vipsec_vnf.setup_helper = mock.MagicMock() + 
vipsec_vnf.setup_helper.check_status.return_value = False + vipsec_vnf._vnf_process = mock_process + vipsec_vnf.WAIT_TIME = 0 + vipsec_vnf.WAIT_TIME_FOR_SCRIPT = 0 + + with self.assertRaises(RuntimeError) as raised: + vipsec_vnf.wait_for_instantiate() + + self.assertIn('VNF process died', str(raised.exception)) + + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') + @mock.patch.object(ipsec_vnf.VipsecApproxSetupEnvHelper, + 'get_vpp_statistics', + return_value={'packets_in': 0, 'packets_fwd': 0, + 'packets_dropped': 0}) + @mock.patch(SSH_HELPER) + def test_collect_kpi(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + vipsec_vnf.scenario_helper.scenario_cfg = { + 'nodes': {vipsec_vnf.name: "mock"} + } + result = { + 'collect_stats': {'packets_in': 0, 'packets_fwd': 0, + 'packets_dropped': 0}, + 'physical_node': 'mock_node' + } + self.assertEqual(result, vipsec_vnf.collect_kpi()) + + @mock.patch.object(utils, 'find_relative_file') + @mock.patch( + "yardstick.network_services.vnf_generic.vnf.sample_vnf.Context") + @mock.patch(SSH_HELPER) + def test_instantiate(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + vipsec_vnf.deploy_helper = mock.MagicMock() + vipsec_vnf.resource_helper = mock.MagicMock() + vipsec_vnf._build_config = mock.MagicMock() + vipsec_vnf.WAIT_TIME = 0 + self.scenario_cfg.update({"nodes": {"vnf__1": ""}}) + self.assertIsNone(vipsec_vnf.instantiate(self.scenario_cfg, + self.context_cfg)) + + @mock.patch.object(ipsec_vnf.VipsecApproxSetupEnvHelper, 'kill_vnf', + return_value='') + @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") + @mock.patch(SSH_HELPER) + def test_terminate(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vipsec_vnf = 
ipsec_vnf.VipsecApproxVnf(NAME, vnfd) + vipsec_vnf._vnf_process = mock.MagicMock() + vipsec_vnf._vnf_process.terminate = mock.Mock() + self.assertIsNone(vipsec_vnf.terminate()) + + +class TestVipsecApproxSetupEnvHelper(unittest.TestCase): + ALL_OPTIONS = { + "flow": { + "count": 1, + "dst_ip": [ + "20.0.0.0-20.0.0.100" + ], + "src_ip": [ + "10.0.0.0-10.0.0.100" + ] + }, + "framesize": { + "downlink": { + "64B": 100 + }, + "uplink": { + "64B": 100 + } + }, + "rfc2544": { + "allowed_drop_rate": "0.0 - 0.005" + }, + "tg__0": { + "collectd": { + "interval": 1 + }, + "queues_per_port": 7 + }, + "traffic_type": 4, + "vnf__0": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vnf__1": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vpp_config": { + "crypto_algorithms": "aes-gcm", + "tunnel": 1 + } + } + + ALL_OPTIONS_CBC_ALGORITHMS = { + "flow": { + "count": 1, + "dst_ip": [ + "20.0.0.0-20.0.0.100" + ], + "src_ip": [ + "10.0.0.0-10.0.0.100" + ] + }, + "framesize": { + "downlink": { + "64B": 100 + }, + "uplink": { + "64B": 100 + } + }, + "rfc2544": { + "allowed_drop_rate": "0.0 - 0.005" + }, + "tg__0": { + "collectd": { + "interval": 1 + }, + "queues_per_port": 7 + }, + "traffic_type": 4, + "vnf__0": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vnf__1": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vpp_config": { + "crypto_algorithms": "cbc-sha1", + "tunnel": 1 + } + } + + ALL_OPTIONS_ERROR = { + "flow_error": { + "count": 1, + "dst_ip": [ + "20.0.0.0-20.0.0.100" + ], + "src_ip": [ + "10.0.0.0-10.0.0.100" + ] + }, 
+ "framesize": { + "downlink": { + "64B": 100 + }, + "uplink": { + "64B": 100 + } + }, + "rfc2544": { + "allowed_drop_rate": "0.0 - 0.005" + }, + "tg__0": { + "collectd": { + "interval": 1 + }, + "queues_per_port": 7 + }, + "traffic_type": 4, + "vnf__0": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vnf__1": { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + }, + "vpp_config": { + "crypto_algorithms": "aes-gcm", + "tunnel": 1 + } + } + + OPTIONS = { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "SW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + } + + OPTIONS_HW = { + "collectd": { + "interval": 1 + }, + "vnf_config": { + "crypto_type": "HW_cryptodev", + "rxq": 1, + "worker_config": "1C/1T", + "worker_threads": 4 + } + } + + CPU_LAYOUT = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 1, 1, 0], + [2, 1, 0, 0, 0, 2, 2, 1], + [3, 1, 0, 0, 0, 3, 3, 1], + [4, 2, 0, 0, 0, 4, 4, 2], + [5, 2, 0, 0, 0, 5, 5, 2], + [6, 3, 0, 0, 0, 6, 6, 3], + [7, 3, 0, 0, 0, 7, 7, 3], + [8, 4, 0, 0, 0, 8, 8, 4], + [9, 5, 0, 1, 0, 9, 9, 4], + [10, 6, 0, 1, 0, 10, 10, 5], + [11, 6, 0, 1, 0, 11, 11, 5], + [12, 7, 0, 1, 0, 12, 12, 6], + [13, 7, 0, 1, 0, 13, 13, 6], + [14, 8, 0, 1, 0, 14, 14, 7], + [15, 8, 0, 1, 0, 15, 15, 7], + [16, 9, 0, 1, 0, 16, 16, 8], + [17, 9, 0, 1, 0, 17, 17, 8]]} + CPU_SMT = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 1, 1, 0], + [2, 1, 0, 0, 0, 2, 2, 1], + [3, 1, 0, 0, 0, 3, 3, 1], + [4, 2, 0, 0, 0, 4, 4, 2], + [5, 2, 0, 0, 0, 5, 5, 2], + [6, 3, 0, 0, 0, 6, 6, 3], + [7, 3, 0, 0, 0, 7, 7, 3], + [8, 4, 0, 0, 0, 8, 8, 4], + [9, 5, 0, 1, 0, 0, 0, 0], + [10, 6, 0, 1, 0, 1, 1, 0], + [11, 6, 0, 1, 0, 2, 2, 1], + [12, 7, 0, 1, 0, 3, 3, 1], + [13, 7, 0, 1, 0, 4, 4, 2], + [14, 8, 
0, 1, 0, 5, 5, 2], + [15, 8, 0, 1, 0, 6, 6, 3], + [16, 9, 0, 1, 0, 7, 7, 3], + [17, 9, 0, 1, 0, 8, 8, 4]]} + + VPP_INTERFACES_DUMP = [ + { + "sw_if_index": 0, + "sup_sw_if_index": 0, + "l2_address_length": 0, + "l2_address": [0, 0, 0, 0, 0, 0, 0, 0], + "interface_name": "local0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 0, + "link_speed": 0, + "mtu": 0, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }, + { + "sw_if_index": 1, + "sup_sw_if_index": 1, + "l2_address_length": 6, + "l2_address": [144, 226, 186, 124, 65, 168, 0, 0], + "interface_name": "TenGigabitEthernetff/6/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9202, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }, + { + "sw_if_index": 2, + "sup_sw_if_index": 2, + "l2_address_length": 6, + "l2_address": [78, 144, 133, 211, 197, 19, 0, 0], + "interface_name": "VirtualFunctionEthernetff/7/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9206, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + } + ] + + VPP_INTERFACES_STATUS = \ + ' Name Idx State MTU (L3/IP4/IP6/MPLS)' \ + 'Counter Count \n' \ + 'TenGigabitEthernetff/6/0 1 up 9000/0/0/0 \n' \ + 'VirtualFunctionEthernetff/7/0 2 up 
9000/0/0/0 \n' \ + 'ipsec0 2 up 9000/0/0/0 \n' \ + 'local0 0 down 0/0/0/0 ' + + VPP_INTERFACES_STATUS_FALSE = \ + ' Name Idx State MTU (L3/IP4/IP6/MPLS)' \ + 'Counter Count \n' \ + 'TenGigabitEthernetff/6/0 1 down 9000/0/0/0 \n' \ + 'VirtualFunctionEthernetff/7/0 2 down 9000/0/0/0 \n' \ + 'ipsec0 2 down 9000/0/0/0 \n' \ + 'local0 0 down 0/0/0/0 ' + + def test__get_crypto_type(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual('SW_cryptodev', + ipsec_approx_setup_helper._get_crypto_type()) + + def test__get_crypto_algorithms(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual('aes-gcm', + ipsec_approx_setup_helper._get_crypto_algorithms()) + + def test__get_n_tunnels(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(1, ipsec_approx_setup_helper._get_n_tunnels()) + + def test__get_n_connections(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(1, 
ipsec_approx_setup_helper._get_n_connections()) + + def test__get_n_connections_error(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS_ERROR + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with self.assertRaises(KeyError) as raised: + ipsec_approx_setup_helper._get_n_connections() + self.assertIn( + 'Missing flow definition in scenario section of the task definition file', + str(raised.exception)) + + def test__get_flow_src_start_ip(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual('10.0.0.0', + ipsec_approx_setup_helper._get_flow_src_start_ip()) + + def test__get_flow_src_start_ip_vnf1(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD_ERROR['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual('20.0.0.0', + ipsec_approx_setup_helper._get_flow_src_start_ip()) + + def test__get_flow_src_start_ip_error(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS_ERROR + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with self.assertRaises(KeyError) as raised: + ipsec_approx_setup_helper._get_flow_src_start_ip() + self.assertIn( + 'Missing flow definition in scenario 
section of the task definition file', + str(raised.exception)) + + def test__get_flow_dst_start_ip(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual('20.0.0.0', + ipsec_approx_setup_helper._get_flow_dst_start_ip()) + + def test__get_flow_dst_start_ip_vnf1(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD_ERROR['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual('10.0.0.0', + ipsec_approx_setup_helper._get_flow_dst_start_ip()) + + def test__get_flow_dst_start_ip_error(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.all_options = self.ALL_OPTIONS_ERROR + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with self.assertRaises(KeyError) as raised: + ipsec_approx_setup_helper._get_flow_dst_start_ip() + self.assertIn( + 'Missing flow definition in scenario section of the task definition file', + str(raised.exception)) + + def test_build_config(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + 
mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.build_config()) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertEqual('TenGigabitEthernetff/6/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_name')) + self.assertEqual(1, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_sw_index')) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + self.assertEqual('VirtualFunctionEthernetff/7/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_name')) + self.assertEqual(2, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_sw_index')) + self.assertGreaterEqual(ssh_helper.execute.call_count, 4) + + def test_build_config_cbc_algorithms(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS_CBC_ALGORITHMS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + 
mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.build_config()) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertEqual('TenGigabitEthernetff/6/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_name')) + self.assertEqual(1, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_sw_index')) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + self.assertEqual('VirtualFunctionEthernetff/7/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_name')) + self.assertEqual(2, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_sw_index')) + self.assertGreaterEqual(ssh_helper.execute.call_count, 4) + + @mock.patch.object(utils, 'setup_hugepages') + def test_setup_vnf_environment(self, *args): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.nodes = [None, None] + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + 
mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + self.assertIsInstance( + ipsec_approx_setup_helper.setup_vnf_environment(), + ResourceProfile) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertEqual('TenGigabitEthernetff/6/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_name')) + self.assertEqual(1, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_sw_index')) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + self.assertEqual('VirtualFunctionEthernetff/7/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_name')) + self.assertEqual(2, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_sw_index')) + self.assertGreaterEqual(ssh_helper.execute.call_count, 4) + + @mock.patch.object(utils, 'setup_hugepages') + def test_setup_vnf_environment_hw(self, *args): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.nodes = [None, None] + scenario_helper.options = self.OPTIONS_HW + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + 
self.assertIsInstance( + ipsec_approx_setup_helper.setup_vnf_environment(), + ResourceProfile) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertEqual('TenGigabitEthernetff/6/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_name')) + self.assertEqual(1, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe0', 'vpp_sw_index')) + self.assertEqual(0, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + self.assertEqual('VirtualFunctionEthernetff/7/0', + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_name')) + self.assertEqual(2, + ipsec_approx_setup_helper.get_value_by_interface_key( + 'xe1', 'vpp_sw_index')) + self.assertGreaterEqual(ssh_helper.execute.call_count, 4) + + def test_calculate_frame_size(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(16984 / 48, + ipsec_approx_setup_helper.calculate_frame_size( + {'64B': 28, '570B': 16, '1518B': 4})) + + def test_calculate_frame_size_64(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(64, + ipsec_approx_setup_helper.calculate_frame_size({})) + + def test_calculate_frame_size_64_error(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(64, + ipsec_approx_setup_helper.calculate_frame_size( + {'64B': -28, 
'570B': 16, '1518B': 4})) + + def test_check_status(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, self.VPP_INTERFACES_STATUS, '' + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertTrue(ipsec_approx_setup_helper.check_status()) + + def test_check_status_false(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, self.VPP_INTERFACES_STATUS_FALSE, '' + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertFalse(ipsec_approx_setup_helper.check_status()) + + def test_get_vpp_statistics(self): + def execute(cmd): + if 'TenGigabitEthernetff/6/0' in cmd: + return 0, output_xe0, '' + elif 'VirtualFunctionEthernetff/7/0' in cmd: + return 0, output_xe1, '' + return 0, '0', '' + + output_xe0 = \ + ' Name Idx State MTU (L3/IP4/IP6/MPLS)' \ + ' Counter Count \n' \ + 'TenGigabitEthernetff/6/0 1 up 9200/0/0/0 ' \ + 'rx packets 23373568\n' \ + ' ' \ + 'rx bytes 1402414080\n' \ + ' ' \ + 'tx packets 20476416\n' \ + ' ' \ + 'tx bytes 1228584960\n' \ + ' ' \ + 'ip4 23373568\n' \ + ' ' \ + 'rx-miss 27789925' + output_xe1 = \ + ' Name Idx State MTU (L3/IP4/IP6/MPLS)' \ + ' Counter Count \n' \ + 'VirtualFunctionEthernetff/7/0 2 up 9200/0/0/0 ' \ + 'rx packets 23373568\n' \ + ' ' \ + 'rx bytes 1402414080\n' \ + ' ' \ + 'tx packets 20476416\n' \ + ' ' \ + 'tx bytes 1228584960\n' \ + ' ' \ + 'ip4 23373568\n' \ + ' ' \ + 'rx-miss 27789925' + + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute = execute + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = 
VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertEqual({'xe0': {'packets_dropped': 27789925, + 'packets_fwd': 20476416, + 'packets_in': 23373568}, + 'xe1': {'packets_dropped': 27789925, + 'packets_fwd': 20476416, + 'packets_in': 23373568}}, + ipsec_approx_setup_helper.get_vpp_statistics()) + + def test_parser_vpp_stats(self): + output = \ + ' Name Idx State MTU (L3/IP4/IP6/MPLS)' \ + 'Counter Count \n' \ + 'TenGigabitEthernetff/6/0 1 up 9200/0/0/0 ' \ + 'rx packets 23373568\n' \ + ' ' \ + 'rx bytes 1402414080\n' \ + ' ' \ + 'tx packets 20476416\n' \ + ' ' \ + 'tx bytes 1228584960\n' \ + ' ' \ + 'ip4 23373568\n' \ + ' ' \ + 'rx-miss 27789925' + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual({'xe0': {'packets_dropped': 27789925, + 'packets_fwd': 20476416, + 'packets_in': 23373568}}, + ipsec_approx_setup_helper.parser_vpp_stats('xe0', + 'TenGigabitEthernetff/6/0', + output)) + + def test_parser_vpp_stats_no_miss(self): + output = \ + ' Name Idx State ' \ + 'Counter Count \n' \ + 'TenGigabitEthernetff/6/0 1 up ' \ + 'rx packets 23373568\n' \ + ' ' \ + 'rx bytes 1402414080\n' \ + ' ' \ + 'tx packets 20476416\n' \ + ' ' \ + 'tx bytes 1228584960\n' 
\ + ' ' \ + 'ip4 23373568' + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual({'xe0': {'packets_dropped': 2897152, + 'packets_fwd': 20476416, + 'packets_in': 23373568}}, + ipsec_approx_setup_helper.parser_vpp_stats('xe0', + 'TenGigabitEthernetff/6/0', + output)) + + def test_create_ipsec_tunnels(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out, \ + mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template, \ + mock.patch.object(ipsec_approx_setup_helper, + 'vpp_get_interface_data') as \ + mock_ipsec_approx_setup_helper: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + mock_vat_terminal_exec_cmd_from_template.return_value = self.VPP_INTERFACES_DUMP + mock_ipsec_approx_setup_helper.return_value = self.VPP_INTERFACES_DUMP + sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.create_ipsec_tunnels()) + 
self.assertGreaterEqual( + mock_vat_terminal_exec_cmd_from_template.call_count, 9) + + def test_create_ipsec_tunnels_cbc_algorithms(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS_CBC_ALGORITHMS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out, \ + mock.patch.object(ipsec_approx_setup_helper, + 'find_encrypted_data_interface') as \ + mock_find_encrypted_data_interface, \ + mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template, \ + mock.patch.object(ipsec_approx_setup_helper, + 'vpp_get_interface_data') as \ + mock_ipsec_approx_setup_helper: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + mock_find_encrypted_data_interface.return_value = { + 'dpdk_port_num': 0, + 'driver': 'igb_uio', + 'dst_ip': '192.168.100.1', + 'dst_mac': '90:e2:ba:7c:30:e8', + 'ifname': 'xe0', + 'local_ip': '192.168.100.2', + 'local_mac': '90:e2:ba:7c:41:a8', + 'netmask': '255.255.255.0', + 'network': {}, + 'node_name': 'vnf__1', + 'numa_node': 0, + 'peer_ifname': 'xe0', + 'peer_intf': {'dpdk_port_num': 0, + 'driver': 'igb_uio', + 'dst_ip': '192.168.100.2', + 'dst_mac': '90:e2:ba:7c:41:a8', + 'ifname': 'xe0', + 'local_ip': '192.168.100.1', + 'local_mac': '90:e2:ba:7c:30:e8', + 'netmask': '255.255.255.0', + 'network': {}, + 'node_name': 'tg__0', + 'peer_ifname': 'xe0', + 'peer_name': 'vnf__0', + 'vld_id': 'uplink_0', + 
'vpci': '0000:81:00.0'}, + 'peer_name': 'tg__0', + 'vld_id': 'uplink_0', + 'vpci': '0000:ff:06.0', + 'vpp_name': u'TenGigabitEthernetff/6/0', + 'vpp_sw_index': 1} + mock_vat_terminal_exec_cmd_from_template.return_value = self.VPP_INTERFACES_DUMP + mock_ipsec_approx_setup_helper.return_value = self.VPP_INTERFACES_DUMP + sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.create_ipsec_tunnels()) + self.assertGreaterEqual( + mock_vat_terminal_exec_cmd_from_template.call_count, 9) + + def test_find_raw_data_interface(self): + expected = {'dpdk_port_num': 0, + 'driver': 'igb_uio', + 'dst_ip': '192.168.100.1', + 'dst_mac': '90:e2:ba:7c:30:e8', + 'ifname': 'xe0', + 'local_ip': '192.168.100.2', + 'local_mac': '90:e2:ba:7c:41:a8', + 'netmask': '255.255.255.0', + 'network': {}, + 'node_name': 'vnf__0', + 'numa_node': 0, + 'peer_ifname': 'xe0', + 'peer_intf': {'dpdk_port_num': 0, + 'driver': 'igb_uio', + 'dst_ip': '192.168.100.2', + 'dst_mac': '90:e2:ba:7c:41:a8', + 'ifname': 'xe0', + 'local_ip': '192.168.100.1', + 'local_mac': '90:e2:ba:7c:30:e8', + 'netmask': '255.255.255.0', + 'network': {}, + 'node_name': 'tg__0', + 'peer_ifname': 'xe0', + 'peer_name': 'vnf__0', + 'vld_id': 'uplink_0', + 'vpci': '0000:81:00.0'}, + 'peer_name': 'tg__0', + 'vld_id': 'uplink_0', + 'vpci': '0000:ff:06.0', + 'vpp_name': u'TenGigabitEthernetff/6/0', + 'vpp_sw_index': 1} + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(expected, + ipsec_approx_setup_helper.find_raw_data_interface()) + + def test_find_raw_data_interface_error(self): + vnfd_helper = VnfdHelper( + 
TestVipsecApproxVnf.VNFD_ERROR['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + with self.assertRaises(KeyError): + ipsec_approx_setup_helper.find_raw_data_interface() + + def test_find_encrypted_data_interface(self): + expected = {'dpdk_port_num': 1, + 'driver': 'igb_uio', + 'dst_ip': '1.1.1.2', + 'dst_mac': '0a:b1:ec:fd:a2:66', + 'ifname': 'xe1', + 'local_ip': '1.1.1.1', + 'local_mac': '4e:90:85:d3:c5:13', + 'netmask': '255.255.255.0', + 'network': {}, + 'node_name': 'vnf__0', + 'numa_node': 0, + 'peer_ifname': 'xe1', + 'peer_intf': {'driver': 'igb_uio', + 'dst_ip': '1.1.1.1', + 'dst_mac': '4e:90:85:d3:c5:13', + 'ifname': 'xe1', + 'local_ip': '1.1.1.2', + 'local_mac': '0a:b1:ec:fd:a2:66', + 'netmask': '255.255.255.0', + 'network': {}, + 'node_name': 'vnf__1', + 'peer_ifname': 'xe1', + 'peer_name': 'vnf__0', + 'vld_id': 'ciphertext', + 'vpci': '0000:00:07.0'}, + 'peer_name': 'vnf__1', + 'vld_id': 'ciphertext', + 'vpci': '0000:ff:07.0', + 'vpp_name': u'VirtualFunctionEthernetff/7/0', + 'vpp_sw_index': 2} + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + self.assertEqual(expected, + ipsec_approx_setup_helper.find_encrypted_data_interface()) + + def test_create_startup_configuration_of_vpp(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with 
mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsInstance( + ipsec_approx_setup_helper.create_startup_configuration_of_vpp(), + vpp_helpers.VppConfigGenerator) + + def test_add_worker_threads_and_rxqueues(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone( + ipsec_approx_setup_helper.add_worker_threads_and_rxqueues( + 
vpp_config_generator, 1, 1)) + self.assertEqual( + 'cpu\n{\n corelist-workers 2\n main-core 1\n}\ndpdk\n{\n ' \ + 'dev default\n {\n num-rx-queues 1\n }\n num-mbufs 32768\n}\n', + vpp_config_generator.dump_config()) + + def test_add_worker_threads_and_rxqueues_smt(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_SMT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_SMT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone( + ipsec_approx_setup_helper.add_worker_threads_and_rxqueues( + vpp_config_generator, 1)) + self.assertEqual( + 'cpu\n{\n corelist-workers 2,6\n main-core 1\n}\ndpdk\n{\n ' \ + 'dev default\n {\n num-rx-queues 1\n }\n num-mbufs 32768\n}\n', + vpp_config_generator.dump_config()) + + def test_add_worker_threads_and_rxqueues_with_numa(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = 
self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone( + ipsec_approx_setup_helper.add_worker_threads_and_rxqueues( + vpp_config_generator, 1, 1)) + self.assertEqual( + 'cpu\n{\n corelist-workers 2\n main-core 1\n}\ndpdk\n{\n ' \ + 'dev default\n {\n num-rx-queues 1\n }\n num-mbufs 32768\n}\n', + vpp_config_generator.dump_config()) + + def test_add_pci_devices(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + 
mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.add_pci_devices( + vpp_config_generator)) + self.assertEqual( + 'dpdk\n{\n dev 0000:ff:06.0 \n dev 0000:ff:07.0 \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_cryptodev(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.add_dpdk_cryptodev( + vpp_config_generator, 'aesni_gcm', 1)) + self.assertEqual( + 'dpdk\n{\n vdev cryptodev_aesni_gcm_pmd,socket_id=0 \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_cryptodev_hw(self): + vnfd_helper = 
VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS_HW + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.add_dpdk_cryptodev( + vpp_config_generator, 'aesni_gcm', 1)) + self.assertEqual( + 'dpdk\n{\n dev 0000:ff:01.0 \n uio-driver igb_uio\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_cryptodev_smt_used(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 
'execute_script_json_out') as \ + mock_execute_script_json_out: + mock_get_cpu_layout.return_value = self.CPU_SMT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_LAYOUT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.add_dpdk_cryptodev( + vpp_config_generator, 'aesni_gcm', 1)) + self.assertEqual( + 'dpdk\n{\n vdev cryptodev_aesni_gcm_pmd,socket_id=0 \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_cryptodev_smt_used_hw(self): + vnfd_helper = VnfdHelper( + TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS_HW + scenario_helper.all_options = self.ALL_OPTIONS + vpp_config_generator = vpp_helpers.VppConfigGenerator() + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout: + mock_get_cpu_layout.return_value = self.CPU_SMT + ipsec_approx_setup_helper.sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper.sys_cores.cpuinfo = self.CPU_SMT + ipsec_approx_setup_helper._update_vnfd_helper( + ipsec_approx_setup_helper.sys_cores.get_cpu_layout()) + self.assertIsNone(ipsec_approx_setup_helper.add_dpdk_cryptodev( + vpp_config_generator, 'aesni_gcm', 1)) + self.assertEqual( + 'dpdk\n{\n dev 0000:ff:01.0 \n dev 0000:ff:01.1 \n uio-driver igb_uio\n}\n', + vpp_config_generator.dump_config()) + + def test_initialize_ipsec(self): + vnfd_helper = VnfdHelper( + 
TestVipsecApproxVnf.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + scenario_helper.all_options = self.ALL_OPTIONS + + ipsec_approx_setup_helper = VipsecApproxSetupEnvHelper(vnfd_helper, + ssh_helper, + scenario_helper) + + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout, \ + mock.patch.object(ipsec_approx_setup_helper, + 'execute_script_json_out') as \ + mock_execute_script_json_out, \ + mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template, \ + mock.patch.object(ipsec_approx_setup_helper, + 'vpp_get_interface_data') as \ + mock_ipsec_approx_setup_helper: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + mock_execute_script_json_out.return_value = str( + self.VPP_INTERFACES_DUMP).replace("\'", "\"") + mock_vat_terminal_exec_cmd_from_template.return_value = '' + mock_ipsec_approx_setup_helper.return_value = self.VPP_INTERFACES_DUMP + sys_cores = cpu.CpuSysCores(ssh_helper) + ipsec_approx_setup_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + ipsec_approx_setup_helper.update_vpp_interface_data() + ipsec_approx_setup_helper.iface_update_numa() + self.assertIsNone(ipsec_approx_setup_helper.initialize_ipsec()) + self.assertGreaterEqual( + mock_vat_terminal_exec_cmd_from_template.call_count, 9) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py index 3b095647c..32f384027 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. @@ -320,7 +320,8 @@ class TestProxSocketHelper(unittest.TestCase): self.assertEqual(len(prox._pkt_dumps), 0) mock_select.select.reset_mock() - mock_select.select.side_effect = chain([['a'], ['']], repeat([1], 3)) + mock_select.select.side_effect = chain([['a'], ['']], + repeat([1], 3)) mock_recv.decode.return_value = PACKET_DUMP_1 ret = prox.get_data() self.assertEqual(mock_select.select.call_count, 2) @@ -328,13 +329,54 @@ class TestProxSocketHelper(unittest.TestCase): self.assertEqual(len(prox._pkt_dumps), 1) mock_select.select.reset_mock() - mock_select.select.side_effect = chain([[object()], [None]], repeat([1], 3)) + mock_select.select.side_effect = chain([[object()], [None]], + repeat([1], 3)) mock_recv.decode.return_value = PACKET_DUMP_2 ret = prox.get_data() self.assertEqual(mock_select.select.call_count, 1) self.assertEqual(ret, 'jumped over') self.assertEqual(len(prox._pkt_dumps), 3) + @mock.patch.object(prox_helpers, 'select') + def test_get_string(self, mock_select): + mock_select.select.side_effect = [[1], [0]] + mock_socket = mock.MagicMock() + mock_recv = mock_socket.recv() + mock_recv.decode.return_value = "" + prox = prox_helpers.ProxSocketHelper(mock_socket) + status, ret = prox.get_string() + self.assertEqual(ret, "") + self.assertTrue(status) + self.assertEqual(len(prox._pkt_dumps), 0) + + @mock.patch.object(prox_helpers, 'select') + def test_get_string2(self, mock_select): + mock_select.select.side_effect = chain([['a'], ['']], + repeat([1], 3)) + mock_socket = mock.MagicMock() + mock_recv = mock_socket.recv() + mock_recv.decode.return_value = PACKET_DUMP_1 + prox = prox_helpers.ProxSocketHelper(mock_socket) + status, ret = prox.get_string() + self.assertEqual(mock_select.select.call_count, 2) + self.assertEqual(ret, 'pktdump,3,11') + self.assertTrue(status) + self.assertEqual(len(prox._pkt_dumps), 1) + + @mock.patch.object(prox_helpers, 'select') + def 
test_get_string3(self, mock_select): + mock_select.select.side_effect = chain([[object()], [None]], + repeat([1], 3)) + mock_socket = mock.MagicMock() + mock_recv = mock_socket.recv() + mock_recv.decode.return_value = PACKET_DUMP_2 + prox = prox_helpers.ProxSocketHelper(mock_socket) + status, ret = prox.get_string() + self.assertTrue(status) + self.assertTrue(mock_select.select.assert_called_once) + self.assertEqual(ret, 'jumped over') + self.assertEqual(len(prox._pkt_dumps), 2) + def test__parse_socket_data_mixed_data(self): prox = prox_helpers.ProxSocketHelper(mock.MagicMock()) ret, _ = prox._parse_socket_data(PACKET_DUMP_NON_1, False) @@ -548,29 +590,160 @@ class TestProxSocketHelper(unittest.TestCase): self.assertEqual(result, expected) @mock.patch.object(prox_helpers.LOG, 'error') + def test_irq_core_stats(self, *args): + mock_socket = mock.MagicMock() + prox = prox_helpers.ProxSocketHelper(mock_socket) + prox.get_data = mock.MagicMock(return_value=('0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3')) + + data_0 = {"cpu": 0, 'bucket_0': 1, 'bucket_1': 2, 'bucket_2': 3, 'bucket_3': 4, + 'bucket_4': 5, 'bucket_5': 0, 'bucket_6': 1, 'bucket_7': 2, 'bucket_8': 3, + 'bucket_9': 4, 'bucket_10': 5, 'bucket_11': 0, 'bucket_12': 1, + "max_irq": 0, "overflow": 10} + + data_1 = {"cpu": 1, 'bucket_0': 1, 'bucket_1': 2, 'bucket_2': 3, 'bucket_3': 4, + 'bucket_4': 5, 'bucket_5': 0, 'bucket_6': 1, 'bucket_7': 2, 'bucket_8': 3, + 'bucket_9': 4, 'bucket_10': 5, 'bucket_11': 0, 'bucket_12': 1, + "max_irq": 0, "overflow": 10} + + expected = {"core_0": data_0, "core_1": data_1} + + result = prox.irq_core_stats([[0, 1], [1, 0]]) + self.assertDictEqual(result, expected) + + @mock.patch.object(prox_helpers.LOG, 'error') def test_multi_port_stats(self, *args): mock_socket = mock.MagicMock() prox = prox_helpers.ProxSocketHelper(mock_socket) - prox.get_data = mock.MagicMock(return_value='0,1,2,3,4,5;1,1,2,3,4,5') + prox.get_string = mock.MagicMock(return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5')) 
expected = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]] - result = prox.multi_port_stats([0, 1]) + status, result = prox.multi_port_stats([0, 1]) self.assertEqual(result, expected) - - prox.get_data = mock.MagicMock(return_value='0,1,2,3,4,5;1,1,2,3,4,5') - result = prox.multi_port_stats([0]) - expected = [0] + self.assertEqual(status, True) + + prox.get_string = mock.MagicMock( + return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5')) + status, result = prox.multi_port_stats([0]) + self.assertEqual(status, False) + + prox.get_string = mock.MagicMock( + return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5')) + status, result = prox.multi_port_stats([0, 1, 2]) + self.assertEqual(status, False) + + prox.get_string = mock.MagicMock( + return_value=(True, '0,1,2,3;1,1,2,3,4,5')) + status, result = prox.multi_port_stats([0, 1]) + self.assertEqual(status, False) + + prox.get_string = mock.MagicMock( + return_value=(True, '99,1,2,3,4,5;1,1,2,3,4,5')) + status, result = prox.multi_port_stats([0, 1]) + self.assertEqual(status, False) + + prox.get_string = mock.MagicMock( + return_value=(True, '99,1,2,3,4,5;1,1,2,3,4,5')) + status, result = prox.multi_port_stats([99, 1]) + expected = [[99, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]] + self.assertEqual(status, True) self.assertEqual(result, expected) - prox.get_data = mock.MagicMock(return_value='0,1,2,3;1,1,2,3,4,5') - result = prox.multi_port_stats([0, 1]) - expected = [0] * 2 - self.assertEqual(result, expected) + prox.get_string = mock.MagicMock( + return_value=(True, + '2,21,22,23,24,25;1,11,12,13,14,15;0,1,2,3,4,5')) + + sample1 = [0, 1, 2, 3, 4, 5] + sample2 = [1, 11, 12, 13, 14, 15] + sample3 = [2, 21, 22, 23, 24, 25] + expected = [sample3, sample2, sample1] + status, result = prox.multi_port_stats([1, 2, 0]) + self.assertTrue(status) + self.assertListEqual(result, expected) + + prox.get_string = mock.MagicMock( + return_value=(True, '6,21,22,23,24,25;1,11,12,13,14,15;0,1,2,3,4,5')) + ok, result = prox.multi_port_stats([1, 6, 0]) + sample1 = [6, 
21, 22, 23, 24, 25] + sample2 = [1, 11, 12, 13, 14, 15] + sample3 = [0, 1, 2, 3, 4, 5] + expected = [sample1, sample2, sample3] + self.assertListEqual(result, expected) + self.assertTrue(ok) - prox.get_data = mock.MagicMock(return_value='99,1,2,3,4,5;1,1,2,3,4,5') - expected = [0] * 2 - result = prox.multi_port_stats([0, 1]) - self.assertEqual(result, expected) + @mock.patch.object(prox_helpers.LOG, 'error') + def test_multi_port_stats_diff(self, *args): + mock_socket = mock.MagicMock() + prox = prox_helpers.ProxSocketHelper(mock_socket) + prox.get_string = mock.MagicMock(return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5')) + _, t1 = prox.multi_port_stats([0, 1]) + + prox.get_string = mock.MagicMock(return_value=(True, '0,2,4,6,8,6;1,4,8,16,32,6')) + _, t2 = prox.multi_port_stats([0, 1]) + + prox.get_string = mock.MagicMock(return_value=(True, '0,1,1,1,1,1;1,1,1,1,1,1')) + _, t3 = prox.multi_port_stats([0, 1]) + prox.get_string = mock.MagicMock(return_value=(True, '0,2,2,2,2,2;1,2,2,2,2,2')) + _, t4 = prox.multi_port_stats([0, 1]) + + expected = [[0, 1.0, 2.0, 0, 0, 1], [1, 3.0, 6.0, 0, 0, 1]] + result = prox.multi_port_stats_diff(t1, t2, 1) + + self.assertListEqual(result, expected) + + result = prox.multi_port_stats_diff(t4, t3, 1) + expected = [[0, 1.0, 1.0, 0, 0, 1], [1, 1.0, 1.0, 0, 0, 1]] + + self.assertListEqual(result, expected) + + prox.get_string = mock.MagicMock(return_value=(True, '0,2,4,6,8,10')) + ok, t5 = prox.multi_port_stats([0, 1]) + self.assertFalse(ok) + self.assertListEqual(t5, []) + + result = prox.multi_port_stats_diff(t5, t4, 1) + expected = [[0, 0.0, 0.0, 0, 0, 0], [1, 0.0, 0.0, 0, 0, 0]] + self.assertListEqual(result, expected) + + prox.get_string = mock.MagicMock(return_value=(True, '0,10,10,20,30,0;1,30,40,50,60,0')) + _, t6 = prox.multi_port_stats([0, 1]) + + prox.get_string = \ + mock.MagicMock(return_value=(True, '0,100,100,100,100,0;1,100,100,100,100,0')) + _, t7 = prox.multi_port_stats([0, 1]) + + result = prox.multi_port_stats_diff(t6, 
t7, 1) + expected = [[0, 0.0, 0.0, 0, 0, 0], [1, 0.0, 0.0, 0, 0, 0]] + self.assertListEqual(result, expected) + + result = prox.multi_port_stats_diff(t1, t2, 0) + expected = [[0, 0.0, 0.0, 0, 0, 1], [1, 0.0, 0.0, 0, 0, 1]] + self.assertListEqual(result, expected) + + @mock.patch.object(prox_helpers.LOG, 'error') + def test_multi_port_stats_tuple(self, *args): + mock_socket = mock.MagicMock() + prox = prox_helpers.ProxSocketHelper(mock_socket) + prox.get_string = mock.MagicMock(return_value=(True, '0,1,2,3,4,5;1,1,2,3,4,5')) + _, result1 = prox.multi_port_stats([0, 1]) + prox.get_string = mock.MagicMock(return_value=(True, '0,2,4,6,8,6;1,4,8,16,32,6')) + _, result2 = prox.multi_port_stats([0, 1]) + + result = prox.multi_port_stats_diff(result1, result2, 1) + + vnfd_helper = mock.MagicMock() + vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)] + + expected = {'xe0': {'in_packets': 1.0, 'out_packets': 2.0}, + 'xe1': {'in_packets': 3.0, 'out_packets': 6.0}} + live_stats = prox.multi_port_stats_tuple(result, vnfd_helper.ports_iter()) + self.assertDictEqual(live_stats, expected) + + live_stats = prox.multi_port_stats_tuple(result, None) + expected = {} + self.assertDictEqual(live_stats, expected) + + live_stats = prox.multi_port_stats_tuple(None, vnfd_helper.ports_iter()) + self.assertDictEqual(live_stats, expected) def test_port_stats(self): port_stats = [ @@ -1201,6 +1374,36 @@ class TestProxDpdkVnfSetupEnvHelper(unittest.TestCase): ['missing_addtional_file', 'dofile("nosuch")'], ], ], + [ + 'core 0', + [ + ['name', 'p0'] + ] + ], + [ + 'core 1-4', + [ + ['name', 'p1'] + ] + ], + [ + 'core 5,6', + [ + ['name', 'p2'] + ] + ], + [ + 'core xx', + [ + ['name', 'p3'] + ] + ], + [ + 'core $x', + [ + ['name', 'p4'] + ] + ] ] expected = [ @@ -1230,6 +1433,54 @@ class TestProxDpdkVnfSetupEnvHelper(unittest.TestCase): ['missing_addtional_file', 'dofile("nosuch")'], ], ], + [ + 'core 0', + [ + ['name', 'p0'] + ] + ], + [ + 'core 1', + [ + ['name', 'p1'] + ] + ], + [ + 
'core 2', + [ + ['name', 'p1'] + ] + ], + [ + 'core 3', + [ + ['name', 'p1'] + ] + ], + [ + 'core 4', + [ + ['name', 'p1'] + ] + ], + [ + 'core 5', + [ + ['name', 'p2'] + ] + ], + [ + 'core 6', + [ + ['name', 'p2'] + ] + ], + [ + 'core $x', + [ + ['name', 'p4'] + ] + ] ] result = helper.generate_prox_config_file('/c/d/e') self.assertEqual(result, expected, str(result)) @@ -1492,8 +1743,83 @@ class TestProxResourceHelper(unittest.TestCase): helper = prox_helpers.ProxResourceHelper(mock.MagicMock()) helper._queue = queue = mock.MagicMock() helper._result = {'z': 123} + + helper.client = mock.MagicMock() + helper.client.hz.return_value = 1 + helper.client.multi_port_stats.return_value = \ + (True, [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]) + helper.client.multi_port_stats_diff.return_value = \ + ([0, 1, 2, 3, 4, 5, 6, 7]) + helper.client.multi_port_stats_tuple.return_value = \ + {"xe0": {"in_packets": 1, "out_packets": 2}} + helper.resource = resource = mock.MagicMock() + + vnfd_helper = mock.MagicMock() + vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)] + helper.vnfd_helper = vnfd_helper + + resource.check_if_system_agent_running.return_value = 0, '1234' + resource.amqp_collect_nfvi_kpi.return_value = 543 + resource.check_if_system_agent_running.return_value = (0, None) + + queue.empty.return_value = False + queue.get.return_value = {'a': 789} + + expected = {'z': 123, 'a': 789, + 'collect_stats': {'core': 543}, + 'live_stats': {'xe0': {'in_packets': 1, 'out_packets': 2}}} + result = helper.collect_kpi() + self.assertDictEqual(result, expected) + + def test_collect_kpi_no_hz(self): + helper = prox_helpers.ProxResourceHelper(mock.MagicMock()) + helper._queue = queue = mock.MagicMock() + helper._result = {'z': 123} + + helper.client = mock.MagicMock() + helper.client.multi_port_stats.return_value = \ + (True, [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]) + helper.client.multi_port_stats_diff.return_value = \ + ([0, 1, 2, 3, 4, 5, 6, 7]) + 
helper.client.multi_port_stats_tuple.return_value = \ + {"xe0": {"in_packets": 1, "out_packets": 2}} + helper.resource = resource = mock.MagicMock() + + vnfd_helper = mock.MagicMock() + vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)] + helper.vnfd_helper = vnfd_helper + + resource.check_if_system_agent_running.return_value = 0, '1234' + resource.amqp_collect_nfvi_kpi.return_value = 543 + resource.check_if_system_agent_running.return_value = (0, None) + + queue.empty.return_value = False + queue.get.return_value = {'a': 789} + + expected = {'z': 123, 'a': 789, + 'collect_stats': {'core': 543}, + 'live_stats': {'xe0': {'in_packets': 1, 'out_packets': 2}}} + result = helper.collect_kpi() + self.assertDictEqual(result, expected) + + def test_collect_kpi_bad_data(self): + helper = prox_helpers.ProxResourceHelper(mock.MagicMock()) + helper._queue = queue = mock.MagicMock() + helper._result = {'z': 123} + + helper.client = mock.MagicMock() + helper.client.multi_port_stats.return_value = \ + (False, [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]) + helper.client.multi_port_stats_diff.return_value = \ + ([0, 1, 2, 3, 4, 5, 6, 7]) + helper.client.multi_port_stats_tuple.return_value = \ + {"xe0": {"in_packets": 1, "out_packets": 2}} helper.resource = resource = mock.MagicMock() + vnfd_helper = mock.MagicMock() + vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)] + helper.vnfd_helper = vnfd_helper + resource.check_if_system_agent_running.return_value = 0, '1234' resource.amqp_collect_nfvi_kpi.return_value = 543 resource.check_if_system_agent_running.return_value = (0, None) @@ -1501,7 +1827,8 @@ class TestProxResourceHelper(unittest.TestCase): queue.empty.return_value = False queue.get.return_value = {'a': 789} - expected = {'z': 123, 'a': 789, 'collect_stats': {'core': 543}} + expected = {'z': 123, 'a': 789, + 'collect_stats': {'core': 543}} result = helper.collect_kpi() self.assertDictEqual(result, expected) @@ -1527,14 +1854,16 @@ class 
TestProxResourceHelper(unittest.TestCase): def test_run_traffic(self): setup_helper = mock.MagicMock() helper = prox_helpers.ProxResourceHelper(setup_helper) - traffic_profile = mock.MagicMock(**{"done": True}) + traffic_profile = mock.MagicMock() + traffic_profile.done.is_set.return_value = True helper.run_traffic(traffic_profile) self.assertEqual(helper._terminated.value, 1) def test__run_traffic_once(self): setup_helper = mock.MagicMock() helper = prox_helpers.ProxResourceHelper(setup_helper) - traffic_profile = mock.MagicMock(**{"done": True}) + traffic_profile = mock.MagicMock() + traffic_profile.done.is_set.return_value = True helper._run_traffic_once(traffic_profile) self.assertEqual(helper._terminated.value, 1) @@ -1582,8 +1911,9 @@ class TestProxDataHelper(unittest.TestCase): vnfd_helper.port_pairs.all_ports = list(range(4)) sut = mock.MagicMock() - sut.multi_port_stats.return_value = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5], - [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]] + sut.multi_port_stats.return_value = (True, + [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5], + [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]]) data_helper = prox_helpers.ProxDataHelper( vnfd_helper, sut, pkt_size, 25, None, @@ -1591,14 +1921,77 @@ class TestProxDataHelper(unittest.TestCase): self.assertEqual(data_helper.rx_total, 4) self.assertEqual(data_helper.tx_total, 8) - self.assertEqual(data_helper.requested_pps, 6.25e6) + self.assertEqual(data_helper.requested_pps, 6250000.0) + + vnfd_helper = mock.MagicMock() + vnfd_helper.port_pairs.all_ports = [3, 4] + + sut = mock.MagicMock() + sut.multi_port_stats.return_value = (True, + [[3, 1, 2, 3, 4, 5], [4, 1, 2, 3, 4, 5]]) + + data_helper = prox_helpers.ProxDataHelper( + vnfd_helper, sut, pkt_size, 25, None, + constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + + self.assertEqual(data_helper.rx_total, 2) + self.assertEqual(data_helper.tx_total, 4) + self.assertEqual(data_helper.requested_pps, 3125000.0) + + vnfd_helper = mock.MagicMock() + 
vnfd_helper.port_pairs.all_ports = [0, 1, 2, 3, 4, 6, 7] + + sut = mock.MagicMock() + sut.multi_port_stats.return_value = (True, + [[8, 1, 2, 3, 4, 5], [9, 1, 2, 3, 4, 5]]) + + data_helper = prox_helpers.ProxDataHelper( + vnfd_helper, sut, pkt_size, 25, None, + constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + + self.assertEqual(data_helper.rx_total, 2) + self.assertEqual(data_helper.tx_total, 4) + self.assertEqual(data_helper.requested_pps, 10937500.0) + + vnfd_helper = mock.MagicMock() + vnfd_helper.port_pairs.all_ports = [] + + sut = mock.MagicMock() + sut.multi_port_stats.return_value = (True, + [[8, 1, 2, 3, 4, 5], [9, 1, 2, 3, 4, 5]]) + + data_helper = prox_helpers.ProxDataHelper( + vnfd_helper, sut, pkt_size, 25, None, + constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + + self.assertEqual(data_helper.rx_total, 2) + self.assertEqual(data_helper.tx_total, 4) + self.assertEqual(data_helper.requested_pps, 0.0) + + def test_totals_and_pps2(self): + pkt_size = 180 + vnfd_helper = mock.MagicMock() + vnfd_helper.port_pairs.all_ports = list(range(4)) + + sut = mock.MagicMock() + sut.multi_port_stats.return_value = (True, + [[0, 'A', 2, 3, 4, 5], [1, 'B', 'C', 3, 4, 5], + ['D', 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 'F']]) + + data_helper = prox_helpers.ProxDataHelper( + vnfd_helper, sut, pkt_size, 25, None, + constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + + self.assertEqual(data_helper.rx_total, 0) + self.assertEqual(data_helper.tx_total, 0) + self.assertEqual(data_helper.requested_pps, 0) def test_samples(self): vnfd_helper = mock.MagicMock() vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)] sut = mock.MagicMock() - sut.multi_port_stats.return_value = [[0, 1, 2, 3, 4, 5], [1, 11, 12, 3, 4, 5]] + sut.multi_port_stats.return_value = (True, [[0, 1, 2, 3, 4, 5], [1, 11, 12, 3, 4, 5]]) data_helper = prox_helpers.ProxDataHelper( vnfd_helper, sut, None, None, None, None) @@ -1616,13 +2009,35 @@ class 
TestProxDataHelper(unittest.TestCase): result = data_helper.samples self.assertDictEqual(result, expected) + def test_samples2(self): + vnfd_helper = mock.MagicMock() + vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)] + + sut = mock.MagicMock() + sut.multi_port_stats.return_value = (True, [[3, 1, 2, 3, 4, 5], [7, 11, 12, 3, 4, 5]]) + + data_helper = prox_helpers.ProxDataHelper( + vnfd_helper, sut, None, None, None, None) + + expected = { + 'xe1': { + 'in_packets': 1, + 'out_packets': 2, + }, + 'xe2': { + 'in_packets': 11, + 'out_packets': 12, + }, + } + result = data_helper.samples + self.assertDictEqual(result, expected) + def test___enter__(self): vnfd_helper = mock.MagicMock() vnfd_helper.port_pairs.all_ports = list(range(4)) vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)] sut = mock.MagicMock() - sut.port_stats.return_value = list(range(10)) data_helper = prox_helpers.ProxDataHelper(vnfd_helper, sut, None, None, 5.4, constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) @@ -1976,7 +2391,6 @@ class TestProxProfileHelper(unittest.TestCase): client = mock.MagicMock() client.hz.return_value = 2 - client.port_stats.return_value = tuple(range(12)) helper.client = client helper.get_latency = mock.MagicMock(return_value=[3.3, 3.6, 3.8]) @@ -1986,18 +2400,21 @@ class TestProxProfileHelper(unittest.TestCase): with helper.traffic_context(64, 1): pass - @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time') - def test_run_test(self, _): + @mock.patch.object(time, 'sleep') + def test_run_test(self, *args): resource_helper = mock.MagicMock() resource_helper.step_delta = 0.4 resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2)) - resource_helper.sut.port_stats.return_value = list(range(10)) + resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 1, 2, 4, 5], + [1, 1, 2, 3, 4, 5]]) helper = prox_helpers.ProxProfileHelper(resource_helper) - helper.run_test(120, 5, 6.5, - constants.NIC_GBPS_DEFAULT 
* constants.ONE_GIGABIT_IN_BITS) - + helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + self.assertTrue(resource_helper.sut.multi_port_stats.called) + self.assertTrue(resource_helper.sut.stop_all.called) + self.assertTrue(resource_helper.sut.reset_stats.called) class TestProxMplsProfileHelper(unittest.TestCase): @@ -2133,22 +2550,31 @@ class TestProxBngProfileHelper(unittest.TestCase): self.assertEqual(helper.arp_task_cores, expected_arp_task) self.assertEqual(helper._cores_tuple, expected_combined) - @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time') - def test_run_test(self, _): + @mock.patch.object(time, 'sleep') + def test_run_test(self, *args): resource_helper = mock.MagicMock() resource_helper.step_delta = 0.4 resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2)) - resource_helper.sut.port_stats.return_value = list(range(10)) + resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 1, 2, 4, 5], + [1, 1, 2, 3, 4, 5]]) helper = prox_helpers.ProxBngProfileHelper(resource_helper) - helper.run_test(120, 5, 6.5, - constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + self.assertTrue(resource_helper.sut.multi_port_stats.called) + self.assertTrue(resource_helper.sut.stop_all.called) + self.assertTrue(resource_helper.sut.reset_stats.called) + + resource_helper.reset_mock() # negative pkt_size is the only way to make ratio > 1 - helper.run_test(-1000, 5, 6.5, - constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + helper.run_test(pkt_size=-1000, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + self.assertTrue(resource_helper.sut.multi_port_stats.called) + 
self.assertTrue(resource_helper.sut.stop_all.called) + self.assertTrue(resource_helper.sut.reset_stats.called) class TestProxVpeProfileHelper(unittest.TestCase): @@ -2251,18 +2677,22 @@ class TestProxVpeProfileHelper(unittest.TestCase): self.assertEqual(helper.inet_ports, expected_inet) self.assertEqual(helper._ports_tuple, expected_combined) - @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time') - def test_run_test(self, _): + @mock.patch.object(time, 'sleep') + def test_run_test(self, *args): resource_helper = mock.MagicMock() resource_helper.step_delta = 0.4 resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2)) - resource_helper.sut.port_stats.return_value = list(range(10)) + resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 1, 2, 4, 5], + [1, 1, 2, 3, 4, 5]]) helper = prox_helpers.ProxVpeProfileHelper(resource_helper) - helper.run_test(120, 5, 6.5) - helper.run_test(-1000, 5, 6.5) # negative pkt_size is the only way to make ratio > 1 + helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + # negative pkt_size is the only way to make ratio > 1 + helper.run_test(pkt_size=-1000, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) class TestProxlwAFTRProfileHelper(unittest.TestCase): @@ -2365,14 +2795,31 @@ class TestProxlwAFTRProfileHelper(unittest.TestCase): self.assertEqual(helper.inet_ports, expected_inet) self.assertEqual(helper._ports_tuple, expected_combined) - @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time') - def test_run_test(self, _): + @mock.patch.object(time, 'sleep') + def test_run_test(self, *args): resource_helper = mock.MagicMock() resource_helper.step_delta = 0.4 resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2)) - resource_helper.sut.port_stats.return_value = list(range(10)) + 
resource_helper.sut.multi_port_stats.return_value = (True, [[0, 1, 2, 4, 6, 5], + [1, 1, 2, 3, 4, 5]]) helper = prox_helpers.ProxlwAFTRProfileHelper(resource_helper) - helper.run_test(120, 5, 6.5) - helper.run_test(-1000, 5, 6.5) # negative pkt_size is the only way to make ratio > 1 + helper.run_test(pkt_size=120, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + + # negative pkt_size is the only way to make ratio > 1 + helper.run_test(pkt_size=-1000, duration=5, value=6.5, tolerated_loss=0.0, + line_speed=constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS) + + +class TestProxIrqProfileHelper(unittest.TestCase): + + def test_run_test(self, *args): + resource_helper = mock.MagicMock() + helper = prox_helpers.ProxIrqProfileHelper(resource_helper) + self.assertIsNone(helper._cores_tuple) + self.assertIsNone(helper._ports_tuple) + self.assertIsNone(helper._latency_cores) + self.assertIsNone(helper._test_cores) + self.assertIsNone(helper._cpu_topology) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py new file mode 100644 index 000000000..94197c3be --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_irq.py @@ -0,0 +1,828 @@ +# Copyright (c) 2017-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import mock +import errno + +from yardstick.tests import STL_MOCKS +from yardstick.common import exceptions as y_exceptions +from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqGen +from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqVNF +from yardstick.benchmark.contexts import base as ctx_base + +SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper' + +STLClient = mock.MagicMock() +stl_patch = mock.patch.dict("sys.modules", STL_MOCKS) +stl_patch.start() + +if stl_patch: + from yardstick.network_services.vnf_generic.vnf import prox_vnf + from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh + +VNF_NAME = "vnf__1" + +class TestProxIrqVNF(unittest.TestCase): + + SCENARIO_CFG = { + 'task_path': "", + 'nodes': { + 'tg__1': 'trafficgen_1.yardstick', + 'vnf__1': 'vnf.yardstick'}, + 'runner': { + 'duration': 600, 'type': 'Duration'}, + 'topology': 'prox-tg-topology-2.yaml', + 'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml', + 'type': 'NSPerf', + 'options': { + 'tg__1': {'prox_args': {'-e': '', + '-t': ''}, + 'prox_config': 'configs/l3-gen-2.cfg', + 'prox_path': + '/root/dppd-PROX-v035/build/prox'}, + 'vnf__1': { + 'prox_args': {'-t': ''}, + 'prox_config': 'configs/l3-swap-2.cfg', + 'prox_path': '/root/dppd-PROX-v035/build/prox'}}} + + VNFD_0 = { + 'short-name': 'VpeVnf', + 'vdu': [ + { + 'routing_table': [ + { + 'network': '152.16.100.20', + 'netmask': '255.255.255.0', + 'gateway': '152.16.100.20', + 'if': 'xe0' + }, + { + 'network': '152.16.40.20', + 'netmask': '255.255.255.0', + 'gateway': '152.16.40.20', + 'if': 'xe1' + }, + ], + 'description': 'VPE approximation using DPDK', + 'name': 'vpevnf-baremetal', + 'nd_route_tbl': [ + { + 'network': '0064:ff9b:0:0:0:0:9810:6414', + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:6414', + 'if': 'xe0' + }, + { + 'network': '0064:ff9b:0:0:0:0:9810:2814', + 'netmask': '112', + 'gateway': 
'0064:ff9b:0:0:0:0:9810:2814', + 'if': 'xe1' + }, + ], + 'id': 'vpevnf-baremetal', + 'external-interface': [ + { + 'virtual-interface': { + 'dst_mac': '00:00:00:00:00:03', + 'vpci': '0000:05:00.0', + 'local_ip': '152.16.100.19', + 'type': 'PCI-PASSTHROUGH', + 'netmask': '255.255.255.0', + 'dpdk_port_num': 0, + 'bandwidth': '10 Gbps', + 'dst_ip': '152.16.100.20', + 'local_mac': '00:00:00:00:00:01' + }, + 'vnfd-connection-point-ref': 'xe0', + 'name': 'xe0' + }, + { + 'virtual-interface': { + 'dst_mac': '00:00:00:00:00:04', + 'vpci': '0000:05:00.1', + 'local_ip': '152.16.40.19', + 'type': 'PCI-PASSTHROUGH', + 'netmask': '255.255.255.0', + 'dpdk_port_num': 1, + 'bandwidth': '10 Gbps', + 'dst_ip': '152.16.40.20', + 'local_mac': '00:00:00:00:00:02' + }, + 'vnfd-connection-point-ref': 'xe1', + 'name': 'xe1' + }, + ], + }, + ], + 'description': 'Vpe approximation using DPDK', + 'mgmt-interface': { + 'vdu-id': 'vpevnf-baremetal', + 'host': '1.1.1.1', + 'password': 'r00t', + 'user': 'root', + 'ip': '1.1.1.1' + }, + 'benchmark': { + 'kpi': [ + 'packets_in', + 'packets_fwd', + 'packets_dropped', + ], + }, + 'connection-point': [ + { + 'type': 'VPORT', + 'name': 'xe0', + }, + { + 'type': 'VPORT', + 'name': 'xe1', + }, + ], + 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' + } + + VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [ + VNFD_0, + ] + } + } + + TRAFFIC_PROFILE = { + "schema": "isb:traffic_profile:0.1", + "name": "fixed", + "description": "Fixed traffic profile to run UDP traffic", + "traffic_profile": { + "traffic_type": "FixedTraffic", + "frame_rate": 100, # pps + "flow_number": 10, + "frame_size": 64, + }, + } + + CONTEXT_CFG = { + 'nodes': { + 'tg__2': { + 'member-vnf-index': '3', + 'role': 'TrafficGen', + 'name': 'trafficgen_2.yardstick', + 'vnfd-id-ref': 'tg__2', + 'ip': '1.2.1.1', + 'interfaces': { + 'xe0': { + 'local_iface_name': 'ens513f0', + 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.40.20', + 'dst_mac': 
'00:00:00:00:00:01', + 'local_mac': '00:00:00:00:00:03', + 'dst_ip': '152.16.40.19', + 'driver': 'ixgbe', + 'vpci': '0000:02:00.0', + 'dpdk_port_num': 0, + }, + 'xe1': { + 'local_iface_name': 'ens513f1', + 'netmask': '255.255.255.0', + 'network': '202.16.100.0', + 'local_ip': '202.16.100.20', + 'local_mac': '00:1e:67:d0:60:5d', + 'driver': 'ixgbe', + 'vpci': '0000:02:00.1', + 'dpdk_port_num': 1, + }, + }, + 'password': 'r00t', + 'VNF model': 'l3fwd_vnf.yaml', + 'user': 'root', + }, + 'tg__1': { + 'member-vnf-index': '1', + 'role': 'TrafficGen', + 'name': 'trafficgen_1.yardstick', + 'vnfd-id-ref': 'tg__1', + 'ip': '1.2.1.1', + 'interfaces': { + 'xe0': { + 'local_iface_name': 'ens785f0', + 'vld_id': prox_vnf.ProxApproxVnf.UPLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.100.20', + 'dst_mac': '00:00:00:00:00:02', + 'local_mac': '00:00:00:00:00:04', + 'dst_ip': '152.16.100.19', + 'driver': 'i40e', + 'vpci': '0000:05:00.0', + 'dpdk_port_num': 0, + }, + 'xe1': { + 'local_iface_name': 'ens785f1', + 'netmask': '255.255.255.0', + 'local_ip': '152.16.100.21', + 'local_mac': '00:00:00:00:00:01', + 'driver': 'i40e', + 'vpci': '0000:05:00.1', + 'dpdk_port_num': 1, + }, + }, + 'password': 'r00t', + 'VNF model': 'tg_rfc2544_tpl.yaml', + 'user': 'root', + }, + 'vnf__1': { + 'name': 'vnf.yardstick', + 'vnfd-id-ref': 'vnf__1', + 'ip': '1.2.1.1', + 'interfaces': { + 'xe0': { + 'local_iface_name': 'ens786f0', + 'vld_id': prox_vnf.ProxApproxVnf.UPLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.100.19', + 'dst_mac': '00:00:00:00:00:04', + 'local_mac': '00:00:00:00:00:02', + 'dst_ip': '152.16.100.20', + 'driver': 'i40e', + 'vpci': '0000:05:00.0', + 'dpdk_port_num': 0, + }, + 'xe1': { + 'local_iface_name': 'ens786f1', + 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.40.19', + 'dst_mac': '00:00:00:00:00:03', + 'local_mac': '00:00:00:00:00:01', + 'dst_ip': '152.16.40.20', + 'driver': 'i40e', + 'vpci': '0000:05:00.1', + 
'dpdk_port_num': 1, + }, + }, + 'routing_table': [ + { + 'netmask': '255.255.255.0', + 'gateway': '152.16.100.20', + 'network': '152.16.100.20', + 'if': 'xe0', + }, + { + 'netmask': '255.255.255.0', + 'gateway': '152.16.40.20', + 'network': '152.16.40.20', + 'if': 'xe1', + }, + ], + 'member-vnf-index': '2', + 'host': '1.2.1.1', + 'role': 'vnf', + 'user': 'root', + 'nd_route_tbl': [ + { + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:6414', + 'network': '0064:ff9b:0:0:0:0:9810:6414', + 'if': 'xe0', + }, + { + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:2814', + 'network': '0064:ff9b:0:0:0:0:9810:2814', + 'if': 'xe1', + }, + ], + 'password': 'r00t', + 'VNF model': 'prox_vnf.yaml', + }, + }, + } + + def test___init__(self): + prox_irq_vnf = ProxIrqVNF('vnf1', self.VNFD_0) + + self.assertEqual(prox_irq_vnf.name, 'vnf1') + self.assertDictEqual(prox_irq_vnf.vnfd_helper, self.VNFD_0) + + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch(SSH_HELPER) + def test_collect_kpi(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + resource_helper = mock.MagicMock() + + resource_helper = mock.MagicMock() + + core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5, + 'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10, + 'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12, + 'overflow': 10} + core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5, + 'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0, + 'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12, + 'overflow': 10} + + irq_data = {'core_1': core_1, 'core_2': core_2} + resource_helper.execute.return_value = (irq_data) + + build_config_file = mock.MagicMock() + build_config_file.return_value = None + + prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd) + + startup = 
["global", [["eal", "-4"]]] + master_0 = ["core 0", [["mode", "master"]]] + core_1 = ["core 1", [["mode", "irq"]]] + core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]] + + prox_irq_vnf.setup_helper._prox_config_data = \ + [startup, master_0, core_1, core_2] + + prox_irq_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG + prox_irq_vnf.resource_helper = resource_helper + prox_irq_vnf.setup_helper.build_config_file = build_config_file + + result = prox_irq_vnf.collect_kpi() + self.assertDictEqual(result["collect_stats"], {}) + + result = prox_irq_vnf.collect_kpi() + self.assertFalse('bucket_10' in result["collect_stats"]['core_2']) + self.assertFalse('bucket_11' in result["collect_stats"]['core_2']) + self.assertFalse('bucket_12' in result["collect_stats"]['core_2']) + self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12) + + + @mock.patch(SSH_HELPER) + def test_vnf_execute_oserror(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd) + prox_irq_vnf.resource_helper = resource_helper = mock.Mock() + + resource_helper.execute.side_effect = OSError(errno.EPIPE, "") + prox_irq_vnf.vnf_execute("", _ignore_errors=True) + + resource_helper.execute.side_effect = OSError(errno.ESHUTDOWN, "") + prox_irq_vnf.vnf_execute("", _ignore_errors=True) + + resource_helper.execute.side_effect = OSError(errno.EADDRINUSE, "") + with self.assertRaises(OSError): + prox_irq_vnf.vnf_execute("", _ignore_errors=True) + + @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket') + @mock.patch(SSH_HELPER) + def test_terminate(self, ssh, *args): + mock_ssh(ssh) + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + + mock_ssh(ssh, exec_result=(1, "", "")) + prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd) + + prox_irq_vnf._terminated = mock.MagicMock() + prox_irq_vnf._traffic_process = mock.MagicMock() + prox_irq_vnf._traffic_process.terminate = mock.Mock() + prox_irq_vnf.ssh_helper = 
mock.MagicMock() + prox_irq_vnf.setup_helper = mock.MagicMock() + prox_irq_vnf.resource_helper = mock.MagicMock() + prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock() + prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False}) + prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock() + + prox_irq_vnf._run_prox = mock.Mock(return_value=0) + prox_irq_vnf.q_in = mock.Mock() + prox_irq_vnf.q_out = mock.Mock() + + self.assertIsNone(prox_irq_vnf.terminate()) + + @mock.patch(SSH_HELPER) + def test_wait_for_instantiate_panic(self, ssh, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + + mock_ssh(ssh, exec_result=(1, "", "")) + prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd) + + prox_irq_vnf._terminated = mock.MagicMock() + prox_irq_vnf._traffic_process = mock.MagicMock() + prox_irq_vnf._traffic_process.terminate = mock.Mock() + prox_irq_vnf.ssh_helper = mock.MagicMock() + prox_irq_vnf.setup_helper = mock.MagicMock() + prox_irq_vnf.resource_helper = mock.MagicMock() + prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock() + prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False}) + prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock() + + prox_irq_vnf._run_prox = mock.Mock(return_value=0) + prox_irq_vnf.q_in = mock.Mock() + prox_irq_vnf.q_out = mock.Mock() + prox_irq_vnf.WAIT_TIME = 0 + with self.assertRaises(RuntimeError): + prox_irq_vnf.wait_for_instantiate() + +class TestProxIrqGen(unittest.TestCase): + + SCENARIO_CFG = { + 'task_path': "", + 'nodes': { + 'tg__1': 'trafficgen_1.yardstick', + 'vnf__1': 'vnf.yardstick'}, + 'runner': { + 'duration': 600, 'type': 'Duration'}, + 'topology': 'prox-tg-topology-2.yaml', + 'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml', + 'type': 'NSPerf', + 'options': { + 'tg__1': {'prox_args': {'-e': '', + '-t': ''}, + 'prox_config': 'configs/l3-gen-2.cfg', + 'prox_path': + '/root/dppd-PROX-v035/build/prox'}, + 'vnf__1': { + 
'prox_args': {'-t': ''}, + 'prox_config': 'configs/l3-swap-2.cfg', + 'prox_path': '/root/dppd-PROX-v035/build/prox'}}} + + VNFD_0 = { + 'short-name': 'VpeVnf', + 'vdu': [ + { + 'routing_table': [ + { + 'network': '152.16.100.20', + 'netmask': '255.255.255.0', + 'gateway': '152.16.100.20', + 'if': 'xe0' + }, + { + 'network': '152.16.40.20', + 'netmask': '255.255.255.0', + 'gateway': '152.16.40.20', + 'if': 'xe1' + }, + ], + 'description': 'VPE approximation using DPDK', + 'name': 'vpevnf-baremetal', + 'nd_route_tbl': [ + { + 'network': '0064:ff9b:0:0:0:0:9810:6414', + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:6414', + 'if': 'xe0' + }, + { + 'network': '0064:ff9b:0:0:0:0:9810:2814', + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:2814', + 'if': 'xe1' + }, + ], + 'id': 'vpevnf-baremetal', + 'external-interface': [ + { + 'virtual-interface': { + 'dst_mac': '00:00:00:00:00:03', + 'vpci': '0000:05:00.0', + 'driver': 'i40e', + 'local_ip': '152.16.100.19', + 'type': 'PCI-PASSTHROUGH', + 'netmask': '255.255.255.0', + 'dpdk_port_num': 0, + 'bandwidth': '10 Gbps', + 'dst_ip': '152.16.100.20', + 'local_mac': '00:00:00:00:00:01' + }, + 'vnfd-connection-point-ref': 'xe0', + 'name': 'xe0' + }, + { + 'virtual-interface': { + 'dst_mac': '00:00:00:00:00:04', + 'vpci': '0000:05:00.1', + 'driver': 'ixgbe', + 'local_ip': '152.16.40.19', + 'type': 'PCI-PASSTHROUGH', + 'netmask': '255.255.255.0', + 'dpdk_port_num': 1, + 'bandwidth': '10 Gbps', + 'dst_ip': '152.16.40.20', + 'local_mac': '00:00:00:00:00:02' + }, + 'vnfd-connection-point-ref': 'xe1', + 'name': 'xe1' + }, + ], + }, + ], + 'description': 'Vpe approximation using DPDK', + 'mgmt-interface': { + 'vdu-id': 'vpevnf-baremetal', + 'host': '1.1.1.1', + 'password': 'r00t', + 'user': 'root', + 'ip': '1.1.1.1' + }, + 'benchmark': { + 'kpi': [ + 'packets_in', + 'packets_fwd', + 'packets_dropped', + ], + }, + 'connection-point': [ + { + 'type': 'VPORT', + 'name': 'xe0', + }, + { + 'type': 'VPORT', + 'name': 'xe1', + 
}, + ], + 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' + } + + VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [ + VNFD_0, + ], + }, + } + + TRAFFIC_PROFILE = { + "schema": "isb:traffic_profile:0.1", + "name": "fixed", + "description": "Fixed traffic profile to run UDP traffic", + "traffic_profile": { + "traffic_type": "FixedTraffic", + "frame_rate": 100, # pps + "flow_number": 10, + "frame_size": 64, + }, + } + + CONTEXT_CFG = { + 'nodes': { + 'tg__2': { + 'member-vnf-index': '3', + 'role': 'TrafficGen', + 'name': 'trafficgen_2.yardstick', + 'vnfd-id-ref': 'tg__2', + 'ip': '1.2.1.1', + 'interfaces': { + 'xe0': { + 'local_iface_name': 'ens513f0', + 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.40.20', + 'dst_mac': '00:00:00:00:00:01', + 'local_mac': '00:00:00:00:00:03', + 'dst_ip': '152.16.40.19', + 'driver': 'ixgbe', + 'vpci': '0000:02:00.0', + 'dpdk_port_num': 0, + }, + 'xe1': { + 'local_iface_name': 'ens513f1', + 'netmask': '255.255.255.0', + 'network': '202.16.100.0', + 'local_ip': '202.16.100.20', + 'local_mac': '00:1e:67:d0:60:5d', + 'driver': 'ixgbe', + 'vpci': '0000:02:00.1', + 'dpdk_port_num': 1, + }, + }, + 'password': 'r00t', + 'VNF model': 'l3fwd_vnf.yaml', + 'user': 'root', + }, + 'tg__1': { + 'member-vnf-index': '1', + 'role': 'TrafficGen', + 'name': 'trafficgen_1.yardstick', + 'vnfd-id-ref': 'tg__1', + 'ip': '1.2.1.1', + 'interfaces': { + 'xe0': { + 'local_iface_name': 'ens785f0', + 'vld_id': prox_vnf.ProxApproxVnf.UPLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.100.20', + 'dst_mac': '00:00:00:00:00:02', + 'local_mac': '00:00:00:00:00:04', + 'dst_ip': '152.16.100.19', + 'driver': 'i40e', + 'vpci': '0000:05:00.0', + 'dpdk_port_num': 0, + }, + 'xe1': { + 'local_iface_name': 'ens785f1', + 'netmask': '255.255.255.0', + 'local_ip': '152.16.100.21', + 'local_mac': '00:00:00:00:00:01', + 'driver': 'i40e', + 'vpci': '0000:05:00.1', + 'dpdk_port_num': 1, + }, + }, + 'password': 'r00t', + 'VNF model': 
'tg_rfc2544_tpl.yaml', + 'user': 'root', + }, + 'vnf__1': { + 'name': 'vnf.yardstick', + 'vnfd-id-ref': 'vnf__1', + 'ip': '1.2.1.1', + 'interfaces': { + 'xe0': { + 'local_iface_name': 'ens786f0', + 'vld_id': prox_vnf.ProxApproxVnf.UPLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.100.19', + 'dst_mac': '00:00:00:00:00:04', + 'local_mac': '00:00:00:00:00:02', + 'dst_ip': '152.16.100.20', + 'driver': 'i40e', + 'vpci': '0000:05:00.0', + 'dpdk_port_num': 0, + }, + 'xe1': { + 'local_iface_name': 'ens786f1', + 'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK, + 'netmask': '255.255.255.0', + 'local_ip': '152.16.40.19', + 'dst_mac': '00:00:00:00:00:03', + 'local_mac': '00:00:00:00:00:01', + 'dst_ip': '152.16.40.20', + 'driver': 'i40e', + 'vpci': '0000:05:00.1', + 'dpdk_port_num': 1, + }, + }, + 'routing_table': [ + { + 'netmask': '255.255.255.0', + 'gateway': '152.16.100.20', + 'network': '152.16.100.20', + 'if': 'xe0', + }, + { + 'netmask': '255.255.255.0', + 'gateway': '152.16.40.20', + 'network': '152.16.40.20', + 'if': 'xe1', + }, + ], + 'member-vnf-index': '2', + 'host': '1.2.1.1', + 'role': 'vnf', + 'user': 'root', + 'nd_route_tbl': [ + { + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:6414', + 'network': '0064:ff9b:0:0:0:0:9810:6414', + 'if': 'xe0', + }, + { + 'netmask': '112', + 'gateway': '0064:ff9b:0:0:0:0:9810:2814', + 'network': '0064:ff9b:0:0:0:0:9810:2814', + 'if': 'xe1', + }, + ], + 'password': 'r00t', + 'VNF model': 'prox_vnf.yaml', + }, + }, + } + + + def test__check_status(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + + with self.assertRaises(NotImplementedError): + prox_irq_gen._check_status() + + def test_listen_traffic(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + + prox_irq_gen.listen_traffic(mock.Mock()) + + def test_verify_traffic(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + + prox_irq_gen.verify_traffic(mock.Mock()) + + @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket') + 
@mock.patch(SSH_HELPER) + def test_terminate(self, ssh, *args): + mock_ssh(ssh) + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + prox_traffic_gen = ProxIrqGen(VNF_NAME, vnfd) + prox_traffic_gen._terminated = mock.MagicMock() + prox_traffic_gen._traffic_process = mock.MagicMock() + prox_traffic_gen._traffic_process.terminate = mock.Mock() + prox_traffic_gen.ssh_helper = mock.MagicMock() + prox_traffic_gen.setup_helper = mock.MagicMock() + prox_traffic_gen.resource_helper = mock.MagicMock() + prox_traffic_gen._vnf_wrapper.setup_helper = mock.MagicMock() + prox_traffic_gen._vnf_wrapper._vnf_process = mock.MagicMock() + prox_traffic_gen._vnf_wrapper.resource_helper = mock.MagicMock() + self.assertIsNone(prox_traffic_gen.terminate()) + + def test__wait_for_process(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + with mock.patch.object(prox_irq_gen, '_check_status', + return_value=0) as mock_status, \ + mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc: + mock_proc.is_alive.return_value = True + mock_proc.exitcode = 234 + self.assertEqual(prox_irq_gen._wait_for_process(), 234) + mock_proc.is_alive.assert_called_once() + mock_status.assert_called_once() + + def test__wait_for_process_not_alive(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + with mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc: + mock_proc.is_alive.return_value = False + self.assertRaises(RuntimeError, prox_irq_gen._wait_for_process) + mock_proc.is_alive.assert_called_once() + + def test__wait_for_process_delayed(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + with mock.patch.object(prox_irq_gen, '_check_status', + side_effect=[1, 0]) as mock_status, \ + mock.patch.object(prox_irq_gen, + '_tg_process') as mock_proc: + mock_proc.is_alive.return_value = True + mock_proc.exitcode = 234 + self.assertEqual(prox_irq_gen._wait_for_process(), 234) + mock_proc.is_alive.assert_has_calls([mock.call(), mock.call()]) + mock_status.assert_has_calls([mock.call(), 
mock.call()]) + + def test_scale(self): + prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0) + self.assertRaises(y_exceptions.FunctionNotImplemented, + prox_irq_gen.scale) + + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch(SSH_HELPER) + def test_collect_kpi(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + resource_helper = mock.MagicMock() + + core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5, + 'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10, + 'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12, + 'overflow': 10} + core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5, + 'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0, + 'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12, + 'overflow': 10} + + irq_data = {'core_1': core_1, 'core_2': core_2} + resource_helper.sut.irq_core_stats.return_value = (irq_data) + + build_config_file = mock.MagicMock() + build_config_file.return_value = None + + prox_irq_gen = ProxIrqGen(VNF_NAME, vnfd) + + startup = ["global", [["eal", "-4"]]] + master_0 = ["core 0", [["mode", "master"]]] + core_1 = ["core 1", [["mode", "irq"]]] + core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]] + + prox_irq_gen.setup_helper._prox_config_data = \ + [startup, master_0, core_1, core_2] + + prox_irq_gen.scenario_helper.scenario_cfg = self.SCENARIO_CFG + prox_irq_gen.resource_helper = resource_helper + prox_irq_gen.setup_helper.build_config_file = build_config_file + + result = prox_irq_gen.collect_kpi() + self.assertDictEqual(result["collect_stats"], {}) + + result = prox_irq_gen.collect_kpi() + self.assertFalse('bucket_10' in result["collect_stats"]['core_2']) + self.assertFalse('bucket_11' in result["collect_stats"]['core_2']) + self.assertFalse('bucket_12' in 
result["collect_stats"]['core_2']) + self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py index f144e8c42..76fd74dfe 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -317,7 +317,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test___init__(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) self.assertIsNone(prox_approx_vnf._vnf_process) @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @@ -325,7 +325,7 @@ class TestProxApproxVnf(unittest.TestCase): def test_collect_kpi_no_client(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {prox_approx_vnf.name: "mock"} } @@ -335,6 +335,8 @@ class TestProxApproxVnf(unittest.TestCase): 'packets_in': 0, 'packets_dropped': 0, 'packets_fwd': 0, + 'curr_packets_in': 0, + 'curr_packets_fwd': 0, 'collect_stats': {'core': {}} } result = prox_approx_vnf.collect_kpi() @@ -346,29 +348,70 @@ class TestProxApproxVnf(unittest.TestCase): mock_ssh(ssh) resource_helper = mock.MagicMock() - resource_helper.execute.return_value = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5], - [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]] + resource_helper.execute.return_value = (True, + [[0, 1, 2, 
3, 4, 5], [1, 1, 2, 3, 4, 5]]) resource_helper.collect_collectd_kpi.return_value = {'core': {'result': 234}} - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {prox_approx_vnf.name: "mock"} } prox_approx_vnf.resource_helper = resource_helper + prox_approx_vnf.tsc_hz = 1000 expected = { + 'curr_packets_in': 200, + 'curr_packets_fwd': 400, 'physical_node': 'mock_node', - 'packets_in': 4, - 'packets_dropped': 4, - 'packets_fwd': 8, + 'packets_in': 2, + 'packets_dropped': 2, + 'packets_fwd': 4, 'collect_stats': {'core': {'result': 234}}, } result = prox_approx_vnf.collect_kpi() self.assertEqual(result['packets_in'], expected['packets_in']) self.assertEqual(result['packets_dropped'], expected['packets_dropped']) self.assertEqual(result['packets_fwd'], expected['packets_fwd']) - self.assertNotEqual(result['packets_fwd'], 0) - self.assertNotEqual(result['packets_fwd'], 0) + self.assertEqual(result['curr_packets_in'], expected['curr_packets_in']) + self.assertEqual(result['curr_packets_fwd'], expected['curr_packets_fwd']) + + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch(SSH_HELPER) + def test_collect_kpi_bad_input(self, ssh, *args): + mock_ssh(ssh) + + resource_helper = mock.MagicMock() + resource_helper.execute.return_value = (True, + [[0, 'A', 'B', 'C', 'D', 'E'], + ['F', 1, 2, 3, 4, 5]]) + + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {prox_approx_vnf.name: "mock"} + } + prox_approx_vnf.resource_helper = resource_helper + + result = prox_approx_vnf.collect_kpi() + self.assertDictEqual(result, {}) + + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch(SSH_HELPER) + def test_collect_kpi_bad_input2(self, ssh, *args): + mock_ssh(ssh) + + 
resource_helper = mock.MagicMock() + resource_helper.execute.return_value = (False, + [[0, 'A', 'B', 'C', 'D', 'E'], + ['F', 1, 2, 3, 4, 5]]) + + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) + prox_approx_vnf.scenario_helper.scenario_cfg = { + 'nodes': {prox_approx_vnf.name: "mock"} + } + prox_approx_vnf.resource_helper = resource_helper + + result = prox_approx_vnf.collect_kpi() + self.assertDictEqual(result, {}) @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) @@ -376,8 +419,7 @@ class TestProxApproxVnf(unittest.TestCase): mock_ssh(ssh) resource_helper = mock.MagicMock() - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, deepcopy(self.VNFD0), - 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, deepcopy(self.VNFD0)) prox_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {prox_approx_vnf.name: "mock"} } @@ -400,7 +442,7 @@ class TestProxApproxVnf(unittest.TestCase): def test_run_prox(self, ssh, *_): mock_ssh(ssh) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG prox_approx_vnf.ssh_helper.join_bin_path.return_value = '/tool_path12/tool_file34' prox_approx_vnf.setup_helper.remote_path = 'configs/file56.cfg' @@ -414,7 +456,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def bad_test_instantiate(self, *args): - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.scenario_helper = mock.MagicMock() prox_approx_vnf.setup_helper = mock.MagicMock() # we can't mock super @@ -424,7 +466,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test_wait_for_instantiate_panic(self, ssh, *args): mock_ssh(ssh, exec_result=(1, "", "")) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 
'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf._vnf_process = mock.MagicMock(**{"is_alive.return_value": True}) prox_approx_vnf._run_prox = mock.Mock(return_value=0) prox_approx_vnf.WAIT_TIME = 0 @@ -436,7 +478,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test_terminate(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf._vnf_process = mock.MagicMock() prox_approx_vnf._vnf_process.terminate = mock.Mock() prox_approx_vnf.ssh_helper = mock.MagicMock() @@ -448,7 +490,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test__vnf_up_post(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.resource_helper = resource_helper = mock.Mock() prox_approx_vnf._vnf_up_post() @@ -457,7 +499,7 @@ class TestProxApproxVnf(unittest.TestCase): @mock.patch(SSH_HELPER) def test_vnf_execute_oserror(self, ssh, *args): mock_ssh(ssh) - prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id') + prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0) prox_approx_vnf.resource_helper = resource_helper = mock.Mock() resource_helper.execute.side_effect = OSError(errno.EPIPE, "") diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py index ad74145b4..b8f3fcaca 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except 
in compliance with the License. @@ -199,7 +199,7 @@ class TestRouterVNF(unittest.TestCase): def test___init__(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - router_vnf = RouterVNF(name, vnfd, 'task_id') + router_vnf = RouterVNF(name, vnfd) self.assertIsNone(router_vnf._vnf_process) def test_get_stats(self): @@ -213,7 +213,7 @@ class TestRouterVNF(unittest.TestCase): m = mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - router_vnf = RouterVNF(name, vnfd, 'task_id') + router_vnf = RouterVNF(name, vnfd) router_vnf.scenario_helper.scenario_cfg = { 'nodes': {router_vnf.name: "mock"} } @@ -232,7 +232,7 @@ class TestRouterVNF(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - router_vnf = RouterVNF(name, vnfd, 'task_id') + router_vnf = RouterVNF(name, vnfd) router_vnf.scenario_helper.scenario_cfg = self.scenario_cfg router_vnf._run() router_vnf.ssh_helper.drop_connection.assert_called_once() @@ -243,7 +243,7 @@ class TestRouterVNF(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - router_vnf = RouterVNF(name, vnfd, 'task_id') + router_vnf = RouterVNF(name, vnfd) router_vnf.WAIT_TIME = 0 router_vnf.INTERFACE_WAIT = 0 self.scenario_cfg.update({"nodes": {"vnf__1": ""}}) @@ -256,7 +256,7 @@ class TestRouterVNF(unittest.TestCase): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - router_vnf = RouterVNF(name, vnfd, 'task_id') + router_vnf = RouterVNF(name, vnfd) router_vnf._vnf_process = mock.MagicMock() router_vnf._vnf_process.terminate = mock.Mock() self.assertIsNone(router_vnf.terminate()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py index 4a1d8c30e..21f0c5e1f 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 
2017-2018 Intel Corporation +# Copyright (c) 2017-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,22 +17,18 @@ from copy import deepcopy import unittest import mock import six +import subprocess +import time + +import paramiko from yardstick.common import exceptions as y_exceptions from yardstick.common import utils -from yardstick.network_services.nfvi.resource import ResourceProfile -from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper +from yardstick.network_services.nfvi import resource +from yardstick.network_services.vnf_generic.vnf import base from yardstick.network_services.vnf_generic.vnf import sample_vnf -from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFDeployHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import ResourceHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper -from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF -from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen -from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper +from yardstick.network_services.vnf_generic.vnf import vnf_ssh_helper +from yardstick import ssh from yardstick.tests.unit.network_services.vnf_generic.vnf import test_base from yardstick.benchmark.contexts import base as ctx_base @@ -83,10 +79,11 @@ class TestVnfSshHelper(unittest.TestCase): 'virtual-interface': { 'dst_mac': '00:00:00:00:00:03', 'vpci': '0000:05:00.0', + 'dpdk_port_num': 
0, + 'driver': 'i40e', 'local_ip': '152.16.100.19', 'type': 'PCI-PASSTHROUGH', 'netmask': '255.255.255.0', - 'dpdk_port_num': 0, 'bandwidth': '10 Gbps', 'dst_ip': '152.16.100.20', 'local_mac': '00:00:00:00:00:01', @@ -100,10 +97,11 @@ class TestVnfSshHelper(unittest.TestCase): 'virtual-interface': { 'dst_mac': '00:00:00:00:00:04', 'vpci': '0000:05:00.1', + 'dpdk_port_num': 1, + 'driver': 'ixgbe', 'local_ip': '152.16.40.19', 'type': 'PCI-PASSTHROUGH', 'netmask': '255.255.255.0', - 'dpdk_port_num': 1, 'bandwidth': '10 Gbps', 'dst_ip': '152.16.40.20', 'local_mac': '00:00:00:00:00:02', @@ -152,332 +150,131 @@ class TestVnfSshHelper(unittest.TestCase): } } + def setUp(self): + self.ssh_helper = vnf_ssh_helper.VnfSshHelper( + self.VNFD_0['mgmt-interface'], 'my/bin/path') + self.ssh_helper._run = mock.Mock() + def assertAll(self, iterable, message=None): self.assertTrue(all(iterable), message) def test_get_class(self): - self.assertIs(VnfSshHelper.get_class(), VnfSshHelper) + self.assertIs(vnf_ssh_helper.VnfSshHelper.get_class(), + vnf_ssh_helper.VnfSshHelper) - @mock.patch('yardstick.ssh.paramiko') + @mock.patch.object(ssh, 'paramiko') def test_copy(self, _): - ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path') - ssh_helper._run = mock.Mock() - - ssh_helper.execute('ls') - self.assertTrue(ssh_helper.is_connected) - result = ssh_helper.copy() - self.assertIsInstance(result, VnfSshHelper) + self.ssh_helper.execute('ls') + self.assertTrue(self.ssh_helper.is_connected) + result = self.ssh_helper.copy() + self.assertIsInstance(result, vnf_ssh_helper.VnfSshHelper) self.assertFalse(result.is_connected) - self.assertEqual(result.bin_path, ssh_helper.bin_path) - self.assertEqual(result.host, ssh_helper.host) - self.assertEqual(result.port, ssh_helper.port) - self.assertEqual(result.user, ssh_helper.user) - self.assertEqual(result.password, ssh_helper.password) - self.assertEqual(result.key_filename, ssh_helper.key_filename) - - 
@mock.patch('yardstick.ssh.paramiko') + self.assertEqual(result.bin_path, self.ssh_helper.bin_path) + self.assertEqual(result.host, self.ssh_helper.host) + self.assertEqual(result.port, self.ssh_helper.port) + self.assertEqual(result.user, self.ssh_helper.user) + self.assertEqual(result.password, self.ssh_helper.password) + self.assertEqual(result.key_filename, self.ssh_helper.key_filename) + + @mock.patch.object(paramiko, 'SSHClient') def test_upload_config_file(self, mock_paramiko): - ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path') - ssh_helper._run = mock.MagicMock() + self.assertFalse(self.ssh_helper.is_connected) + cfg_file = self.ssh_helper.upload_config_file('/my/prefix', 'my content') + self.assertTrue(self.ssh_helper.is_connected) + mock_paramiko.assert_called_once() + self.assertEqual(cfg_file, '/my/prefix') - self.assertFalse(ssh_helper.is_connected) - cfg_file = ssh_helper.upload_config_file('my/prefix', 'my content') - self.assertTrue(ssh_helper.is_connected) - mock_paramiko.SSHClient.assert_called_once() + @mock.patch.object(paramiko, 'SSHClient') + def test_upload_config_file_path_does_not_exist(self, mock_paramiko): + self.assertFalse(self.ssh_helper.is_connected) + cfg_file = self.ssh_helper.upload_config_file('my/prefix', 'my content') + self.assertTrue(self.ssh_helper.is_connected) + mock_paramiko.assert_called_once() self.assertTrue(cfg_file.startswith('/tmp')) - cfg_file = ssh_helper.upload_config_file('/my/prefix', 'my content') - self.assertTrue(ssh_helper.is_connected) - mock_paramiko.SSHClient.assert_called_once() - self.assertEqual(cfg_file, '/my/prefix') - def test_join_bin_path(self): - ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path') - expected_start = 'my' expected_middle_list = ['bin'] expected_end = 'path' - result = ssh_helper.join_bin_path() + result = self.ssh_helper.join_bin_path() self.assertTrue(result.startswith(expected_start)) self.assertAll(middle in result for middle in 
expected_middle_list) self.assertTrue(result.endswith(expected_end)) expected_middle_list.append(expected_end) expected_end = 'some_file.sh' - result = ssh_helper.join_bin_path('some_file.sh') + result = self.ssh_helper.join_bin_path('some_file.sh') self.assertTrue(result.startswith(expected_start)) self.assertAll(middle in result for middle in expected_middle_list) self.assertTrue(result.endswith(expected_end)) expected_middle_list.append('some_dir') expected_end = 'some_file.sh' - result = ssh_helper.join_bin_path('some_dir', 'some_file.sh') + result = self.ssh_helper.join_bin_path('some_dir', 'some_file.sh') self.assertTrue(result.startswith(expected_start)) self.assertAll(middle in result for middle in expected_middle_list) self.assertTrue(result.endswith(expected_end)) - @mock.patch('yardstick.ssh.paramiko') - @mock.patch('yardstick.ssh.provision_tool') + @mock.patch.object(paramiko, 'SSHClient') + @mock.patch.object(ssh, 'provision_tool') def test_provision_tool(self, mock_provision_tool, mock_paramiko): - ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path') - ssh_helper._run = mock.MagicMock() - - self.assertFalse(ssh_helper.is_connected) - ssh_helper.provision_tool() - self.assertTrue(ssh_helper.is_connected) - mock_paramiko.SSHClient.assert_called_once() + self.assertFalse(self.ssh_helper.is_connected) + self.ssh_helper.provision_tool() + self.assertTrue(self.ssh_helper.is_connected) + mock_paramiko.assert_called_once() mock_provision_tool.assert_called_once() - ssh_helper.provision_tool(tool_file='my_tool.sh') - self.assertTrue(ssh_helper.is_connected) - mock_paramiko.SSHClient.assert_called_once() + self.ssh_helper.provision_tool(tool_file='my_tool.sh') + self.assertTrue(self.ssh_helper.is_connected) + mock_paramiko.assert_called_once() self.assertEqual(mock_provision_tool.call_count, 2) - ssh_helper.provision_tool('tool_path', 'my_tool.sh') - self.assertTrue(ssh_helper.is_connected) - mock_paramiko.SSHClient.assert_called_once() + 
self.ssh_helper.provision_tool('tool_path', 'my_tool.sh') + self.assertTrue(self.ssh_helper.is_connected) + mock_paramiko.assert_called_once() self.assertEqual(mock_provision_tool.call_count, 3) class TestSetupEnvHelper(unittest.TestCase): - VNFD_0 = { - 'short-name': 'VpeVnf', - 'vdu': [ - { - 'routing_table': [ - { - 'network': '152.16.100.20', - 'netmask': '255.255.255.0', - 'gateway': '152.16.100.20', - 'if': 'xe0' - }, - { - 'network': '152.16.40.20', - 'netmask': '255.255.255.0', - 'gateway': '152.16.40.20', - 'if': 'xe1' - }, - ], - 'description': 'VPE approximation using DPDK', - 'name': 'vpevnf-baremetal', - 'nd_route_tbl': [ - { - 'network': '0064:ff9b:0:0:0:0:9810:6414', - 'netmask': '112', - 'gateway': '0064:ff9b:0:0:0:0:9810:6414', - 'if': 'xe0' - }, - { - 'network': '0064:ff9b:0:0:0:0:9810:2814', - 'netmask': '112', - 'gateway': '0064:ff9b:0:0:0:0:9810:2814', - 'if': 'xe1' - }, - ], - 'id': 'vpevnf-baremetal', - 'external-interface': [ - { - 'virtual-interface': { - 'dst_mac': '00:00:00:00:00:03', - 'vpci': '0000:05:00.0', - 'local_ip': '152.16.100.19', - 'type': 'PCI-PASSTHROUGH', - 'netmask': '255.255.255.0', - 'dpdk_port_num': 0, - 'bandwidth': '10 Gbps', - 'dst_ip': '152.16.100.20', - 'local_mac': '00:00:00:00:00:01', - 'vld_id': 'uplink_0', - 'ifname': 'xe0', - }, - 'vnfd-connection-point-ref': 'xe0', - 'name': 'xe0' - }, - { - 'virtual-interface': { - 'dst_mac': '00:00:00:00:00:04', - 'vpci': '0000:05:00.1', - 'local_ip': '152.16.40.19', - 'type': 'PCI-PASSTHROUGH', - 'netmask': '255.255.255.0', - 'dpdk_port_num': 1, - 'bandwidth': '10 Gbps', - 'dst_ip': '152.16.40.20', - 'local_mac': '00:00:00:00:00:02', - 'vld_id': 'downlink_0', - 'ifname': 'xe1', - }, - 'vnfd-connection-point-ref': 'xe1', - 'name': 'xe1' - }, - ], - }, - ], - 'description': 'Vpe approximation using DPDK', - 'mgmt-interface': { - 'vdu-id': 'vpevnf-baremetal', - 'host': '1.1.1.1', - 'password': 'r00t', - 'user': 'root', - 'ip': '1.1.1.1' - }, - 'benchmark': { - 'kpi': [ - 
'packets_in', - 'packets_fwd', - 'packets_dropped', - ], - }, - 'connection-point': [ - { - 'type': 'VPORT', - 'name': 'xe0', - }, - { - 'type': 'VPORT', - 'name': 'xe1', - }, - ], - 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' - } + VNFD_0 = TestVnfSshHelper.VNFD_0 - def test_build_config(self): - setup_env_helper = SetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock()) + def setUp(self): + self.setup_env_helper = sample_vnf.SetupEnvHelper( + mock.Mock(), mock.Mock(), mock.Mock()) + def test_build_config(self): with self.assertRaises(NotImplementedError): - setup_env_helper.build_config() + self.setup_env_helper.build_config() def test_setup_vnf_environment(self): - setup_env_helper = SetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock()) - self.assertIsNone(setup_env_helper.setup_vnf_environment()) + self.assertIsNone(self.setup_env_helper.setup_vnf_environment()) def test_tear_down(self): - setup_env_helper = SetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock()) - with self.assertRaises(NotImplementedError): - setup_env_helper.tear_down() + self.setup_env_helper.tear_down() class TestDpdkVnfSetupEnvHelper(unittest.TestCase): - VNFD_0 = { - 'short-name': 'VpeVnf', - 'vdu': [ - { - 'routing_table': [ - { - 'network': '152.16.100.20', - 'netmask': '255.255.255.0', - 'gateway': '152.16.100.20', - 'if': 'xe0' - }, - { - 'network': '152.16.40.20', - 'netmask': '255.255.255.0', - 'gateway': '152.16.40.20', - 'if': 'xe1' - }, - ], - 'description': 'VPE approximation using DPDK', - 'name': 'vpevnf-baremetal', - 'nd_route_tbl': [ - { - 'network': '0064:ff9b:0:0:0:0:9810:6414', - 'netmask': '112', - 'gateway': '0064:ff9b:0:0:0:0:9810:6414', - 'if': 'xe0' - }, - { - 'network': '0064:ff9b:0:0:0:0:9810:2814', - 'netmask': '112', - 'gateway': '0064:ff9b:0:0:0:0:9810:2814', - 'if': 'xe1' - }, - ], - 'id': 'vpevnf-baremetal', - 'external-interface': [ - { - 'virtual-interface': { - 'dst_mac': '00:00:00:00:00:03', - 'vpci': '0000:05:00.0', - 'dpdk_port_num': 0, - 'driver': 'i40e', - 
'local_ip': '152.16.100.19', - 'type': 'PCI-PASSTHROUGH', - 'netmask': '255.255.255.0', - 'bandwidth': '10 Gbps', - 'dst_ip': '152.16.100.20', - 'local_mac': '00:00:00:00:00:01', - 'vld_id': 'uplink_0', - 'ifname': 'xe0', - }, - 'vnfd-connection-point-ref': 'xe0', - 'name': 'xe0' - }, - { - 'virtual-interface': { - 'dst_mac': '00:00:00:00:00:04', - 'vpci': '0000:05:00.1', - 'dpdk_port_num': 1, - 'driver': 'ixgbe', - 'local_ip': '152.16.40.19', - 'type': 'PCI-PASSTHROUGH', - 'netmask': '255.255.255.0', - 'bandwidth': '10 Gbps', - 'dst_ip': '152.16.40.20', - 'local_mac': '00:00:00:00:00:02', - 'vld_id': 'downlink_0', - 'ifname': 'xe1', - }, - 'vnfd-connection-point-ref': 'xe1', - 'name': 'xe1' - }, - ], - }, - ], - 'description': 'Vpe approximation using DPDK', - 'mgmt-interface': { - 'vdu-id': 'vpevnf-baremetal', - 'host': '1.1.1.1', - 'password': 'r00t', - 'user': 'root', - 'ip': '1.1.1.1' - }, - 'benchmark': { - 'kpi': [ - 'packets_in', - 'packets_fwd', - 'packets_dropped', - ], - }, - 'connection-point': [ - { - 'type': 'VPORT', - 'name': 'xe0', - }, - { - 'type': 'VPORT', - 'name': 'xe1', - }, - ], - 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' - } + VNFD_0 = TestVnfSshHelper.VNFD_0 - VNFD = { - 'vnfd:vnfd-catalog': { - 'vnfd': [ - VNFD_0, - ] - } - } + VNFD = TestVnfSshHelper.VNFD + + def setUp(self): + self.vnfd_helper = base.VnfdHelper(deepcopy(self.VNFD_0)) + self.scenario_helper = mock.Mock() + self.ssh_helper = mock.Mock() + self.dpdk_setup_helper = sample_vnf.DpdkVnfSetupEnvHelper( + self.vnfd_helper, self.ssh_helper, self.scenario_helper) def test__update_packet_type(self): ip_pipeline_cfg = 'pkt_type = ipv4' pkt_type = {'pkt_type': '1'} expected = "pkt_type = 1" - result = DpdkVnfSetupEnvHelper._update_packet_type(ip_pipeline_cfg, pkt_type) + result = self.dpdk_setup_helper._update_packet_type( + ip_pipeline_cfg, pkt_type) self.assertEqual(result, expected) def test__update_packet_type_no_op(self): @@ -485,123 +282,99 @@ class 
TestDpdkVnfSetupEnvHelper(unittest.TestCase): pkt_type = {'pkt_type': '1'} expected = "pkt_type = ipv6" - result = DpdkVnfSetupEnvHelper._update_packet_type(ip_pipeline_cfg, pkt_type) + result = self.dpdk_setup_helper._update_packet_type( + ip_pipeline_cfg, pkt_type) self.assertEqual(result, expected) def test__update_packet_type_multi_op(self): ip_pipeline_cfg = 'pkt_type = ipv4\npkt_type = 1\npkt_type = ipv4' pkt_type = {'pkt_type': '1'} - expected = 'pkt_type = 1\npkt_type = 1\npkt_type = 1' - result = DpdkVnfSetupEnvHelper._update_packet_type(ip_pipeline_cfg, pkt_type) + + result = self.dpdk_setup_helper._update_packet_type( + ip_pipeline_cfg, pkt_type) self.assertEqual(result, expected) def test__update_traffic_type(self): ip_pipeline_cfg = 'pkt_type = ipv4' - - traffic_options = {"vnf_type": DpdkVnfSetupEnvHelper.APP_NAME, 'traffic_type': 4} + traffic_options = { + "vnf_type": sample_vnf.DpdkVnfSetupEnvHelper.APP_NAME, + "traffic_type": 4} expected = "pkt_type = ipv4" - result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options) + + result = self.dpdk_setup_helper._update_traffic_type( + ip_pipeline_cfg, traffic_options) self.assertEqual(result, expected) def test__update_traffic_type_ipv6(self): ip_pipeline_cfg = 'pkt_type = ipv4' - - traffic_options = {"vnf_type": DpdkVnfSetupEnvHelper.APP_NAME, 'traffic_type': 6} + traffic_options = { + "vnf_type": sample_vnf.DpdkVnfSetupEnvHelper.APP_NAME, + "traffic_type": 6} expected = "pkt_type = ipv6" - result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options) + + result = self.dpdk_setup_helper._update_traffic_type( + ip_pipeline_cfg, traffic_options) self.assertEqual(result, expected) def test__update_traffic_type_not_app_name(self): ip_pipeline_cfg = 'traffic_type = 4' - - vnf_type = ''.join(["Not", DpdkVnfSetupEnvHelper.APP_NAME]) + vnf_type = ''.join(["Not", sample_vnf.DpdkVnfSetupEnvHelper.APP_NAME]) traffic_options = {"vnf_type": vnf_type, 'traffic_type': 8} 
expected = "traffic_type = 8" - result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options) + + result = self.dpdk_setup_helper._update_traffic_type( + ip_pipeline_cfg, traffic_options) self.assertEqual(result, expected) - @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n')) - @mock.patch.object(utils, 'read_meminfo', - return_value={'Hugepagesize': '2048'}) - def test__setup_hugepages_no_hugepages_defined(self, mock_meminfo, *args): - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - scenario_helper.all_options = {} - dpdk_setup_helper = DpdkVnfSetupEnvHelper( - mock.ANY, ssh_helper, scenario_helper) - with mock.patch.object(sample_vnf.LOG, 'info') as mock_info: - dpdk_setup_helper._setup_hugepages() - mock_info.assert_called_once_with( - 'Hugepages size (kB): %s, number claimed: %s, number set: ' - '%s', 2048, 8192, 100) - mock_meminfo.assert_called_once_with(ssh_helper) - - @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n')) - @mock.patch.object(utils, 'read_meminfo', - return_value={'Hugepagesize': '1048576'}) - def test__setup_hugepages_8gb_hugepages_defined(self, mock_meminfo, *args): - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - scenario_helper.all_options = {'hugepages_gb': 8} - dpdk_setup_helper = DpdkVnfSetupEnvHelper( - mock.ANY, ssh_helper, scenario_helper) - with mock.patch.object(sample_vnf.LOG, 'info') as mock_info: - dpdk_setup_helper._setup_hugepages() - mock_info.assert_called_once_with( - 'Hugepages size (kB): %s, number claimed: %s, number set: ' - '%s', 1048576, 8, 100) - mock_meminfo.assert_called_once_with(ssh_helper) - - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open') + @mock.patch.object(six.moves.builtins, 'open') @mock.patch.object(utils, 'find_relative_file') - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig') - @mock.patch.object(utils, 'open_relative_file') - def test_build_config(self, 
mock_open_rf, mock_multi_port_config_class, mock_find, *args): + @mock.patch.object(sample_vnf, 'MultiPortConfig') + def test_build_config(self, mock_multi_port_config_class, + mock_find, *args): mock_multi_port_config = mock_multi_port_config_class() - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - scenario_helper.vnf_cfg = {} - scenario_helper.options = {} - scenario_helper.all_options = {} - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) + self.scenario_helper.vnf_cfg = {} + self.scenario_helper.options = {} + self.scenario_helper.all_options = {} - dpdk_setup_helper.PIPELINE_COMMAND = expected = 'pipeline command' - result = dpdk_setup_helper.build_config() + self.dpdk_setup_helper.PIPELINE_COMMAND = expected = 'pipeline command' + result = self.dpdk_setup_helper.build_config() self.assertEqual(result, expected) - self.assertGreaterEqual(ssh_helper.upload_config_file.call_count, 2) + self.assertGreaterEqual(self.ssh_helper.upload_config_file.call_count, 2) mock_find.assert_called() mock_multi_port_config.generate_config.assert_called() mock_multi_port_config.generate_script.assert_called() - scenario_helper.options = {'rules': 'fake_file'} - scenario_helper.vnf_cfg = {'file': 'fake_file'} - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) + @mock.patch.object(six.moves.builtins, 'open') + @mock.patch.object(utils, 'find_relative_file') + @mock.patch.object(sample_vnf, 'MultiPortConfig') + @mock.patch.object(utils, 'open_relative_file') + def test_build_config2(self, mock_open_rf, mock_multi_port_config_class, + mock_find, *args): + mock_multi_port_config = mock_multi_port_config_class() + self.scenario_helper.options = {'rules': 'fake_file'} + self.scenario_helper.vnf_cfg = {'file': 'fake_file'} + self.scenario_helper.all_options = {} mock_open_rf.side_effect = mock.mock_open(read_data='fake_data') - dpdk_setup_helper.PIPELINE_COMMAND = 
expected = 'pipeline command' + self.dpdk_setup_helper.PIPELINE_COMMAND = expected = 'pipeline command' - result = dpdk_setup_helper.build_config() + result = self.dpdk_setup_helper.build_config() mock_open_rf.assert_called() self.assertEqual(result, expected) - self.assertGreaterEqual(ssh_helper.upload_config_file.call_count, 2) + self.assertGreaterEqual(self.ssh_helper.upload_config_file.call_count, 2) mock_find.assert_called() mock_multi_port_config.generate_config.assert_called() mock_multi_port_config.generate_script.assert_called() def test__build_pipeline_kwargs(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - ssh_helper.provision_tool.return_value = 'tool_path' - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper.CFG_CONFIG = 'config' - dpdk_setup_helper.CFG_SCRIPT = 'script' - dpdk_setup_helper.pipeline_kwargs = {} - dpdk_setup_helper.all_ports = [0, 1, 2] - dpdk_setup_helper.scenario_helper.vnf_cfg = {'lb_config': 'HW', - 'worker_threads': 1} + self.ssh_helper.provision_tool.return_value = 'tool_path' + self.dpdk_setup_helper.CFG_CONFIG = 'config' + self.dpdk_setup_helper.CFG_SCRIPT = 'script' + self.dpdk_setup_helper.pipeline_kwargs = {} + self.dpdk_setup_helper.all_ports = [0, 1, 2] + self.dpdk_setup_helper.scenario_helper.vnf_cfg = {'lb_config': 'HW', + 'worker_threads': 1} expected = { 'cfg_file': 'config', @@ -610,12 +383,14 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): 'tool_path': 'tool_path', 'hwlb': ' --hwlb 1', } - dpdk_setup_helper._build_pipeline_kwargs() - self.assertDictEqual(dpdk_setup_helper.pipeline_kwargs, expected) + self.dpdk_setup_helper._build_pipeline_kwargs() + self.assertDictEqual(self.dpdk_setup_helper.pipeline_kwargs, expected) - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') - @mock.patch('yardstick.ssh.SSH') + @mock.patch.object(time, 'sleep') + @mock.patch.object(ssh, 'SSH') def 
test_setup_vnf_environment(self, *args): + self.scenario_helper.nodes = [None, None] + def execute(cmd): if cmd.startswith('which '): return exec_failure @@ -623,102 +398,82 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase): exec_success = (0, 'good output', '') exec_failure = (1, 'bad output', 'error output') + self.ssh_helper.execute = execute - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - ssh_helper.execute = execute + self.dpdk_setup_helper._validate_cpu_cfg = mock.Mock(return_value=[]) - scenario_helper = mock.Mock() - scenario_helper.nodes = [None, None] - dpdk_vnf_setup_env_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_vnf_setup_env_helper._validate_cpu_cfg = mock.Mock(return_value=[]) - - with mock.patch.object(dpdk_vnf_setup_env_helper, '_setup_dpdk'): + with mock.patch.object(self.dpdk_setup_helper, '_setup_dpdk'): self.assertIsInstance( - dpdk_vnf_setup_env_helper.setup_vnf_environment(), - ResourceProfile) - - def test__setup_dpdk(self): - ssh_helper = mock.Mock() - ssh_helper.execute = mock.Mock() - ssh_helper.execute.return_value = (0, 0, 0) - dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY) - with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \ - mock_setup_hp: - dpdk_setup_helper._setup_dpdk() - mock_setup_hp.assert_called_once() - ssh_helper.execute.assert_has_calls([ + self.dpdk_setup_helper.setup_vnf_environment(), + resource.ResourceProfile) + + @mock.patch.object(utils, 'setup_hugepages') + def test__setup_dpdk(self, mock_setup_hugepages): + self.ssh_helper.execute = mock.Mock() + self.ssh_helper.execute.return_value = (0, 0, 0) + self.scenario_helper.all_options = {'hugepages_gb': 8} + self.dpdk_setup_helper._setup_dpdk() + mock_setup_hugepages.assert_called_once_with( + self.ssh_helper, 8*1024*1024) + self.ssh_helper.execute.assert_has_calls([ mock.call('sudo modprobe uio && sudo modprobe igb_uio'), mock.call('lsmod | grep -i igb_uio') ]) - 
@mock.patch('yardstick.ssh.SSH') + @mock.patch.object(ssh, 'SSH') def test__setup_resources(self, _): - vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0)) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper._validate_cpu_cfg = mock.Mock() - - dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in - vnfd_helper.interfaces] - result = dpdk_setup_helper._setup_resources() - self.assertIsInstance(result, ResourceProfile) - self.assertEqual(dpdk_setup_helper.socket, 0) - - @mock.patch('yardstick.ssh.SSH') + self.dpdk_setup_helper._validate_cpu_cfg = mock.Mock() + self.dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in + self.vnfd_helper.interfaces] + result = self.dpdk_setup_helper._setup_resources() + self.assertIsInstance(result, resource.ResourceProfile) + self.assertEqual(self.dpdk_setup_helper.socket, 0) + + @mock.patch.object(ssh, 'SSH') def test__setup_resources_socket_1(self, _): - vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0)) - vnfd_helper.interfaces[0]['virtual-interface']['vpci'] = '0000:55:00.0' - vnfd_helper.interfaces[1]['virtual-interface']['vpci'] = '0000:35:00.0' - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper._validate_cpu_cfg = mock.Mock() - - dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in - vnfd_helper.interfaces] - result = dpdk_setup_helper._setup_resources() - self.assertIsInstance(result, ResourceProfile) - self.assertEqual(dpdk_setup_helper.socket, 1) - - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') + self.vnfd_helper.interfaces[0]['virtual-interface']['vpci'] = \ + '0000:55:00.0' + self.vnfd_helper.interfaces[1]['virtual-interface']['vpci'] = \ + '0000:35:00.0' + + self.dpdk_setup_helper._validate_cpu_cfg = mock.Mock() + 
self.dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in + self.vnfd_helper.interfaces] + result = self.dpdk_setup_helper._setup_resources() + self.assertIsInstance(result, resource.ResourceProfile) + self.assertEqual(self.dpdk_setup_helper.socket, 1) + + @mock.patch.object(time, 'sleep') def test__detect_and_bind_drivers(self, *args): - vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0)) - ssh_helper = mock.Mock() - # ssh_helper.execute = mock.Mock(return_value = (0, 'text', '')) - # ssh_helper.execute.return_value = 0, 'output', '' - scenario_helper = mock.Mock() - scenario_helper.nodes = [None, None] + self.scenario_helper.nodes = [None, None] rv = ['0000:05:00.1', '0000:05:00.0'] - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper.dpdk_bind_helper._get_bound_pci_addresses = mock.Mock(return_value=rv) - dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock() - dpdk_setup_helper.dpdk_bind_helper.read_status = mock.Mock() + self.dpdk_setup_helper.dpdk_bind_helper._get_bound_pci_addresses = \ + mock.Mock(return_value=rv) + self.dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock() + self.dpdk_setup_helper.dpdk_bind_helper.read_status = mock.Mock() - self.assertIsNone(dpdk_setup_helper._detect_and_bind_drivers()) + self.assertIsNone(self.dpdk_setup_helper._detect_and_bind_drivers()) - intf_0 = vnfd_helper.vdu[0]['external-interface'][0]['virtual-interface'] - intf_1 = vnfd_helper.vdu[0]['external-interface'][1]['virtual-interface'] + intf_0 = self.vnfd_helper.vdu[0]['external-interface'][0]['virtual-interface'] + intf_1 = self.vnfd_helper.vdu[0]['external-interface'][1]['virtual-interface'] self.assertEqual(0, intf_0['dpdk_port_num']) self.assertEqual(1, intf_1['dpdk_port_num']) def test_tear_down(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - scenario_helper.nodes = [None, None] - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, 
ssh_helper, scenario_helper) - dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock() - dpdk_setup_helper.dpdk_bind_helper.used_drivers = { + self.scenario_helper.nodes = [None, None] + + self.dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock() + self.dpdk_setup_helper.dpdk_bind_helper.used_drivers = { 'd1': ['0000:05:00.0'], 'd3': ['0000:05:01.0'], } - self.assertIsNone(dpdk_setup_helper.tear_down()) - dpdk_setup_helper.dpdk_bind_helper.bind.assert_any_call(['0000:05:00.0'], 'd1', True) - dpdk_setup_helper.dpdk_bind_helper.bind.assert_any_call(['0000:05:01.0'], 'd3', True) + self.assertIsNone(self.dpdk_setup_helper.tear_down()) + self.dpdk_setup_helper.dpdk_bind_helper.bind.assert_any_call( + ['0000:05:00.0'], 'd1', True) + self.dpdk_setup_helper.dpdk_bind_helper.bind.assert_any_call( + ['0000:05:01.0'], 'd3', True) class TestResourceHelper(unittest.TestCase): @@ -822,46 +577,33 @@ class TestResourceHelper(unittest.TestCase): 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' } + def setUp(self): + self.vnfd_helper = base.VnfdHelper(self.VNFD_0) + self.dpdk_setup_helper = sample_vnf.DpdkVnfSetupEnvHelper( + self.vnfd_helper, mock.Mock(), mock.Mock()) + self.resource_helper = sample_vnf.ResourceHelper(self.dpdk_setup_helper) + def test_setup(self): resource = object() - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - dpdk_setup_helper.setup_vnf_environment = mock.Mock(return_value=resource) - resource_helper = ResourceHelper(dpdk_setup_helper) + self.dpdk_setup_helper.setup_vnf_environment = ( + mock.Mock(return_value=resource)) + resource_helper = sample_vnf.ResourceHelper(self.dpdk_setup_helper) self.assertIsNone(resource_helper.setup()) self.assertIs(resource_helper.resource, resource) def test_generate_cfg(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = 
DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - resource_helper = ResourceHelper(dpdk_setup_helper) - - self.assertIsNone(resource_helper.generate_cfg()) + self.assertIsNone(self.resource_helper.generate_cfg()) def test_stop_collect(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - resource_helper = ResourceHelper(dpdk_setup_helper) - resource_helper.resource = mock.Mock() + self.resource_helper.resource = mock.Mock() - self.assertIsNone(resource_helper.stop_collect()) + self.assertIsNone(self.resource_helper.stop_collect()) def test_stop_collect_none(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - resource_helper = ResourceHelper(dpdk_setup_helper) - resource_helper.resource = None + self.resource_helper.resource = None - self.assertIsNone(resource_helper.stop_collect()) + self.assertIsNone(self.resource_helper.stop_collect()) class TestClientResourceHelper(unittest.TestCase): @@ -993,154 +735,75 @@ class TestClientResourceHelper(unittest.TestCase): }, } - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.LOG') - @mock.patch.object(sample_vnf, 'STLError', new_callable=lambda: MockError) - def test_get_stats_not_connected(self, mock_stl_error, *args): - vnfd_helper = VnfdHelper(self.VNFD_0) + def setUp(self): + vnfd_helper = base.VnfdHelper(self.VNFD_0) ssh_helper = mock.Mock() scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper( + dpdk_setup_helper = sample_vnf.DpdkVnfSetupEnvHelper( vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) - client_resource_helper.client = mock.Mock() - client_resource_helper.client.get_stats.side_effect = mock_stl_error + 
self.client_resource_helper = ( + sample_vnf.ClientResourceHelper(dpdk_setup_helper)) - self.assertEqual(client_resource_helper.get_stats(), {}) - client_resource_helper.client.get_stats.assert_called_once() + @mock.patch.object(sample_vnf, 'LOG') + @mock.patch.object(sample_vnf, 'STLError', new_callable=lambda: MockError) + def test_get_stats_not_connected(self, mock_stl_error, *args): + self.client_resource_helper.client = mock.Mock() + self.client_resource_helper.client.get_stats.side_effect = \ + mock_stl_error + + self.assertEqual(self.client_resource_helper.get_stats(), {}) + self.client_resource_helper.client.get_stats.assert_called_once() def test_clear_stats(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper( - vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) - client_resource_helper.client = mock.Mock() + self.client_resource_helper.client = mock.Mock() - self.assertIsNone(client_resource_helper.clear_stats()) + self.assertIsNone(self.client_resource_helper.clear_stats()) self.assertEqual( - client_resource_helper.client.clear_stats.call_count, 1) + self.client_resource_helper.client.clear_stats.call_count, 1) def test_clear_stats_of_ports(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper( - vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) - client_resource_helper.client = mock.Mock() + self.client_resource_helper.client = mock.Mock() - self.assertIsNone(client_resource_helper.clear_stats([3, 4])) - self.assertEqual( - client_resource_helper.client.clear_stats.call_count, 1) + self.assertIsNone(self.client_resource_helper.clear_stats([3, 4])) + self.client_resource_helper.client.clear_stats.assert_called_once() def test_start(self): - 
vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper( - vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) - client_resource_helper.client = mock.Mock() + self.client_resource_helper.client = mock.Mock() - self.assertIsNone(client_resource_helper.start()) - client_resource_helper.client.start.assert_called_once() + self.assertIsNone(self.client_resource_helper.start()) + self.client_resource_helper.client.start.assert_called_once() def test_start_ports(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper( - vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) - client_resource_helper.client = mock.Mock() + self.client_resource_helper.client = mock.Mock() - self.assertIsNone(client_resource_helper.start([3, 4])) - client_resource_helper.client.start.assert_called_once() + self.assertIsNone(self.client_resource_helper.start([3, 4])) + self.client_resource_helper.client.start.assert_called_once() def test_collect_kpi_with_queue(self): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) - client_resource_helper._result = {'existing': 43, 'replaceable': 12} - client_resource_helper._queue = mock.Mock() - client_resource_helper._queue.empty.return_value = False - client_resource_helper._queue.get.return_value = {'incoming': 34, 'replaceable': 99} + self.client_resource_helper._result = { + 'existing': 43, + 'replaceable': 12} + self.client_resource_helper._queue = mock.Mock() + self.client_resource_helper._queue.empty.return_value = False + 
self.client_resource_helper._queue.get.return_value = { + 'incoming': 34, + 'replaceable': 99} expected = { 'existing': 43, 'incoming': 34, 'replaceable': 99, } - result = client_resource_helper.collect_kpi() - self.assertDictEqual(result, expected) + result = self.client_resource_helper.collect_kpi() + self.assertEqual(result, expected) - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') + @mock.patch.object(time, 'sleep') @mock.patch.object(sample_vnf, 'STLError') def test__connect_with_failures(self, mock_stl_error, *args): - vnfd_helper = VnfdHelper(self.VNFD_0) - ssh_helper = mock.Mock() - scenario_helper = mock.Mock() - dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper) - client_resource_helper = ClientResourceHelper(dpdk_setup_helper) client = mock.MagicMock() client.connect.side_effect = mock_stl_error(msg='msg') - self.assertIs(client_resource_helper._connect(client), client) - - @mock.patch.object(ClientResourceHelper, '_build_ports') - @mock.patch.object(ClientResourceHelper, '_run_traffic_once', - return_value=(True, mock.ANY)) - def test_run_traffic(self, mock_run_traffic_once, mock_build_ports): - client_resource_helper = ClientResourceHelper(mock.Mock()) - client = mock.Mock() - traffic_profile = mock.Mock() - mq_producer = mock.Mock() - with mock.patch.object(client_resource_helper, '_connect') \ - as mock_connect, \ - mock.patch.object(client_resource_helper, '_terminated') \ - as mock_terminated: - mock_connect.return_value = client - type(mock_terminated).value = mock.PropertyMock( - side_effect=[0, 1, 1, lambda x: x]) - client_resource_helper.run_traffic(traffic_profile, mq_producer) - - mock_build_ports.assert_called_once() - traffic_profile.register_generator.assert_called_once() - mq_producer.tg_method_started.assert_called_once() - mq_producer.tg_method_finished.assert_called_once() - mq_producer.tg_method_iteration.assert_called_once_with(1) - 
mock_run_traffic_once.assert_called_once_with(traffic_profile) - - @mock.patch.object(ClientResourceHelper, '_build_ports') - @mock.patch.object(ClientResourceHelper, '_run_traffic_once', - side_effect=Exception) - def test_run_traffic_exception(self, mock_run_traffic_once, - mock_build_ports): - client_resource_helper = ClientResourceHelper(mock.Mock()) - client = mock.Mock() - traffic_profile = mock.Mock() - mq_producer = mock.Mock() - with mock.patch.object(client_resource_helper, '_connect') \ - as mock_connect, \ - mock.patch.object(client_resource_helper, '_terminated') \ - as mock_terminated: - mock_connect.return_value = client - type(mock_terminated).value = mock.PropertyMock(return_value=0) - mq_producer.reset_mock() - # NOTE(ralonsoh): "trex_stl_exceptions.STLError" is mocked - with self.assertRaises(Exception): - client_resource_helper.run_traffic(traffic_profile, - mq_producer) - - mock_build_ports.assert_called_once() - traffic_profile.register_generator.assert_called_once() - mock_run_traffic_once.assert_called_once_with(traffic_profile) - mq_producer.tg_method_started.assert_called_once() - mq_producer.tg_method_finished.assert_not_called() - mq_producer.tg_method_iteration.assert_not_called() + self.assertIs(self.client_resource_helper._connect(client), client) class TestRfc2544ResourceHelper(unittest.TestCase): @@ -1187,183 +850,170 @@ class TestRfc2544ResourceHelper(unittest.TestCase): } } - def test_property_rfc2544(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + def setUp(self): + self.scenario_helper = sample_vnf.ScenarioHelper('name1') + self.rfc2544_resource_helper = \ + sample_vnf.Rfc2544ResourceHelper(self.scenario_helper) - self.assertIsNone(rfc2544_resource_helper._rfc2544) - self.assertDictEqual(rfc2544_resource_helper.rfc2544, self.RFC2544_CFG_1) - self.assertDictEqual(rfc2544_resource_helper._rfc2544, 
self.RFC2544_CFG_1) - scenario_helper.scenario_cfg = {} # ensure that resource_helper caches - self.assertDictEqual(rfc2544_resource_helper.rfc2544, self.RFC2544_CFG_1) + def test_property_rfc2544(self): + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 + + self.assertIsNone(self.rfc2544_resource_helper._rfc2544) + self.assertEqual(self.rfc2544_resource_helper.rfc2544, + self.RFC2544_CFG_1) + self.assertEqual(self.rfc2544_resource_helper._rfc2544, + self.RFC2544_CFG_1) + # ensure that resource_helper caches + self.scenario_helper.scenario_cfg = {} + self.assertEqual(self.rfc2544_resource_helper.rfc2544, + self.RFC2544_CFG_1) def test_property_tolerance_high(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - self.assertIsNone(rfc2544_resource_helper._tolerance_high) - self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.15) - self.assertEqual(rfc2544_resource_helper._tolerance_high, 0.15) - scenario_helper.scenario_cfg = {} # ensure that resource_helper caches - self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.15) + self.assertIsNone(self.rfc2544_resource_helper._tolerance_high) + self.assertEqual(self.rfc2544_resource_helper.tolerance_high, 0.15) + self.assertEqual(self.rfc2544_resource_helper._tolerance_high, 0.15) + self.assertEqual(self.rfc2544_resource_helper._tolerance_precision, 2) + # ensure that resource_helper caches + self.scenario_helper.scenario_cfg = {} + self.assertEqual(self.rfc2544_resource_helper.tolerance_high, 0.15) def test_property_tolerance_low(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - self.assertIsNone(rfc2544_resource_helper._tolerance_low) - 
self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.1) - self.assertEqual(rfc2544_resource_helper._tolerance_low, 0.1) - scenario_helper.scenario_cfg = {} # ensure that resource_helper caches - self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.1) + self.assertIsNone(self.rfc2544_resource_helper._tolerance_low) + self.assertEqual(self.rfc2544_resource_helper.tolerance_low, 0.1) + self.assertEqual(self.rfc2544_resource_helper._tolerance_low, 0.1) + # ensure that resource_helper caches + self.scenario_helper.scenario_cfg = {} + self.assertEqual(self.rfc2544_resource_helper.tolerance_low, 0.1) def test_property_tolerance_high_range_swap(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.25) + self.assertEqual(self.rfc2544_resource_helper.tolerance_high, 0.25) def test_property_tolerance_low_range_swap(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.05) + self.assertEqual(self.rfc2544_resource_helper.tolerance_low, 0.05) def test_property_tolerance_high_not_range(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_3 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_3 - self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.2) + self.assertEqual(self.rfc2544_resource_helper.tolerance_high, 0.2) + self.assertEqual(self.rfc2544_resource_helper._tolerance_precision, 1) def test_property_tolerance_low_not_range(self): - scenario_helper = ScenarioHelper('name1') - 
scenario_helper.scenario_cfg = self.SCENARIO_CFG_3 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_3 - self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.2) + self.assertEqual(self.rfc2544_resource_helper.tolerance_low, 0.2) def test_property_tolerance_high_default(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_4 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_4 - self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.0001) + self.assertEqual(self.rfc2544_resource_helper.tolerance_high, 0.0001) def test_property_tolerance_low_default(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_4 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_4 - self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.0001) + self.assertEqual(self.rfc2544_resource_helper.tolerance_low, 0.0001) def test_property_latency(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - self.assertIsNone(rfc2544_resource_helper._latency) - self.assertTrue(rfc2544_resource_helper.latency) - self.assertTrue(rfc2544_resource_helper._latency) - scenario_helper.scenario_cfg = {} # ensure that resource_helper caches - self.assertTrue(rfc2544_resource_helper.latency) + self.assertIsNone(self.rfc2544_resource_helper._latency) + self.assertTrue(self.rfc2544_resource_helper.latency) + self.assertTrue(self.rfc2544_resource_helper._latency) + # ensure that resource_helper caches + self.scenario_helper.scenario_cfg = {} + self.assertTrue(self.rfc2544_resource_helper.latency) def 
test_property_latency_default(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - self.assertFalse(rfc2544_resource_helper.latency) + self.assertFalse(self.rfc2544_resource_helper.latency) def test_property_correlated_traffic(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_1 - self.assertIsNone(rfc2544_resource_helper._correlated_traffic) - self.assertTrue(rfc2544_resource_helper.correlated_traffic) - self.assertTrue(rfc2544_resource_helper._correlated_traffic) - scenario_helper.scenario_cfg = {} # ensure that resource_helper caches - self.assertTrue(rfc2544_resource_helper.correlated_traffic) + self.assertIsNone(self.rfc2544_resource_helper._correlated_traffic) + self.assertTrue(self.rfc2544_resource_helper.correlated_traffic) + self.assertTrue(self.rfc2544_resource_helper._correlated_traffic) + # ensure that resource_helper caches + self.scenario_helper.scenario_cfg = {} + self.assertTrue(self.rfc2544_resource_helper.correlated_traffic) def test_property_correlated_traffic_default(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper) + self.scenario_helper.scenario_cfg = self.SCENARIO_CFG_2 - self.assertFalse(rfc2544_resource_helper.correlated_traffic) + self.assertFalse(self.rfc2544_resource_helper.correlated_traffic) class TestSampleVNFDeployHelper(unittest.TestCase): - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') - @mock.patch('subprocess.check_output') - def test_deploy_vnfs_disabled(self, *_): - vnfd_helper = mock.Mock() - ssh_helper = mock.Mock() - 
ssh_helper.join_bin_path.return_value = 'joined_path' - ssh_helper.execute.return_value = 1, 'bad output', 'error output' - ssh_helper.put.return_value = None - sample_vnf_deploy_helper = SampleVNFDeployHelper(vnfd_helper, ssh_helper) - - self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1')) - sample_vnf_deploy_helper.DISABLE_DEPLOY = True - self.assertEqual(ssh_helper.execute.call_count, 5) - ssh_helper.put.assert_called_once() - - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time') - @mock.patch('subprocess.check_output') - def test_deploy_vnfs(self, *args): - vnfd_helper = mock.Mock() - ssh_helper = mock.Mock() - ssh_helper.join_bin_path.return_value = 'joined_path' - ssh_helper.execute.return_value = 1, 'bad output', 'error output' - ssh_helper.put.return_value = None - sample_vnf_deploy_helper = SampleVNFDeployHelper(vnfd_helper, ssh_helper) - sample_vnf_deploy_helper.DISABLE_DEPLOY = False - - self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1')) - self.assertEqual(ssh_helper.execute.call_count, 5) - ssh_helper.put.assert_called_once() - - @mock.patch('subprocess.check_output') - def test_deploy_vnfs_early_success(self, *args): - vnfd_helper = mock.Mock() - ssh_helper = mock.Mock() - ssh_helper.join_bin_path.return_value = 'joined_path' - ssh_helper.execute.return_value = 0, 'output', '' - ssh_helper.put.return_value = None - sample_vnf_deploy_helper = SampleVNFDeployHelper(vnfd_helper, ssh_helper) - sample_vnf_deploy_helper.DISABLE_DEPLOY = False + def setUp(self): + self._mock_time_sleep = mock.patch.object(time, 'sleep') + self.mock_time_sleep = self._mock_time_sleep.start() + self._mock_check_output = mock.patch.object(subprocess, 'check_output') + self.mock_check_output = self._mock_check_output.start() + self.addCleanup(self._stop_mocks) + + self.ssh_helper = mock.Mock() + self.sample_vnf_deploy_helper = sample_vnf.SampleVNFDeployHelper( + mock.Mock(), self.ssh_helper) + 
self.ssh_helper.join_bin_path.return_value = 'joined_path' + self.ssh_helper.put.return_value = None + + def _stop_mocks(self): + self._mock_time_sleep.stop() + self._mock_check_output.stop() + + def test_deploy_vnfs_disabled(self): + self.ssh_helper.execute.return_value = 1, 'bad output', 'error output' + + self.sample_vnf_deploy_helper.deploy_vnfs('name1') + self.sample_vnf_deploy_helper.DISABLE_DEPLOY = True + self.assertEqual(self.ssh_helper.execute.call_count, 5) + self.ssh_helper.put.assert_called_once() + + def test_deploy_vnfs(self): + self.ssh_helper.execute.return_value = 1, 'bad output', 'error output' + self.sample_vnf_deploy_helper.DISABLE_DEPLOY = False - self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1')) - ssh_helper.execute.assert_called_once() - ssh_helper.put.assert_not_called() + self.sample_vnf_deploy_helper.deploy_vnfs('name1') + self.assertEqual(self.ssh_helper.execute.call_count, 5) + self.ssh_helper.put.assert_called_once() + + def test_deploy_vnfs_early_success(self): + self.ssh_helper.execute.return_value = 0, 'output', '' + self.sample_vnf_deploy_helper.DISABLE_DEPLOY = False + + self.sample_vnf_deploy_helper.deploy_vnfs('name1') + self.ssh_helper.execute.assert_called_once() + self.ssh_helper.put.assert_not_called() class TestScenarioHelper(unittest.TestCase): + def setUp(self): + self.scenario_helper = sample_vnf.ScenarioHelper('name1') + def test_property_task_path(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'task_path': 'my_path', } - self.assertEqual(scenario_helper.task_path, 'my_path') + self.assertEqual(self.scenario_helper.task_path, 'my_path') def test_property_nodes(self): nodes = ['node1', 'node2'] - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'nodes': nodes, } - self.assertEqual(scenario_helper.nodes, nodes) + self.assertEqual(self.scenario_helper.nodes, 
nodes) def test_property_all_options(self): data = { @@ -1372,30 +1022,27 @@ class TestScenarioHelper(unittest.TestCase): }, 'name2': {} } - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'options': data, } - self.assertDictEqual(scenario_helper.all_options, data) + self.assertDictEqual(self.scenario_helper.all_options, data) def test_property_options(self): data = { 'key1': 'value1', 'key2': 'value2', } - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'options': { 'name1': data, }, } - self.assertDictEqual(scenario_helper.options, data) + self.assertDictEqual(self.scenario_helper.options, data) def test_property_vnf_cfg(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'options': { 'name1': { 'vnf_config': 'my_config', @@ -1403,25 +1050,24 @@ class TestScenarioHelper(unittest.TestCase): }, } - self.assertEqual(scenario_helper.vnf_cfg, 'my_config') + self.assertEqual(self.scenario_helper.vnf_cfg, 'my_config') def test_property_vnf_cfg_default(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'options': { 'name1': {}, }, } - self.assertDictEqual(scenario_helper.vnf_cfg, ScenarioHelper.DEFAULT_VNF_CFG) + self.assertEqual(self.scenario_helper.vnf_cfg, + sample_vnf.ScenarioHelper.DEFAULT_VNF_CFG) def test_property_topology(self): - scenario_helper = ScenarioHelper('name1') - scenario_helper.scenario_cfg = { + self.scenario_helper.scenario_cfg = { 'topology': 'my_topology', } - self.assertEqual(scenario_helper.topology, 'my_topology') + self.assertEqual(self.scenario_helper.topology, 'my_topology') class TestSampleVnf(unittest.TestCase): @@ -1542,76 +1188,73 @@ class TestSampleVnf(unittest.TestCase): "frame_size": 64, }, } + def setUp(self): + vnfd = 
self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.vnf = sample_vnf.SampleVNF('vnf1', vnfd) + self.vnf.APP_NAME = 'sample1' def test___init__(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0, 'task_id') + vnf = sample_vnf.SampleVNF('vnf1', self.VNFD_0) - self.assertEqual(sample_vnf.name, 'vnf1') - self.assertDictEqual(sample_vnf.vnfd_helper, self.VNFD_0) + self.assertEqual(vnf.name, 'vnf1') + self.assertDictEqual(vnf.vnfd_helper, self.VNFD_0) # test the default setup helper is SetupEnvHelper, not subclass - self.assertEqual(type(sample_vnf.setup_helper), SetupEnvHelper) + self.assertEqual(type(vnf.setup_helper), + sample_vnf.SetupEnvHelper) # test the default resource helper is ResourceHelper, not subclass - self.assertEqual(type(sample_vnf.resource_helper), ResourceHelper) + self.assertEqual(type(vnf.resource_helper), sample_vnf.ResourceHelper) def test___init___alt_types(self): - class MySetupEnvHelper(SetupEnvHelper): + class MySetupEnvHelper(sample_vnf.SetupEnvHelper): pass - class MyResourceHelper(ResourceHelper): + class MyResourceHelper(sample_vnf.ResourceHelper): pass - sample_vnf = SampleVNF('vnf1', self.VNFD_0, 'task_id', - MySetupEnvHelper, MyResourceHelper) + vnf = sample_vnf.SampleVNF('vnf1', self.VNFD_0, + MySetupEnvHelper, MyResourceHelper) - self.assertEqual(sample_vnf.name, 'vnf1') - self.assertDictEqual(sample_vnf.vnfd_helper, self.VNFD_0) + self.assertEqual(vnf.name, 'vnf1') + self.assertDictEqual(vnf.vnfd_helper, self.VNFD_0) # test the default setup helper is MySetupEnvHelper, not subclass - self.assertEqual(type(sample_vnf.setup_helper), MySetupEnvHelper) + self.assertEqual(type(vnf.setup_helper), MySetupEnvHelper) # test the default resource helper is MyResourceHelper, not subclass - self.assertEqual(type(sample_vnf.resource_helper), MyResourceHelper) + self.assertEqual(type(vnf.resource_helper), MyResourceHelper) @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process') def test__start_vnf(self, *args): - vnfd = 
self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf._run = mock.Mock() + self.vnf._run = mock.Mock() - self.assertIsNone(sample_vnf.queue_wrapper) - self.assertIsNone(sample_vnf._vnf_process) - self.assertIsNone(sample_vnf._start_vnf()) - self.assertIsNotNone(sample_vnf.queue_wrapper) - self.assertIsNotNone(sample_vnf._vnf_process) + self.assertIsNone(self.vnf.queue_wrapper) + self.assertIsNone(self.vnf._vnf_process) + self.vnf._start_vnf() + self.assertIsNotNone(self.vnf.queue_wrapper) + self.assertIsNotNone(self.vnf._vnf_process) - @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') - @mock.patch("yardstick.ssh.SSH") + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + @mock.patch.object(ssh, "SSH") def test_instantiate(self, ssh, *args): test_base.mock_ssh(ssh) nodes = { 'vnf1': 'name1', 'vnf2': 'name2', } - - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.scenario_helper.scenario_cfg = { - 'nodes': {sample_vnf.name: 'mock'} - } - sample_vnf.APP_NAME = 'sample1' - sample_vnf._start_server = mock.Mock(return_value=0) - sample_vnf._vnf_process = mock.MagicMock() - sample_vnf._vnf_process._is_alive.return_value = 1 - sample_vnf.ssh_helper = mock.MagicMock() - sample_vnf.deploy_helper = mock.MagicMock() - sample_vnf.resource_helper.ssh_helper = mock.MagicMock() + self.vnf._start_server = mock.Mock(return_value=0) + self.vnf._vnf_process = mock.MagicMock() + self.vnf._vnf_process._is_alive.return_value = 1 + self.vnf.ssh_helper = mock.MagicMock() + self.vnf.deploy_helper = mock.MagicMock() + self.vnf.resource_helper.ssh_helper = mock.MagicMock() scenario_cfg = { 'nodes': nodes, } - self.assertIsNone(sample_vnf.instantiate(scenario_cfg, {})) + self.assertIsNone(self.vnf.instantiate(scenario_cfg, {})) def test__update_collectd_options(self): scenario_cfg = {'options': 
@@ -1619,14 +1262,14 @@ class TestSampleVnf(unittest.TestCase): {'interval': 3, 'plugins': {'plugin3': {'param': 3}}}, - 'vnf__0': + 'vnf1': {'collectd': {'interval': 2, 'plugins': {'plugin3': {'param': 2}, 'plugin2': {'param': 2}}}}}} context_cfg = {'nodes': - {'vnf__0': + {'vnf1': {'collectd': {'interval': 1, 'plugins': @@ -1639,10 +1282,8 @@ class TestSampleVnf(unittest.TestCase): 'plugin2': {'param': 1}, 'plugin1': {'param': 1}}} - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf__0', vnfd, 'task_id') - sample_vnf._update_collectd_options(scenario_cfg, context_cfg) - self.assertEqual(sample_vnf.setup_helper.collectd_options, expected) + self.vnf._update_collectd_options(scenario_cfg, context_cfg) + self.assertEqual(self.vnf.setup_helper.collectd_options, expected) def test__update_options(self): options1 = {'interval': 1, @@ -1666,13 +1307,11 @@ class TestSampleVnf(unittest.TestCase): 'plugin2': {'param': 1}, 'plugin1': {'param': 1}}} - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf._update_options(options2, options1) + self.vnf._update_options(options2, options1) self.assertEqual(options2, expected) - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") - @mock.patch("yardstick.ssh.SSH") + @mock.patch.object(time, 'sleep') + @mock.patch.object(ssh, 'SSH') def test_wait_for_instantiate_empty_queue(self, ssh, *args): test_base.mock_ssh(ssh, exec_result=(1, "", "")) @@ -1688,25 +1327,56 @@ class TestSampleVnf(unittest.TestCase): 'pipeline> ', ] - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.APP_NAME = 'sample1' - sample_vnf.WAIT_TIME_FOR_SCRIPT = 0 - sample_vnf._start_server = mock.Mock(return_value=0) - sample_vnf._vnf_process = mock.MagicMock() - sample_vnf._vnf_process.exitcode = 0 - sample_vnf._vnf_process._is_alive.return_value = 1 - sample_vnf.queue_wrapper = mock.Mock() - 
sample_vnf.q_out = mock.Mock() - sample_vnf.q_out.qsize.side_effect = iter(queue_size_list) - sample_vnf.q_out.get.side_effect = iter(queue_get_list) - sample_vnf.ssh_helper = mock.MagicMock() - sample_vnf.resource_helper.ssh_helper = mock.MagicMock() - sample_vnf.resource_helper.start_collect = mock.MagicMock() - - self.assertEqual(sample_vnf.wait_for_instantiate(), 0) - - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") + self.vnf.WAIT_TIME_FOR_SCRIPT = 0 + self.vnf._start_server = mock.Mock(return_value=0) + self.vnf._vnf_process = mock.MagicMock() + self.vnf._vnf_process.exitcode = 0 + self.vnf._vnf_process._is_alive.return_value = 1 + self.vnf.queue_wrapper = mock.Mock() + self.vnf.q_out = mock.Mock() + self.vnf.q_out.qsize.side_effect = iter(queue_size_list) + self.vnf.q_out.get.side_effect = iter(queue_get_list) + self.vnf.ssh_helper = mock.MagicMock() + self.vnf.resource_helper.ssh_helper = mock.MagicMock() + self.vnf.resource_helper.start_collect = mock.MagicMock() + + self.assertEqual(self.vnf.wait_for_instantiate(), 0) + + @mock.patch.object(time, 'sleep') + @mock.patch.object(ssh, 'SSH') + def test_wait_for_initialize(self, ssh, *args): + test_base.mock_ssh(ssh, exec_result=(1, "", "")) + queue_get_list = [ + 'some output', + 'pipeline> ', + 'run non_existent_script_name', + 'Cannot open file "non_existent_script_name"' + ] + queue_size_list = [ + 0, + len(queue_get_list[0]), + 0, + len(queue_get_list[1]), + len(queue_get_list[2]), + 0, + len(queue_get_list[3]) + ] + self.vnf.WAIT_TIME_FOR_SCRIPT = 0 + self.vnf._start_server = mock.Mock(return_value=0) + self.vnf._vnf_process = mock.MagicMock() + self.vnf._vnf_process.exitcode = 0 + self.vnf._vnf_process._is_alive.return_value = 1 + self.vnf.queue_wrapper = mock.Mock() + self.vnf.q_out = mock.Mock() + self.vnf.q_out.qsize.side_effect = iter(queue_size_list) + self.vnf.q_out.get.side_effect = iter(queue_get_list) + self.vnf.ssh_helper = mock.MagicMock() + 
self.vnf.resource_helper.ssh_helper = mock.MagicMock() + self.vnf.resource_helper.start_collect = mock.MagicMock() + + self.assertEqual(self.vnf.wait_for_initialize(), 0) + + @mock.patch.object(time, "sleep") def test_vnf_execute_with_queue_data(self, *args): queue_size_list = [ 1, @@ -1718,53 +1388,41 @@ class TestSampleVnf(unittest.TestCase): 'hello ', 'world' ] + self.vnf.q_out = mock.Mock() + self.vnf.q_out.qsize.side_effect = iter(queue_size_list) + self.vnf.q_out.get.side_effect = iter(queue_get_list) - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.APP_NAME = 'sample1' - sample_vnf.q_out = mock.Mock() - sample_vnf.q_out.qsize.side_effect = iter(queue_size_list) - sample_vnf.q_out.get.side_effect = iter(queue_get_list) - - self.assertEqual(sample_vnf.vnf_execute('my command'), 'hello world') + self.assertEqual(self.vnf.vnf_execute('my command'), 'hello world') def test_terminate_without_vnf_process(self): - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.APP_NAME = 'sample1' - sample_vnf.vnf_execute = mock.Mock() - sample_vnf.ssh_helper = mock.Mock() - sample_vnf._tear_down = mock.Mock() - sample_vnf.resource_helper = mock.Mock() + self.vnf.vnf_execute = mock.Mock() + self.vnf.ssh_helper = mock.Mock() + self.vnf._tear_down = mock.Mock() + self.vnf.resource_helper = mock.Mock() - self.assertIsNone(sample_vnf.terminate()) + self.assertIsNone(self.vnf.terminate()) def test_get_stats(self): - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.APP_NAME = 'sample1' - sample_vnf.APP_WORD = 'sample1' - sample_vnf.vnf_execute = mock.Mock(return_value='the stats') + self.vnf.APP_WORD = 'sample1' + self.vnf.vnf_execute = mock.Mock(return_value='the stats') - self.assertEqual(sample_vnf.get_stats(), 'the stats') + self.assertEqual(self.vnf.get_stats(), 'the stats') - 
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') def test_collect_kpi(self, *args): - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.scenario_helper.scenario_cfg = { - 'nodes': {sample_vnf.name: "mock"} + self.vnf.scenario_helper.scenario_cfg = { + 'nodes': {self.vnf.name: "mock"} } - sample_vnf.APP_NAME = 'sample1' - sample_vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)' - sample_vnf.COLLECT_MAP = { + self.vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)' + self.vnf.COLLECT_MAP = { 'k1': 3, 'k2': 1, 'k3': 2, } - sample_vnf.get_stats = mock.Mock(return_value='index0: 34 -- 91, 27') - sample_vnf.resource_helper = mock.Mock() - sample_vnf.resource_helper.collect_kpi.return_value = {} + self.vnf.get_stats = mock.Mock(return_value='index0: 34 -- 91, 27') + self.vnf.resource_helper = mock.Mock() + self.vnf.resource_helper.collect_kpi.return_value = {} expected = { 'k1': 27, @@ -1773,19 +1431,17 @@ class TestSampleVnf(unittest.TestCase): 'collect_stats': {}, 'physical_node': 'mock_node' } - result = sample_vnf.collect_kpi() - self.assertDictEqual(result, expected) + result = self.vnf.collect_kpi() + self.assertEqual(result, expected) - @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') def test_collect_kpi_default(self, *args): - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.scenario_helper.scenario_cfg = { - 'nodes': {sample_vnf.name: "mock"} + self.vnf.scenario_helper.scenario_cfg = { + 'nodes': {self.vnf.name: "mock"} } - sample_vnf.APP_NAME = 'sample1' - sample_vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)' - sample_vnf.get_stats = mock.Mock(return_value='') + 
self.vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)' + self.vnf.get_stats = mock.Mock(return_value='') expected = { 'physical_node': 'mock_node', @@ -1793,208 +1449,84 @@ class TestSampleVnf(unittest.TestCase): 'packets_fwd': 0, 'packets_dropped': 0, } - result = sample_vnf.collect_kpi() - self.assertDictEqual(result, expected) + result = self.vnf.collect_kpi() + self.assertEqual(result, expected) def test_scale(self): - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - self.assertRaises(y_exceptions.FunctionNotImplemented, - sample_vnf.scale) + self.assertRaises(y_exceptions.FunctionNotImplemented, self.vnf.scale) def test__run(self): test_cmd = 'test cmd' run_kwargs = {'arg1': 'val1', 'arg2': 'val2'} - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd, 'task_id') - sample_vnf.ssh_helper = mock.Mock() - sample_vnf.setup_helper = mock.Mock() - with mock.patch.object(sample_vnf, '_build_config', + self.vnf.ssh_helper = mock.Mock() + self.vnf.setup_helper = mock.Mock() + with mock.patch.object(self.vnf, '_build_config', return_value=test_cmd), \ - mock.patch.object(sample_vnf, '_build_run_kwargs'): - sample_vnf.run_kwargs = run_kwargs - sample_vnf._run() - sample_vnf.ssh_helper.drop_connection.assert_called_once() - sample_vnf.ssh_helper.run.assert_called_once_with(test_cmd, - **run_kwargs) - sample_vnf.setup_helper.kill_vnf.assert_called_once() + mock.patch.object(self.vnf, '_build_run_kwargs'): + self.vnf.run_kwargs = run_kwargs + self.vnf._run() + self.vnf.ssh_helper.drop_connection.assert_called_once() + self.vnf.ssh_helper.run.assert_called_once_with(test_cmd, **run_kwargs) + self.vnf.setup_helper.kill_vnf.assert_called_once() class TestSampleVNFTrafficGen(unittest.TestCase): - VNFD_0 = { - 'short-name': 'VpeVnf', - 'vdu': [ - { - 'routing_table': [ - { - 'network': '152.16.100.20', - 'netmask': '255.255.255.0', - 'gateway': '152.16.100.20', - 'if': 'xe0' - }, - { - 'network': 
'152.16.40.20', - 'netmask': '255.255.255.0', - 'gateway': '152.16.40.20', - 'if': 'xe1' - }, - ], - 'description': 'VPE approximation using DPDK', - 'name': 'vpevnf-baremetal', - 'nd_route_tbl': [ - { - 'network': '0064:ff9b:0:0:0:0:9810:6414', - 'netmask': '112', - 'gateway': '0064:ff9b:0:0:0:0:9810:6414', - 'if': 'xe0' - }, - { - 'network': '0064:ff9b:0:0:0:0:9810:2814', - 'netmask': '112', - 'gateway': '0064:ff9b:0:0:0:0:9810:2814', - 'if': 'xe1' - }, - ], - 'id': 'vpevnf-baremetal', - 'external-interface': [ - { - 'virtual-interface': { - 'dst_mac': '00:00:00:00:00:03', - 'vpci': '0000:05:00.0', - 'driver': 'i40e', - 'local_ip': '152.16.100.19', - 'type': 'PCI-PASSTHROUGH', - 'netmask': '255.255.255.0', - 'dpdk_port_num': 0, - 'bandwidth': '10 Gbps', - 'dst_ip': '152.16.100.20', - 'local_mac': '00:00:00:00:00:01' - }, - 'vnfd-connection-point-ref': 'xe0', - 'name': 'xe0' - }, - { - 'virtual-interface': { - 'dst_mac': '00:00:00:00:00:04', - 'vpci': '0000:05:00.1', - 'driver': 'ixgbe', - 'local_ip': '152.16.40.19', - 'type': 'PCI-PASSTHROUGH', - 'netmask': '255.255.255.0', - 'dpdk_port_num': 1, - 'bandwidth': '10 Gbps', - 'dst_ip': '152.16.40.20', - 'local_mac': '00:00:00:00:00:02' - }, - 'vnfd-connection-point-ref': 'xe1', - 'name': 'xe1' - }, - ], - }, - ], - 'description': 'Vpe approximation using DPDK', - 'mgmt-interface': { - 'vdu-id': 'vpevnf-baremetal', - 'host': '1.1.1.1', - 'password': 'r00t', - 'user': 'root', - 'ip': '1.1.1.1' - }, - 'benchmark': { - 'kpi': [ - 'packets_in', - 'packets_fwd', - 'packets_dropped', - ], - }, - 'connection-point': [ - { - 'type': 'VPORT', - 'name': 'xe0', - }, - { - 'type': 'VPORT', - 'name': 'xe1', - }, - ], - 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh' - } + VNFD_0 = TestSampleVnf.VNFD_0 + VNFD = TestSampleVnf.VNFD - VNFD = { - 'vnfd:vnfd-catalog': { - 'vnfd': [ - VNFD_0, - ], - }, - } + TRAFFIC_PROFILE = TestSampleVnf.TRAFFIC_PROFILE - TRAFFIC_PROFILE = { - "schema": "isb:traffic_profile:0.1", - "name": "fixed", - 
"description": "Fixed traffic profile to run UDP traffic", - "traffic_profile": { - "traffic_type": "FixedTraffic", - "frame_rate": 100, # pps - "flow_number": 10, - "frame_size": 64, - }, - } + def setUp(self): + self.sample_vnf_tg = sample_vnf.SampleVNFTrafficGen( + 'tg1', self.VNFD_0) def test__check_status(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') with self.assertRaises(NotImplementedError): - sample_vnf_tg._check_status() + self.sample_vnf_tg._check_status() def test_listen_traffic(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') - - sample_vnf_tg.listen_traffic(mock.Mock()) + self.sample_vnf_tg.listen_traffic(mock.Mock()) def test_verify_traffic(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') - - sample_vnf_tg.verify_traffic(mock.Mock()) + self.sample_vnf_tg.verify_traffic(mock.Mock()) def test_terminate(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') - sample_vnf_tg._traffic_process = mock.Mock() - sample_vnf_tg._tg_process = mock.Mock() + self.sample_vnf_tg._traffic_process = mock.Mock() + self.sample_vnf_tg._tg_process = mock.Mock() - sample_vnf_tg.terminate() + self.sample_vnf_tg.terminate() - def test__wait_for_process(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') - with mock.patch.object(sample_vnf_tg, '_check_status', + @mock.patch.object(time, 'sleep') + def test__wait_for_process(self, *args): + with mock.patch.object(self.sample_vnf_tg, '_check_status', return_value=0) as mock_status, \ - mock.patch.object(sample_vnf_tg, '_tg_process') as mock_proc: + mock.patch.object(self.sample_vnf_tg, '_tg_process') as mock_proc: mock_proc.is_alive.return_value = True mock_proc.exitcode = 234 - self.assertEqual(sample_vnf_tg._wait_for_process(), 234) + self.assertEqual(self.sample_vnf_tg._wait_for_process(), 234) mock_proc.is_alive.assert_called_once() mock_status.assert_called_once() def 
test__wait_for_process_not_alive(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') - with mock.patch.object(sample_vnf_tg, '_tg_process') as mock_proc: + with mock.patch.object(self.sample_vnf_tg, '_tg_process') as mock_proc: mock_proc.is_alive.return_value = False - self.assertRaises(RuntimeError, sample_vnf_tg._wait_for_process) + self.assertRaises(RuntimeError, self.sample_vnf_tg._wait_for_process) mock_proc.is_alive.assert_called_once() - def test__wait_for_process_delayed(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') - with mock.patch.object(sample_vnf_tg, '_check_status', + @mock.patch.object(time, 'sleep') + def test__wait_for_process_delayed(self, *args): + with mock.patch.object(self.sample_vnf_tg, '_check_status', side_effect=[1, 0]) as mock_status, \ - mock.patch.object(sample_vnf_tg, + mock.patch.object(self.sample_vnf_tg, '_tg_process') as mock_proc: mock_proc.is_alive.return_value = True mock_proc.exitcode = 234 - self.assertEqual(sample_vnf_tg._wait_for_process(), 234) + self.assertEqual(self.sample_vnf_tg._wait_for_process(), 234) mock_proc.is_alive.assert_has_calls([mock.call(), mock.call()]) mock_status.assert_has_calls([mock.call(), mock.call()]) def test_scale(self): - sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id') self.assertRaises(y_exceptions.FunctionNotImplemented, - sample_vnf_tg.scale) + self.sample_vnf_tg.scale) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_imsbench_sipp.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_imsbench_sipp.py new file mode 100644 index 000000000..698b1b03f --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_imsbench_sipp.py @@ -0,0 +1,481 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import unittest +from collections import deque + +from yardstick.network_services.vnf_generic.vnf import tg_imsbench_sipp +from yardstick import ssh + + +class TestSippVnf(unittest.TestCase): + + VNFD = { + "short-name": "SippVnf", + "vdu": [ + { + "id": "sippvnf-baremetal", + "routing_table": "", + "external-interface": [ + { + "virtual-interface": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "vnfd-connection-point-ref": "xe0", + "name": "xe0" + }, + { + "virtual-interface": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": 
"10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "vnfd-connection-point-ref": "xe1", + "name": "xe1" + } + ], + "name": "sippvnf-baremetal", + "description": "Sipp" + } + ], + "description": "ImsbenchSipp", + "mgmt-interface": { + "vdu-id": "sipp-baremetal", + "password": "r00t", + "user": "root", + "ip": "10.80.3.11" + }, + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "id": "SippVnf", + "name": "SippVnf" + } + + SCENARIO_CFG = { + "task_id": "ba636744-898e-4783-a4aa-0a79c60953cc", + "tc": "tc_vims_baremetal_sipp", + "runner": { + "interval": 1, + "output_config": { + "DEFAULT": { + "debug": "False", + "dispatcher": [ + "influxdb" + ] + }, + "nsb": { + "debug": "False", + "trex_client_lib": "/opt/nsb_bin/trex_client/stl", + "bin_path": "/opt/nsb_bin", + "trex_path": "/opt/nsb_bin/trex/scripts", + "dispatcher": "influxdb" + }, + "dispatcher_influxdb": { + "username": "root", + "target": "http://10.80.3.11:8086", + "db_name": "yardstick", + "timeout": "5", + "debug": "False", + "password": "root", + "dispatcher": "influxdb" + }, + "dispatcher_http": { + "debug": "False", + "dispatcher": "influxdb", + "timeout": "5", + "target": "http://127.0.0.1:8000/results" + }, + "dispatcher_file": { + "debug": "False", + "backup_count": "0", + "max_bytes": "0", + "dispatcher": "influxdb", + "file_path": "/tmp/yardstick.out" + } + }, + "runner_id": 18148, + "duration": 60, + "type": "Vims" + }, + "nodes": { + "vnf__0": "pcscf.yardstick-ba636744", + "vnf__1": "hss.yardstick-ba636744", + "tg__0": "sipp.yardstick-ba636744" + }, + "topology": "vims-topology.yaml", + "type": "NSPerf", + 
"traffic_profile": "../../traffic_profiles/ipv4_throughput.yaml", + "task_path": "samples/vnf_samples/nsut/vims", + "options": { + "init_reg_max": 5000, + "end_user": 10000, + "reg_cps": 20, + "rereg_cps": 20, + "rereg_step": 10, + "wait_time": 5, + "start_user": 1, + "msgc_cps": 10, + "dereg_step": 10, + "call_cps": 10, + "reg_step": 10, + "init_reg_cps": 50, + "dereg_cps": 20, + "msgc_step": 5, + "call_step": 5, + "hold_time": 15, + "port": 5060, + "run_mode": "nortp" + } + } + CONTEXT_CFG = { + "nodes": { + "tg__0": { + "ip": "10.80.3.11", + "interfaces": { + "xe0": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "xe1": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + 
"peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + } + }, + "user": "root", + "password": "r00t", + "VNF model": "../../vnf_descriptors/tg_sipp_vnfd.yaml", + "name": "sipp.yardstick-a75a3aff", + "vnfd-id-ref": "tg__0", + "member-vnf-index": "1", + "role": "TrafficGen", + "ctx_type": "Node" + }, + "vnf__0": { + "ip": "10.80.3.7", + "interfaces": { + "xe0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "tg__0": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + } + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + } + }, + "user": "root", + "password": "r00t", + "VNF model": "../../vnf_descriptors/vims_pcscf_vnfd.yaml", + "name": "pcscf.yardstick-a75a3aff", + "vnfd-id-ref": "vnf__0", + "member-vnf-index": "2", + "role": "VirtualNetworkFunction", + "ctx_type": "Node" + }, + "vnf__1": { + "ip": "10.80.3.7", + "interfaces": { + "xe0": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "tg__0": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + 
"peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + } + }, + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "user": "root", + "password": "r00t", + "VNF model": "../../vnf_descriptors/vims_hss_vnfd.yaml", + "name": "hss.yardstick-a75a3aff", + "vnfd-id-ref": "vnf__1", + "member-vnf-index": "3", + "role": "VirtualNetworkFunction", + "ctx_type": "Node" + } + }, + "networks": {} + } + + FILE = "timestamp:1000 reg:100 reg_saps:0" + + QUEUE = {'reg_saps': 0.0, 'timestamp': 1000.0, 'reg': 100.0} + + TRAFFIC_PROFILE = { + "schema": "nsb:traffic_profile:0.1", + "name": "sip", + "description": "Traffic profile to run sip", + "traffic_profile": { + "traffic_type": "SipProfile", + "frame_rate": 100, # pps + "enable_latency": False + }, + } + + def setUp(self): + self._mock_ssh = mock.patch.object(ssh, 'SSH') + self.mock_ssh = self._mock_ssh.start() + + self.addCleanup(self._stop_mocks) + self.sipp_vnf = tg_imsbench_sipp.SippVnf('tg__0', self.VNFD) + + def _stop_mocks(self): + self._mock_ssh.stop() + + def test___init__(self): + self.assertIsInstance(self.sipp_vnf.resource_helper, + tg_imsbench_sipp.SippResourceHelper) + + def test_wait_for_instantiate(self): + self.assertIsNone(self.sipp_vnf.wait_for_instantiate()) + + @mock.patch('six.moves.builtins.open', new_callable=mock.mock_open, read_data=FILE) + def test_handle_result_files(self, mock_file): + result_deque = deque([self.QUEUE]) + file = "/tmp/test.txt" + test = self.sipp_vnf.handle_result_files(file) + 
self.assertEqual(result_deque, test) + mock_file.assert_called_with(file, 'r') + + @mock.patch.object(ssh.SSH, 'get') + def test_get_result_files(self, mock_get): + self.sipp_vnf.get_result_files() + mock_get.assert_called() + + def test_collect_kpi(self): + self.sipp_vnf.queue = deque([self.QUEUE]) + self.assertEqual(self.QUEUE, self.sipp_vnf.collect_kpi()) + + def test_collect_kpi_empty(self): + self.sipp_vnf.queue = deque([]) + self.assertEqual({}, self.sipp_vnf.collect_kpi()) + + @mock.patch('six.moves.builtins.open', new_callable=mock.mock_open, read_data=FILE) + def test_count_line_num(self, mock_file): + file = "/tmp/test.txt" + mock_file.return_value.__iter__.return_value = self.FILE.splitlines() + self.assertEqual(1, self.sipp_vnf.count_line_num(file)) + mock_file.assert_called_with(file, 'r') + + @mock.patch('six.moves.builtins.open', new_callable=mock.mock_open, read_data='') + def test_count_line_num_file_empty(self, mock_file): + file = "/tmp/test.txt" + self.assertEqual(0, self.sipp_vnf.count_line_num(file)) + mock_file.assert_called_with(file, 'r') + + @mock.patch('six.moves.builtins.open', new_callable=mock.mock_open, read_data=FILE) + def test_count_line_num_file_error(self, mock_file): + file = "/tmp/test.txt" + mock_file.side_effect = IOError() + self.assertEqual(0, self.sipp_vnf.count_line_num(file)) + + def test_is_ended_false(self): + self.sipp_vnf.count_line_num = mock.Mock(return_value=1) + not_end = self.sipp_vnf.is_ended() + self.assertFalse(not_end) + + def test_is_ended_true(self): + self.sipp_vnf.count_line_num = mock.Mock(return_value=0) + end = self.sipp_vnf.is_ended() + self.assertTrue(end) + + def test_terminate(self): + self.sipp_vnf.ssh_helper = mock.MagicMock() + self.sipp_vnf.resource_helper.ssh_helper = mock.MagicMock() + self.assertIsNone(self.sipp_vnf.terminate()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py index 
e7f6206eb..dd1c277c3 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -128,12 +128,12 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): def test___init__(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) self.assertIsNone(ixload_traffic_gen.resource_helper.data) def test_update_gateways(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) links = {'uplink_0': {'ip': {}}, 'downlink_1': {'ip': {}}} @@ -146,7 +146,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): return_value='mock_node') def test_collect_kpi(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) ixload_traffic_gen.scenario_helper.scenario_cfg = { 'nodes': {ixload_traffic_gen.name: "mock"} } @@ -160,7 +160,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): def test_listen_traffic(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) self.assertIsNone(ixload_traffic_gen.listen_traffic({})) @mock.patch.object(utils, 'find_relative_file') @@ -169,7 +169,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): @mock.patch.object(tg_ixload, 'shutil') def test_instantiate(self, 
mock_shutil, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) scenario_cfg = {'tc': "nsb_test_case", 'ixia_profile': "ixload.cfg", 'task_path': "/path/to/task"} @@ -209,7 +209,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): vnfd['mgmt-interface'].update({'tg-config': {}}) vnfd['mgmt-interface']['tg-config'].update({'ixchassis': '1.1.1.1'}) vnfd['mgmt-interface']['tg-config'].update({'py_bin_path': '/root'}) - sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd) sut.connection = mock.Mock() sut._traffic_runner = mock.Mock(return_value=0) result = sut.run_traffic(mock_traffic_profile) @@ -230,7 +230,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): vnfd['mgmt-interface'].update({'tg-config': {}}) vnfd['mgmt-interface']['tg-config'].update({'ixchassis': '1.1.1.1'}) vnfd['mgmt-interface']['tg-config'].update({'py_bin_path': '/root'}) - sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd) sut.connection = mock.Mock() sut._traffic_runner = mock.Mock(return_value=0) subprocess.call(['touch', '/tmp/1.csv']) @@ -240,7 +240,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): def test_terminate(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) self.assertIsNone(ixload_traffic_gen.terminate()) def test_parse_csv_read(self): @@ -253,7 +253,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): 'HTTP Transaction Rate': True, } http_reader = [kpi_data] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) result = ixload_traffic_gen.resource_helper.result 
ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) for k_left, k_right in tg_ixload.IxLoadResourceHelper.KPI_LIST.items(): @@ -268,7 +268,7 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): 'HTTP Connection Rate': 4, 'HTTP Transaction Rate': 5, }] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) init_value = ixload_traffic_gen.resource_helper.result ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) self.assertDictEqual(ixload_traffic_gen.resource_helper.result, @@ -282,7 +282,6 @@ class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase): 'HTTP Concurrent Connections': 3, 'HTTP Transaction Rate': 5, }] - ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id') - + ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd) with self.assertRaises(KeyError): ixload_traffic_gen.resource_helper.parse_csv_read(http_reader) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_landslide.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_landslide.py new file mode 100644 index 000000000..2d8c01bec --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_landslide.py @@ -0,0 +1,1951 @@ +# Copyright (c) 2018-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import mock +import requests +import time +import unittest + +from yardstick.benchmark.contexts import base as ctx_base +from yardstick.common import exceptions +from yardstick.common import utils as common_utils +from yardstick.common import yaml_loader +from yardstick.network_services import utils as net_serv_utils +from yardstick.network_services.traffic_profile import landslide_profile +from yardstick.network_services.vnf_generic.vnf import sample_vnf +from yardstick.network_services.vnf_generic.vnf import tg_landslide +from yardstick.network_services.vnf_generic.vnf import base as vnf_base + +NAME = "tg__0" + +EXAMPLE_URL = 'http://example.com/' +TCL_SUCCESS_RESPONSE = 'ls_ok' + +TEST_SERVERS = [ + {'ip': '192.168.122.101', + 'phySubnets': [ + {'mask': '/24', + 'base': '10.42.32.100', + 'numIps': 20, + 'name': 'eth1'} + ], + 'role': 'SGW_Node', + 'name': 'TestServer_1'}, + {'ip': '192.168.122.102', + 'phySubnets': [ + {'mask': '/24', + 'base': '10.42.32.1', + 'numIps': 100, + 'name': 'eth1' + }, + {'mask': '/24', + 'base': '10.42.33.1', + 'numIps': 100, + 'name': 'eth2'} + ], + 'preResolvedArpAddress': [ + {'NumNodes': 1, + 'StartingAddress': '10.42.33.5'} + ], + 'role': 'SGW_Nodal', + 'name': 'TestServer_2', + 'thread_model': 'Fireball' + } +] + +TS1_SUTS = [ + {'name': 'SGW - C TestNode', + 'role': 'SgwControlAddr', + 'managementIp': '12.0.1.1', + 'ip': '10.42.32.100', + 'phy': 'eth5', + 'nextHop': '10.42.32.5' + }, + {'name': 'SGW - U TestNode', + 'role': 'SgwUserAddr', + 'managementIp': '12.0.1.2', + 'ip': '10.42.32.101', + 'phy': 'eth5', + 'nextHop': '10.42.32.5' + } +] + +TS2_SUTS = [ + {'name': 'eNodeB TestNode', + 'role': 'EnbUserAddr', + 'managementIp': '12.0.2.1', + 'ip': '10.42.32.2', + 'phy': 'eth5', + 'nextHop': '10.42.32.5' + }, + {'name': 'MME TestNode', + 'role': 'MmeControlAddr', + 'managementIp': '12.0.3.1', + 'ip': '10.42.32.1', + 'phy': 'eth5', + 'nextHop': '10.42.32.5' + }, + {'name': 'NetHost TestNode', + 'role': 
'NetworkHostAddrLocal', + 'managementIp': '12.0.4.1', + 'ip': '10.42.33.1', + 'phy': 'eth5', + 'nextHop': '10.42.32.5' + }, + {'name': 'PGW TestNode', + 'role': 'PgwV4Sut', + 'managementIp': '12.0.5.1', + 'ip': '10.42.32.105', + 'phy': 'eth5', + 'nextHop': '10.42.32.5' + }, + {'name': 'SGW - C SUT', + 'role': 'SgwSut', + 'managementIp': '12.0.6.1', + 'ip': '10.42.32.100' + }, + {'name': 'SGW - U SUT', + 'role': 'SgwUserSut', + 'managementIp': '12.0.6.2', + 'ip': '10.42.32.101'} +] + +VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [{ + 'short-name': 'landslide', + 'vdu': [{ + 'description': 'AB client interface details', + 'name': 'abclient-baremetal', + 'id': 'abclient-baremetal', + 'external-interface': []}], + 'description': 'Spirent Landslide traffic generator', + 'config': [{'test_server': TEST_SERVERS[0], 'suts': TS1_SUTS}, + {'test_server': TEST_SERVERS[1], 'suts': TS2_SUTS}], + 'mgmt-interface': { + 'vdu-id': 'landslide-tas', + 'user': 'user', + 'password': 'user', + 'super-user': 'super-user', + 'super-user-password': 'super-user-password', + 'cfguser_password': 'cfguser_password', + 'license': 48, + 'proto': 'http', + 'ip': '1.1.1.1'}, + 'benchmark': { + 'kpi': [ + 'tx_throughput_mbps', + 'rx_throughput_mbps', + 'in_packets', + 'out_packets', + 'activation_rate_sessps', + 'deactivation_rate_sessps']}, + 'id': 'LandslideTrafficGen', + 'name': 'LandslideTrafficGen'}]}} + +TAS_INFO = VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface'] + +DMF_CFG = { + "dmf": { + "library": "test", + "name": "Basic UDP" + }, + "clientPort": { + "clientPort": 2002, + "isClientPortRange": "false" + }, + "dataProtocol": "udp", + "serverPort": 2003 +} + +RESERVATIONS = [ + {'tsName': TEST_SERVERS[0]['name'], + 'phySubnets': TEST_SERVERS[0]['phySubnets'], + 'tsId': TEST_SERVERS[0]['name'], + 'tsIndex': 0}, + {'tsName': TEST_SERVERS[1]['name'], + 'phySubnets': TEST_SERVERS[1]['phySubnets'], + 'tsId': TEST_SERVERS[1]['name'], + 'tsIndex': 1}] + +SESSION_PROFILE = { + 'keywords': '', 
+ 'duration': 60, + 'iterations': 1, + 'description': 'UE default bearer creation test case', + 'name': 'default_bearer_capacity', + 'reportOptions': {'format': 'CSV'}, + 'reservePorts': 'false', + 'tsGroups': [ + { + 'testCases': [{ + 'type': 'SGW_Node', + 'name': '', + 'linked': "false", + 'AssociatedPhys': '', + 'parameters': { + 'SgiPtpTunnelEn': 'false', + 'Gtp2Imsi': '505024101215074', + 'Sessions': '100000', + 'S5Protocol': 'GTPv2', + 'TrafficMtu': '1500', + 'Gtp2Version': '13.6.0', + 'BearerV4AddrPool': '1.0.0.1', + 'Gtp2Imei': '50502410121507', + 'PgwNodeEn': 'true', + 'DedicatedsPerDefaultBearer': '0', + 'DefaultBearers': '1', + 'SgwUserAddr': { + 'numLinksOrNodes': 1, + 'phy': 'eth1', + 'forcedEthInterface': '', + 'ip': 'SGW_USER_IP', + 'class': 'TestNode', + 'ethStatsEnabled': "false", + 'mtu': 1500 + }, + 'SgwControlAddr': { + 'numLinksOrNodes': 1, + 'phy': 'eth1', + 'forcedEthInterface': '', + 'ip': 'SGW_CONTROL_IP', + 'class': 'TestNode', + 'ethStatsEnabled': "false", + 'mtu': 1500, + 'nextHop': 'SGW_CONTROL_NEXT_HOP' + }, + 'BearerAddrPool': '2001::1', + 'TestType': 'SGW-NODE' + } + }], + 'tsId': TEST_SERVERS[0]['name']}, + { + 'testCases': [{ + 'type': 'SGW_Nodal', + 'name': '', + 'parameters': { + 'DataTraffic': 'Continuous', + 'TrafficStartType': 'When All Sessions Established', + 'NetworkHost': 'Local', + 'Gtp2Imsi': '505024101215074', + 'Dmf': { + 'mainflows': [ + { + 'name': 'Basic UDP', + 'library': 'test' + } + ], + 'class': 'Dmf', + 'instanceGroups': [ + { + 'startPaused': "false", + 'rate': 0, + 'mainflowIdx': 0, + 'mixType': '' + } + ] + }, + 'S5Protocol': 'GTPv2', + 'DataUserCfgFileEn': 'false', + 'PgwUserSutEn': 'false', + 'MmeControlAddr': { + 'numLinksOrNodes': 1, + 'phy': 'eth1', + 'forcedEthInterface': '', + 'ip': 'MME_CONTROL_IP', + 'class': 'TestNode', + 'ethStatsEnabled': "false", + 'mtu': 1500 + }, + 'SgwUserSut': { + 'class': 'Sut', + 'name': 'SGW_USER_NAME' + }, + 'TestActivity': 'Capacity Test', + 'NetworkHostAddrLocal': { + 
'numLinksOrNodes': 1, + 'phy': 'eth2', + 'forcedEthInterface': '', + 'ip': 'NET_HOST_IP', + 'class': 'TestNode', + 'ethStatsEnabled': "false", + 'mtu': 1500 + }, + 'DedicatedsPerDefaultBearer': '0', + 'DisconnectRate': '1000.0', + 'Sessions': '100000', + 'SgwSut': { + 'class': 'Sut', + 'name': 'SGW_CONTROL_NAME' + }, + 'TrafficMtu': '1500', + 'Gtp2Version': '13.6.0', + 'Gtp2Imei': '50502410121507', + 'PgwNodeEn': 'false', + 'StartRate': '1000.0', + 'PgwV4Sut': { + 'class': 'Sut', + 'name': 'PGW_SUT_NAME' + }, + 'DefaultBearers': '1', + 'EnbUserAddr': { + 'numLinksOrNodes': 1, + 'phy': 'eth1', + 'forcedEthInterface': '', + 'ip': 'ENB_USER_IP', + 'class': 'TestNode', + 'ethStatsEnabled': "false", + 'mtu': 1500 + }, + 'TestType': 'SGW-NODAL' + } + }], + 'tsId': TEST_SERVERS[1]['name'] + } + ] +} + + +class TestLandslideTrafficGen(unittest.TestCase): + SCENARIO_CFG = { + 'session_profile': '/traffic_profiles/landslide/' + 'landslide_session_default_bearer.yaml', + 'task_path': '', + 'runner': { + 'type': 'Iteration', + 'iterations': 1 + }, + 'nodes': { + 'tg__0': 'tg__0.traffic_gen', + 'vnf__0': 'vnf__0.vnf_epc' + }, + 'topology': 'landslide_tg_topology.yaml', + 'type': 'NSPerf', + 'traffic_profile': '../../traffic_profiles/landslide/' + 'landslide_dmf_udp.yaml', + 'options': { + 'traffic_duration': 71, + 'test_cases': [ + { + 'BearerAddrPool': '2002::2', + 'type': 'SGW_Node', + 'BearerV4AddrPool': '2.0.0.2', + 'Sessions': '90000' + }, + { + 'StartRate': '900.0', + 'type': 'SGW_Nodal', + 'DisconnectRate': '900.0', + 'Sessions': '90000' + } + ], + 'dmf': + { + 'transactionRate': 1000, + 'packetSize': 512 + } + } + } + + CONTEXT_CFG = { + 'contexts': [ + { + 'type': 'Node', + 'name': 'traffic_gen', + 'file': '/etc/yardstick/nodes/pod_landslide.yaml' + }, + { + 'type': 'Node', + 'name': 'vnf_epc', + 'file': '/etc/yardstick/nodes/pod_vepc_sut.yaml' + } + ] + } + + TRAFFIC_PROFILE = { + "schema": "nsb:traffic_profile:0.1", + "name": "LandslideProfile", + "description": 
"Spirent Landslide traffic profile", + "traffic_profile": { + "traffic_type": "LandslideProfile" + }, + "dmf_config": { + "dmf": { + "library": "test", + "name": "Basic UDP" + }, + "description": "Basic data flow using UDP/IP", + "keywords": "UDP", + "dataProtocol": "udp" + } + } + + SUCCESS_CREATED_CODE = 201 + SUCCESS_OK_CODE = 200 + SUCCESS_RECORD_ID = 5 + TEST_USER_ID = 11 + + def setUp(self): + self.mock_lsapi = mock.patch.object(tg_landslide, 'LsApi') + self.mock_lsapi.start() + + self.mock_ssh_helper = mock.patch.object(sample_vnf, 'VnfSshHelper') + self.mock_ssh_helper.start() + self.vnfd = VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.ls_tg = tg_landslide.LandslideTrafficGen( + NAME, self.vnfd) + self.session_profile = copy.deepcopy(SESSION_PROFILE) + self.ls_tg.session_profile = self.session_profile + + self.addCleanup(self._cleanup) + + def _cleanup(self): + self.mock_lsapi.stop() + self.mock_ssh_helper.stop() + + @mock.patch.object(net_serv_utils, 'get_nsb_option') + def test___init__(self, mock_get_nsb_option, *args): + _path_to_nsb = 'path/to/nsb' + mock_get_nsb_option.return_value = _path_to_nsb + ls_tg = tg_landslide.LandslideTrafficGen(NAME, self.vnfd) + self.assertIsInstance(ls_tg.resource_helper, + tg_landslide.LandslideResourceHelper) + mock_get_nsb_option.assert_called_once_with('bin_path') + self.assertEqual(_path_to_nsb, ls_tg.bin_path) + self.assertEqual(NAME, ls_tg.name) + self.assertTrue(ls_tg.runs_traffic) + self.assertFalse(ls_tg.traffic_finished) + self.assertIsNone(ls_tg.session_profile) + + def test_listen_traffic(self): + _traffic_profile = {} + self.assertIsNone(self.ls_tg.listen_traffic(_traffic_profile)) + + def test_terminate(self, *args): + self.ls_tg.resource_helper._tcl = mock.Mock() + self.assertIsNone(self.ls_tg.terminate()) + self.ls_tg.resource_helper._tcl.disconnect.assert_called_once() + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate(self, *args): + 
self.ls_tg._tg_process = mock.Mock() + self.ls_tg._tg_process.start = mock.Mock() + self.ls_tg.resource_helper.connect = mock.Mock() + self.ls_tg.resource_helper.create_test_servers = mock.Mock() + self.ls_tg.resource_helper.create_suts = mock.Mock() + self.ls_tg._load_session_profile = mock.Mock() + self.assertIsNone(self.ls_tg.instantiate(self.SCENARIO_CFG, + self.CONTEXT_CFG)) + self.ls_tg.resource_helper.connect.assert_called_once() + self.ls_tg.resource_helper.create_test_servers.assert_called_once() + _suts_blocks_num = len([item['suts'] for item in self.vnfd['config']]) + self.assertEqual(_suts_blocks_num, + self.ls_tg.resource_helper.create_suts.call_count) + self.ls_tg._load_session_profile.assert_called_once() + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_running_tests') + def test_run_traffic(self, mock_get_tests, *args): + self.ls_tg.resource_helper._url = EXAMPLE_URL + self.ls_tg.scenario_helper.scenario_cfg = self.SCENARIO_CFG + mock_traffic_profile = mock.Mock( + spec=landslide_profile.LandslideProfile) + mock_traffic_profile.dmf_config = { + 'keywords': 'UDP', + 'dataProtocol': 'udp', + 'dmf': {'library': 'test', 'name': 'name'}} + mock_traffic_profile.params = self.TRAFFIC_PROFILE + self.ls_tg.resource_helper._user_id = self.TEST_USER_ID + mock_get_tests.return_value = [{'id': self.SUCCESS_RECORD_ID, + 'testStateOrStep': 'COMPLETE'}] + mock_post = mock.Mock() + mock_post.status_code = self.SUCCESS_CREATED_CODE + mock_post.json.return_value = {'id': self.SUCCESS_RECORD_ID} + mock_session = mock.Mock(spec=requests.Session) + mock_session.post.return_value = mock_post + self.ls_tg.resource_helper.session = mock_session + self.ls_tg.resource_helper._tcl = mock.Mock() + _tcl = self.ls_tg.resource_helper._tcl + self.assertIsNone(self.ls_tg.run_traffic(mock_traffic_profile)) + self.assertEqual(self.SUCCESS_RECORD_ID, + self.ls_tg.resource_helper.run_id) + mock_traffic_profile.update_dmf.assert_called_with( + 
self.ls_tg.scenario_helper.all_options) + _tcl.create_dmf.assert_called_with(mock_traffic_profile.dmf_config) + _tcl.create_test_session.assert_called_with(self.session_profile) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'check_running_test_state') + def test_collect_kpi(self, mock_check_running_test_state, *args): + self.ls_tg.resource_helper.run_id = self.SUCCESS_RECORD_ID + mock_check_running_test_state.return_value = 'COMPLETE' + self.assertEqual({'done': True}, self.ls_tg.collect_kpi()) + mock_check_running_test_state.assert_called_once() + + def test_wait_for_instantiate(self): + self.assertIsNone(self.ls_tg.wait_for_instantiate()) + self.ls_tg.wait_for_instantiate() + + def test__update_session_suts_no_tc_role(self, *args): + _suts = [{'role': 'epc_role'}] + _testcase = {'parameters': {'diff_epc_role': {'class': 'Sut'}}} + res = self.ls_tg._update_session_suts(_suts, _testcase) + self.assertEqual(_testcase, res) + + def test__update_session_suts(self, *args): + + def get_testnode_param(role, key, session_prof): + """ Get value by key from the deep nested dict to avoid calls like: + e.g. session_prof['tsGroups'][0]['testCases'][1]['parameters'][key] + """ + for group in session_prof['tsGroups']: + for tc in group['testCases']: + tc_params = tc['parameters'] + if tc_params.get(role): + return tc_params[role][key] + + def get_sut_param(role, key, suts): + """ Search list of dicts for one with specific role. + Return the value of related dict by key. Expect key presence. 
+ """ + for sut in suts: + if sut.get('role') == role: + return sut[key] + + # TestNode to verify + testnode_role = 'SgwControlAddr' + # SUT to verify + sut_role = 'SgwUserSut' + + config_suts = [config['suts'] for config in self.vnfd['config']] + session_tcs = [_tc for _ts_group in self.ls_tg.session_profile['tsGroups'] + for _tc in _ts_group['testCases']] + for suts, tc in zip(config_suts, session_tcs): + self.assertEqual(tc, self.ls_tg._update_session_suts(suts, tc)) + + # Verify TestNode class objects keys were updated + for _key in {'ip', 'phy', 'nextHop'}: + self.assertEqual( + get_testnode_param(testnode_role, _key, self.ls_tg.session_profile), + get_sut_param(testnode_role, _key, TS1_SUTS)) + # Verify Sut class objects name was updated + self.assertEqual( + get_testnode_param(sut_role, 'name', self.ls_tg.session_profile), + get_sut_param(sut_role, 'name', TS2_SUTS)) + + def test__update_session_test_servers(self, *args): + for ts_index, ts in enumerate(TEST_SERVERS): + self.assertIsNone( + self.ls_tg._update_session_test_servers(ts, ts_index)) + # Verify preResolvedArpAddress key was added + self.assertTrue(any( + _item.get('preResolvedArpAddress') + for _item in self.ls_tg.session_profile['tsGroups'])) + # Verify reservations key was added to session profile + self.assertEqual(RESERVATIONS, + self.ls_tg.session_profile.get('reservations')) + self.assertEqual('true', + self.ls_tg.session_profile.get('reservePorts')) + + def test__update_session_tc_params_assoc_phys(self): + _tc_options = {'AssociatedPhys': 'eth1'} + _testcase = {} + _testcase_orig = copy.deepcopy(_testcase) + res = self.ls_tg._update_session_tc_params(_tc_options, _testcase) + self.assertNotEqual(_testcase_orig, res) + self.assertEqual(_tc_options, _testcase) + + def test__update_session_tc_params(self, *args): + + def get_session_tc_param_value(param, tc_type, session_prof): + """ Get param value from the deep nested dict to avoid calls like: + 
session_prof['tsGroups'][0]['testCases'][0]['parameters'][key] + """ + for test_group in session_prof['tsGroups']: + session_tc = test_group['testCases'][0] + if session_tc['type'] == tc_type: + return session_tc['parameters'].get(param) + + session_tcs = [_tc for _ts_group in self.ls_tg.session_profile['tsGroups'] + for _tc in _ts_group['testCases']] + scenario_tcs = [_tc for _tc in + self.SCENARIO_CFG['options']['test_cases']] + for tc_options, tc in zip(scenario_tcs, session_tcs): + self.assertEqual( + tc, + self.ls_tg._update_session_tc_params(tc_options, tc)) + + # Verify that each test case parameter was updated + # Params been compared are deeply nested. Using loops to ease access. + for _tc in self.SCENARIO_CFG['options']['test_cases']: + for _key, _val in _tc.items(): + if _key != 'type': + self.assertEqual( + _val, + get_session_tc_param_value(_key, _tc.get('type'), + self.ls_tg.session_profile)) + + def test__update_session_library_name(self, *args): + _session = copy.deepcopy(SESSION_PROFILE) + _session['tsGroups'].pop(0) + self.ls_tg.vnfd_helper = mock.MagicMock() + self.ls_tg.vnfd_helper.mgmt_interface.__getitem__.side_effect = { + 'user': TAS_INFO['user']} + self.ls_tg._update_session_library_name(_session) + _dmf = _session['tsGroups'][0]['testCases'][0]['parameters']['Dmf'] + # Expect DMF library name updated in Nodal test types + self.assertEqual(TAS_INFO['user'], _dmf['mainflows'][0]['library']) + + def test__update_session_library_name_wrong_tc_type(self, *args): + _session = copy.deepcopy(SESSION_PROFILE) + _session['tsGroups'].pop(1) + self.ls_tg.vnfd_helper = mock.MagicMock() + self.ls_tg.vnfd_helper.mgmt_interface.__getitem__.side_effect = { + 'user': TAS_INFO['user']} + # Expect DMF library name not updated in Node test types + self.assertNotIn('Dmf', + _session['tsGroups'][0]['testCases'][0]['parameters']) + self.ls_tg._update_session_library_name(_session) + + @mock.patch.object(common_utils, 'open_relative_file') + 
@mock.patch.object(yaml_loader, 'yaml_load') + @mock.patch.object(tg_landslide.LandslideTrafficGen, + '_update_session_test_servers') + @mock.patch.object(tg_landslide.LandslideTrafficGen, + '_update_session_suts') + @mock.patch.object(tg_landslide.LandslideTrafficGen, + '_update_session_tc_params') + def test__load_session_profile(self, mock_upd_ses_tc_params, + mock_upd_ses_suts, mock_upd_ses_ts, + mock_yaml_load, *args): + self.ls_tg.scenario_helper.scenario_cfg = \ + copy.deepcopy(self.SCENARIO_CFG) + mock_yaml_load.return_value = copy.deepcopy(SESSION_PROFILE) + self.assertIsNone(self.ls_tg._load_session_profile()) + self.assertIsNotNone(self.ls_tg.session_profile) + # Number of blocks in configuration files + # Number of test servers, suts and tc params blocks should be equal + _config_files_blocks_num = len([item['test_server'] + for item in self.vnfd['config']]) + self.assertEqual(_config_files_blocks_num, + mock_upd_ses_ts.call_count) + self.assertEqual(_config_files_blocks_num, + mock_upd_ses_suts.call_count) + self.assertEqual(_config_files_blocks_num, + mock_upd_ses_tc_params.call_count) + + @mock.patch.object(common_utils, 'open_relative_file') + @mock.patch.object(yaml_loader, 'yaml_load') + def test__load_session_profile_unequal_num_of_cfg_blocks( + self, mock_yaml_load, *args): + vnfd = copy.deepcopy(VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ls_traffic_gen = tg_landslide.LandslideTrafficGen(NAME, vnfd) + ls_traffic_gen.scenario_helper.scenario_cfg = self.SCENARIO_CFG + mock_yaml_load.return_value = copy.deepcopy(SESSION_PROFILE) + # Delete test_servers item from pod file to make it not valid + ls_traffic_gen.vnfd_helper['config'].pop() + with self.assertRaises(RuntimeError): + ls_traffic_gen._load_session_profile() + + @mock.patch.object(common_utils, 'open_relative_file') + @mock.patch.object(yaml_loader, 'yaml_load') + def test__load_session_profile_test_type_mismatch(self, mock_yaml_load, + *args): + vnfd = 
copy.deepcopy(VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + # Swap test servers data in pod file + vnfd['config'] = list(reversed(vnfd['config'])) + ls_tg = tg_landslide.LandslideTrafficGen(NAME, vnfd) + ls_tg.scenario_helper.scenario_cfg = self.SCENARIO_CFG + mock_yaml_load.return_value = SESSION_PROFILE + with self.assertRaises(RuntimeError): + ls_tg._load_session_profile() + + +class TestLandslideResourceHelper(unittest.TestCase): + + PROTO_PORT = 8080 + EXAMPLE_URL = ''.join([TAS_INFO['proto'], '://', TAS_INFO['ip'], ':', + str(PROTO_PORT), '/api/']) + SUCCESS_CREATED_CODE = 201 + SUCCESS_OK_CODE = 200 + INVALID_REST_CODE = '400' + NOT_MODIFIED_CODE = 500810 + ERROR_CODE = 500800 + SUCCESS_RECORD_ID = 11 + EXPIRE_DATE = '2020/01/01 12:00 FLE Standard Time' + TEST_USER = 'test' + TEST_TERMINATED = 1 + AUTH_DATA = {'user': TAS_INFO['user'], 'password': TAS_INFO['password']} + TEST_SESSION_NAME = 'default_bearer_capacity' + + USERS_DATA = { + "users": [{ + "url": ''.join([EXAMPLE_URL, 'users/', str(SUCCESS_RECORD_ID)]), + "id": SUCCESS_RECORD_ID, + "level": 1, + "username": TEST_USER + }] + } + + CREATE_USER_DATA = {'username': TAS_INFO['user'], + 'expiresOn': EXPIRE_DATE, + 'level': 1, + 'contactInformation': '', + 'fullName': 'Test User', + 'password': TAS_INFO['password'], + 'isActive': 'true'} + + SUTS_DATA = { + "suts": [ + { + "url": ''.join([EXAMPLE_URL, 'suts/', str(SUCCESS_RECORD_ID)]), + "id": SUCCESS_RECORD_ID, + "name": "10.41.32.1" + }]} + + TEST_SERVERS_DATA = { + "testServers": [ + { + "url": ''.join([EXAMPLE_URL, "testServers/1"]), + "id": 1, + "name": TEST_SERVERS[0]['name'], + "state": "READY", + "version": "16.4.0.10" + }, + { + "url": ''.join([EXAMPLE_URL, "testServers/2"]), + "id": 2, + "name": TEST_SERVERS[1]['name'], + "state": "READY", + "version": "16.4.0.10" + } + + ] + } + + RUN_ID = 3 + + RUNNING_TESTS_DATA = { + "runningTests": [{ + "url": ''.join([EXAMPLE_URL, "runningTests/{}".format(RUN_ID)]), + "measurementsUrl": ''.join( + [EXAMPLE_URL, 
+ "runningTests/{}/measurements".format(RUN_ID)]), + "criteriaUrl": ''.join( + [EXAMPLE_URL, + "runningTests/{}/criteria".format(RUN_ID)]), + "noteToUser": "", + "id": RUN_ID, + "library": SUCCESS_RECORD_ID, + "name": "default_bearer_capacity", + "user": TEST_USER, + "criteriaStatus": "NA", + "testStateOrStep": "COMPLETE" + }]} + + TEST_RESULTS_DATA = { + "interval": 0, + "elapsedTime": 138, + "actualTime": 1521548057296, + "iteration": 1, + "tabs": { + "Test Summary": { + "Start Time": "Tue Mar 20 07:11:55 CDT 2018", + "Actual Dedicated Bearer Session Connects": "100", + "Actual Dedicated Bearer Session Disconnects": "100", + "Actual Disconnect Rate(Sessions / Second)(P - I)": "164.804", + "Average Session Disconnect Time(P - I)": "5.024 s", + "Total Data Sent + Received Packets / Sec(P - I)": "1,452.294" + }}} + + def setUp(self): + self.mock_lsapi = mock.patch.object(tg_landslide, 'LsApi') + self.mock_lsapi.start() + + mock_env_helper = mock.Mock() + self.res_helper = tg_landslide.LandslideResourceHelper(mock_env_helper) + self.res_helper._url = EXAMPLE_URL + + self.addCleanup(self._cleanup) + + def _cleanup(self): + self.mock_lsapi.stop() + self.res_helper._url = None + + def test___init__(self, *args): + self.assertIsInstance(self.res_helper, + tg_landslide.LandslideResourceHelper) + self.assertEqual({}, self.res_helper._result) + self.assertIsNone(self.res_helper.run_id) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'stop_running_tests') + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_running_tests') + def test_abort_running_tests_no_running_tests(self, mock_get_tests, + mock_stop_tests, *args): + tests_data = [{'id': self.SUCCESS_RECORD_ID, + 'testStateOrStep': 'COMPLETE'}] + mock_get_tests.return_value = tests_data + self.assertIsNone(self.res_helper.abort_running_tests()) + mock_stop_tests.assert_not_called() + + @mock.patch.object(time, 'sleep') + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 
'stop_running_tests') + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_running_tests') + def test_abort_running_tests(self, mock_get_tests, mock_stop_tests, *args): + test_states_seq = iter(['RUNNING', 'COMPLETE']) + + def configure_mock(*args): + return [{'id': self.SUCCESS_RECORD_ID, + 'testStateOrStep': next(test_states_seq)}] + + mock_get_tests.side_effect = configure_mock + self.assertIsNone(self.res_helper.abort_running_tests()) + mock_stop_tests.assert_called_once_with( + running_test_id=self.SUCCESS_RECORD_ID, + force=True) + self.assertEqual(2, mock_get_tests.call_count) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'stop_running_tests') + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_running_tests') + def test_abort_running_tests_error(self, mock_get_tests, mock_stop_tests, + *args): + tests_data = {'id': self.SUCCESS_RECORD_ID, + 'testStateOrStep': 'RUNNING'} + mock_get_tests.return_value = [tests_data] + with self.assertRaises(RuntimeError): + self.res_helper.abort_running_tests(timeout=1, delay=1) + mock_stop_tests.assert_called_with( + running_test_id=self.SUCCESS_RECORD_ID, + force=True) + + def test__build_url(self, *args): + resource = 'users' + action = {'action': 'userCreate'} + expected_url = ''.join([EXAMPLE_URL, 'users?action=userCreate']) + self.assertEqual(expected_url, + self.res_helper._build_url(resource, action)) + + def test__build_url_error(self, *args): + resource = '' + action = {'action': 'userCreate'} + + with self.assertRaises(ValueError): + self.res_helper._build_url(resource, action) + + def test_get_response_params(self, *args): + method = 'get' + resource = 'users' + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.USERS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + resp = self.res_helper.get_response_params(method, resource) + 
self.assertTrue(resp) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, '_get_users') + @mock.patch.object(time, 'time') + def test__create_user(self, mock_time, mock_get_users, *args): + mock_time.strftime.return_value = self.EXPIRE_DATE + post_resp_data = {'status_code': self.SUCCESS_CREATED_CODE, + 'json.return_value': {'id': self.SUCCESS_RECORD_ID}} + mock_session = mock.Mock(spec=requests.Session) + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.assertEqual(self.SUCCESS_RECORD_ID, + self.res_helper._create_user(self.AUTH_DATA)) + mock_get_users.assert_not_called() + + @mock.patch.object(tg_landslide.LandslideResourceHelper, '_modify_user') + @mock.patch.object(time, 'time') + def test__create_user_username_exists(self, mock_time, mock_modify_user, + *args): + mock_time.strftime.return_value = self.EXPIRE_DATE + mock_modify_user.return_value = {'id': self.SUCCESS_RECORD_ID, + 'result': 'No changes requested'} + post_resp_data = { + 'status_code': self.ERROR_CODE, + 'json.return_value': {'id': self.SUCCESS_OK_CODE, + 'apiCode': self.NOT_MODIFIED_CODE}} + mock_session = mock.Mock(spec=requests.Session) + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + res = self.res_helper._create_user(self.AUTH_DATA) + mock_modify_user.assert_called_once_with(TAS_INFO['user'], + {'isActive': 'true'}) + self.assertEqual(self.SUCCESS_RECORD_ID, res) + + @mock.patch.object(time, 'time') + def test__create_user_error(self, mock_time, *args): + mock_time.strftime.return_value = self.EXPIRE_DATE + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': {'apiCode': self.ERROR_CODE}} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + with self.assertRaises(exceptions.RestApiError): + 
self.res_helper._create_user(self.AUTH_DATA) + + def test__modify_user(self, *args): + post_data = {'username': 'test_user'} + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': {'id': self.SUCCESS_RECORD_ID}} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + res = self.res_helper._modify_user(username=self.TEST_USER, + fields=post_data) + self.assertEqual(self.SUCCESS_RECORD_ID, res['id']) + + def test__modify_user_rest_resp_fail(self, *args): + post_data = {'non-existing-key': ''} + mock_session = mock.Mock(spec=requests.Session) + mock_session.post.ok = False + self.res_helper.session = mock_session + self.assertRaises(exceptions.RestApiError, + self.res_helper._modify_user, + username=self.TEST_USER, fields=post_data) + mock_session.post.assert_called_once() + + def test__delete_user(self, *args): + mock_session = mock.Mock(spec=requests.Session) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper._delete_user( + username=self.TEST_USER)) + + def test__get_users(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.USERS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + self.assertEqual(self.USERS_DATA['users'], + self.res_helper._get_users()) + + def test_exec_rest_request(self, *args): + resource = 'testServers' + action = {'action': 'modify'} + expected_url = ''.join([EXAMPLE_URL, 'testServers?action=modify']) + post_resp_data = {'status_code': self.SUCCESS_CREATED_CODE, + 'json.return_value': {'id': self.SUCCESS_RECORD_ID}} + mock_session = mock.Mock(spec=requests.Session) + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.res_helper.exec_rest_request('post', resource, action) + 
self.res_helper.session.post.assert_called_once_with(expected_url, + json={}) + + def test_exec_rest_request_unsupported_method_error(self, *args): + resource = 'testServers' + action = {'action': 'modify'} + with self.assertRaises(ValueError): + self.res_helper.exec_rest_request('patch', resource, action) + + def test_exec_rest_request_missed_action_arg(self, *args): + resource = 'testServers' + with self.assertRaises(ValueError): + self.res_helper.exec_rest_request('post', resource) + + def test_exec_rest_request_raise_exc(self): + resource = 'users' + action = {'action': 'modify'} + post_resp_data = {'status_code': self.ERROR_CODE, + 'json.return_value': { + 'status_code': self.ERROR_CODE}} + mock_session = mock.Mock(spec=requests.Session) + mock_session.post.return_value.configure_mock(**post_resp_data) + self.assertRaises(exceptions.RestApiError, + self.res_helper.exec_rest_request, + 'post', resource, action, raise_exc=True) + + @mock.patch.object(time, 'time') + def test_connect(self, mock_time, *args): + vnfd = VNFD['vnfd:vnfd-catalog']['vnfd'][0] + mock_time.strftime.return_value = self.EXPIRE_DATE + self.res_helper.vnfd_helper = vnfd + + self.res_helper._tcl = mock.Mock() + post_resp_data = {'status_code': self.SUCCESS_CREATED_CODE, + 'json.return_value': {'id': self.SUCCESS_RECORD_ID}} + mock_session = mock.Mock(spec=requests.Session, headers={}) + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.assertIsInstance(self.res_helper.connect(), requests.Session) + self.res_helper._tcl.connect.assert_called_once_with( + TAS_INFO['ip'], + TAS_INFO['user'], + TAS_INFO['password']) + + def test_disconnect(self, *args): + self.res_helper._tcl = mock.Mock() + self.assertIsNone(self.res_helper.disconnect()) + self.assertIsNone(self.res_helper.session) + self.res_helper._tcl.disconnect.assert_called_once() + + def test_terminate(self, *args): + self.assertIsNone(self.res_helper.terminate()) + 
self.assertEqual(self.TEST_TERMINATED, + self.res_helper._terminated.value) + + def test_create_dmf(self, *args): + self.res_helper._tcl = mock.Mock() + self.res_helper.vnfd_helper = mock.Mock(spec=vnf_base.VnfdHelper) + self.res_helper.vnfd_helper.mgmt_interface = {'user': TAS_INFO['user']} + self.assertIsNone(self.res_helper.create_dmf(DMF_CFG)) + self.res_helper._tcl.create_dmf.assert_called_once_with(DMF_CFG) + + def test_create_dmf_as_list(self, *args): + self.res_helper._tcl = mock.Mock() + self.res_helper.vnfd_helper = mock.Mock(spec=vnf_base.VnfdHelper) + self.res_helper.vnfd_helper.mgmt_interface = {'user': TAS_INFO['user']} + self.assertIsNone(self.res_helper.create_dmf([DMF_CFG])) + self.res_helper._tcl.create_dmf.assert_called_once_with(DMF_CFG) + + def test_delete_dmf(self, *args): + self.res_helper._tcl = mock.Mock() + self.assertIsNone(self.res_helper.delete_dmf(DMF_CFG)) + self.res_helper._tcl.delete_dmf.assert_called_once_with(DMF_CFG) + + def test_delete_dmf_as_list(self, *args): + self.res_helper._tcl = mock.Mock() + self.assertIsNone(self.res_helper.delete_dmf([DMF_CFG])) + self.res_helper._tcl.delete_dmf.assert_called_once_with(DMF_CFG) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, 'configure_sut') + def test_create_suts(self, mock_configure_sut, *args): + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.SUCCESS_CREATED_CODE} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper.create_suts(TS1_SUTS)) + mock_configure_sut.assert_not_called() + + @mock.patch.object(tg_landslide.LandslideResourceHelper, 'configure_sut') + def test_create_suts_sut_exists(self, mock_configure_sut, *args): + sut_name = 'test_sut' + suts = [ + {'name': sut_name, + 'role': 'SgwControlAddr', + 'managementIp': '12.0.1.1', + 'ip': '10.42.32.100' + } + ] + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = 
{'status_code': self.NOT_MODIFIED_CODE} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper.create_suts(suts)) + mock_configure_sut.assert_called_once_with( + sut_name=sut_name, + json_data={k: v for k, v in suts[0].items() + if k not in {'phy', 'nextHop', 'role', 'name'}}) + + def test_get_suts(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.SUTS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + self.assertIsInstance(self.res_helper.get_suts(), list) + + def test_get_suts_single_id(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.SUTS_DATA['suts'][0]} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + self.assertIsInstance(self.res_helper.get_suts(suts_id=2), dict) + + def test_configure_sut(self, *args): + post_data = {'managementIp': '2.2.2.2'} + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': {'id': self.SUCCESS_RECORD_ID}} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper.configure_sut('test_name', + post_data)) + mock_session.post.assert_called_once() + + def test_configure_sut_error(self, *args): + post_data = {'managementIp': '2.2.2.2'} + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.NOT_MODIFIED_CODE} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + with self.assertRaises(exceptions.RestApiError): + self.res_helper.configure_sut('test_name', post_data) + + def test_delete_suts(self, 
*args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.SUTS_DATA} + delete_resp_data = {'status_code': self.SUCCESS_OK_CODE} + mock_session.get.return_value.configure_mock(**get_resp_data) + mock_session.delete.return_value.configure_mock(**delete_resp_data) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper.delete_suts()) + mock_session.delete.assert_called_once() + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_test_servers') + def test__check_test_servers_state(self, mock_get_test_servers, *args): + mock_get_test_servers.return_value = \ + self.TEST_SERVERS_DATA['testServers'] + self.res_helper._check_test_servers_state() + mock_get_test_servers.assert_called_once() + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_test_servers') + def test__check_test_servers_state_server_not_ready( + self, mock_get_test_servers, *args): + test_servers_not_ready = [ + { + "url": ''.join([EXAMPLE_URL, "testServers/1"]), + "id": 1, + "name": "TestServer_1", + "state": "NOT_READY", + "version": "16.4.0.10" + } + ] + + mock_get_test_servers.return_value = test_servers_not_ready + with self.assertRaises(RuntimeError): + self.res_helper._check_test_servers_state(timeout=1, delay=0) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + '_check_test_servers_state') + def test_create_test_servers(self, mock_check_ts_state, *args): + test_servers_ids = [ + ts['id'] for ts in self.TEST_SERVERS_DATA['testServers']] + + self.res_helper.license_data['lic_id'] = TAS_INFO['license'] + self.res_helper._tcl.create_test_server = mock.Mock() + self.res_helper._tcl.create_test_server.side_effect = test_servers_ids + self.assertIsNone(self.res_helper.create_test_servers(TEST_SERVERS)) + mock_check_ts_state.assert_called_once_with(test_servers_ids) + + @mock.patch.object(tg_landslide.LandslideTclClient, + 'resolve_test_server_name') + 
@mock.patch.object(tg_landslide.LsTclHandler, 'execute') + def test_create_test_servers_error(self, mock_execute, + mock_resolve_ts_name, *args): + self.res_helper.license_data['lic_id'] = TAS_INFO['license'] + # Return message for case test server wasn't created + mock_execute.return_value = 'TS not found' + # Return message for case test server name wasn't resolved + mock_resolve_ts_name.return_value = 'TS not found' + with self.assertRaises(RuntimeError): + self.res_helper.create_test_servers(TEST_SERVERS) + + def test_get_test_servers(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.TEST_SERVERS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + res = self.res_helper.get_test_servers() + self.assertEqual(self.TEST_SERVERS_DATA['testServers'], res) + + def test_get_test_servers_by_id(self, *args): + mock_session = mock.Mock(spec=requests.Session) + + _ts = self.TEST_SERVERS_DATA['testServers'][0] + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': _ts} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + res = self.res_helper.get_test_servers(test_server_ids=[_ts['id']]) + self.assertEqual([_ts], res) + + def test_configure_test_servers(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.TEST_SERVERS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + res = self.res_helper.configure_test_servers( + action={'action': 'recycle'}) + self.assertEqual( + [x['id'] for x in self.TEST_SERVERS_DATA['testServers']], + res) + self.assertEqual(len(self.TEST_SERVERS_DATA['testServers']), + mock_session.post.call_count) + + def test_delete_test_servers(self, *args): + mock_session = 
mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.TEST_SERVERS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper.delete_test_servers()) + self.assertEqual(len(self.TEST_SERVERS_DATA['testServers']), + mock_session.delete.call_count) + + def test_create_test_session_res_helper(self, *args): + self.res_helper._user_id = self.SUCCESS_RECORD_ID + self.res_helper._tcl = mock.Mock() + self.res_helper.scenario_helper.all_options = {'traffic_duration': 71} + _session = {'name': 'test', 'duration': 60} + self.assertIsNone(self.res_helper.create_test_session(_session)) + self.res_helper._tcl.create_test_session.assert_called_once_with( + {'name': _session['name'], + 'duration': 71, + 'library': self.SUCCESS_RECORD_ID}) + + def test_create_test_session_res_helper_no_traffic_duration(self, *args): + self.res_helper._user_id = self.SUCCESS_RECORD_ID + self.res_helper._tcl = mock.Mock() + self.res_helper.scenario_helper.all_options = {} + _session = {'name': 'test', 'duration': 60} + self.assertIsNone(self.res_helper.create_test_session(_session)) + self.res_helper._tcl.create_test_session.assert_called_once_with( + {'name': _session['name'], + 'duration': 60, + 'library': self.SUCCESS_RECORD_ID}) + + @mock.patch.object(tg_landslide.LandslideTclClient, + 'resolve_test_server_name', + return_value='Not Found') + def test_create_test_session_ts_name_not_found(self, *args): + self.res_helper._user_id = self.SUCCESS_RECORD_ID + test_session = { + 'duration': 60, + 'description': 'UE default bearer creation test case', + 'name': 'default_bearer_capacity', + 'tsGroups': [{'testCases': [{'type': 'SGW_Node', + 'name': ''}], + 'tsId': 'TestServer_3'}] + } + with self.assertRaises(RuntimeError): + self.res_helper.create_test_session(test_session) + + def test_get_test_session(self, *args): + test_session = {"name": 
self.TEST_SESSION_NAME} + self.res_helper._user_id = self.SUCCESS_RECORD_ID + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': test_session} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + res = self.res_helper.get_test_session(self.TEST_SESSION_NAME) + self.assertEqual(test_session, res) + + def test_configure_test_session(self, *args): + test_session = {'name': self.TEST_SESSION_NAME} + self.res_helper._user_id = self.SUCCESS_RECORD_ID + self.res_helper.user_lib_uri = 'libraries/{{}}/{}'.format( + self.res_helper.test_session_uri) + mock_session = mock.Mock(spec=requests.Session) + self.res_helper.session = mock_session + res = self.res_helper.configure_test_session(self.TEST_SESSION_NAME, + test_session) + self.assertIsNotNone(res) + mock_session.post.assert_called_once() + + def test_delete_test_session(self, *args): + self.res_helper._user_id = self.SUCCESS_RECORD_ID + self.res_helper.user_lib_uri = 'libraries/{{}}/{}'.format( + self.res_helper.test_session_uri) + mock_session = mock.Mock(spec=requests.Session) + self.res_helper.session = mock_session + res = self.res_helper.delete_test_session(self.TEST_SESSION_NAME) + self.assertIsNotNone(res) + mock_session.delete.assert_called_once() + + def test_create_running_tests(self, *args): + self.res_helper._user_id = self.SUCCESS_RECORD_ID + test_session = {'id': self.SUCCESS_RECORD_ID} + mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.SUCCESS_CREATED_CODE, + 'json.return_value': test_session} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + self.res_helper.create_running_tests(self.TEST_SESSION_NAME) + self.assertEqual(self.SUCCESS_RECORD_ID, self.res_helper.run_id) + + def test_create_running_tests_error(self, *args): + self.res_helper._user_id = self.SUCCESS_RECORD_ID + 
mock_session = mock.Mock(spec=requests.Session) + post_resp_data = {'status_code': self.NOT_MODIFIED_CODE} + mock_session.post.return_value.configure_mock(**post_resp_data) + self.res_helper.session = mock_session + with self.assertRaises(exceptions.RestApiError): + self.res_helper.create_running_tests(self.TEST_SESSION_NAME) + + def test_get_running_tests(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.RUNNING_TESTS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + res = self.res_helper.get_running_tests() + self.assertEqual(self.RUNNING_TESTS_DATA['runningTests'], res) + + def test_delete_running_tests(self, *args): + mock_session = mock.Mock(spec=requests.Session) + delete_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.RUNNING_TESTS_DATA} + mock_session.delete.return_value.configure_mock(**delete_resp_data) + self.res_helper.session = mock_session + self.assertIsNone(self.res_helper.delete_running_tests()) + + def test__running_tests_action(self, *args): + action = 'abort' + mock_session = mock.Mock(spec=requests.Session) + self.res_helper.session = mock_session + res = self.res_helper._running_tests_action(self.SUCCESS_RECORD_ID, + action) + self.assertIsNone(res) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + '_running_tests_action') + def test_stop_running_tests(self, mock_tests_action, *args): + res = self.res_helper.stop_running_tests(self.SUCCESS_RECORD_ID) + self.assertIsNone(res) + mock_tests_action.assert_called_once() + + def test_check_running_test_state(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = { + 'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.RUNNING_TESTS_DATA["runningTests"][0]} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + 
res = self.res_helper.check_running_test_state(self.SUCCESS_RECORD_ID) + self.assertEqual( + self.RUNNING_TESTS_DATA["runningTests"][0]['testStateOrStep'], + res) + + def test_get_running_tests_results(self, *args): + mock_session = mock.Mock(spec=requests.Session) + get_resp_data = {'status_code': self.SUCCESS_OK_CODE, + 'json.return_value': self.TEST_RESULTS_DATA} + mock_session.get.return_value.configure_mock(**get_resp_data) + self.res_helper.session = mock_session + res = self.res_helper.get_running_tests_results( + self.SUCCESS_RECORD_ID) + self.assertEqual(self.TEST_RESULTS_DATA, res) + + def test__write_results(self, *args): + res = self.res_helper._write_results(self.TEST_RESULTS_DATA) + exp_res = { + "Test Summary::Actual Dedicated Bearer Session Connects": 100.0, + "Test Summary::Actual Dedicated Bearer Session Disconnects": 100.0, + "Test Summary::Actual Disconnect Rate(Sessions / Second)(P - I)": 164.804, + "Test Summary::Average Session Disconnect Time(P - I)": 5.024, + "Test Summary::Total Data Sent + Received Packets / Sec(P - I)": 1452.294 + } + self.assertEqual(exp_res, res) + + def test__write_results_no_tabs(self, *args): + _res_data = copy.deepcopy(self.TEST_RESULTS_DATA) + del _res_data['tabs'] + # Return None if tabs not found in test results dict + self.assertIsNone(self.res_helper._write_results(_res_data)) + + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'check_running_test_state') + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_running_tests_results') + def test_collect_kpi_test_running(self, mock_tests_results, + mock_tests_state, *args): + self.res_helper.run_id = self.SUCCESS_RECORD_ID + mock_tests_state.return_value = 'RUNNING' + mock_tests_results.return_value = self.TEST_RESULTS_DATA + res = self.res_helper.collect_kpi() + self.assertNotIn('done', res) + mock_tests_state.assert_called_once_with(self.res_helper.run_id) + mock_tests_results.assert_called_once_with(self.res_helper.run_id) + + 
@mock.patch.object(tg_landslide.LandslideResourceHelper, + 'check_running_test_state') + @mock.patch.object(tg_landslide.LandslideResourceHelper, + 'get_running_tests_results') + def test_collect_kpi_test_completed(self, mock_tests_results, + mock_tests_state, *args): + self.res_helper.run_id = self.SUCCESS_RECORD_ID + mock_tests_state.return_value = 'COMPLETE' + res = self.res_helper.collect_kpi() + self.assertIsNotNone(res) + mock_tests_state.assert_called_once_with(self.res_helper.run_id) + mock_tests_results.assert_not_called() + self.assertDictContainsSubset({'done': True}, res) + + +class TestLandslideTclClient(unittest.TestCase): + def setUp(self): + self.mock_tcl_handler = mock.Mock(spec=tg_landslide.LsTclHandler) + self.ls_res_helper = mock.Mock( + spec=tg_landslide.LandslideResourceHelper) + self.ls_tcl_client = tg_landslide.LandslideTclClient( + self.mock_tcl_handler, + self.ls_res_helper) + + def test___init__(self, *args): + self.ls_tcl_client = tg_landslide.LandslideTclClient( + self.mock_tcl_handler, + self.ls_res_helper) + self.assertIsNone(self.ls_tcl_client.tcl_server_ip) + self.assertIsNone(self.ls_tcl_client._user) + self.assertIsNone(self.ls_tcl_client._library_id) + self.assertIsNone(self.ls_tcl_client._basic_library_id) + self.assertEqual(set(), self.ls_tcl_client.ts_ids) + self.assertIsInstance(self.ls_tcl_client._tc_types, set) + self.assertIsNotNone(self.ls_tcl_client._tc_types) + + def test_connect_login_success(self, *args): + lib_id = '123' + exec_responses = ['java0x2', lib_id, lib_id] + auth = ('user', 'password') + self.mock_tcl_handler.execute.side_effect = exec_responses + self.ls_tcl_client.connect(TAS_INFO['ip'], *auth) + self.assertEqual(lib_id, self.ls_tcl_client._library_id) + self.assertEqual(lib_id, self.ls_tcl_client._basic_library_id) + self.assertEqual(TAS_INFO['ip'], self.ls_tcl_client.tcl_server_ip) + self.assertEqual(auth[0], self.ls_tcl_client._user) + self.assertEqual(len(exec_responses), + 
self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call("ls::login 1.1.1.1 user password"), + mock.call("ls::get [ls::query LibraryInfo -userLibraryName user] -Id"), + ]) + + def test_connect_login_failed(self, *args): + exec_responses = ['Login failed'] + auth = ('user', 'password') + self.mock_tcl_handler.execute.side_effect = exec_responses + self.assertRaises(exceptions.LandslideTclException, + self.ls_tcl_client.connect, + TAS_INFO['ip'], + *auth) + self.assertIsNone(self.ls_tcl_client._library_id) + self.assertIsNone(self.ls_tcl_client._basic_library_id) + self.assertIsNone(self.ls_tcl_client.tcl_server_ip) + self.assertIsNone(self.ls_tcl_client._user) + self.assertEqual(len(exec_responses), + self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_called_with( + "ls::login 1.1.1.1 user password") + + def test_disconnect(self, *args): + self.ls_tcl_client.disconnect() + self.mock_tcl_handler.execute.assert_called_once_with("ls::logout") + self.assertIsNone(self.ls_tcl_client.tcl_server_ip) + self.assertIsNone(self.ls_tcl_client._user) + self.assertIsNone(self.ls_tcl_client._library_id) + self.assertIsNone(self.ls_tcl_client._basic_library_id) + + def test_create_test_server(self, *args): + return_value = '2' + self.ls_tcl_client._ts_context.vnfd_helper = \ + VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.ls_tcl_client._ts_context.license_data = {'lic_id': return_value} + self.mock_tcl_handler.execute.return_value = return_value + self.ls_tcl_client._set_thread_model = mock.Mock() + res = self.ls_tcl_client.create_test_server(TEST_SERVERS[1]) + self.assertEqual(3, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::query TsId TestServer_2'), + mock.call('set ts [ls::retrieve TsInfo -Name "TestServer_2"]'), + mock.call('ls::get $ts -RequestedLicense'), + ]) + self.ls_tcl_client._set_thread_model.assert_called_once_with( + 
TEST_SERVERS[1]['name'], + TEST_SERVERS[1]['thread_model']) + self.assertEqual(int(return_value), res) + + def test_create_test_server_fail_limit_reach(self, *args): + self.mock_tcl_handler.execute.side_effect = ['TS not found', + 'Add failed'] + self.assertRaises(RuntimeError, + self.ls_tcl_client.create_test_server, + TEST_SERVERS[0]) + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::query TsId TestServer_1'), + mock.call('ls::perform AddTs -Name "TestServer_1" ' + '-Ip "192.168.122.101"'), + ]) + + def test__add_test_server(self): + ts_id = '2' + self.mock_tcl_handler.execute.side_effect = ['TS not found', ts_id] + self.assertEqual(ts_id, + self.ls_tcl_client._add_test_server('name', 'ip')) + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::query TsId name'), + mock.call('ls::perform AddTs -Name "name" -Ip "ip"'), + ]) + + def test__add_test_server_failed(self): + self.mock_tcl_handler.execute.side_effect = ['TS not found', + 'Add failed'] + self.assertRaises(RuntimeError, self.ls_tcl_client._add_test_server, + 'name', 'ip') + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::query TsId name'), + mock.call('ls::perform AddTs -Name "name" -Ip "ip"'), + ]) + + def test__update_license(self): + curr_lic_id = '111' + new_lic_id = '222' + exec_resp = ['java0x4', + curr_lic_id, + TCL_SUCCESS_RESPONSE, + TCL_SUCCESS_RESPONSE] + self.ls_tcl_client._ts_context.license_data = {'lic_id': new_lic_id} + self.mock_tcl_handler.execute.side_effect = exec_resp + self.ls_tcl_client._update_license('name') + self.assertEqual(len(exec_resp), + self.mock_tcl_handler.execute.call_count) + + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set ts [ls::retrieve TsInfo -Name "name"]'), + mock.call('ls::get $ts -RequestedLicense'), + 
mock.call('ls::config $ts -RequestedLicense 222'), + mock.call('ls::perform ModifyTs $ts'), + ]) + + def test__update_license_same_as_current(self): + curr_lic_id = '111' + new_lic_id = '111' + exec_resp = ['java0x4', curr_lic_id] + self.ls_tcl_client._ts_context.license_data = {'lic_id': new_lic_id} + self.mock_tcl_handler.execute.side_effect = exec_resp + self.ls_tcl_client._update_license('name') + self.assertEqual(len(exec_resp), + self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set ts [ls::retrieve TsInfo -Name "name"]'), + mock.call('ls::get $ts -RequestedLicense'), + ]) + + def test__set_thread_model_update_needed(self): + self.ls_tcl_client._ts_context.vnfd_helper = { + 'mgmt-interface': { + 'cfguser_password': 'cfguser_password' + } + } + exec_resp = ['java0x4', 'V0', '', ''] + self.mock_tcl_handler.execute.side_effect = exec_resp + self.ls_tcl_client._set_thread_model('name', 'Fireball') + self.assertEqual(len(exec_resp), + self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set tsc [ls::perform RetrieveTsConfiguration ' + '-name "name" cfguser_password]'), + mock.call('ls::get $tsc -ThreadModel'), + mock.call('ls::config $tsc -ThreadModel "V1_FB3"'), + mock.call('ls::perform ApplyTsConfiguration $tsc cfguser_password'), + ]) + + def test__set_thread_model_no_update_needed(self): + self.ls_tcl_client._ts_context.vnfd_helper = { + 'mgmt-interface': { + 'cfguser_password': 'cfguser_password' + } + } + exec_resp = ['java0x4', 'V0'] + self.mock_tcl_handler.execute.side_effect = exec_resp + self.ls_tcl_client._set_thread_model('name', 'Legacy') + self.assertEqual(len(exec_resp), + self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set tsc [ls::perform RetrieveTsConfiguration ' + '-name "name" cfguser_password]'), + mock.call('ls::get $tsc -ThreadModel'), + ]) + + 
@mock.patch.object(tg_landslide.LandslideTclClient, + 'resolve_test_server_name', side_effect=['4', '2']) + def test_create_test_session(self, *args): + _session_profile = copy.deepcopy(SESSION_PROFILE) + _session_profile['reservations'] = RESERVATIONS + self.ls_tcl_client._save_test_session = mock.Mock() + self.ls_tcl_client._configure_ts_group = mock.Mock() + self.ls_tcl_client._library_id = 42 + self.ls_tcl_client.create_test_session(_session_profile) + self.assertEqual(17, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set test_ [ls::create TestSession]'), + mock.call('ls::config $test_ -Library 42 ' + '-Name "default_bearer_capacity"'), + mock.call('ls::config $test_ -Description ' \ + '"UE default bearer creation test case"'), + mock.call('ls::config $test_ -Keywords ""'), + mock.call('ls::config $test_ -Duration "60"'), + mock.call('ls::config $test_ -Iterations "1"'), + # _configure_reservation + mock.call('set reservation_ [ls::create Reservation -under $test_]'), + mock.call('ls::config $reservation_ -TsIndex 0 ' + '-TsId 4 -TsName "TestServer_1"'), + mock.call('set physubnet_ [ls::create PhySubnet -under $reservation_]'), + mock.call('ls::config $physubnet_ -Name "eth1" -Base "10.42.32.100" ' + '-Mask "/24" -NumIps 20'), + # _configure_reservation + mock.call('set reservation_ [ls::create Reservation -under $test_]'), + mock.call('ls::config $reservation_ -TsIndex 1 ' + '-TsId 2 -TsName "TestServer_2"'), + mock.call('set physubnet_ [ls::create PhySubnet -under $reservation_]'), + mock.call('ls::config $physubnet_ -Name "eth1" -Base "10.42.32.1" ' + '-Mask "/24" -NumIps 100'), + mock.call('set physubnet_ [ls::create PhySubnet -under $reservation_]'), + mock.call('ls::config $physubnet_ -Name "eth2" -Base "10.42.33.1" ' + '-Mask "/24" -NumIps 100'), + # _configure_report_options + mock.call('ls::config $test_.ReportOptions -Format 1 -Ts -3 -Tc -3'), + ]) + + def test_create_dmf(self): + 
self.mock_tcl_handler.execute.return_value = '2' + self.ls_tcl_client._save_dmf = mock.Mock() + self.ls_tcl_client.create_dmf(copy.deepcopy(DMF_CFG)) + self.assertEqual(6, self.mock_tcl_handler.execute.call_count) + # This is needed because the dictionary is unordered and the arguments + # can come in either order + call1 = mock.call( + 'ls::config $dmf_ -clientPort 2002 -isClientPortRange "false"') + call2 = mock.call( + 'ls::config $dmf_ -isClientPortRange "false" -clientPort 2002') + self.assertTrue( + call1 in self.mock_tcl_handler.execute.mock_calls or + call2 in self.mock_tcl_handler.execute.mock_calls) + + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set dmf_ [ls::create Dmf]'), + mock.call( + 'ls::get [ls::query LibraryInfo -systemLibraryName user] -Id'), + mock.call('ls::config $dmf_ -Library 2 -Name "Basic UDP"'), + mock.call('ls::config $dmf_ -dataProtocol "udp"'), + # mock.call( + # 'ls::config $dmf_ -clientPort 2002 -isClientPortRange "false"'), + mock.call('ls::config $dmf_ -serverPort 2003'), + ], any_order=True) + + def test_configure_dmf(self): + self.mock_tcl_handler.execute.return_value = '2' + self.ls_tcl_client._save_dmf = mock.Mock() + self.ls_tcl_client.configure_dmf(DMF_CFG) + self.assertEqual(6, self.mock_tcl_handler.execute.call_count) + # This is need because the dictionary is unordered and the arguments + # can come in either order + call1 = mock.call( + 'ls::config $dmf_ -clientPort 2002 -isClientPortRange "false"') + call2 = mock.call( + 'ls::config $dmf_ -isClientPortRange "false" -clientPort 2002') + self.assertTrue( + call1 in self.mock_tcl_handler.execute.mock_calls or + call2 in self.mock_tcl_handler.execute.mock_calls) + + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set dmf_ [ls::create Dmf]'), + mock.call( + 'ls::get [ls::query LibraryInfo -systemLibraryName user] -Id'), + mock.call('ls::config $dmf_ -Library 2 -Name "Basic UDP"'), + mock.call('ls::config $dmf_ -dataProtocol "udp"'), + # 
mock.call( + # 'ls::config $dmf_ -clientPort 2002 -isClientPortRange "false"'), + mock.call('ls::config $dmf_ -serverPort 2003'), + ], any_order=True) + + def test_delete_dmf(self): + self.assertRaises(NotImplementedError, + self.ls_tcl_client.delete_dmf, + DMF_CFG) + + def test__save_dmf_valid(self): + exec_resp = [TCL_SUCCESS_RESPONSE, TCL_SUCCESS_RESPONSE] + self.mock_tcl_handler.execute.side_effect = exec_resp + self.ls_tcl_client._save_dmf() + self.assertEqual(len(exec_resp), + self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::perform Validate -Dmf $dmf_'), + mock.call('ls::save $dmf_ -overwrite'), + ]) + + def test__save_dmf_invalid(self): + exec_resp = ['Invalid', 'List of errors and warnings'] + self.mock_tcl_handler.execute.side_effect = exec_resp + self.assertRaises(exceptions.LandslideTclException, + self.ls_tcl_client._save_dmf) + self.assertEqual(len(exec_resp), + self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::perform Validate -Dmf $dmf_'), + mock.call('ls::get $dmf_ -ErrorsAndWarnings'), + ]) + + def test__configure_report_options(self): + _options = {'format': 'CSV', 'PerInterval': 'false'} + self.ls_tcl_client._configure_report_options(_options) + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::config $test_.ReportOptions -Format 1 -Ts -3 -Tc -3'), + mock.call('ls::config $test_.ReportOptions -PerInterval false'), + ], + any_order=True) + + def test___configure_ts_group(self, *args): + _ts_group = copy.deepcopy(SESSION_PROFILE['tsGroups'][0]) + self.ls_tcl_client._configure_tc_type = mock.Mock() + self.ls_tcl_client._configure_preresolved_arp = mock.Mock() + self.ls_tcl_client.resolve_test_server_name = mock.Mock( + return_value='2') + self.ls_tcl_client._configure_ts_group(_ts_group, 0) + self.mock_tcl_handler.execute.assert_called_once_with( + 
'set tss_ [ls::create TsGroup -under $test_ -tsId 2 ]') + + def test___configure_ts_group_resolve_ts_fail(self, *args): + _ts_group = copy.deepcopy(SESSION_PROFILE['tsGroups'][0]) + self.ls_tcl_client._configure_tc_type = mock.Mock() + self.ls_tcl_client._configure_preresolved_arp = mock.Mock() + self.ls_tcl_client.resolve_test_server_name = mock.Mock( + return_value='TS Not Found') + self.assertRaises(RuntimeError, self.ls_tcl_client._configure_ts_group, + _ts_group, 0) + self.mock_tcl_handler.execute.assert_not_called() + + def test__configure_tc_type(self): + _tc = copy.deepcopy(SESSION_PROFILE['tsGroups'][0]['testCases'][0]) + self.mock_tcl_handler.execute.return_value = TCL_SUCCESS_RESPONSE + self.ls_tcl_client._configure_parameters = mock.Mock() + self.ls_tcl_client._configure_tc_type(_tc, 0) + self.assertEqual(7, self.mock_tcl_handler.execute.call_count) + + def test__configure_tc_type_optional_param_omitted(self): + _tc = copy.deepcopy(SESSION_PROFILE['tsGroups'][0]['testCases'][0]) + del _tc['linked'] + self.mock_tcl_handler.execute.return_value = TCL_SUCCESS_RESPONSE + self.ls_tcl_client._configure_parameters = mock.Mock() + self.ls_tcl_client._configure_tc_type(_tc, 0) + self.assertEqual(6, self.mock_tcl_handler.execute.call_count) + + def test__configure_tc_type_wrong_type(self): + _tc = copy.deepcopy(SESSION_PROFILE['tsGroups'][0]['testCases'][0]) + _tc['type'] = 'not_supported' + self.ls_tcl_client._configure_parameters = mock.Mock() + self.assertRaises(RuntimeError, + self.ls_tcl_client._configure_tc_type, + _tc, 0) + self.mock_tcl_handler.assert_not_called() + + def test__configure_tc_type_not_found_basic_lib(self): + _tc = copy.deepcopy(SESSION_PROFILE['tsGroups'][0]['testCases'][0]) + self.ls_tcl_client._configure_parameters = mock.Mock() + self.mock_tcl_handler.execute.return_value = 'Invalid' + self.assertRaises(RuntimeError, + self.ls_tcl_client._configure_tc_type, + _tc, 0) + + def test__configure_parameters(self): + _params = copy.deepcopy( + 
SESSION_PROFILE['tsGroups'][0]['testCases'][0]['parameters']) + self.ls_tcl_client._configure_parameters(_params) + self.assertEqual(16, self.mock_tcl_handler.execute.call_count) + + def test__configure_array_param(self): + _array = {"class": "Array", + "array": ["0"]} + self.ls_tcl_client._configure_array_param('name', _array) + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::create -Array-name -under $p_ ;'), + mock.call('ls::create ArrayItem -under $p_.name -Value "0"'), + ]) + + def test__configure_test_node_param(self): + _params = copy.deepcopy( + SESSION_PROFILE['tsGroups'][0]['testCases'][0]['parameters']) + self.ls_tcl_client._configure_test_node_param('SgwUserAddr', + _params['SgwUserAddr']) + cmd = ('ls::create -TestNode-SgwUserAddr -under $p_ -Type "eth" ' + '-Phy "eth1" -Ip "SGW_USER_IP" -NumLinksOrNodes 1 ' + '-NextHop "SGW_CONTROL_NEXT_HOP" -Mac "" -MTU 1500 ' + '-ForcedEthInterface "" -EthStatsEnabled false -VlanId 0 ' + '-VlanUserPriority 0 -NumVlan 1 -UniqueVlanAddr false;') + self.mock_tcl_handler.execute.assert_called_once_with(cmd) + + def test__configure_sut_param(self): + _params = {'name': 'name'} + self.ls_tcl_client._configure_sut_param('name', _params) + self.mock_tcl_handler.execute.assert_called_once_with( + 'ls::create -Sut-name -under $p_ -Name "name";') + + def test__configure_dmf_param(self): + _params = {"mainflows": [{"library": '111', + "name": "Basic UDP"}], + "instanceGroups": [{ + "mainflowIdx": 0, + "mixType": "", + "rate": 0.0, + "rows": [{ + "clientPort": 0, + "context": 0, + "node": 0, + "overridePort": "false", + "ratingGroup": 0, + "role": 0, + "serviceId": 0, + "transport": "Any"}] + }]} + self.ls_tcl_client._get_library_id = mock.Mock(return_value='111') + res = self.ls_tcl_client._configure_dmf_param('name', _params) + self.assertEqual(5, self.mock_tcl_handler.execute.call_count) + self.assertIsNone(res) + 
self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::create -Dmf-name -under $p_ ;'), + mock.call('ls::perform AddDmfMainflow $p_.Dmf 111 "Basic UDP"'), + mock.call('ls::config $p_.Dmf.InstanceGroup(0) -mixType '), + mock.call('ls::config $p_.Dmf.InstanceGroup(0) -rate 0.0'), + mock.call('ls::config $p_.Dmf.InstanceGroup(0).Row(0) -Node 0 ' + '-OverridePort false -ClientPort 0 -Context 0 -Role 0 ' + '-PreferredTransport Any -RatingGroup 0 ' + '-ServiceID 0'), + ]) + + def test__configure_dmf_param_no_instance_groups(self): + _params = {"mainflows": [{"library": '111', + "name": "Basic UDP"}]} + self.ls_tcl_client._get_library_id = mock.Mock(return_value='111') + res = self.ls_tcl_client._configure_dmf_param('name', _params) + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.assertIsNone(res) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::create -Dmf-name -under $p_ ;'), + mock.call('ls::perform AddDmfMainflow $p_.Dmf 111 "Basic UDP"'), + ]) + + def test__configure_reservation(self): + _reservation = copy.deepcopy(RESERVATIONS[0]) + self.ls_tcl_client.resolve_test_server_name = mock.Mock( + return_value='4') + res = self.ls_tcl_client._configure_reservation(_reservation) + self.assertIsNone(res) + self.assertEqual(4, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('set reservation_ [ls::create Reservation -under $test_]'), + mock.call('ls::config $reservation_ -TsIndex 0 -TsId 4 ' + \ + '-TsName "TestServer_1"'), + mock.call('set physubnet_ [ls::create PhySubnet -under $reservation_]'), + mock.call('ls::config $physubnet_ -Name "eth1" ' + \ + '-Base "10.42.32.100" -Mask "/24" -NumIps 20'), + ]) + + def test__configure_preresolved_arp(self): + _arp = [{'StartingAddress': '10.81.1.10', + 'NumNodes': 1}] + res = self.ls_tcl_client._configure_preresolved_arp(_arp) + self.mock_tcl_handler.execute.assert_called_once() + self.assertIsNone(res) + 
self.mock_tcl_handler.execute.assert_called_once_with( + 'ls::create PreResolvedArpAddress -under $tss_ ' + \ + '-StartingAddress "10.81.1.10" -NumNodes 1') + + def test__configure_preresolved_arp_none(self): + res = self.ls_tcl_client._configure_preresolved_arp(None) + self.assertIsNone(res) + self.mock_tcl_handler.execute.assert_not_called() + + def test_delete_test_session(self): + self.assertRaises(NotImplementedError, + self.ls_tcl_client.delete_test_session, {}) + + def test__save_test_session(self): + self.mock_tcl_handler.execute.side_effect = [TCL_SUCCESS_RESPONSE, + TCL_SUCCESS_RESPONSE] + res = self.ls_tcl_client._save_test_session() + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.assertIsNone(res) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::perform Validate -TestSession $test_'), + mock.call('ls::save $test_ -overwrite'), + ]) + + def test__save_test_session_invalid(self): + self.mock_tcl_handler.execute.side_effect = ['Invalid', 'Errors'] + self.assertRaises(exceptions.LandslideTclException, + self.ls_tcl_client._save_test_session) + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call('ls::perform Validate -TestSession $test_'), + mock.call('ls::get $test_ -ErrorsAndWarnings'), + ]) + + def test__get_library_id_system_lib(self): + self.mock_tcl_handler.execute.return_value = '111' + res = self.ls_tcl_client._get_library_id('name') + self.mock_tcl_handler.execute.assert_called_once() + self.assertEqual('111', res) + self.mock_tcl_handler.execute.assert_called_with( + 'ls::get [ls::query LibraryInfo -systemLibraryName name] -Id') + + def test__get_library_id_user_lib(self): + self.mock_tcl_handler.execute.side_effect = ['Not found', '222'] + res = self.ls_tcl_client._get_library_id('name') + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.assertEqual('222', res) + self.mock_tcl_handler.execute.assert_has_calls([ + 
mock.call( + 'ls::get [ls::query LibraryInfo -systemLibraryName name] -Id'), + mock.call( + 'ls::get [ls::query LibraryInfo -userLibraryName name] -Id'), + ]) + + def test__get_library_id_exception(self): + self.mock_tcl_handler.execute.side_effect = ['Not found', 'Not found'] + self.assertRaises(exceptions.LandslideTclException, + self.ls_tcl_client._get_library_id, + 'name') + self.assertEqual(2, self.mock_tcl_handler.execute.call_count) + self.mock_tcl_handler.execute.assert_has_calls([ + mock.call( + 'ls::get [ls::query LibraryInfo -systemLibraryName name] -Id'), + mock.call( + 'ls::get [ls::query LibraryInfo -userLibraryName name] -Id'), + ]) + + +class TestLsTclHandler(unittest.TestCase): + + def setUp(self): + self.mock_lsapi = mock.patch.object(tg_landslide, 'LsApi') + self.mock_lsapi.start() + + self.addCleanup(self._cleanup) + + def _cleanup(self): + self.mock_lsapi.stop() + + def test___init__(self, *args): + self.ls_tcl_handler = tg_landslide.LsTclHandler() + self.assertEqual({}, self.ls_tcl_handler.tcl_cmds) + self.ls_tcl_handler._ls.tcl.assert_called_once() + + def test_execute(self, *args): + self.ls_tcl_handler = tg_landslide.LsTclHandler() + self.ls_tcl_handler.execute('command') + self.assertIn('command', self.ls_tcl_handler.tcl_cmds) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py index 434a7b770..a3e4384cf 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -226,7 +226,7 @@ class TestPingTrafficGen(unittest.TestCase): @mock.patch("yardstick.ssh.SSH") def test___init__(self, ssh): ssh.from_node.return_value.execute.return_value = 0, "success", "" - ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id') + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) self.assertIsInstance(ping_traffic_gen.setup_helper, PingSetupEnvHelper) self.assertIsInstance(ping_traffic_gen.resource_helper, PingResourceHelper) @@ -243,7 +243,7 @@ class TestPingTrafficGen(unittest.TestCase): (0, 'if_name_2', ''), ] ssh.from_node.return_value.execute.side_effect = iter(execute_result_data) - ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id') + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) ext_ifs = ping_traffic_gen.vnfd_helper.interfaces self.assertNotEqual(ext_ifs[0]['virtual-interface']['local_iface_name'], 'if_name_1') self.assertNotEqual(ext_ifs[1]['virtual-interface']['local_iface_name'], 'if_name_2') @@ -253,7 +253,7 @@ class TestPingTrafficGen(unittest.TestCase): def test_collect_kpi(self, ssh, *args): mock_ssh(ssh, exec_result=(0, "success", "")) - ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id') + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) ping_traffic_gen.scenario_helper.scenario_cfg = { 'nodes': {ping_traffic_gen.name: "mock"} } @@ -271,7 +271,7 @@ class TestPingTrafficGen(unittest.TestCase): @mock.patch(SSH_HELPER) def test_instantiate(self, ssh): mock_ssh(ssh, spec=VnfSshHelper, exec_result=(0, "success", "")) - ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id') + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) ping_traffic_gen.setup_helper.ssh_helper = mock.MagicMock( **{"execute.return_value": (0, "xe0_fake", "")}) self.assertIsInstance(ping_traffic_gen.ssh_helper, mock.Mock) @@ -286,7 +286,7 @@ class TestPingTrafficGen(unittest.TestCase): self.assertIsNotNone(ping_traffic_gen._result) def test_listen_traffic(self): - ping_traffic_gen = 
PingTrafficGen('vnf1', self.VNFD_0, 'task_id') + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) self.assertIsNone(ping_traffic_gen.listen_traffic({})) @mock.patch("yardstick.ssh.SSH") @@ -294,5 +294,5 @@ class TestPingTrafficGen(unittest.TestCase): ssh.from_node.return_value.execute.return_value = 0, "success", "" ssh.from_node.return_value.run.return_value = 0, "success", "" - ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id') + ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0) self.assertIsNone(ping_traffic_gen.terminate()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py index d341b970b..1ecb6ffc9 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import uuid - import mock from yardstick.common import constants @@ -33,23 +31,12 @@ class PktgenTrafficGenTestCase(ut_base.BaseUnitTestCase): 'benchmark': {'kpi': 'fake_kpi'} } - def setUp(self): - self._id = uuid.uuid1().int - self._mock_vnf_consumer = mock.patch.object(vnf_base, - 'GenericVNFConsumer') - self.mock_vnf_consumer = self._mock_vnf_consumer.start() - self.addCleanup(self._stop_mock) - - def _stop_mock(self): - self._mock_vnf_consumer.stop() - def test__init(self): - tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id) - self.assertTrue(isinstance(tg, (vnf_base.GenericTrafficGen, - vnf_base.GenericVNFEndpoint))) + tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD) + self.assertTrue(isinstance(tg, vnf_base.GenericTrafficGen)) def test_run_traffic(self): - tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id) + tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD) mock_tp = mock.Mock() with mock.patch.object(tg, '_is_running', return_value=True): tg.run_traffic(mock_tp) @@ -57,23 +44,23 @@ class PktgenTrafficGenTestCase(ut_base.BaseUnitTestCase): mock_tp.init.assert_called_once_with(tg._node_ip, tg._lua_node_port) def test__get_lua_node_port(self): - tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id) + tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD) service_ports = [{'port': constants.LUA_PORT, 'node_port': '12345'}] self.assertEqual(12345, tg._get_lua_node_port(service_ports)) def test__get_lua_node_port_no_lua_port(self): - tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id) + tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD) service_ports = [{'port': '333'}] self.assertIsNone(tg._get_lua_node_port(service_ports)) def test__is_running(self): - tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id) + tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD) with mock.patch.object(tg, '_traffic_profile'): self.assertTrue(tg._is_running()) def test__is_running_exception(self): - tg = 
tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id) + tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD) with mock.patch.object(tg, '_traffic_profile') as mock_tp: mock_tp.help.side_effect = exceptions.PktgenActionError() self.assertFalse(tg._is_running()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py index a7e61da0f..0aaf17790 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -314,28 +314,41 @@ class TestProxTrafficGen(unittest.TestCase): @mock.patch(SSH_HELPER) def test___init__(self, ssh, *args): mock_ssh(ssh) - prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id') + prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0) self.assertIsNone(prox_traffic_gen._tg_process) self.assertIsNone(prox_traffic_gen._traffic_process) - self.assertIsNone(prox_traffic_gen._mq_producer) @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') @mock.patch(SSH_HELPER) def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) - prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id') + prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0) prox_traffic_gen.scenario_helper.scenario_cfg = { 'nodes': {prox_traffic_gen.name: "mock"} } prox_traffic_gen._vnf_wrapper.resource_helper.resource = mock.MagicMock( **{"self.check_if_system_agent_running.return_value": [False]}) + + vnfd_helper = mock.MagicMock() + vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)] + prox_traffic_gen.resource_helper.vnfd_helper = vnfd_helper + + prox_traffic_gen._vnf_wrapper.resource_helper.client = 
mock.MagicMock() + prox_traffic_gen._vnf_wrapper.resource_helper.client.multi_port_stats.return_value = \ + [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]] + prox_traffic_gen._vnf_wrapper.resource_helper.client.multi_port_stats_diff.return_value = \ + [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7] + prox_traffic_gen._vnf_wrapper.resource_helper.client.\ + multi_port_stats_tuple.return_value = \ + {"xe0": {"in_packets": 1, "out_packets": 2}} + prox_traffic_gen._vnf_wrapper.vnf_execute = mock.Mock(return_value="") expected = { - 'collect_stats': {}, + 'collect_stats': {'live_stats': {'xe0': {'in_packets': 1, 'out_packets': 2}}}, 'physical_node': 'mock_node' } - self.assertEqual(prox_traffic_gen.collect_kpi(), expected) - + result = prox_traffic_gen.collect_kpi() + self.assertDictEqual(result, expected) @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.find_relative_file') @mock.patch( @@ -351,7 +364,7 @@ class TestProxTrafficGen(unittest.TestCase): mock_traffic_profile.params = self.TRAFFIC_PROFILE vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - prox_traffic_gen = ProxTrafficGen(NAME, vnfd, 'task_id') + prox_traffic_gen = ProxTrafficGen(NAME, vnfd) ssh_helper = mock.MagicMock( **{"execute.return_value": (0, "", ""), "bin_path": ""}) prox_traffic_gen.ssh_helper = ssh_helper @@ -393,22 +406,21 @@ class TestProxTrafficGen(unittest.TestCase): mock_traffic_profile.params = self.TRAFFIC_PROFILE vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sut = ProxTrafficGen(NAME, vnfd, 'task_id') + sut = ProxTrafficGen(NAME, vnfd) sut._get_socket = mock.MagicMock() sut.ssh_helper = mock.Mock() sut.ssh_helper.run = mock.Mock() sut.setup_helper.prox_config_dict = {} sut._connect_client = mock.Mock(autospec=mock.Mock()) sut._connect_client.get_stats = mock.Mock(return_value="0") - sut._setup_mq_producer = mock.Mock(return_value='mq_producer') - sut._traffic_runner(mock_traffic_profile, mock.ANY) + sut._traffic_runner(mock_traffic_profile) 
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket') @mock.patch(SSH_HELPER) def test_listen_traffic(self, ssh, *args): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - prox_traffic_gen = ProxTrafficGen(NAME, vnfd, 'task_id') + prox_traffic_gen = ProxTrafficGen(NAME, vnfd) self.assertIsNone(prox_traffic_gen.listen_traffic(mock.Mock())) @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket') @@ -416,7 +428,7 @@ class TestProxTrafficGen(unittest.TestCase): def test_terminate(self, ssh, *args): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - prox_traffic_gen = ProxTrafficGen(NAME, vnfd, 'task_id') + prox_traffic_gen = ProxTrafficGen(NAME, vnfd) prox_traffic_gen._terminated = mock.MagicMock() prox_traffic_gen._traffic_process = mock.MagicMock() prox_traffic_gen._traffic_process.terminate = mock.Mock() diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py index ec0e6aa6d..c3f3e5f67 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,13 +17,18 @@ import os import mock import six import unittest +import ipaddress +import time +from collections import OrderedDict from yardstick.common import utils +from yardstick.common import exceptions from yardstick.benchmark import contexts from yardstick.benchmark.contexts import base as ctx_base from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api from yardstick.network_services.traffic_profile import base as tp_base from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_ixia +from yardstick.network_services.traffic_profile import ixia_rfc2544 TEST_FILE_YAML = 'nsb_test_case.yaml' @@ -49,12 +54,43 @@ class TestIxiaResourceHelper(unittest.TestCase): mock.Mock(), MyRfcHelper) self.assertIsInstance(ixia_resource_helper.rfc_helper, MyRfcHelper) + def test__init_ix_scenario(self): + mock_scenario = mock.Mock() + mock_scenario_helper = mock.Mock() + mock_scenario_helper.scenario_cfg = {'ixia_config': 'TestScenario', + 'options': 'scenario_options'} + mock_setup_helper = mock.Mock(scenario_helper=mock_scenario_helper) + ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock_setup_helper) + ixia_resource_helper._ixia_scenarios = {'TestScenario': mock_scenario} + ixia_resource_helper.client = 'client' + ixia_resource_helper.context_cfg = 'context' + ixia_resource_helper._init_ix_scenario() + mock_scenario.assert_called_once_with('client', 'context', 'scenario_options') + + def test__init_ix_scenario_not_supported_cfg_type(self): + mock_scenario_helper = mock.Mock() + mock_scenario_helper.scenario_cfg = {'ixia_config': 'FakeScenario', + 'options': 'scenario_options'} + mock_setup_helper = mock.Mock(scenario_helper=mock_scenario_helper) + ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock_setup_helper) + ixia_resource_helper._ixia_scenarios = {'TestScenario': mock.Mock()} + with self.assertRaises(RuntimeError): + ixia_resource_helper._init_ix_scenario() + + @mock.patch.object(tg_rfc2544_ixia.IxiaResourceHelper, 
'_init_ix_scenario') + def test_setup(self, mock__init_ix_scenario): + ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock()) + ixia_resource_helper.setup() + mock__init_ix_scenario.assert_called_once() + def test_stop_collect_with_client(self): mock_client = mock.Mock() ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock()) ixia_resource_helper.client = mock_client + ixia_resource_helper._ix_scenario = mock.Mock() ixia_resource_helper.stop_collect() self.assertEqual(1, ixia_resource_helper._terminated.value) + ixia_resource_helper._ix_scenario.stop_protocols.assert_called_once() def test_run_traffic(self): mock_tprofile = mock.Mock() @@ -63,6 +99,7 @@ class TestIxiaResourceHelper(unittest.TestCase): ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock()) ixia_rhelper.rfc_helper = mock.Mock() ixia_rhelper.vnfd_helper = mock.Mock() + ixia_rhelper._ix_scenario = mock.Mock() ixia_rhelper.vnfd_helper.port_pairs.all_ports = [] with mock.patch.object(ixia_rhelper, 'generate_samples'), \ mock.patch.object(ixia_rhelper, '_build_ports'), \ @@ -71,6 +108,32 @@ class TestIxiaResourceHelper(unittest.TestCase): ixia_rhelper.run_traffic(mock_tprofile) self.assertEqual('fake_samples', ixia_rhelper._queue.get()) + mock_tprofile.update_traffic_profile.assert_called_once() + + def test_run_test(self): + expected_result = {'test': 'fake_samples', 'Iteration': 1} + mock_tprofile = mock.Mock() + mock_tprofile.config.duration = 10 + mock_tprofile.get_drop_percentage.return_value = \ + True, {'test': 'fake_samples', 'Iteration': 1} + ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock()) + tasks_queue = mock.Mock() + tasks_queue.get.return_value = 'RUN_TRAFFIC' + results_queue = mock.Mock() + ixia_rhelper.rfc_helper = mock.Mock() + ixia_rhelper.vnfd_helper = mock.Mock() + ixia_rhelper._ix_scenario = mock.Mock() + ixia_rhelper.vnfd_helper.port_pairs.all_ports = [] + with mock.patch.object(ixia_rhelper, 'generate_samples'), \ + 
mock.patch.object(ixia_rhelper, '_build_ports'), \ + mock.patch.object(ixia_rhelper, '_initialize_client'), \ + mock.patch.object(utils, 'wait_until_true'): + ixia_rhelper.run_test(mock_tprofile, tasks_queue, results_queue) + + self.assertEqual(expected_result, ixia_rhelper._queue.get()) + mock_tprofile.update_traffic_profile.assert_called_once() + tasks_queue.task_done.assert_called_once() + results_queue.put.assert_called_once_with('COMPLETE') @mock.patch.object(tg_rfc2544_ixia, 'ixnet_api') @@ -180,7 +243,7 @@ class TestIXIATrafficGen(unittest.TestCase): ssh.from_node.return_value = ssh_mock vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] # NOTE(ralonsoh): check the object returned. - tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd, 'task_id') + tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd) def test_listen_traffic(self, *args): with mock.patch("yardstick.ssh.SSH") as ssh: @@ -189,8 +252,7 @@ class TestIXIATrafficGen(unittest.TestCase): mock.Mock(return_value=(0, "", "")) ssh.from_node.return_value = ssh_mock vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd, - 'task_id') + ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd) self.assertIsNone(ixnet_traffic_gen.listen_traffic({})) @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') @@ -203,10 +265,9 @@ class TestIXIATrafficGen(unittest.TestCase): mock.Mock(return_value=(0, "", "")) ssh.from_node.return_value = ssh_mock vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd, - 'task_id') - scenario_cfg = {'tc': "nsb_test_case", "topology": "", - 'ixia_profile': "ixload.cfg"} + ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd) + scenario_cfg = {'tc': "nsb_test_case", + "topology": ""} scenario_cfg.update( { 'options': { @@ -241,8 +302,7 @@ class TestIXIATrafficGen(unittest.TestCase): ssh.from_node.return_value = ssh_mock vnfd = 
self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd, - 'task_id') + ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd) ixnet_traffic_gen.scenario_helper.scenario_cfg = { 'nodes': {ixnet_traffic_gen.name: "mock"} } @@ -261,8 +321,8 @@ class TestIXIATrafficGen(unittest.TestCase): ssh_mock.execute = \ mock.Mock(return_value=(0, "", "")) ssh.from_node.return_value = ssh_mock - ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd, - 'task_id') + ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen( + NAME, vnfd, resource_helper_type=mock.Mock()) ixnet_traffic_gen._terminated = mock.MagicMock() ixnet_traffic_gen._terminated.value = 0 ixnet_traffic_gen._ixia_traffic_gen = mock.MagicMock() @@ -278,7 +338,7 @@ class TestIXIATrafficGen(unittest.TestCase): def test__check_status(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd, 'task_id') + sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd) sut._check_status() @mock.patch("yardstick.ssh.SSH") @@ -344,7 +404,7 @@ class TestIXIATrafficGen(unittest.TestCase): mock_traffic_profile.get_drop_percentage.return_value = [ 'Completed', samples] - sut = tg_rfc2544_ixia.IxiaTrafficGen(name, vnfd, 'task_id') + sut = tg_rfc2544_ixia.IxiaTrafficGen(name, vnfd) sut.vnf_port_pairs = [[[0], [1]]] sut.tc_file_name = self._get_file_abspath(TEST_FILE_YAML) sut.topology = "" @@ -360,6 +420,7 @@ class TestIXIATrafficGen(unittest.TestCase): sut.resource_helper.client_started = mock.MagicMock() sut.resource_helper.client_started.value = 1 sut.resource_helper.rfc_helper.iteration.value = 11 + sut.resource_helper._ix_scenario = mock.Mock() sut.scenario_helper.scenario_cfg = { 'options': { @@ -379,7 +440,6 @@ class TestIXIATrafficGen(unittest.TestCase): }, }, }, - 'ixia_profile': '/path/to/profile', 'task_path': '/path/to/task' } @@ -388,8 +448,818 @@ class TestIXIATrafficGen(unittest.TestCase): mock.mock_open(), 
create=True) @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.LOG.exception') def _traffic_runner(*args): - sut._setup_mq_producer = mock.Mock(return_value='mq_producer') - result = sut._traffic_runner(mock_traffic_profile, mock.ANY) + result = sut._traffic_runner(mock_traffic_profile) self.assertIsNone(result) _traffic_runner() + + def test_run_traffic_once(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd) + sut._init_traffic_process = mock.Mock() + sut._tasks_queue.put = mock.Mock() + sut.resource_helper.client_started.value = 0 + sut.run_traffic_once(self.TRAFFIC_PROFILE) + sut._tasks_queue.put.assert_called_once_with("RUN_TRAFFIC") + sut._init_traffic_process.assert_called_once_with(self.TRAFFIC_PROFILE) + + def test__test_runner(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd) + tasks = 'tasks' + results = 'results' + sut.resource_helper = mock.Mock() + sut._test_runner(self.TRAFFIC_PROFILE, tasks, results) + sut.resource_helper.run_test.assert_called_once_with(self.TRAFFIC_PROFILE, + tasks, results) + + @mock.patch.object(time, 'sleep', return_value=0) + def test__init_traffic_process(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd) + sut._test_runner = mock.Mock(return_value=0) + sut.resource_helper = mock.Mock() + sut.resource_helper.client_started.value = 0 + sut._init_traffic_process(self.TRAFFIC_PROFILE) + + def test_wait_on_traffic(self, *args): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd) + sut._tasks_queue.join = mock.Mock(return_value=0) + sut._result_queue.get = mock.Mock(return_value='COMPLETE') + result = sut.wait_on_traffic() + sut._tasks_queue.join.assert_called_once() + sut._result_queue.get.assert_called_once() + self.assertEqual(result, 'COMPLETE') + + +class 
TestIxiaBasicScenario(unittest.TestCase): + + STATS = {'stat_name': ['Card01/Port01', + 'Card02/Port02'], + 'port_name': ['Ethernet - 001', 'Ethernet - 002'], + 'Frames_Tx': ['150', '150'], + 'Valid_Frames_Rx': ['150', '150'], + 'Frames_Tx_Rate': ['0.0', '0.0'], + 'Valid_Frames_Rx_Rate': ['0.0', '0.0'], + 'Bytes_Rx': ['9600', '9600'], + 'Bytes_Tx': ['9600', '9600'], + 'Tx_Rate_Kbps': ['0.0', '0.0'], + 'Rx_Rate_Mbps': ['0.0', '0.0'], + 'Tx_Rate_Mbps': ['0.0', '0.0'], + 'Rx_Rate_Kbps': ['0.0', '0.0'], + 'Store-Forward_Max_latency_ns': ['100', '200'], + 'Store-Forward_Min_latency_ns': ['100', '200'], + 'Store-Forward_Avg_latency_ns': ['100', '200']} + + def setUp(self): + self._mock_IxNextgen = mock.patch.object(ixnet_api, 'IxNextgen') + self.mock_IxNextgen = self._mock_IxNextgen.start() + self.context_cfg = mock.Mock() + self.ixia_cfg = mock.Mock() + self.scenario = tg_rfc2544_ixia.IxiaBasicScenario(self.mock_IxNextgen, + self.context_cfg, + self.ixia_cfg) + self.addCleanup(self._stop_mocks) + + def _stop_mocks(self): + self._mock_IxNextgen.stop() + + def test___init___(self): + self.assertIsInstance(self.scenario, tg_rfc2544_ixia.IxiaBasicScenario) + self.assertEqual(self.scenario.client, self.mock_IxNextgen) + + def test_create_traffic_model(self): + self.mock_IxNextgen.get_vports.return_value = [1, 2, 3, 4] + yaml_data = {'traffic_profile': {} + } + traffic_profile = ixia_rfc2544.IXIARFC2544Profile(yaml_data) + self.scenario.create_traffic_model(traffic_profile) + self.scenario.client.get_vports.assert_called_once() + self.scenario.client.create_traffic_model.assert_called_once_with( + [1, 3], [2, 4], traffic_profile) + + def test_apply_config(self): + self.assertIsNone(self.scenario.apply_config()) + + def test_run_protocols(self): + self.assertIsNone(self.scenario.run_protocols()) + + def test_stop_protocols(self): + self.assertIsNone(self.scenario.stop_protocols()) + + def test__get_stats(self): + self.scenario._get_stats() + 
self.scenario.client.get_statistics.assert_called_once() + + @mock.patch.object(tg_rfc2544_ixia.IxiaBasicScenario, '_get_stats') + def test_generate_samples(self, mock_get_stats): + + expected_samples = {'xe0': { + 'InPackets': 150, + 'OutPackets': 150, + 'InBytes': 9600, + 'OutBytes': 9600, + 'RxThroughput': 5.0, + 'TxThroughput': 5.0, + 'RxThroughputBps': 320.0, + 'TxThroughputBps': 320.0, + 'LatencyMax': 100, + 'LatencyMin': 100, + 'LatencyAvg': 100}, + 'xe1': { + 'InPackets': 150, + 'OutPackets': 150, + 'InBytes': 9600, + 'OutBytes': 9600, + 'RxThroughput': 5.0, + 'TxThroughput': 5.0, + 'RxThroughputBps': 320.0, + 'TxThroughputBps': 320.0, + 'LatencyMax': 200, + 'LatencyMin': 200, + 'LatencyAvg': 200}} + + res_helper = mock.Mock() + res_helper.vnfd_helper.find_interface_by_port.side_effect = \ + [{'name': 'xe0'}, {'name': 'xe1'}] + ports = [0, 1] + duration = 30 + mock_get_stats.return_value = self.STATS + samples = self.scenario.generate_samples(res_helper, ports, duration) + mock_get_stats.assert_called_once() + self.assertEqual(samples, expected_samples) + + +class TestIxiaL3Scenario(TestIxiaBasicScenario): + IXIA_CFG = { + 'flow': { + 'src_ip': ['192.168.0.1-192.168.0.50'], + 'dst_ip': ['192.168.1.1-192.168.1.150'] + } + } + + CONTEXT_CFG = { + 'nodes': { + 'tg__0': { + 'role': 'IxNet', + 'interfaces': { + 'xe0': { + 'vld_id': 'uplink_0', + 'local_ip': '10.1.1.1', + 'local_mac': 'aa:bb:cc:dd:ee:ff', + 'ifname': 'xe0' + }, + 'xe1': { + 'vld_id': 'downlink_0', + 'local_ip': '20.2.2.2', + 'local_mac': 'bb:bb:cc:dd:ee:ee', + 'ifname': 'xe1' + } + }, + 'routing_table': [{ + 'network': "152.16.100.20", + 'netmask': '255.255.0.0', + 'gateway': '152.16.100.21', + 'if': 'xe0' + }] + } + } + } + + def setUp(self): + super(TestIxiaL3Scenario, self).setUp() + self.ixia_cfg = self.IXIA_CFG + self.context_cfg = self.CONTEXT_CFG + self.scenario = tg_rfc2544_ixia.IxiaL3Scenario(self.mock_IxNextgen, + self.context_cfg, + self.ixia_cfg) + + def test___init___(self): + 
self.assertIsInstance(self.scenario, tg_rfc2544_ixia.IxiaL3Scenario) + self.assertEqual(self.scenario.client, self.mock_IxNextgen) + + def test_create_traffic_model(self): + self.mock_IxNextgen.get_vports.return_value = ['1', '2'] + traffic_profile = 'fake_profile' + self.scenario.create_traffic_model(traffic_profile) + self.scenario.client.get_vports.assert_called_once() + self.scenario.client.create_ipv4_traffic_model.\ + assert_called_once_with(['1/protocols/static'], + ['2/protocols/static'], + 'fake_profile') + + def test_apply_config(self): + self.scenario._add_interfaces = mock.Mock() + self.scenario._add_static_ips = mock.Mock() + self.assertIsNone(self.scenario.apply_config()) + + def test__add_static(self): + self.mock_IxNextgen.get_vports.return_value = ['1', '2'] + self.mock_IxNextgen.get_static_interface.side_effect = ['intf1', + 'intf2'] + + self.scenario._add_static_ips() + + self.mock_IxNextgen.get_static_interface.assert_any_call('1') + self.mock_IxNextgen.get_static_interface.assert_any_call('2') + + self.scenario.client.add_static_ipv4.assert_any_call( + 'intf1', '1', '192.168.0.1', 49, '32') + self.scenario.client.add_static_ipv4.assert_any_call( + 'intf2', '2', '192.168.1.1', 149, '32') + + def test__add_interfaces(self): + self.mock_IxNextgen.get_vports.return_value = ['1', '2'] + + self.scenario._add_interfaces() + + self.mock_IxNextgen.add_interface.assert_any_call('1', + '10.1.1.1', + 'aa:bb:cc:dd:ee:ff', + '152.16.100.21') + self.mock_IxNextgen.add_interface.assert_any_call('2', + '20.2.2.2', + 'bb:bb:cc:dd:ee:ee', + None) + + +class TestIxiaPppoeClientScenario(unittest.TestCase): + + IXIA_CFG = { + 'pppoe_client': { + 'sessions_per_port': 4, + 'sessions_per_svlan': 1, + 's_vlan': 10, + 'c_vlan': 20, + 'ip': ['10.3.3.1', '10.4.4.1'] + }, + 'ipv4_client': { + 'sessions_per_port': 1, + 'sessions_per_vlan': 1, + 'vlan': 101, + 'gateway_ip': ['10.1.1.1', '10.2.2.1'], + 'ip': ['10.1.1.1', '10.2.2.1'], + 'prefix': ['24', '24'] + }, + 'priority': 
{ + 'tos': {'precedence': [0, 4]} + } + } + + CONTEXT_CFG = { + 'nodes': {'tg__0': { + 'interfaces': {'xe0': { + 'local_ip': '10.1.1.1', + 'netmask': '255.255.255.0' + }}}}} + + def setUp(self): + self._mock_IxNextgen = mock.patch.object(ixnet_api, 'IxNextgen') + self.mock_IxNextgen = self._mock_IxNextgen.start() + self.scenario = tg_rfc2544_ixia.IxiaPppoeClientScenario( + self.mock_IxNextgen, self.CONTEXT_CFG, self.IXIA_CFG) + tg_rfc2544_ixia.WAIT_PROTOCOLS_STARTED = 2 + self.addCleanup(self._stop_mocks) + + def _stop_mocks(self): + self._mock_IxNextgen.stop() + + def test___init___(self): + self.assertIsInstance(self.scenario, tg_rfc2544_ixia.IxiaPppoeClientScenario) + self.assertEqual(self.scenario.client, self.mock_IxNextgen) + + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_fill_ixia_config') + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_apply_access_network_config') + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_apply_core_network_config') + def test_apply_config(self, mock_apply_core_net_cfg, + mock_apply_access_net_cfg, + mock_fill_ixia_config): + self.mock_IxNextgen.get_vports.return_value = [1, 2, 3, 4] + self.scenario.apply_config() + self.scenario.client.get_vports.assert_called_once() + self.assertEqual(self.scenario._uplink_vports, [1, 3]) + self.assertEqual(self.scenario._downlink_vports, [2, 4]) + mock_fill_ixia_config.assert_called_once() + mock_apply_core_net_cfg.assert_called_once() + mock_apply_access_net_cfg.assert_called_once() + + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_get_endpoints_src_dst_id_pairs') + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_get_endpoints_src_dst_obj_pairs') + def test_create_traffic_model(self, mock_obj_pairs, mock_id_pairs): + uplink_endpoints = ['group1', 'group2'] + downlink_endpoints = ['group3', 'group3'] + mock_id_pairs.return_value = ['xe0', 'xe1', 'xe0', 'xe1'] + mock_obj_pairs.return_value = ['group1', 
'group3', 'group2', 'group3'] + mock_tp = mock.Mock() + mock_tp.full_profile = {'uplink_0': 'data', + 'downlink_0': 'data', + 'uplink_1': 'data', + 'downlink_1': 'data' + } + self.scenario.create_traffic_model(mock_tp) + mock_id_pairs.assert_called_once_with(mock_tp.full_profile) + mock_obj_pairs.assert_called_once_with(['xe0', 'xe1', 'xe0', 'xe1']) + self.scenario.client.create_ipv4_traffic_model.assert_called_once_with( + uplink_endpoints, downlink_endpoints, mock_tp) + + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_get_endpoints_src_dst_id_pairs') + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + '_get_endpoints_src_dst_obj_pairs') + def test_create_traffic_model_topology_based_flows(self, mock_obj_pairs, + mock_id_pairs): + uplink_topologies = ['topology1', 'topology3'] + downlink_topologies = ['topology2', 'topology4'] + mock_id_pairs.return_value = [] + mock_obj_pairs.return_value = [] + mock_tp = mock.Mock() + mock_tp.full_profile = {'uplink_0': 'data', + 'downlink_0': 'data', + 'uplink_1': 'data', + 'downlink_1': 'data' + } + self.scenario._access_topologies = ['topology1', 'topology3'] + self.scenario._core_topologies = ['topology2', 'topology4'] + self.scenario.create_traffic_model(mock_tp) + mock_id_pairs.assert_called_once_with(mock_tp.full_profile) + mock_obj_pairs.assert_called_once_with([]) + self.scenario.client.create_ipv4_traffic_model.assert_called_once_with( + uplink_topologies, downlink_topologies, mock_tp) + + def test__get_endpoints_src_dst_id_pairs(self): + full_tp = OrderedDict([ + ('uplink_0', {'ipv4': {'port': 'xe0'}}), + ('downlink_0', {'ipv4': {'port': 'xe1'}}), + ('uplink_1', {'ipv4': {'port': 'xe0'}}), + ('downlink_1', {'ipv4': {'port': 'xe3'}})]) + endpoints_src_dst_pairs = ['xe0', 'xe1', 'xe0', 'xe3'] + res = self.scenario._get_endpoints_src_dst_id_pairs(full_tp) + self.assertEqual(res, endpoints_src_dst_pairs) + + def test__get_endpoints_src_dst_id_pairs_wrong_flows_number(self): + full_tp = 
OrderedDict([ + ('uplink_0', {'ipv4': {'port': 'xe0'}}), + ('downlink_0', {'ipv4': {'port': 'xe1'}}), + ('uplink_1', {'ipv4': {'port': 'xe0'}})]) + with self.assertRaises(RuntimeError): + self.scenario._get_endpoints_src_dst_id_pairs(full_tp) + + def test__get_endpoints_src_dst_id_pairs_no_port_key(self): + full_tp = OrderedDict([ + ('uplink_0', {'ipv4': {'id': 1}}), + ('downlink_0', {'ipv4': {'id': 2}})]) + self.assertEqual( + self.scenario._get_endpoints_src_dst_id_pairs(full_tp), []) + + def test__get_endpoints_src_dst_obj_pairs_tp_with_port_key(self): + endpoints_id_pairs = ['xe0', 'xe1', + 'xe0', 'xe1', + 'xe0', 'xe3', + 'xe0', 'xe3'] + ixia_cfg = { + 'pppoe_client': { + 'sessions_per_port': 4, + 'sessions_per_svlan': 1 + }, + 'flow': { + 'src_ip': [{'tg__0': 'xe0'}, {'tg__0': 'xe2'}], + 'dst_ip': [{'tg__0': 'xe1'}, {'tg__0': 'xe3'}] + } + } + + expected_result = ['tp1_dg1', 'tp3_dg1', 'tp1_dg2', 'tp3_dg1', + 'tp1_dg3', 'tp4_dg1', 'tp1_dg4', 'tp4_dg1'] + + self.scenario._ixia_cfg = ixia_cfg + self.scenario._access_topologies = ['topology1', 'topology2'] + self.scenario._core_topologies = ['topology3', 'topology4'] + self.mock_IxNextgen.get_topology_device_groups.side_effect = \ + [['tp1_dg1', 'tp1_dg2', 'tp1_dg3', 'tp1_dg4'], + ['tp2_dg1', 'tp2_dg2', 'tp2_dg3', 'tp2_dg4'], + ['tp3_dg1'], + ['tp4_dg1']] + res = self.scenario._get_endpoints_src_dst_obj_pairs( + endpoints_id_pairs) + self.assertEqual(res, expected_result) + + def test__get_endpoints_src_dst_obj_pairs_default_flows_mapping(self): + endpoints_id_pairs = [] + ixia_cfg = { + 'pppoe_client': { + 'sessions_per_port': 4, + 'sessions_per_svlan': 1 + }, + 'flow': { + 'src_ip': [{'tg__0': 'xe0'}, {'tg__0': 'xe2'}], + 'dst_ip': [{'tg__0': 'xe1'}, {'tg__0': 'xe3'}] + } + } + + self.scenario._ixia_cfg = ixia_cfg + res = self.scenario._get_endpoints_src_dst_obj_pairs( + endpoints_id_pairs) + self.assertEqual(res, []) + + def test_run_protocols(self): + self.scenario.client.is_protocols_running.return_value = 
True + self.scenario.run_protocols() + self.scenario.client.start_protocols.assert_called_once() + + def test_run_protocols_timeout_exception(self): + self.scenario.client.is_protocols_running.return_value = False + with self.assertRaises(exceptions.WaitTimeout): + self.scenario.run_protocols() + self.scenario.client.start_protocols.assert_called_once() + + def test_stop_protocols(self): + self.scenario.stop_protocols() + self.scenario.client.stop_protocols.assert_called_once() + + def test__get_intf_addr_str_type_input(self): + intf = '192.168.10.2/24' + ip, mask = self.scenario._get_intf_addr(intf) + self.assertEqual(ip, '192.168.10.2') + self.assertEqual(mask, 24) + + def test__get_intf_addr_dict_type_input(self): + intf = {'tg__0': 'xe0'} + ip, mask = self.scenario._get_intf_addr(intf) + self.assertEqual(ip, '10.1.1.1') + self.assertEqual(mask, 24) + + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, '_get_intf_addr') + def test__fill_ixia_config(self, mock_get_intf_addr): + + ixia_cfg = { + 'pppoe_client': { + 'sessions_per_port': 4, + 'sessions_per_svlan': 1, + 's_vlan': 10, + 'c_vlan': 20, + 'ip': ['10.3.3.1/24', '10.4.4.1/24'] + }, + 'ipv4_client': { + 'sessions_per_port': 1, + 'sessions_per_vlan': 1, + 'vlan': 101, + 'gateway_ip': ['10.1.1.1/24', '10.2.2.1/24'], + 'ip': ['10.1.1.1/24', '10.2.2.1/24'] + } + } + + mock_get_intf_addr.side_effect = [ + ('10.3.3.1', '24'), + ('10.4.4.1', '24'), + ('10.1.1.1', '24'), + ('10.2.2.1', '24'), + ('10.1.1.1', '24'), + ('10.2.2.1', '24') + ] + self.scenario._ixia_cfg = ixia_cfg + self.scenario._fill_ixia_config() + self.assertEqual(mock_get_intf_addr.call_count, 6) + self.assertEqual(self.scenario._ixia_cfg['pppoe_client']['ip'], + ['10.3.3.1', '10.4.4.1']) + self.assertEqual(self.scenario._ixia_cfg['ipv4_client']['ip'], + ['10.1.1.1', '10.2.2.1']) + self.assertEqual(self.scenario._ixia_cfg['ipv4_client']['prefix'], + ['24', '24']) + + 
@mock.patch('yardstick.network_services.libs.ixia_libs.ixnet.ixnet_api.Vlan') + def test__apply_access_network_config_pap_auth(self, mock_vlan): + _ixia_cfg = { + 'pppoe_client': { + 'sessions_per_port': 4, + 'sessions_per_svlan': 1, + 's_vlan': 10, + 'c_vlan': 20, + 'pap_user': 'test_pap', + 'pap_password': 'pap' + }} + pap_user = _ixia_cfg['pppoe_client']['pap_user'] + pap_passwd = _ixia_cfg['pppoe_client']['pap_password'] + self.scenario._ixia_cfg = _ixia_cfg + self.scenario._uplink_vports = [0, 2] + self.scenario.client.add_topology.side_effect = ['Topology 1', 'Topology 2'] + self.scenario.client.add_device_group.side_effect = ['Dg1', 'Dg2', 'Dg3', + 'Dg4', 'Dg5', 'Dg6', + 'Dg7', 'Dg8'] + self.scenario.client.add_ethernet.side_effect = ['Eth1', 'Eth2', 'Eth3', + 'Eth4', 'Eth5', 'Eth6', + 'Eth7', 'Eth8'] + self.scenario._apply_access_network_config() + self.assertEqual(self.scenario.client.add_topology.call_count, 2) + self.assertEqual(self.scenario.client.add_device_group.call_count, 8) + self.assertEqual(self.scenario.client.add_ethernet.call_count, 8) + self.assertEqual(mock_vlan.call_count, 16) + self.assertEqual(self.scenario.client.add_vlans.call_count, 8) + self.assertEqual(self.scenario.client.add_pppox_client.call_count, 8) + self.scenario.client.add_topology.assert_has_calls([ + mock.call('Topology access 0', 0), + mock.call('Topology access 1', 2) + ]) + self.scenario.client.add_device_group.assert_has_calls([ + mock.call('Topology 1', 'SVLAN 10', 1), + mock.call('Topology 1', 'SVLAN 11', 1), + mock.call('Topology 1', 'SVLAN 12', 1), + mock.call('Topology 1', 'SVLAN 13', 1), + mock.call('Topology 2', 'SVLAN 14', 1), + mock.call('Topology 2', 'SVLAN 15', 1), + mock.call('Topology 2', 'SVLAN 16', 1), + mock.call('Topology 2', 'SVLAN 17', 1) + ]) + self.scenario.client.add_ethernet.assert_has_calls([ + mock.call('Dg1', 'Ethernet'), + mock.call('Dg2', 'Ethernet'), + mock.call('Dg3', 'Ethernet'), + mock.call('Dg4', 'Ethernet'), + mock.call('Dg5', 
'Ethernet'), + mock.call('Dg6', 'Ethernet'), + mock.call('Dg7', 'Ethernet'), + mock.call('Dg8', 'Ethernet') + ]) + mock_vlan.assert_has_calls([ + mock.call(vlan_id=10), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=11), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=12), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=13), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=14), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=15), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=16), + mock.call(vlan_id=20, vlan_id_step=1), + mock.call(vlan_id=17), + mock.call(vlan_id=20, vlan_id_step=1) + ]) + self.scenario.client.add_pppox_client.assert_has_calls([ + mock.call('Eth1', 'pap', pap_user, pap_passwd), + mock.call('Eth2', 'pap', pap_user, pap_passwd), + mock.call('Eth3', 'pap', pap_user, pap_passwd), + mock.call('Eth4', 'pap', pap_user, pap_passwd), + mock.call('Eth5', 'pap', pap_user, pap_passwd), + mock.call('Eth6', 'pap', pap_user, pap_passwd), + mock.call('Eth7', 'pap', pap_user, pap_passwd), + mock.call('Eth8', 'pap', pap_user, pap_passwd) + ]) + + def test__apply_access_network_config_chap_auth(self): + _ixia_cfg = { + 'pppoe_client': { + 'sessions_per_port': 4, + 'sessions_per_svlan': 1, + 's_vlan': 10, + 'c_vlan': 20, + 'chap_user': 'test_chap', + 'chap_password': 'chap' + }} + chap_user = _ixia_cfg['pppoe_client']['chap_user'] + chap_passwd = _ixia_cfg['pppoe_client']['chap_password'] + self.scenario._ixia_cfg = _ixia_cfg + self.scenario._uplink_vports = [0, 2] + self.scenario.client.add_ethernet.side_effect = ['Eth1', 'Eth2', 'Eth3', + 'Eth4', 'Eth5', 'Eth6', + 'Eth7', 'Eth8'] + self.scenario._apply_access_network_config() + self.assertEqual(self.scenario.client.add_pppox_client.call_count, 8) + self.scenario.client.add_pppox_client.assert_has_calls([ + mock.call('Eth1', 'chap', chap_user, chap_passwd), + mock.call('Eth2', 'chap', chap_user, chap_passwd), + mock.call('Eth3', 'chap', 
chap_user, chap_passwd), + mock.call('Eth4', 'chap', chap_user, chap_passwd), + mock.call('Eth5', 'chap', chap_user, chap_passwd), + mock.call('Eth6', 'chap', chap_user, chap_passwd), + mock.call('Eth7', 'chap', chap_user, chap_passwd), + mock.call('Eth8', 'chap', chap_user, chap_passwd) + ]) + + @mock.patch('yardstick.network_services.libs.ixia_libs.ixnet.ixnet_api.Vlan') + def test__apply_core_network_config_no_bgp_proto(self, mock_vlan): + self.scenario._downlink_vports = [1, 3] + self.scenario.client.add_topology.side_effect = ['Topology 1', 'Topology 2'] + self.scenario.client.add_device_group.side_effect = ['Dg1', 'Dg2'] + self.scenario.client.add_ethernet.side_effect = ['Eth1', 'Eth2'] + self.scenario._apply_core_network_config() + self.assertEqual(self.scenario.client.add_topology.call_count, 2) + self.assertEqual(self.scenario.client.add_device_group.call_count, 2) + self.assertEqual(self.scenario.client.add_ethernet.call_count, 2) + self.assertEqual(mock_vlan.call_count, 2) + self.assertEqual(self.scenario.client.add_vlans.call_count, 2) + self.assertEqual(self.scenario.client.add_ipv4.call_count, 2) + self.scenario.client.add_topology.assert_has_calls([ + mock.call('Topology core 0', 1), + mock.call('Topology core 1', 3) + ]) + self.scenario.client.add_device_group.assert_has_calls([ + mock.call('Topology 1', 'Core port 0', 1), + mock.call('Topology 2', 'Core port 1', 1) + ]) + self.scenario.client.add_ethernet.assert_has_calls([ + mock.call('Dg1', 'Ethernet'), + mock.call('Dg2', 'Ethernet') + ]) + mock_vlan.assert_has_calls([ + mock.call(vlan_id=101), + mock.call(vlan_id=102) + ]) + self.scenario.client.add_ipv4.assert_has_calls([ + mock.call('Eth1', name='ipv4', addr=ipaddress.IPv4Address('10.1.1.2'), + addr_step='0.0.0.1', prefix='24', gateway='10.1.1.1'), + mock.call('Eth2', name='ipv4', addr=ipaddress.IPv4Address('10.2.2.2'), + addr_step='0.0.0.1', prefix='24', gateway='10.2.2.1') + ]) + self.scenario.client.add_bgp.assert_not_called() + + def 
test__apply_core_network_config_with_bgp_proto(self): + bgp_params = { + 'bgp': { + 'bgp_type': 'external', + 'dut_ip': '10.0.0.1', + 'as_number': 65000 + } + } + self.scenario._ixia_cfg['ipv4_client'].update(bgp_params) + self.scenario._downlink_vports = [1, 3] + self.scenario.client.add_ipv4.side_effect = ['ipv4_1', 'ipv4_2'] + self.scenario._apply_core_network_config() + self.assertEqual(self.scenario.client.add_bgp.call_count, 2) + self.scenario.client.add_bgp.assert_has_calls([ + mock.call('ipv4_1', dut_ip=bgp_params["bgp"]["dut_ip"], + local_as=bgp_params["bgp"]["as_number"], + bgp_type=bgp_params["bgp"]["bgp_type"]), + mock.call('ipv4_2', dut_ip=bgp_params["bgp"]["dut_ip"], + local_as=bgp_params["bgp"]["as_number"], + bgp_type=bgp_params["bgp"]["bgp_type"]) + ]) + + def test_update_tracking_options_raw_priority(self): + raw_priority = {'raw': 4} + self.scenario._ixia_cfg['priority'] = raw_priority + self.scenario.update_tracking_options() + self.scenario.client.set_flow_tracking.assert_called_once_with( + ['flowGroup0', 'vlanVlanId0', 'ipv4Raw0']) + + def test_update_tracking_options_tos_priority(self): + tos_priority = {'tos': {'precedence': [4, 7]}} + self.scenario._ixia_cfg['priority'] = tos_priority + self.scenario.update_tracking_options() + self.scenario.client.set_flow_tracking.assert_called_once_with( + ['flowGroup0', 'vlanVlanId0', 'ipv4Precedence0']) + + def test_update_tracking_options_dscp_priority(self): + dscp_priority = {'dscp': {'defaultPHB': [4, 7]}} + self.scenario._ixia_cfg['priority'] = dscp_priority + self.scenario.update_tracking_options() + self.scenario.client.set_flow_tracking.assert_called_once_with( + ['flowGroup0', 'vlanVlanId0', 'ipv4DefaultPhb0']) + + def test_update_tracking_options_invalid_priority_data(self): + invalid_priority = {'tos': {'inet-precedence': [4, 7]}} + self.scenario._ixia_cfg['priority'] = invalid_priority + self.scenario.update_tracking_options() + 
self.scenario.client.set_flow_tracking.assert_called_once_with( + ['flowGroup0', 'vlanVlanId0', 'ipv4Precedence0']) + + def test_get_tc_rfc2544_options(self): + rfc2544_tc_opts = {'allowed_drop_rate': '0.0001 - 0.0001'} + self.scenario._ixia_cfg['rfc2544'] = rfc2544_tc_opts + res = self.scenario.get_tc_rfc2544_options() + self.assertEqual(res, rfc2544_tc_opts) + + def test__get_stats(self): + self.scenario._get_stats() + self.scenario.client.get_pppoe_scenario_statistics.assert_called_once() + + def test_get_flow_id_data(self): + stats = [{'id': 1, 'in_packets': 10, 'out_packets': 20}] + key = "in_packets" + flow_id = 1 + res = self.scenario.get_flow_id_data(stats, flow_id, key) + self.assertEqual(res, 10) + + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, '_get_stats') + @mock.patch.object(tg_rfc2544_ixia.IxiaPppoeClientScenario, + 'get_priority_flows_stats') + def test_generate_samples(self, mock_prio_flow_statistics, + mock_get_stats): + ixia_stats = { + 'flow_statistic': [ + {'Flow_Group': 'RFC2544-1 - Flow Group 0001', + 'Frames_Delta': '0', + 'IP_Priority': '0', + 'Rx_Frames': '3000', + 'Tx_Frames': '3000', + 'VLAN-ID': '100', + 'Tx_Port': 'Ethernet - 001', + 'Store-Forward_Avg_latency_ns': '2', + 'Store-Forward_Min_latency_ns': '2', + 'Store-Forward_Max_latency_ns': '2'}, + {'Flow_Group': 'RFC2544-2 - Flow Group 0001', + 'Frames_Delta': '0', + 'IP_Priority': '0', + 'Rx_Frames': '3000', + 'Tx_Frames': '3000', + 'VLAN-ID': '101', + 'Tx_Port': 'Ethernet - 002', + 'Store-Forward_Avg_latency_ns': '2', + 'Store-Forward_Min_latency_ns': '2', + 'Store-Forward_Max_latency_ns': '2' + }], + 'port_statistics': [ + {'Frames_Tx': '3000', + 'Valid_Frames_Rx': '3000', + 'Bytes_Rx': '192000', + 'Bytes_Tx': '192000', + 'Rx_Rate_Kbps': '0.0', + 'Tx_Rate_Kbps': '0.0', + 'Rx_Rate_Mbps': '0.0', + 'Tx_Rate_Mbps': '0.0', + 'port_name': 'Ethernet - 001'}, + {'Frames_Tx': '3000', + 'Valid_Frames_Rx': '3000', + 'Bytes_Rx': '192000', + 'Bytes_Tx': '192000', + 
'Rx_Rate_Kbps': '0.0', + 'Tx_Rate_Kbps': '0.0', + 'Rx_Rate_Mbps': '0.0', + 'Tx_Rate_Mbps': '0.0', + 'port_name': 'Ethernet - 002'}], + 'pppox_client_per_port': [ + {'Sessions_Down': '0', + 'Sessions_Not_Started': '0', + 'Sessions_Total': '1', + 'Sessions_Up': '1', + 'subs_port': 'Ethernet - 001'}]} + + prio_flows_stats = { + '0': { + 'InPackets': 6000, + 'OutPackets': 6000, + 'RxThroughput': 200.0, + 'TxThroughput': 200.0, + 'LatencyAvg': 2, + 'LatencyMax': 2, + 'LatencyMin': 2 + } + } + + expected_result = {'priority_stats': { + '0': {'RxThroughput': 200.0, + 'TxThroughput': 200.0, + 'LatencyAvg': 2, + 'LatencyMax': 2, + 'LatencyMin': 2, + 'InPackets': 6000, + 'OutPackets': 6000}}, + 'xe0': {'RxThroughput': 100.0, + 'LatencyAvg': 2, + 'LatencyMax': 2, + 'LatencyMin': 2, + 'TxThroughput': 100.0, + 'InPackets': 3000, + 'OutPackets': 3000, + 'InBytes': 192000, + 'OutBytes': 192000, + 'RxThroughputBps': 6400.0, + 'TxThroughputBps': 6400.0, + 'SessionsDown': 0, + 'SessionsNotStarted': 0, + 'SessionsTotal': 1, + 'SessionsUp': 1}, + 'xe1': {'RxThroughput': 100.0, + 'LatencyAvg': 2, + 'LatencyMax': 2, + 'LatencyMin': 2, + 'TxThroughput': 100.0, + 'InPackets': 3000, + 'OutPackets': 3000, + 'InBytes': 192000, + 'OutBytes': 192000, + 'RxThroughputBps': 6400.0, + 'TxThroughputBps': 6400.0}} + + mock_get_stats.return_value = ixia_stats + mock_prio_flow_statistics.return_value = prio_flows_stats + ports = [0, 1] + port_names = [{'name': 'xe0'}, {'name': 'xe1'}] + duration = 30 + res_helper = mock.Mock() + res_helper.vnfd_helper.find_interface_by_port.side_effect = \ + port_names + samples = self.scenario.generate_samples(res_helper, ports, duration) + self.assertIsNotNone(samples) + self.assertIsNotNone(samples.get('xe0')) + self.assertIsNotNone(samples.get('xe1')) + self.assertEqual(samples, expected_result) + mock_get_stats.assert_called_once() + mock_prio_flow_statistics.assert_called_once() diff --git 
a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py index a5b9f258e..51b1b0d33 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import time import mock import unittest @@ -24,7 +25,8 @@ from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_trex class TestTrexRfcResouceHelper(unittest.TestCase): - def test__run_traffic_once(self): + @mock.patch.object(time, 'sleep') + def test__run_traffic_once(self, *args): mock_setup_helper = mock.Mock() mock_traffic_profile = mock.Mock() mock_traffic_profile.config.duration = 3 @@ -224,14 +226,12 @@ class TestTrexTrafficGenRFC(unittest.TestCase): self._mock_ssh_helper.stop() def test___init__(self): - trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC( - 'vnf1', self.VNFD_0, 'task_id') + trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0) self.assertIsNotNone(trex_traffic_gen.resource_helper._terminated.value) @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') def test_collect_kpi(self, *args): - trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC( - 'vnf1', self.VNFD_0, 'task_id') + trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0) trex_traffic_gen.scenario_helper.scenario_cfg = { 'nodes': {trex_traffic_gen.name: "mock"} } @@ -247,8 +247,7 @@ class 
TestTrexTrafficGenRFC(unittest.TestCase): mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC( - 'vnf1', self.VNFD_0, 'task_id') + trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0) trex_traffic_gen._start_server = mock.Mock(return_value=0) trex_traffic_gen.resource_helper = mock.MagicMock() trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock() @@ -283,8 +282,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase): mock_traffic_profile.get_traffic_definition.return_value = "64" mock_traffic_profile.params = self.TRAFFIC_PROFILE - trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC( - 'vnf1', self.VNFD_0, 'task_id') + trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0) trex_traffic_gen.resource_helper = mock.MagicMock() trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock() scenario_cfg = { diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py index 9ed2abbb9..0a441c8ce 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -300,14 +300,14 @@ class TestTrexTrafficGen(unittest.TestCase): def test___init__(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) self.assertIsInstance(trex_traffic_gen.resource_helper, tg_trex.TrexResourceHelper) @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node') def test_collect_kpi(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen.scenario_helper.scenario_cfg = { 'nodes': {trex_traffic_gen.name: "mock"} } @@ -321,13 +321,13 @@ class TestTrexTrafficGen(unittest.TestCase): def test_listen_traffic(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) self.assertIsNone(trex_traffic_gen.listen_traffic({})) @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') def test_instantiate(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen._start_server = mock.Mock(return_value=0) trex_traffic_gen._tg_process = mock.MagicMock() trex_traffic_gen._tg_process.start = mock.Mock() @@ -342,7 +342,7 @@ class TestTrexTrafficGen(unittest.TestCase): @mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context') def test_instantiate_error(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen._start_server = mock.Mock(return_value=0) trex_traffic_gen._tg_process = 
mock.MagicMock() trex_traffic_gen._tg_process.start = mock.Mock() @@ -355,7 +355,7 @@ class TestTrexTrafficGen(unittest.TestCase): def test__start_server(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen.ssh_helper = mock.MagicMock() trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() trex_traffic_gen.scenario_helper.scenario_cfg = {} @@ -363,7 +363,7 @@ class TestTrexTrafficGen(unittest.TestCase): def test__start_server_multiple_queues(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen.ssh_helper = mock.MagicMock() trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() trex_traffic_gen.scenario_helper.scenario_cfg = { @@ -377,7 +377,7 @@ class TestTrexTrafficGen(unittest.TestCase): mock_traffic_profile.params = self.TRAFFIC_PROFILE vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - self.sut = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + self.sut = tg_trex.TrexTrafficGen(NAME, vnfd) self.sut.ssh_helper = mock.Mock() self.sut.ssh_helper.run = mock.Mock() self.sut._connect_client = mock.Mock() @@ -387,13 +387,12 @@ class TestTrexTrafficGen(unittest.TestCase): # must generate cfg before we can run traffic so Trex port mapping is # created self.sut.resource_helper.generate_cfg() - self.sut._setup_mq_producer = mock.Mock() with mock.patch.object(self.sut.resource_helper, 'run_traffic'): - self.sut._traffic_runner(mock_traffic_profile, mock.ANY) + self.sut._traffic_runner(mock_traffic_profile) def test__generate_trex_cfg(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() 
self.assertIsNone(trex_traffic_gen.resource_helper.generate_cfg()) @@ -432,7 +431,7 @@ class TestTrexTrafficGen(unittest.TestCase): 'local_mac': '00:00:00:00:00:01'}, 'vnfd-connection-point-ref': 'xe1', 'name': 'xe1'}] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() trex_traffic_gen.resource_helper.generate_cfg() trex_traffic_gen.resource_helper._build_ports() @@ -449,24 +448,25 @@ class TestTrexTrafficGen(unittest.TestCase): mock_traffic_profile.params = self.TRAFFIC_PROFILE vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - self.sut = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + self.sut = tg_trex.TrexTrafficGen(NAME, vnfd) self.sut.ssh_helper = mock.Mock() self.sut.ssh_helper.run = mock.Mock() self.sut._traffic_runner = mock.Mock(return_value=0) self.sut.resource_helper.client_started.value = 1 - self.sut.run_traffic(mock_traffic_profile) + result = self.sut.run_traffic(mock_traffic_profile) self.sut._traffic_process.terminate() + self.assertIsNotNone(result) def test_terminate(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) trex_traffic_gen.ssh_helper = mock.MagicMock() trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() self.assertIsNone(trex_traffic_gen.terminate()) def test__connect_client(self): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id') + trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd) client = mock.Mock() client.connect = mock.Mock(return_value=0) self.assertIsNotNone(trex_traffic_gen.resource_helper._connect(client)) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex_vpp.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex_vpp.py new file mode 100644 
index 000000000..ef1ae1182 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex_vpp.py @@ -0,0 +1,1130 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from multiprocessing import Process + +import mock +from trex_stl_lib.trex_stl_exceptions import STLError + +from yardstick.benchmark.contexts import base as ctx_base +from yardstick.network_services.traffic_profile import base as tp_base +from yardstick.network_services.traffic_profile import rfc2544 +from yardstick.network_services.vnf_generic.vnf import base, sample_vnf, \ + tg_trex_vpp +from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import \ + mock_ssh + + +class TestTrexVppResourceHelper(unittest.TestCase): + TRAFFIC_PROFILE = { + "schema": "isb:traffic_profile:0.1", + "name": "fixed", + "description": "Fixed traffic profile to run UDP traffic", + "traffic_profile": { + "traffic_type": "FixedTraffic", + "frame_rate": 100, # pps + "flow_number": 10, + "frame_size": 64 + }, + } + + def test_fmt_latency(self): + mock_setup_helper = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + self.assertEqual('10/90/489', vpp_rfc.fmt_latency(10, 90, 489)) + + def test_fmt_latency_error(self): + mock_setup_helper = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + self.assertEqual('-1/-1/-1', vpp_rfc.fmt_latency('err', 'err', 'err')) + + def 
test_generate_samples(self): + stats = { + 0: { + "ibytes": 55549120, + "ierrors": 0, + "ipackets": 867955, + "obytes": 55549696, + "oerrors": 0, + "opackets": 867964, + "rx_bps": 104339032.0, + "rx_bps_L1": 136944984.0, + "rx_pps": 203787.2, + "rx_util": 1.36944984, + "tx_bps": 134126008.0, + "tx_bps_L1": 176040392.0, + "tx_pps": 261964.9, + "tx_util": 1.7604039200000001 + }, + 1: { + "ibytes": 55549696, + "ierrors": 0, + "ipackets": 867964, + "obytes": 55549120, + "oerrors": 0, + "opackets": 867955, + "rx_bps": 134119648.0, + "rx_bps_L1": 176032032.0, + "rx_pps": 261952.4, + "rx_util": 1.76032032, + "tx_bps": 104338192.0, + "tx_bps_L1": 136943872.0, + "tx_pps": 203785.5, + "tx_util": 1.36943872 + }, + "flow_stats": { + 1: { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 6400, + "1": 0, + "total": 6400 + }, + "rx_pkts": { + "0": 100, + "1": 0, + "total": 100 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 0, + "1": 6400, + "total": 6400 + }, + "tx_pkts": { + "0": 0, + "1": 100, + "total": 100 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + 2: { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 0, + "1": 6464, + "total": 6464 + }, + "rx_pkts": { + "0": 0, + "1": 101, + "total": 101 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 6464, + "1": 0, + "total": 6464 + }, + "tx_pkts": { + "0": 101, + "1": 0, + "total": 101 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + "global": { + "rx_err": { + "0": 0, + "1": 0 + }, + "tx_err": { + "0": 0, + "1": 0 + } + } + }, + "global": { + 
"bw_per_core": 45.6, + "cpu_util": 0.1494, + "queue_full": 0, + "rx_bps": 238458672.0, + "rx_cpu_util": 4.751e-05, + "rx_drop_bps": 0.0, + "rx_pps": 465739.6, + "tx_bps": 238464208.0, + "tx_pps": 465750.4 + }, + "latency": { + 1: { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 63.375, + "histogram": { + "20": 1, + "30": 18, + "40": 12, + "50": 10, + "60": 12, + "70": 11, + "80": 6, + "90": 10, + "100": 20 + }, + "jitter": 23, + "last_max": 122, + "total_max": 123, + "total_min": 20 + } + }, + 2: { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 74, + "histogram": { + "60": 20, + "70": 10, + "80": 3, + "90": 4, + "100": 64 + }, + "jitter": 6, + "last_max": 83, + "total_max": 135, + "total_min": 60 + } + }, + "global": { + "bad_hdr": 0, + "old_flow": 0 + } + }, + "total": { + "ibytes": 111098816, + "ierrors": 0, + "ipackets": 1735919, + "obytes": 111098816, + "oerrors": 0, + "opackets": 1735919, + "rx_bps": 238458680.0, + "rx_bps_L1": 312977016.0, + "rx_pps": 465739.6, + "rx_util": 3.1297701599999996, + "tx_bps": 238464200.0, + "tx_bps_L1": 312984264.0, + "tx_pps": 465750.4, + "tx_util": 3.12984264 + } + } + expected = { + "xe0": { + "in_packets": 867955, + "latency": { + 2: { + "avg_latency": 74.0, + "max_latency": 135.0, + "min_latency": 60.0 + } + }, + "out_packets": 867964, + "rx_throughput_bps": 104339032.0, + "rx_throughput_fps": 203787.2, + "tx_throughput_bps": 134126008.0, + "tx_throughput_fps": 261964.9 + }, + "xe1": { + "in_packets": 867964, + "latency": { + 1: { + "avg_latency": 63.375, + "max_latency": 123.0, + "min_latency": 20.0 + } + }, + "out_packets": 867955, + "rx_throughput_bps": 134119648.0, + "rx_throughput_fps": 261952.4, + "tx_throughput_bps": 104338192.0, + "tx_throughput_fps": 203785.5 + } + } + mock_setup_helper = mock.Mock() + vpp_rfc = 
tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + vpp_rfc.vnfd_helper = base.VnfdHelper(TestTrexTrafficGenVpp.VNFD_0) + port_pg_id = rfc2544.PortPgIDMap() + port_pg_id.add_port(1) + port_pg_id.increase_pg_id() + port_pg_id.add_port(0) + port_pg_id.increase_pg_id() + self.assertEqual(expected, + vpp_rfc.generate_samples(stats, [0, 1], port_pg_id, + True)) + + def test_generate_samples_error(self): + stats = { + 0: { + "ibytes": 55549120, + "ierrors": 0, + "ipackets": 867955, + "obytes": 55549696, + "oerrors": 0, + "opackets": 867964, + "rx_bps": 104339032.0, + "rx_bps_L1": 136944984.0, + "rx_pps": 203787.2, + "rx_util": 1.36944984, + "tx_bps": 134126008.0, + "tx_bps_L1": 176040392.0, + "tx_pps": 261964.9, + "tx_util": 1.7604039200000001 + }, + 1: { + "ibytes": 55549696, + "ierrors": 0, + "ipackets": 867964, + "obytes": 55549120, + "oerrors": 0, + "opackets": 867955, + "rx_bps": 134119648.0, + "rx_bps_L1": 176032032.0, + "rx_pps": 261952.4, + "rx_util": 1.76032032, + "tx_bps": 104338192.0, + "tx_bps_L1": 136943872.0, + "tx_pps": 203785.5, + "tx_util": 1.36943872 + }, + "flow_stats": { + 1: { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 6400, + "1": 0, + "total": 6400 + }, + "rx_pkts": { + "0": 100, + "1": 0, + "total": 100 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 0, + "1": 6400, + "total": 6400 + }, + "tx_pkts": { + "0": 0, + "1": 100, + "total": 100 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + 2: { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 0, + "1": 6464, + "total": 6464 + }, + "rx_pkts": { + "0": 0, + "1": 101, + "total": 101 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 
0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 6464, + "1": 0, + "total": 6464 + }, + "tx_pkts": { + "0": 101, + "1": 0, + "total": 101 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + "global": { + "rx_err": { + "0": 0, + "1": 0 + }, + "tx_err": { + "0": 0, + "1": 0 + } + } + }, + "global": { + "bw_per_core": 45.6, + "cpu_util": 0.1494, + "queue_full": 0, + "rx_bps": 238458672.0, + "rx_cpu_util": 4.751e-05, + "rx_drop_bps": 0.0, + "rx_pps": 465739.6, + "tx_bps": 238464208.0, + "tx_pps": 465750.4 + }, + "latency": { + 1: { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": "err", + "histogram": { + "20": 1, + "30": 18, + "40": 12, + "50": 10, + "60": 12, + "70": 11, + "80": 6, + "90": 10, + "100": 20 + }, + "jitter": 23, + "last_max": 122, + "total_max": "err", + "total_min": "err" + } + }, + 2: { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 74, + "histogram": { + "60": 20, + "70": 10, + "80": 3, + "90": 4, + "100": 64 + }, + "jitter": 6, + "last_max": 83, + "total_max": 135, + "total_min": 60 + } + }, + "global": { + "bad_hdr": 0, + "old_flow": 0 + } + }, + "total": { + "ibytes": 111098816, + "ierrors": 0, + "ipackets": 1735919, + "obytes": 111098816, + "oerrors": 0, + "opackets": 1735919, + "rx_bps": 238458680.0, + "rx_bps_L1": 312977016.0, + "rx_pps": 465739.6, + "rx_util": 3.1297701599999996, + "tx_bps": 238464200.0, + "tx_bps_L1": 312984264.0, + "tx_pps": 465750.4, + "tx_util": 3.12984264 + } + } + expected = {'xe0': {'in_packets': 867955, + 'latency': {2: {'avg_latency': 74.0, + 'max_latency': 135.0, + 'min_latency': 60.0}}, + 'out_packets': 867964, + 'rx_throughput_bps': 104339032.0, + 'rx_throughput_fps': 203787.2, + 'tx_throughput_bps': 134126008.0, + 'tx_throughput_fps': 261964.9}, + 'xe1': {'in_packets': 
867964, + 'latency': {1: {'avg_latency': -1.0, + 'max_latency': -1.0, + 'min_latency': -1.0}}, + 'out_packets': 867955, + 'rx_throughput_bps': 134119648.0, + 'rx_throughput_fps': 261952.4, + 'tx_throughput_bps': 104338192.0, + 'tx_throughput_fps': 203785.5}} + mock_setup_helper = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + vpp_rfc.vnfd_helper = base.VnfdHelper(TestTrexTrafficGenVpp.VNFD_0) + vpp_rfc.get_stats = mock.Mock() + vpp_rfc.get_stats.return_value = stats + port_pg_id = rfc2544.PortPgIDMap() + port_pg_id.add_port(1) + port_pg_id.increase_pg_id() + port_pg_id.add_port(0) + port_pg_id.increase_pg_id() + self.assertEqual(expected, + vpp_rfc.generate_samples(stats=None, ports=[0, 1], + port_pg_id=port_pg_id, + latency=True)) + + def test__run_traffic_once(self): + mock_setup_helper = mock.Mock() + mock_traffic_profile = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + vpp_rfc.TRANSIENT_PERIOD = 0 + vpp_rfc.rfc2544_helper = mock.Mock() + + self.assertTrue(vpp_rfc._run_traffic_once(mock_traffic_profile)) + mock_traffic_profile.execute_traffic.assert_called_once_with(vpp_rfc) + + def test_run_traffic(self): + mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile) + mock_traffic_profile.get_traffic_definition.return_value = "64" + mock_traffic_profile.params = self.TRAFFIC_PROFILE + mock_setup_helper = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + vpp_rfc.ssh_helper = mock.Mock() + vpp_rfc.ssh_helper.run = mock.Mock() + vpp_rfc._traffic_runner = mock.Mock(return_value=0) + vpp_rfc._build_ports = mock.Mock() + vpp_rfc._connect = mock.Mock() + vpp_rfc.run_traffic(mock_traffic_profile) + + def test_send_traffic_on_tg(self): + stats = { + 0: { + "ibytes": 55549120, + "ierrors": 0, + "ipackets": 867955, + "obytes": 55549696, + "oerrors": 0, + "opackets": 867964, + "rx_bps": 104339032.0, + "rx_bps_L1": 136944984.0, + "rx_pps": 203787.2, + "rx_util": 1.36944984, + 
"tx_bps": 134126008.0, + "tx_bps_L1": 176040392.0, + "tx_pps": 261964.9, + "tx_util": 1.7604039200000001 + }, + 1: { + "ibytes": 55549696, + "ierrors": 0, + "ipackets": 867964, + "obytes": 55549120, + "oerrors": 0, + "opackets": 867955, + "rx_bps": 134119648.0, + "rx_bps_L1": 176032032.0, + "rx_pps": 261952.4, + "rx_util": 1.76032032, + "tx_bps": 104338192.0, + "tx_bps_L1": 136943872.0, + "tx_pps": 203785.5, + "tx_util": 1.36943872 + }, + "flow_stats": { + 1: { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 6400, + "1": 0, + "total": 6400 + }, + "rx_pkts": { + "0": 100, + "1": 0, + "total": 100 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 0, + "1": 6400, + "total": 6400 + }, + "tx_pkts": { + "0": 0, + "1": 100, + "total": 100 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + 2: { + "rx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "rx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "rx_bytes": { + "0": 0, + "1": 6464, + "total": 6464 + }, + "rx_pkts": { + "0": 0, + "1": 101, + "total": 101 + }, + "rx_pps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps": { + "0": 0, + "1": 0, + "total": 0 + }, + "tx_bps_l1": { + "0": 0.0, + "1": 0.0, + "total": 0.0 + }, + "tx_bytes": { + "0": 6464, + "1": 0, + "total": 6464 + }, + "tx_pkts": { + "0": 101, + "1": 0, + "total": 101 + }, + "tx_pps": { + "0": 0, + "1": 0, + "total": 0 + } + }, + "global": { + "rx_err": { + "0": 0, + "1": 0 + }, + "tx_err": { + "0": 0, + "1": 0 + } + } + }, + "global": { + "bw_per_core": 45.6, + "cpu_util": 0.1494, + "queue_full": 0, + "rx_bps": 238458672.0, + "rx_cpu_util": 4.751e-05, + "rx_drop_bps": 0.0, + "rx_pps": 465739.6, + "tx_bps": 238464208.0, + "tx_pps": 465750.4 + }, + "latency": { + 1: { + "err_cntrs": { + "dropped": 0, + "dup": 0, 
+ "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 63.375, + "histogram": { + "20": 1, + "30": 18, + "40": 12, + "50": 10, + "60": 12, + "70": 11, + "80": 6, + "90": 10, + "100": 20 + }, + "jitter": 23, + "last_max": 122, + "total_max": 123, + "total_min": 20 + } + }, + 2: { + "err_cntrs": { + "dropped": 0, + "dup": 0, + "out_of_order": 0, + "seq_too_high": 0, + "seq_too_low": 0 + }, + "latency": { + "average": 74, + "histogram": { + "60": 20, + "70": 10, + "80": 3, + "90": 4, + "100": 64 + }, + "jitter": 6, + "last_max": 83, + "total_max": 135, + "total_min": 60 + } + }, + "global": { + "bad_hdr": 0, + "old_flow": 0 + } + }, + "total": { + "ibytes": 111098816, + "ierrors": 0, + "ipackets": 1735919, + "obytes": 111098816, + "oerrors": 0, + "opackets": 1735919, + "rx_bps": 238458680.0, + "rx_bps_L1": 312977016.0, + "rx_pps": 465739.6, + "rx_util": 3.1297701599999996, + "tx_bps": 238464200.0, + "tx_bps_L1": 312984264.0, + "tx_pps": 465750.4, + "tx_util": 3.12984264 + } + } + mock_setup_helper = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + vpp_rfc.vnfd_helper = base.VnfdHelper(TestTrexTrafficGenVpp.VNFD_0) + vpp_rfc.client = mock.Mock() + vpp_rfc.client.get_warnings.return_value = 'get_warnings' + vpp_rfc.client.get_stats.return_value = stats + port_pg_id = rfc2544.PortPgIDMap() + port_pg_id.add_port(1) + port_pg_id.increase_pg_id() + port_pg_id.add_port(0) + port_pg_id.increase_pg_id() + self.assertEqual(stats, + vpp_rfc.send_traffic_on_tg([0, 1], port_pg_id, 30, + 10000, True)) + + def test_send_traffic_on_tg_error(self): + mock_setup_helper = mock.Mock() + vpp_rfc = tg_trex_vpp.TrexVppResourceHelper(mock_setup_helper) + vpp_rfc.vnfd_helper = base.VnfdHelper(TestTrexTrafficGenVpp.VNFD_0) + vpp_rfc.client = mock.Mock() + vpp_rfc.client.get_warnings.return_value = 'get_warnings' + vpp_rfc.client.get_stats.side_effect = STLError('get_stats') + vpp_rfc.client.wait_on_traffic.side_effect = 
STLError( + 'wait_on_traffic') + port_pg_id = rfc2544.PortPgIDMap() + port_pg_id.add_port(1) + port_pg_id.increase_pg_id() + port_pg_id.add_port(0) + port_pg_id.increase_pg_id() + # with self.assertRaises(RuntimeError) as raised: + vpp_rfc.send_traffic_on_tg([0, 1], port_pg_id, 30, 10000, True) + # self.assertIn('TRex stateless runtime error', str(raised.exception)) + + +class TestTrexTrafficGenVpp(unittest.TestCase): + VNFD_0 = { + "benchmark": { + "kpi": [ + "rx_throughput_fps", + "tx_throughput_fps", + "tx_throughput_mbps", + "rx_throughput_mbps", + "in_packets", + "out_packets", + "min_latency", + "max_latency", + "avg_latency" + ] + }, + "description": "TRex stateless traffic verifier", + "id": "TrexTrafficGenVpp", + "mgmt-interface": { + "ip": "10.10.10.10", + "password": "r00t", + "user": "root", + "vdu-id": "trexgen-baremetal" + }, + "name": "trexverifier", + "short-name": "trexverifier", + "vdu": [ + { + "description": "TRex stateless traffic verifier", + "external-interface": [ + { + "name": "xe0", + "virtual-interface": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:ff:06.0" + }, + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "vnfd-connection-point-ref": "xe0" + }, + { + "name": "xe1", + "virtual-interface": { + "dpdk_port_num": 1, + "driver": "igb_uio", + "dst_ip": "192.168.101.2", + "dst_mac": "90:e2:ba:7c:41:a9", + "ifname": "xe1", + 
"local_ip": "192.168.101.1", + "local_mac": "90:e2:ba:7c:30:e9", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "192.168.101.1", + "dst_mac": "90:e2:ba:7c:30:e9", + "ifname": "xe0", + "local_ip": "192.168.101.2", + "local_mac": "90:e2:ba:7c:41:a9", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "tg__0", + "vld_id": "downlink_0", + "vpci": "0000:ff:06.0" + }, + "peer_name": "vnf__1", + "vld_id": "downlink_0", + "vpci": "0000:81:00.1" + }, + "vnfd-connection-point-ref": "xe1" + } + ], + "id": "trexgen-baremetal", + "name": "trexgen-baremetal" + } + ] + } + + VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [ + VNFD_0, + ], + }, + } + + def setUp(self): + self._mock_ssh_helper = mock.patch.object(sample_vnf, 'VnfSshHelper') + self.mock_ssh_helper = self._mock_ssh_helper.start() + self.addCleanup(self._stop_mocks) + + def _stop_mocks(self): + self._mock_ssh_helper.stop() + + def test___init__(self): + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp( + 'tg0', self.VNFD_0) + self.assertIsNotNone( + trex_traffic_gen.resource_helper._terminated.value) + + def test__check_status(self): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp('tg0', vnfd) + trex_traffic_gen.ssh_helper = mock.MagicMock() + trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() + trex_traffic_gen.resource_helper.ssh_helper.execute.return_value = 0, '', '' + trex_traffic_gen.scenario_helper.scenario_cfg = {} + self.assertEqual(0, trex_traffic_gen._check_status()) + + def test__start_server(self): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp('tg0', vnfd) + trex_traffic_gen.ssh_helper = mock.MagicMock() + trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() + trex_traffic_gen.scenario_helper.scenario_cfg = {} + 
self.assertIsNone(trex_traffic_gen._start_server()) + + @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', + return_value='mock_node') + def test_collect_kpi(self, *args): + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp( + 'tg0', self.VNFD_0) + trex_traffic_gen.scenario_helper.scenario_cfg = { + 'nodes': {trex_traffic_gen.name: "mock"} + } + expected = { + 'physical_node': 'mock_node', + 'collect_stats': {}, + } + self.assertEqual(trex_traffic_gen.collect_kpi(), expected) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate(self, *args): + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp( + 'tg0', self.VNFD_0) + trex_traffic_gen._start_server = mock.Mock(return_value=0) + trex_traffic_gen.resource_helper = mock.MagicMock() + trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock() + + scenario_cfg = { + "tc": "tc_baremetal_rfc2544_ipv4_1flow_64B", + "topology": 'nsb_test_case.yaml', + 'options': { + 'packetsize': 64, + 'traffic_type': 4, + 'rfc2544': { + 'allowed_drop_rate': '0.8 - 1', + }, + 'vnf__0': { + 'rules': 'acl_1rule.yaml', + 'vnf_config': { + 'lb_config': 'SW', + 'lb_count': 1, + 'worker_config': '1C/1T', + 'worker_threads': 1 + }, + }, + }, + } + tg_trex_vpp.WAIT_TIME = 3 + scenario_cfg.update({"nodes": {"tg0": {}, "vnf0": {}}}) + self.assertIsNone(trex_traffic_gen.instantiate(scenario_cfg, {})) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate_error(self, *args): + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp( + 'tg0', self.VNFD_0) + trex_traffic_gen.resource_helper = mock.MagicMock() + trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock() + scenario_cfg = { + "tc": "tc_baremetal_rfc2544_ipv4_1flow_64B", + "nodes": { + "tg0": {}, + "vnf0": {} + }, + "topology": 'nsb_test_case.yaml', + 'options': { + 'packetsize': 64, + 'traffic_type': 4, + 'rfc2544': { + 
'allowed_drop_rate': '0.8 - 1', + }, + 'vnf__0': { + 'rules': 'acl_1rule.yaml', + 'vnf_config': { + 'lb_config': 'SW', + 'lb_count': 1, + 'worker_config': '1C/1T', + 'worker_threads': 1, + }, + }, + }, + } + trex_traffic_gen.instantiate(scenario_cfg, {}) + + @mock.patch( + 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper') + def test_wait_for_instantiate(self, ssh, *args): + mock_ssh(ssh) + + mock_process = mock.Mock(autospec=Process) + mock_process.is_alive.return_value = True + mock_process.exitcode = 432 + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + trex_traffic_gen = tg_trex_vpp.TrexTrafficGenVpp('tg0', vnfd) + trex_traffic_gen.ssh_helper = mock.MagicMock() + trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock() + trex_traffic_gen.resource_helper.ssh_helper.execute.return_value = 0, '', '' + trex_traffic_gen.scenario_helper.scenario_cfg = {} + trex_traffic_gen._tg_process = mock_process + self.assertEqual(432, trex_traffic_gen.wait_for_instantiate()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_vcmts_pktgen.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_vcmts_pktgen.py new file mode 100755 index 000000000..3b226d3f1 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_vcmts_pktgen.py @@ -0,0 +1,652 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import mock +import socket +import threading +import time +import os +import copy + +from yardstick.benchmark.contexts import base as ctx_base +from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper +from yardstick.network_services.vnf_generic.vnf import tg_vcmts_pktgen +from yardstick.common import exceptions + + +NAME = "tg__0" + + +class TestPktgenHelper(unittest.TestCase): + + def test___init__(self): + pktgen_helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000) + self.assertEqual(pktgen_helper.host, "localhost") + self.assertEqual(pktgen_helper.port, 23000) + self.assertFalse(pktgen_helper.connected) + + def _run_fake_server(self): + server_sock = socket.socket() + server_sock.bind(('localhost', 23000)) + server_sock.listen(0) + client_socket, _ = server_sock.accept() + client_socket.close() + server_sock.close() + + def test__connect(self): + pktgen_helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000) + self.assertFalse(pktgen_helper._connect()) + server_thread = threading.Thread(target=self._run_fake_server) + server_thread.start() + time.sleep(0.5) + self.assertTrue(pktgen_helper._connect()) + pktgen_helper._sock.close() + server_thread.join() + + @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_vcmts_pktgen.time') + def test_connect(self, *args): + pktgen_helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000) + pktgen_helper.connected = True + self.assertTrue(pktgen_helper.connect()) + pktgen_helper.connected = False + + pktgen_helper._connect = mock.MagicMock(return_value=True) + self.assertTrue(pktgen_helper.connect()) + self.assertTrue(pktgen_helper.connected) + + pktgen_helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000) + pktgen_helper._connect = mock.MagicMock(return_value=False) + self.assertFalse(pktgen_helper.connect()) + self.assertFalse(pktgen_helper.connected) + + def test_send_command(self): + pktgen_helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000) + 
self.assertFalse(pktgen_helper.send_command("")) + + pktgen_helper.connected = True + pktgen_helper._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.assertFalse(pktgen_helper.send_command("")) + + pktgen_helper._sock = mock.MagicMock() + self.assertTrue(pktgen_helper.send_command("")) + + +class TestVcmtsPktgenSetupEnvHelper(unittest.TestCase): + + PKTGEN_PARAMETERS = "export LUA_PATH=/vcmts/Pktgen.lua;"\ + "export CMK_PROC_FS=/host/proc;"\ + " /pktgen-config/setup.sh 0 4 18:02.0 "\ + "18:02.1 18:02.2 18:02.3 00:00.0 00:00.0 "\ + "00:00.0 00:00.0 imix1_100cms_1ofdm.pcap "\ + "imix1_100cms_1ofdm.pcap imix1_100cms_1ofdm.pcap "\ + "imix1_100cms_1ofdm.pcap imix1_100cms_1ofdm.pcap "\ + "imix1_100cms_1ofdm.pcap imix1_100cms_1ofdm.pcap "\ + "imix1_100cms_1ofdm.pcap" + + OPTIONS = { + "pktgen_values": "/tmp/pktgen_values.yaml", + "tg__0": { + "pktgen_id": 0 + }, + "vcmts_influxdb_ip": "10.80.5.150", + "vcmts_influxdb_port": 8086, + "vcmtsd_values": "/tmp/vcmtsd_values.yaml", + "vnf__0": { + "sg_id": 0, + "stream_dir": "us" + }, + "vnf__1": { + "sg_id": 0, + "stream_dir": "ds" + } + } + + def setUp(self): + vnfd_helper = VnfdHelper( + TestVcmtsPktgen.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + + self.setup_helper = tg_vcmts_pktgen.VcmtsPktgenSetupEnvHelper( + vnfd_helper, ssh_helper, scenario_helper) + + def test_generate_pcap_filename(self): + pcap_file_name = self.setup_helper.generate_pcap_filename(\ + TestVcmtsPktgen.PKTGEN_POD_VALUES[0]['ports'][0]) + self.assertEquals(pcap_file_name, "imix1_100cms_1ofdm.pcap") + + def test_find_port_cfg(self): + port_cfg = self.setup_helper.find_port_cfg(\ + TestVcmtsPktgen.PKTGEN_POD_VALUES[0]['ports'], "port_0") + self.assertIsNotNone(port_cfg) + + port_cfg = self.setup_helper.find_port_cfg(\ + TestVcmtsPktgen.PKTGEN_POD_VALUES[0]['ports'], "port_8") + self.assertIsNone(port_cfg) + + def test_build_pktgen_parameters(self): 
+ parameters = self.setup_helper.build_pktgen_parameters( + TestVcmtsPktgen.PKTGEN_POD_VALUES[0]) + self.assertEquals(parameters, self.PKTGEN_PARAMETERS) + + def test_start_pktgen(self): + self.setup_helper.ssh_helper = mock.MagicMock() + self.setup_helper.start_pktgen(TestVcmtsPktgen.PKTGEN_POD_VALUES[0]) + self.setup_helper.ssh_helper.send_command.assert_called_with( + self.PKTGEN_PARAMETERS) + + def test_setup_vnf_environment(self): + self.assertIsNone(self.setup_helper.setup_vnf_environment()) + +class TestVcmtsPktgen(unittest.TestCase): + + VNFD = {'vnfd:vnfd-catalog': + {'vnfd': + [{ + "benchmark": { + "kpi": [ + "upstream/bits_per_second" + ] + }, + "connection-point": [ + { + "name": "xe0", + "type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "vCMTS Pktgen Kubernetes", + "id": "VcmtsPktgen", + "mgmt-interface": { + "ip": "192.168.24.150", + "key_filename": "/tmp/yardstick_key-a3b663c2", + "user": "root", + "vdu-id": "vcmtspktgen-kubernetes" + }, + "name": "vcmtspktgen", + "short-name": "vcmtspktgen", + "vdu": [ + { + "description": "vCMTS Pktgen Kubernetes", + "external-interface": [], + "id": "vcmtspktgen-kubernetes", + "name": "vcmtspktgen-kubernetes" + } + ], + "vm-flavor": { + "memory-mb": "4096", + "vcpu-count": "4" + } + }] + }} + + PKTGEN_POD_VALUES = [ + { + "num_ports": "4", + "pktgen_id": "0", + "ports": [ + { + "net_pktgen": "18:02.0", + "num_ofdm": "1", + "num_subs": "100", + "port_0": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "18:02.1", + "num_ofdm": "1", + "num_subs": "100", + "port_1": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "18:02.2", + "num_ofdm": "1", + "num_subs": "100", + "port_2": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "18:02.3", + "num_ofdm": "1", + "num_subs": "100", + "port_3": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_4": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": 
"00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_5": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_6": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_7": "", + "traffic_type": "imix1" + } + ] + }, + { + "num_ports": 4, + "pktgen_id": 1, + "ports": [ + { + "net_pktgen": "18:0a.0", + "num_ofdm": "1", + "num_subs": "100", + "port_0": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "18:0a.1", + "num_ofdm": "1", + "num_subs": "100", + "port_1": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "18:0a.2", + "num_ofdm": "1", + "num_subs": "100", + "port_2": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "18:0a.3", + "num_ofdm": "1", + "num_subs": "100", + "port_3": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_4": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_5": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_6": "", + "traffic_type": "imix1" + }, + { + "net_pktgen": "00:00.0", + "num_ofdm": "1", + "num_subs": "100", + "port_7": "", + "traffic_type": "imix1" + } + ] + } + ] + + SCENARIO_CFG = { + "nodes": { + "tg__0": "pktgen0-k8syardstick-a3b663c2", + "vnf__0": "vnf0us-k8syardstick-a3b663c2", + "vnf__1": "vnf0ds-k8syardstick-a3b663c2" + }, + "options": { + "pktgen_values": "/tmp/pktgen_values.yaml", + "tg__0": { + "pktgen_id": 0 + }, + "vcmts_influxdb_ip": "10.80.5.150", + "vcmts_influxdb_port": 8086, + "vcmtsd_values": "/tmp/vcmtsd_values.yaml", + "vnf__0": { + "sg_id": 0, + "stream_dir": "us" + }, + "vnf__1": { + "sg_id": 0, + "stream_dir": "ds" + } + }, + "task_id": "a3b663c2-e616-4777-b6d0-ec2ea7a06f42", + "task_path": "samples/vnf_samples/nsut/cmts", + "tc": "tc_vcmts_k8s_pktgen", + "topology": 
"k8s_vcmts_topology.yaml", + "traffic_profile": "../../traffic_profiles/fixed.yaml", + "type": "NSPerf" + } + + CONTEXT_CFG = { + "networks": { + "flannel": { + "name": "flannel" + }, + "xe0": { + "name": "xe0" + }, + "xe1": { + "name": "xe1" + } + }, + "nodes": { + "tg__0": { + "VNF model": "../../vnf_descriptors/tg_vcmts_tpl.yaml", + "interfaces": { + "flannel": { + "local_ip": "192.168.24.150", + "local_mac": None, + "network_name": "flannel" + }, + "xe0": { + "local_ip": "192.168.24.150", + "local_mac": None, + "network_name": "xe0" + }, + "xe1": { + "local_ip": "192.168.24.150", + "local_mac": None, + "network_name": "xe1" + } + }, + "ip": "192.168.24.150", + "key_filename": "/tmp/yardstick_key-a3b663c2", + "member-vnf-index": "1", + "name": "pktgen0-k8syardstick-a3b663c2", + "private_ip": "192.168.24.150", + "service_ports": [ + { + "name": "ssh", + "node_port": 60270, + "port": 22, + "protocol": "TCP", + "target_port": 22 + }, + { + "name": "lua", + "node_port": 43619, + "port": 22022, + "protocol": "TCP", + "target_port": 22022 + } + ], + "ssh_port": 60270, + "user": "root", + "vnfd-id-ref": "tg__0" + }, + "vnf__0": { + "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml", + "interfaces": { + "flannel": { + "local_ip": "192.168.100.132", + "local_mac": None, + "network_name": "flannel" + }, + "xe0": { + "local_ip": "192.168.100.132", + "local_mac": None, + "network_name": "xe0" + }, + "xe1": { + "local_ip": "192.168.100.132", + "local_mac": None, + "network_name": "xe1" + } + }, + "ip": "192.168.100.132", + "key_filename": "/tmp/yardstick_key-a3b663c2", + "member-vnf-index": "3", + "name": "vnf0us-k8syardstick-a3b663c2", + "private_ip": "192.168.100.132", + "service_ports": [ + { + "name": "ssh", + "node_port": 57057, + "port": 22, + "protocol": "TCP", + "target_port": 22 + }, + { + "name": "lua", + "node_port": 29700, + "port": 22022, + "protocol": "TCP", + "target_port": 22022 + } + ], + "ssh_port": 57057, + "user": "root", + "vnfd-id-ref": "vnf__0" + 
}, + "vnf__1": { + "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml", + "interfaces": { + "flannel": { + "local_ip": "192.168.100.134", + "local_mac": None, + "network_name": "flannel" + }, + "xe0": { + "local_ip": "192.168.100.134", + "local_mac": None, + "network_name": "xe0" + }, + "xe1": { + "local_ip": "192.168.100.134", + "local_mac": None, + "network_name": "xe1" + } + }, + "ip": "192.168.100.134", + "key_filename": "/tmp/yardstick_key-a3b663c2", + "member-vnf-index": "4", + "name": "vnf0ds-k8syardstick-a3b663c2", + "private_ip": "192.168.100.134", + "service_ports": [ + { + "name": "ssh", + "node_port": 18581, + "port": 22, + "protocol": "TCP", + "target_port": 22 + }, + { + "name": "lua", + "node_port": 18469, + "port": 22022, + "protocol": "TCP", + "target_port": 22022 + } + ], + "ssh_port": 18581, + "user": "root", + "vnfd-id-ref": "vnf__1" + } + } + } + + PKTGEN_VALUES_PATH = "/tmp/pktgen_values.yaml" + + PKTGEN_VALUES = \ + "serviceAccount: cmk-serviceaccount\n" \ + "images:\n" \ + " vcmts_pktgen: vcmts-pktgen:v18.10\n" \ + "topology:\n" \ + " pktgen_replicas: 8\n" \ + " pktgen_pods:\n" \ + " - pktgen_id: 0\n" \ + " num_ports: 4\n" \ + " ports:\n" \ + " - port_0:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.0\n" \ + " - port_1:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.1\n" \ + " - port_2:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.2\n" \ + " - port_3:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.3\n" \ + " - port_4:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.4\n" \ + " - port_5:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.5\n" \ + " - port_6:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " 
num_subs: 300\n" \ + " net_pktgen: 8a:02.6\n" \ + " - port_7:\n" \ + " traffic_type: 'imix2'\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " net_pktgen: 8a:02.7\n" + + def setUp(self): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.vcmts_pktgen = tg_vcmts_pktgen.VcmtsPktgen(NAME, vnfd) + self.vcmts_pktgen._start_server = mock.Mock(return_value=0) + self.vcmts_pktgen.resource_helper = mock.MagicMock() + self.vcmts_pktgen.setup_helper = mock.MagicMock() + + def test___init__(self): + self.assertFalse(self.vcmts_pktgen.traffic_finished) + self.assertIsNotNone(self.vcmts_pktgen.setup_helper) + self.assertIsNotNone(self.vcmts_pktgen.resource_helper) + + def test_extract_pod_cfg(self): + pod_cfg = self.vcmts_pktgen.extract_pod_cfg(self.PKTGEN_POD_VALUES, "0") + self.assertIsNotNone(pod_cfg) + self.assertEqual(pod_cfg["pktgen_id"], "0") + pod_cfg = self.vcmts_pktgen.extract_pod_cfg(self.PKTGEN_POD_VALUES, "4") + self.assertIsNone(pod_cfg) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate_missing_pktgen_values_key(self, *args): + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options'].pop('pktgen_values', None) + with self.assertRaises(KeyError): + self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate_missing_pktgen_values_file(self, *args): + if os.path.isfile(self.PKTGEN_VALUES_PATH): + os.remove(self.PKTGEN_VALUES_PATH) + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options']['pktgen_values'] = self.PKTGEN_VALUES_PATH + with self.assertRaises(RuntimeError): + self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate_empty_pktgen_values_file(self, *args): + yaml_sample = 
open(self.PKTGEN_VALUES_PATH, 'w') + yaml_sample.write("") + yaml_sample.close() + + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options']['pktgen_values'] = self.PKTGEN_VALUES_PATH + with self.assertRaises(RuntimeError): + self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + if os.path.isfile(self.PKTGEN_VALUES_PATH): + os.remove(self.PKTGEN_VALUES_PATH) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate_invalid_pktgen_id(self, *args): + yaml_sample = open(self.PKTGEN_VALUES_PATH, 'w') + yaml_sample.write(self.PKTGEN_VALUES) + yaml_sample.close() + + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options'][NAME]['pktgen_id'] = 12 + with self.assertRaises(KeyError): + self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + if os.path.isfile(self.PKTGEN_VALUES_PATH): + os.remove(self.PKTGEN_VALUES_PATH) + + @mock.patch.object(ctx_base.Context, 'get_context_from_server', + return_value='fake_context') + def test_instantiate_all_valid(self, *args): + yaml_sample = open(self.PKTGEN_VALUES_PATH, 'w') + yaml_sample.write(self.PKTGEN_VALUES) + yaml_sample.close() + + self.vcmts_pktgen.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG) + self.assertIsNotNone(self.vcmts_pktgen.pod_cfg) + self.assertEqual(self.vcmts_pktgen.pod_cfg["pktgen_id"], "0") + + if os.path.isfile(self.PKTGEN_VALUES_PATH): + os.remove(self.PKTGEN_VALUES_PATH) + + def test_run_traffic_failed_connect(self): + self.vcmts_pktgen.pktgen_helper = mock.MagicMock() + self.vcmts_pktgen.pktgen_helper.connect.return_value = False + with self.assertRaises(exceptions.PktgenActionError): + self.vcmts_pktgen.run_traffic({}) + + def test_run_traffic_successful_connect(self): + self.vcmts_pktgen.pktgen_helper = mock.MagicMock() + self.vcmts_pktgen.pktgen_helper.connect.return_value = True + self.vcmts_pktgen.pktgen_rate = 8.0 + 
self.assertTrue(self.vcmts_pktgen.run_traffic({})) + self.vcmts_pktgen.pktgen_helper.connect.assert_called_once() + self.vcmts_pktgen.pktgen_helper.send_command.assert_called_with( + 'pktgen.start("all");') diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py index 56c971da6..aabd402a6 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -317,8 +317,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): } def test___init__(self, *args): - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) self.assertIsNone(udp_replay_approx_vnf._vnf_process) @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") @@ -330,10 +329,10 @@ class TestUdpReplayApproxVnf(unittest.TestCase): vnfd = self.VNFD_0 get_stats_ret_val = \ "stats\r\r\n\r\nUDP_Replay stats:\r\n--------------\r\n" \ - "Port\t\tRx Packet\t\tTx Packet\t\tRx Pkt Drop\t\tTx Pkt Drop \r\n"\ - "0\t\t7374156\t\t7374136\t\t\t0\t\t\t0\r\n" \ - "1\t\t7374316\t\t7374315\t\t\t0\t\t\t0\r\n\r\nReplay>\r\r\nReplay>" - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, vnfd, 'task_id') + "Port\t\tRx Packet\t\tTx Packet\t\tRx Pkt Drop\t\tTx Pkt Drop\t\tarp_pkts \r\n"\ + "0\t\t7374156\t\t7374136\t\t\t0\t\t\t0\t\t\t0\r\n" \ + "1\t\t7374316\t\t7374315\t\t\t0\t\t\t0\t\t\t0\r\n\r\nReplay>\r\r\nReplay>" + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, vnfd) udp_replay_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {udp_replay_approx_vnf.name: "mock"} } @@ -355,8 +354,7 @@ 
class TestUdpReplayApproxVnf(unittest.TestCase): def test_get_stats(self, ssh, *args): mock_ssh(ssh) - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf.q_in = mock.MagicMock() udp_replay_approx_vnf.q_out = mock.MagicMock() udp_replay_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -382,8 +380,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): nfvi_context.attrs = {'nfvi_type': 'baremetal'} mock_get_ctx.return_value = nfvi_context - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf.queue_wrapper = mock.MagicMock() udp_replay_approx_vnf.nfvi_context = mock_get_ctx udp_replay_approx_vnf.nfvi_context.attrs = {'nfvi_type': 'baremetal'} @@ -408,8 +405,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): nfvi_context.attrs = {'nfvi_type': "baremetal"} mock_get_ctx.return_value = nfvi_context - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf.setup_helper.bound_pci = ['0000:00:0.1', '0000:00:0.3'] udp_replay_approx_vnf.all_ports = ["xe0", "xe1"] udp_replay_approx_vnf.ssh_helper.provision_tool = mock.MagicMock(return_value="tool_path") @@ -431,8 +427,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): def test_run_udp_replay(self, ssh, *args): mock_ssh(ssh) - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf._build_config = mock.MagicMock() udp_replay_approx_vnf.queue_wrapper = mock.MagicMock() udp_replay_approx_vnf.scenario_helper = mock.MagicMock() @@ -446,8 +441,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): def test_instantiate(self, ssh, *args): mock_ssh(ssh) - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, 
self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf.q_out.put("Replay>") udp_replay_approx_vnf.WAIT_TIME = 0 udp_replay_approx_vnf.setup_helper.setup_vnf_environment = mock.Mock() @@ -465,8 +459,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase): @mock.patch('yardstick.ssh.SSH') @mock.patch(SSH_HELPER) def test_instantiate_panic(self, *args): - udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0, - 'task_id') + udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0) udp_replay_approx_vnf.WAIT_TIME = 0 udp_replay_approx_vnf.q_out.put("some text PANIC some text") udp_replay_approx_vnf.setup_helper.setup_vnf_environment = mock.Mock() diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py new file mode 100755 index 000000000..11e3d6e17 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py @@ -0,0 +1,651 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import mock +import copy +import os + +from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh +from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper +from yardstick.network_services.vnf_generic.vnf import vcmts_vnf +from yardstick.common import exceptions + +from influxdb.resultset import ResultSet + +NAME = "vnf__0" + + +class TestInfluxDBHelper(unittest.TestCase): + + def test___init__(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + self.assertEqual(influxdb_helper._vcmts_influxdb_ip, "localhost") + self.assertEqual(influxdb_helper._vcmts_influxdb_port, 8086) + self.assertIsNotNone(influxdb_helper._last_upstream_rx) + self.assertIsNotNone(influxdb_helper._last_values_time) + + def test_start(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + influxdb_helper.start() + self.assertIsNotNone(influxdb_helper._read_client) + self.assertIsNotNone(influxdb_helper._write_client) + + def test__get_last_value_time(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + self.assertEqual(influxdb_helper._get_last_value_time('cpu_value'), + vcmts_vnf.InfluxDBHelper.INITIAL_VALUE) + + influxdb_helper._last_values_time['cpu_value'] = "RANDOM" + self.assertEqual(influxdb_helper._get_last_value_time('cpu_value'), + "RANDOM") + + def test__set_last_value_time(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + influxdb_helper._set_last_value_time('cpu_value', '00:00') + self.assertEqual(influxdb_helper._last_values_time['cpu_value'], + "'00:00'") + + def test__query_measurement(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + influxdb_helper._read_client = mock.MagicMock() + + resulted_generator = mock.MagicMock() + resulted_generator.keys.return_value = [] + influxdb_helper._read_client.query.return_value = resulted_generator + query_result = influxdb_helper._query_measurement('cpu_value') + 
self.assertIsNone(query_result) + + resulted_generator = mock.MagicMock() + resulted_generator.keys.return_value = ["", ""] + resulted_generator.get_points.return_value = ResultSet({"":""}) + influxdb_helper._read_client.query.return_value = resulted_generator + query_result = influxdb_helper._query_measurement('cpu_value') + self.assertIsNotNone(query_result) + + def test__rw_measurment(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + influxdb_helper._query_measurement = mock.MagicMock() + influxdb_helper._query_measurement.return_value = None + influxdb_helper._rw_measurment('cpu_value', []) + self.assertEqual(len(influxdb_helper._last_values_time), 0) + + entry = { + "type":"type", + "host":"host", + "time":"time", + "id": "1", + "value": "1.0" + } + influxdb_helper._query_measurement.return_value = [entry] + influxdb_helper._write_client = mock.MagicMock() + influxdb_helper._rw_measurment('cpu_value', ["id", "value"]) + self.assertEqual(len(influxdb_helper._last_values_time), 1) + influxdb_helper._write_client.write_points.assert_called_once() + + def test_copy_kpi(self): + influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086) + influxdb_helper._rw_measurment = mock.MagicMock() + influxdb_helper.copy_kpi() + influxdb_helper._rw_measurment.assert_called() + + +class TestVcmtsdSetupEnvHelper(unittest.TestCase): + POD_CFG = { + "cm_crypto": "aes", + "cpu_socket_id": "0", + "ds_core_pool_index": "2", + "ds_core_type": "exclusive", + "net_ds": "1a:02.1", + "net_us": "1a:02.0", + "num_ofdm": "1", + "num_subs": "100", + "power_mgmt": "pm_on", + "qat": "qat_off", + "service_group_config": "", + "sg_id": "0", + "vcmtsd_image": "vcmts-d:perf" + } + + OPTIONS = { + "pktgen_values": "/tmp/pktgen_values.yaml", + "tg__0": { + "pktgen_id": 0 + }, + "vcmts_influxdb_ip": "10.80.5.150", + "vcmts_influxdb_port": 8086, + "vcmtsd_values": "/tmp/vcmtsd_values.yaml", + "vnf__0": { + "sg_id": 0, + "stream_dir": "us" + }, + "vnf__1": { + "sg_id": 0, + 
"stream_dir": "ds" + } + } + + def setUp(self): + vnfd_helper = VnfdHelper( + TestVcmtsVNF.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + + self.setup_helper = vcmts_vnf.VcmtsdSetupEnvHelper( + vnfd_helper, ssh_helper, scenario_helper) + + def _build_us_parameters(self): + return vcmts_vnf.VcmtsdSetupEnvHelper.BASE_PARAMETERS + " " \ + + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \ + + " --socket-id=" + str(self.POD_CFG['cpu_socket_id']) \ + + " --pool=shared" \ + + " /vcmts-config/run_upstream.sh " + self.POD_CFG['sg_id'] \ + + " " + self.POD_CFG['ds_core_type'] \ + + " " + str(self.POD_CFG['num_ofdm']) + "ofdm" \ + + " " + str(self.POD_CFG['num_subs']) + "cm" \ + + " " + self.POD_CFG['cm_crypto'] \ + + " " + self.POD_CFG['qat'] \ + + " " + self.POD_CFG['net_us'] \ + + " " + self.POD_CFG['power_mgmt'] + + def test_build_us_parameters(self): + constructed = self._build_us_parameters() + result = self.setup_helper.build_us_parameters(self.POD_CFG) + self.assertEqual(constructed, result) + + def _build_ds_parameters(self): + return vcmts_vnf.VcmtsdSetupEnvHelper.BASE_PARAMETERS + " " \ + + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \ + + " --socket-id=" + str(self.POD_CFG['cpu_socket_id']) \ + + " --pool=" + self.POD_CFG['ds_core_type'] \ + + " /vcmts-config/run_downstream.sh " + self.POD_CFG['sg_id'] \ + + " " + self.POD_CFG['ds_core_type'] \ + + " " + str(self.POD_CFG['ds_core_pool_index']) \ + + " " + str(self.POD_CFG['num_ofdm']) + "ofdm" \ + + " " + str(self.POD_CFG['num_subs']) + "cm" \ + + " " + self.POD_CFG['cm_crypto'] \ + + " " + self.POD_CFG['qat'] \ + + " " + self.POD_CFG['net_ds'] \ + + " " + self.POD_CFG['power_mgmt'] + + def test_build_ds_parameters(self): + constructed = self._build_ds_parameters() + result = self.setup_helper.build_ds_parameters(self.POD_CFG) + self.assertEqual(constructed, result) + + def test_build_cmd(self): + us_constructed = 
self._build_us_parameters() + us_result = self.setup_helper.build_cmd('us', self.POD_CFG) + self.assertEqual(us_constructed, us_result) + ds_constructed = self._build_ds_parameters() + ds_result = self.setup_helper.build_cmd('ds', self.POD_CFG) + self.assertEqual(ds_constructed, ds_result) + + def test_run_vcmtsd(self): + us_constructed = self._build_us_parameters() + + vnfd_helper = VnfdHelper( + TestVcmtsVNF.VNFD['vnfd:vnfd-catalog']['vnfd'][0]) + ssh_helper = mock.MagicMock() + scenario_helper = mock.Mock() + scenario_helper.options = self.OPTIONS + + setup_helper = vcmts_vnf.VcmtsdSetupEnvHelper( + vnfd_helper, ssh_helper, scenario_helper) + + setup_helper.run_vcmtsd('us', self.POD_CFG) + ssh_helper.send_command.assert_called_with(us_constructed) + + def test_setup_vnf_environment(self): + self.assertIsNone(self.setup_helper.setup_vnf_environment()) + +class TestVcmtsVNF(unittest.TestCase): + + VNFD = {'vnfd:vnfd-catalog': + {'vnfd': + [{ + "benchmark": { + "kpi": [ + "upstream/bits_per_second" + ] + }, + "connection-point": [ + { + "name": "xe0", + "type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "vCMTS Upstream-Downstream Kubernetes", + "id": "VcmtsVNF", + "mgmt-interface": { + "ip": "192.168.100.35", + "key_filename": "/tmp/yardstick_key-81dcca91", + "user": "root", + "vdu-id": "vcmtsvnf-kubernetes" + }, + "name": "vcmtsvnf", + "short-name": "vcmtsvnf", + "vdu": [ + { + "description": "vCMTS Upstream-Downstream Kubernetes", + "external-interface": [], + "id": "vcmtsvnf-kubernetes", + "name": "vcmtsvnf-kubernetes" + } + ], + "vm-flavor": { + "memory-mb": "4096", + "vcpu-count": "4" + } + }] + } + } + + POD_CFG = [ + { + "cm_crypto": "aes", + "cpu_socket_id": "0", + "ds_core_pool_index": "2", + "ds_core_type": "exclusive", + "net_ds": "1a:02.1", + "net_us": "1a:02.0", + "num_ofdm": "1", + "num_subs": "100", + "power_mgmt": "pm_on", + "qat": "qat_off", + "service_group_config": "", + "sg_id": "0", + "vcmtsd_image": 
"vcmts-d:perf" + }, + ] + + SCENARIO_CFG = { + "nodes": { + "tg__0": "pktgen0-k8syardstick-afae18b2", + "vnf__0": "vnf0us-k8syardstick-afae18b2", + "vnf__1": "vnf0ds-k8syardstick-afae18b2" + }, + "options": { + "pktgen_values": "/tmp/pktgen_values.yaml", + "tg__0": { + "pktgen_id": 0 + }, + "vcmts_influxdb_ip": "10.80.5.150", + "vcmts_influxdb_port": 8086, + "vcmtsd_values": "/tmp/vcmtsd_values.yaml", + "vnf__0": { + "sg_id": 0, + "stream_dir": "us" + }, + "vnf__1": { + "sg_id": 0, + "stream_dir": "ds" + } + }, + "task_id": "afae18b2-9902-477f-8128-49afde7c3040", + "task_path": "samples/vnf_samples/nsut/cmts", + "tc": "tc_vcmts_k8s_pktgen", + "topology": "k8s_vcmts_topology.yaml", + "traffic_profile": "../../traffic_profiles/fixed.yaml", + "type": "NSPerf" + } + + CONTEXT_CFG = { + "networks": { + "flannel": { + "name": "flannel" + }, + "xe0": { + "name": "xe0" + }, + "xe1": { + "name": "xe1" + } + }, + "nodes": { + "tg__0": { + "VNF model": "../../vnf_descriptors/tg_vcmts_tpl.yaml", + "interfaces": { + "flannel": { + "local_ip": "192.168.24.110", + "local_mac": None, + "network_name": "flannel" + }, + "xe0": { + "local_ip": "192.168.24.110", + "local_mac": None, + "network_name": "xe0" + }, + "xe1": { + "local_ip": "192.168.24.110", + "local_mac": None, + "network_name": "xe1" + } + }, + "ip": "192.168.24.110", + "key_filename": "/tmp/yardstick_key-afae18b2", + "member-vnf-index": "1", + "name": "pktgen0-k8syardstick-afae18b2", + "private_ip": "192.168.24.110", + "service_ports": [ + { + "name": "ssh", + "node_port": 17153, + "port": 22, + "protocol": "TCP", + "target_port": 22 + }, + { + "name": "lua", + "node_port": 51250, + "port": 22022, + "protocol": "TCP", + "target_port": 22022 + } + ], + "ssh_port": 17153, + "user": "root", + "vnfd-id-ref": "tg__0" + }, + "vnf__0": { + "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml", + "interfaces": { + "flannel": { + "local_ip": "192.168.100.53", + "local_mac": None, + "network_name": "flannel" + }, + "xe0": { + 
"local_ip": "192.168.100.53", + "local_mac": None, + "network_name": "xe0" + }, + "xe1": { + "local_ip": "192.168.100.53", + "local_mac": None, + "network_name": "xe1" + } + }, + "ip": "192.168.100.53", + "key_filename": "/tmp/yardstick_key-afae18b2", + "member-vnf-index": "3", + "name": "vnf0us-k8syardstick-afae18b2", + "private_ip": "192.168.100.53", + "service_ports": [ + { + "name": "ssh", + "node_port": 34027, + "port": 22, + "protocol": "TCP", + "target_port": 22 + }, + { + "name": "lua", + "node_port": 32580, + "port": 22022, + "protocol": "TCP", + "target_port": 22022 + } + ], + "ssh_port": 34027, + "user": "root", + "vnfd-id-ref": "vnf__0" + }, + "vnf__1": { + "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml", + "interfaces": { + "flannel": { + "local_ip": "192.168.100.52", + "local_mac": None, + "network_name": "flannel" + }, + "xe0": { + "local_ip": "192.168.100.52", + "local_mac": None, + "network_name": "xe0" + }, + "xe1": { + "local_ip": "192.168.100.52", + "local_mac": None, + "network_name": "xe1" + } + }, + "ip": "192.168.100.52", + "key_filename": "/tmp/yardstick_key-afae18b2", + "member-vnf-index": "4", + "name": "vnf0ds-k8syardstick-afae18b2", + "private_ip": "192.168.100.52", + "service_ports": [ + { + "name": "ssh", + "node_port": 58661, + "port": 22, + "protocol": "TCP", + "target_port": 22 + }, + { + "name": "lua", + "node_port": 58233, + "port": 22022, + "protocol": "TCP", + "target_port": 22022 + } + ], + "ssh_port": 58661, + "user": "root", + "vnfd-id-ref": "vnf__1" + }, + } + } + + VCMTSD_VALUES_PATH = "/tmp/vcmtsd_values.yaml" + + VCMTSD_VALUES = \ + "serviceAccount: cmk-serviceaccount\n" \ + "topology:\n" \ + " vcmts_replicas: 16\n" \ + " vcmts_pods:\n" \ + " - service_group_config:\n" \ + " sg_id: 0\n" \ + " net_us: 18:02.0\n" \ + " net_ds: 18:02.1\n" \ + " num_ofdm: 4\n" \ + " num_subs: 300\n" \ + " cm_crypto: aes\n" \ + " qat: qat_off\n" \ + " power_mgmt: pm_on\n" \ + " cpu_socket_id: 0\n" \ + " ds_core_type: exclusive\n" \ + 
" ds_core_pool_index: 0\n" \ + " vcmtsd_image: vcmts-d:feat" + + VCMTSD_VALUES_INCOMPLETE = \ + "serviceAccount: cmk-serviceaccount\n" \ + "topology:\n" \ + " vcmts_replicas: 16" + + def setUp(self): + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + self.vnf = vcmts_vnf.VcmtsVNF(NAME, vnfd) + + def test___init__(self, *args): + self.assertIsNotNone(self.vnf.setup_helper) + + def test_extract_pod_cfg(self): + pod_cfg = self.vnf.extract_pod_cfg(self.POD_CFG, "0") + self.assertIsNotNone(pod_cfg) + self.assertEqual(pod_cfg['sg_id'], '0') + pod_cfg = self.vnf.extract_pod_cfg(self.POD_CFG, "1") + self.assertIsNone(pod_cfg) + + def test_instantiate_missing_influxdb_info(self): + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options'].pop('vcmts_influxdb_ip', None) + with self.assertRaises(KeyError): + self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + def test_instantiate_missing_vcmtsd_values_file(self): + if os.path.isfile(self.VCMTSD_VALUES_PATH): + os.remove(self.VCMTSD_VALUES_PATH) + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options']['vcmtsd_values'] = self.VCMTSD_VALUES_PATH + with self.assertRaises(RuntimeError): + self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + def test_instantiate_empty_vcmtsd_values_file(self): + yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w') + yaml_sample.write("") + yaml_sample.close() + + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options']['vcmtsd_values'] = self.VCMTSD_VALUES_PATH + with self.assertRaises(RuntimeError): + self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + if os.path.isfile(self.VCMTSD_VALUES_PATH): + os.remove(self.VCMTSD_VALUES_PATH) + + def test_instantiate_missing_vcmtsd_values_key(self): + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options'].pop('vcmtsd_values', None) + with self.assertRaises(KeyError): + self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + def 
test_instantiate_invalid_vcmtsd_values(self): + yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w') + yaml_sample.write(self.VCMTSD_VALUES_INCOMPLETE) + yaml_sample.close() + + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + with self.assertRaises(KeyError): + self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + if os.path.isfile(self.VCMTSD_VALUES_PATH): + os.remove(self.VCMTSD_VALUES_PATH) + + def test_instantiate_invalid_sg_id(self): + yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w') + yaml_sample.write(self.VCMTSD_VALUES) + yaml_sample.close() + + err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG) + err_scenario_cfg['options'][NAME]['sg_id'] = 8 + with self.assertRaises(exceptions.IncorrectConfig): + self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG) + + if os.path.isfile(self.VCMTSD_VALUES_PATH): + os.remove(self.VCMTSD_VALUES_PATH) + + @mock.patch('yardstick.network_services.vnf_generic.vnf.vcmts_vnf.VnfSshHelper') + def test_instantiate_all_valid(self, ssh, *args): + mock_ssh(ssh) + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vnf = vcmts_vnf.VcmtsVNF(NAME, vnfd) + + yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w') + yaml_sample.write(self.VCMTSD_VALUES) + yaml_sample.close() + + vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG) + self.assertEqual(vnf.vcmts_influxdb_ip, "10.80.5.150") + self.assertEqual(vnf.vcmts_influxdb_port, 8086) + + if os.path.isfile(self.VCMTSD_VALUES_PATH): + os.remove(self.VCMTSD_VALUES_PATH) + + def test__update_collectd_options(self): + scenario_cfg = {'options': + {'collectd': + {'interval': 3, + 'plugins': + {'plugin3': {'param': 3}}}, + 'vnf__0': + {'collectd': + {'interval': 2, + 'plugins': + {'plugin3': {'param': 2}, + 'plugin2': {'param': 2}}}}}} + context_cfg = {'nodes': + {'vnf__0': + {'collectd': + {'interval': 1, + 'plugins': + {'plugin3': {'param': 1}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}}}}} + expected = {'interval': 1, + 'plugins': + {'plugin3': {'param': 1}, + 'plugin2': 
{'param': 1}, + 'plugin1': {'param': 1}}} + + self.vnf._update_collectd_options(scenario_cfg, context_cfg) + self.assertEqual(self.vnf.setup_helper.collectd_options, expected) + + def test__update_options(self): + options1 = {'interval': 1, + 'param1': 'value1', + 'plugins': + {'plugin3': {'param': 3}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}} + options2 = {'interval': 2, + 'param2': 'value2', + 'plugins': + {'plugin4': {'param': 4}, + 'plugin2': {'param': 2}, + 'plugin1': {'param': 2}}} + expected = {'interval': 1, + 'param1': 'value1', + 'param2': 'value2', + 'plugins': + {'plugin4': {'param': 4}, + 'plugin3': {'param': 3}, + 'plugin2': {'param': 1}, + 'plugin1': {'param': 1}}} + + vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] + vnf = vcmts_vnf.VcmtsVNF('vnf1', vnfd) + vnf._update_options(options2, options1) + self.assertEqual(options2, expected) + + def test_wait_for_instantiate(self): + self.assertIsNone(self.vnf.wait_for_instantiate()) + + def test_terminate(self): + self.assertIsNone(self.vnf.terminate()) + + def test_scale(self): + self.assertIsNone(self.vnf.scale()) + + def test_collect_kpi(self): + self.vnf.influxdb_helper = mock.MagicMock() + self.vnf.collect_kpi() + self.vnf.influxdb_helper.copy_kpi.assert_called_once() + + def test_start_collect(self): + self.vnf.vcmts_influxdb_ip = "localhost" + self.vnf.vcmts_influxdb_port = 8800 + + self.assertIsNone(self.vnf.start_collect()) + self.assertIsNotNone(self.vnf.influxdb_helper) + + def test_stop_collect(self): + self.assertIsNone(self.vnf.stop_collect()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py index efbb7a856..5334ce18c 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel 
Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -232,7 +232,7 @@ class TestFWApproxVnf(unittest.TestCase): def test___init__(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id') + vfw_approx_vnf = FWApproxVnf(name, vnfd) self.assertIsNone(vfw_approx_vnf._vnf_process) STATS = """\ @@ -255,7 +255,7 @@ pipeline> def test_collect_kpi(self, ssh, *args): mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id') + vfw_approx_vnf = FWApproxVnf(name, vnfd) vfw_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {vfw_approx_vnf.name: "mock"} } @@ -281,7 +281,7 @@ pipeline> mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id') + vfw_approx_vnf = FWApproxVnf(name, vnfd) vfw_approx_vnf.q_in = mock.MagicMock() vfw_approx_vnf.q_out = mock.MagicMock() vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -293,7 +293,7 @@ pipeline> mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id') + vfw_approx_vnf = FWApproxVnf(name, vnfd) vfw_approx_vnf.q_in = mock.MagicMock() vfw_approx_vnf.q_out = mock.MagicMock() vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -313,7 +313,7 @@ pipeline> mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id') + vfw_approx_vnf = FWApproxVnf(name, vnfd) vfw_approx_vnf._build_config = mock.MagicMock() vfw_approx_vnf.queue_wrapper = mock.MagicMock() vfw_approx_vnf.ssh_helper = mock.MagicMock() @@ -335,7 +335,7 @@ pipeline> mock_ssh(ssh) vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id') + vfw_approx_vnf = FWApproxVnf(name, vnfd) vfw_approx_vnf.ssh_helper = ssh vfw_approx_vnf.deploy_helper = 
mock.MagicMock() vfw_approx_vnf.resource_helper = mock.MagicMock() diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vims_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vims_vnf.py new file mode 100644 index 000000000..d86dab8ad --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vims_vnf.py @@ -0,0 +1,713 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License,Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing,software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import mock + +from yardstick.network_services.vnf_generic.vnf import vims_vnf +from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh + + +class TestVimsPcscfVnf(unittest.TestCase): + + VNFD_0 = { + "short-name": "SippVnf", + "vdu": [ + { + "id": "sippvnf-baremetal", + "routing_table": "", + "external-interface": [ + { + "virtual-interface": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vnf__0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "vnf__1": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__1", + 
"netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "vnfd-connection-point-ref": "xe0", + "name": "xe0" + }, + { + "virtual-interface": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vnf__0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "vnf__1": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", 
+ "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "vnfd-connection-point-ref": "xe1", + "name": "xe1" + } + ], + "name": "sippvnf-baremetal", + "description": "Sipp" + } + ], + "description": "ImsbenchSipp", + "mgmt-interface": { + "vdu-id": "sipp-baremetal", + "password": "r00t", + "user": "root", + "ip": "10.80.3.11" + }, + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "id": "SippVnf", + "name": "SippVnf" + } + + def setUp(self): + self.pcscf_vnf = vims_vnf.VimsPcscfVnf('vnf__0', self.VNFD_0) + + def test___init__(self): + self.assertEqual(self.pcscf_vnf.name, 'vnf__0') + self.assertIsInstance(self.pcscf_vnf.resource_helper, + vims_vnf.VimsResourceHelper) + self.assertIsNone(self.pcscf_vnf._vnf_process) + + def test_wait_for_instantiate(self): + self.assertIsNone(self.pcscf_vnf.wait_for_instantiate()) + + def test__run(self): + self.assertIsNone(self.pcscf_vnf._run()) + + def test_start_collect(self): + self.assertIsNone(self.pcscf_vnf.start_collect()) + + def test_collect_kpi(self): + self.assertIsNone(self.pcscf_vnf.collect_kpi()) + + +class TestVimsHssVnf(unittest.TestCase): + + VNFD_1 = { + "short-name": "SippVnf", + "vdu": [ + { + "id": "sippvnf-baremetal", + "routing_table": "", + "external-interface": [ + { + "virtual-interface": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vnf__0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "vnf__1": { + "vld_id": 
"ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "vnfd-connection-point-ref": "xe0", + "name": "xe0" + }, + { + "virtual-interface": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vnf__0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "vnf__1": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": 
"10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "vnfd-connection-point-ref": "xe1", + "name": "xe1" + } + ], + "name": "sippvnf-baremetal", + "description": "Sipp" + } + ], + "description": "ImsbenchSipp", + "mgmt-interface": { + "vdu-id": "sipp-baremetal", + "password": "r00t", + "user": "root", + "ip": "10.80.3.11" + }, + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "id": "SippVnf", + "name": "SippVnf" + } + + SCENARIO_CFG = { + "task_id": "86414e11-5ef5-4426-b175-71baaa00fbd7", + "tc": "tc_vims_baremetal_sipp", + "runner": { + "interval": 1, + "output_config": { + "DEFAULT": { + "debug": "False", + "dispatcher": [ + "influxdb" + ] + }, + "nsb": { + "debug": "False", + "trex_client_lib": "/opt/nsb_bin/trex_client/stl", + "bin_path": "/opt/nsb_bin", + "trex_path": "/opt/nsb_bin/trex/scripts", + "dispatcher": "influxdb" + }, + "dispatcher_influxdb": { + "username": "root", + "target": "http://10.80.3.11:8086", + "db_name": "yardstick", + "timeout": "5", + "debug": "False", + "password": "root", + "dispatcher": "influxdb" + }, + "dispatcher_http": { + "debug": "False", + "dispatcher": "influxdb", + "timeout": "5", + "target": "http://127.0.0.1:8000/results" + }, + "dispatcher_file": { + "debug": "False", + "backup_count": "0", + "max_bytes": "0", + "dispatcher": "influxdb", + "file_path": "/tmp/yardstick.out" + } + }, + "runner_id": 22610, + "duration": 60, + "type": "Vims" + }, + "nodes": { + "vnf__0": "pcscf.yardstick-86414e11", + "vnf__1": "hss.yardstick-86414e11", + "tg__0": "sipp.yardstick-86414e11" + }, + "topology": "vims-topology.yaml", + "type": "NSPerf", + 
"traffic_profile": "../../traffic_profiles/ipv4_throughput.yaml", + "task_path": "samples/vnf_samples/nsut/vims", + "options": { + "init_reg_max": 5000, + "end_user": 10000, + "reg_cps": 20, + "rereg_cps": 20, + "rereg_step": 10, + "wait_time": 5, + "start_user": 1, + "msgc_cps": 10, + "dereg_step": 10, + "call_cps": 10, + "reg_step": 10, + "init_reg_cps": 50, + "dereg_cps": 20, + "msgc_step": 5, + "call_step": 5, + "hold_time": 15, + "port": 5060, + "run_mode": "nortp" + } + } + + CONTEXT_CFG = { + "nodes": { + "tg__0": { + "ip": "10.80.3.11", + "interfaces": { + "xe0": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vnf__0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "vnf__1": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "xe1": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vnf__0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": 
"255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "vnf__1": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + } + }, + "user": "root", + "password": "r00t", + "VNF model": "../../vnf_descriptors/tg_sipp_vnfd.yaml", + "name": "sipp.yardstick-86414e11", + "vnfd-id-ref": "tg__0", + "member-vnf-index": "1", + "role": "TrafficGen", + "ctx_type": "Node" + }, + "vnf__0": { + "ip": "10.80.3.7", + "interfaces": { + "xe0": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "tg__0": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": 
"tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + } + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + } + }, + "user": "root", + "password": "r00t", + "VNF model": "../../vnf_descriptors/vims_pcscf_vnfd.yaml", + "name": "pcscf.yardstick-86414e11", + "vnfd-id-ref": "vnf__0", + "member-vnf-index": "2", + "role": "VirtualNetworkFunction", + "ctx_type": "Node" + }, + "vnf__1": { + "ip": "10.80.3.7", + "interfaces": { + "xe0": { + "vld_id": "ims_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "tg__0": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "peer_intf": { + "vld_id": "data_network", + "peer_ifname": "xe1", + "dst_mac": "90:e2:ba:7c:30:e8", + "network": {}, + "local_ip": "10.80.3.7", + "peer_intf": { + "vld_id": "ims_network", + "peer_ifname": "xe0", + "dst_mac": "90:e2:ba:7c:41:e8", + "network": {}, + "local_ip": "10.80.3.11", + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:30:e8" + }, + "node_name": "vnf__0", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:a8" + }, + "node_name": "tg__0", + "netmask": "255.255.255.0", + "peer_name": "vnf__1", + "dst_ip": "10.80.3.7", + "ifname": "xe1", + "local_mac": "90:e2:ba:7c:30:e8" + } + }, + "node_name": "vnf__1", + "netmask": "255.255.255.0", + "peer_name": "tg__0", + "dst_ip": "10.80.3.11", + "ifname": "xe0", + "local_mac": "90:e2:ba:7c:41:e8" + } + }, + "user": "root", + "password": "r00t", + "VNF model": "../../vnf_descriptors/vims_hss_vnfd.yaml", + "name": 
"hss.yardstick-86414e11", + "vnfd-id-ref": "vnf__1", + "member-vnf-index": "3", + "role": "VirtualNetworkFunction", + "ctx_type": "Node" + } + }, + "networks": {} + } + + def setUp(self): + self.hss_vnf = vims_vnf.VimsHssVnf('vnf__1', self.VNFD_1) + + def test___init__(self): + self.assertIsInstance(self.hss_vnf.resource_helper, + vims_vnf.VimsResourceHelper) + self.assertIsNone(self.hss_vnf._vnf_process) + + @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper") + def test_instantiate(self, ssh): + mock_ssh(ssh) + hss_vnf = vims_vnf.VimsHssVnf('vnf__1', self.VNFD_1) + self.assertIsNone(hss_vnf.instantiate(self.SCENARIO_CFG, + self.CONTEXT_CFG)) + + def test_wait_for_instantiate(self): + self.assertIsNone(self.hss_vnf.wait_for_instantiate()) + + def test_start_collect(self): + self.assertIsNone(self.hss_vnf.start_collect()) + + def test_collect_kpi(self): + self.assertIsNone(self.hss_vnf.collect_kpi()) diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py index 7b937dfb5..8342f5faa 100644 --- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2016-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,6 @@ from multiprocessing import Process, Queue import time import mock -from six.moves import configparser import unittest from yardstick.benchmark.contexts import base as ctx_base @@ -147,48 +146,6 @@ class TestConfigCreate(unittest.TestCase): self.assertEqual(config_create.downlink_ports, ['xe1']) self.assertEqual(config_create.socket, 2) - def test_dpdk_port_to_link_id(self): - vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) - config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) - self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1}) - - def test_vpe_initialize(self): - vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) - config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) - config = configparser.ConfigParser() - config_create.vpe_initialize(config) - self.assertEqual(config.get('EAL', 'log_level'), '0') - self.assertEqual(config.get('PIPELINE0', 'type'), 'MASTER') - self.assertEqual(config.get('PIPELINE0', 'core'), 's2C0') - self.assertEqual(config.get('MEMPOOL0', 'pool_size'), '256K') - self.assertEqual(config.get('MEMPOOL1', 'pool_size'), '2M') - - def test_vpe_rxq(self): - vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) - config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) - config = configparser.ConfigParser() - config_create.downlink_ports = ['xe0'] - config_create.vpe_rxq(config) - self.assertEqual(config.get('RXQ0.0', 'mempool'), 'MEMPOOL1') - - def test_get_sink_swq(self): - vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) - config_create = vpe_vnf.ConfigCreate(vnfd_helper, 2) - config = configparser.ConfigParser() - config.add_section('PIPELINE0') - config.set('PIPELINE0', 'key1', 'value1') - config.set('PIPELINE0', 'key2', 'value2 SINK') - config.set('PIPELINE0', 'key3', 'TM value3') - config.set('PIPELINE0', 'key4', 'value4') - config.set('PIPELINE0', 'key5', 'the SINK value5') - - self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key1', 5), 'SWQ-1') - 
self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key2', 5), 'SWQ-1 SINK0') - self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key3', 5), 'SWQ-1 TM5') - config_create.sw_q += 1 - self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key4', 5), 'SWQ0') - self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key5', 5), 'SWQ0 SINK1') - def test_generate_vpe_script(self): vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) vpe_config_vnf = vpe_vnf.ConfigCreate(vnfd_helper, 2) @@ -214,36 +171,6 @@ class TestConfigCreate(unittest.TestCase): self.assertIsInstance(result, str) self.assertNotEqual(result, '') - def test_create_vpe_config(self): - vnfd_helper = vnf_base.VnfdHelper(self.VNFD_0) - config_create = vpe_vnf.ConfigCreate(vnfd_helper, 23) - config_create.uplink_ports = ['xe1'] - with mock.patch.object(config_create, 'vpe_upstream') as mock_up, \ - mock.patch.object(config_create, 'vpe_downstream') as \ - mock_down, \ - mock.patch.object(config_create, 'vpe_tmq') as mock_tmq, \ - mock.patch.object(config_create, 'vpe_initialize') as \ - mock_ini, \ - mock.patch.object(config_create, 'vpe_rxq') as mock_rxq: - mock_ini_obj = mock.Mock() - mock_rxq_obj = mock.Mock() - mock_up_obj = mock.Mock() - mock_down_obj = mock.Mock() - mock_tmq_obj = mock.Mock() - mock_ini.return_value = mock_ini_obj - mock_rxq.return_value = mock_rxq_obj - mock_up.return_value = mock_up_obj - mock_down.return_value = mock_down_obj - mock_tmq.return_value = mock_tmq_obj - config_create.create_vpe_config('fake_config_file') - - mock_rxq.assert_called_once_with(mock_ini_obj) - mock_up.assert_called_once_with('fake_config_file', 0) - mock_down.assert_called_once_with('fake_config_file', 0) - mock_tmq.assert_called_once_with(mock_down_obj, 0) - mock_up_obj.write.assert_called_once() - mock_tmq_obj.write.assert_called_once() - class TestVpeApproxVnf(unittest.TestCase): @@ -549,7 +476,7 @@ class TestVpeApproxVnf(unittest.TestCase): 
self._mock_time_sleep.stop() def test___init__(self): - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) self.assertIsNone(vpe_approx_vnf._vnf_process) @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', @@ -563,7 +490,7 @@ class TestVpeApproxVnf(unittest.TestCase): resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234} resource.check_if_system_agent_running.return_value = (1, None) - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {vpe_approx_vnf.name: "mock"} } @@ -592,7 +519,7 @@ class TestVpeApproxVnf(unittest.TestCase): resource.check_if_system_agent_running.return_value = 0, '1234' resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234} - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.scenario_helper.scenario_cfg = { 'nodes': {vpe_approx_vnf.name: "mock"} } @@ -614,7 +541,7 @@ class TestVpeApproxVnf(unittest.TestCase): @mock.patch.object(sample_vnf, 'VnfSshHelper') def test_vnf_execute(self, ssh): test_base.mock_ssh(ssh) - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.q_in = mock.MagicMock() vpe_approx_vnf.q_out = mock.MagicMock() vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0) @@ -624,7 +551,7 @@ class TestVpeApproxVnf(unittest.TestCase): def test_run_vpe(self, ssh): test_base.mock_ssh(ssh) - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML) vpe_approx_vnf.vnf_cfg = { 'lb_config': 'SW', @@ -707,7 +634,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - 
vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out vpe_approx_vnf.queue_wrapper = mock.Mock( @@ -732,7 +659,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out vpe_approx_vnf.queue_wrapper = mock.Mock( @@ -751,7 +678,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.resource_helper.resource = mock_resource @@ -770,7 +697,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.resource_helper.resource = mock_resource @@ -795,7 +722,7 @@ class TestVpeApproxVnf(unittest.TestCase): mock_resource = mock.MagicMock() - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock_process vpe_approx_vnf.q_out = mock_q_out vpe_approx_vnf.resource_helper.resource = mock_resource @@ -809,7 +736,7 @@ class TestVpeApproxVnf(unittest.TestCase): def test_terminate(self, ssh): test_base.mock_ssh(ssh) - vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id') + vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0) vpe_approx_vnf._vnf_process = mock.MagicMock() vpe_approx_vnf._resource_collect_stop = mock.Mock() vpe_approx_vnf.resource_helper = mock.MagicMock() diff 
--git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpp_helpers.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpp_helpers.py new file mode 100644 index 000000000..cca604f43 --- /dev/null +++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpp_helpers.py @@ -0,0 +1,1723 @@ +# Copyright (c) 2019 Viosoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ipaddress +import unittest + +import mock + +from yardstick.common import exceptions +from yardstick.network_services.helpers import cpu +from yardstick.network_services.vnf_generic.vnf import vpp_helpers +from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper +from yardstick.network_services.vnf_generic.vnf.vpp_helpers import \ + VppSetupEnvHelper, VppConfigGenerator, VatTerminal + + +class TestVppConfigGenerator(unittest.TestCase): + + def test_add_config_item(self): + test_item = {} + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_config_item(test_item, '/tmp/vpe.log', + ['unix', 'log']) + self.assertEqual({'unix': {'log': '/tmp/vpe.log'}}, test_item) + + def test_add_config_item_str(self): + test_item = {'unix': ''} + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_config_item(test_item, '/tmp/vpe.log', + ['unix', 'log']) + self.assertEqual({'unix': {'log': '/tmp/vpe.log'}}, test_item) + + def test_add_unix_log(self): + vpp_config_generator = VppConfigGenerator() + 
vpp_config_generator.add_unix_log() + self.assertEqual('unix\n{\n log /tmp/vpe.log\n}\n', + vpp_config_generator.dump_config()) + + def test_add_unix_cli_listen(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_unix_cli_listen() + self.assertEqual('unix\n{\n cli-listen /run/vpp/cli.sock\n}\n', + vpp_config_generator.dump_config()) + + def test_add_unix_nodaemon(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_unix_nodaemon() + self.assertEqual('unix\n{\n nodaemon \n}\n', + vpp_config_generator.dump_config()) + + def test_add_unix_coredump(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_unix_coredump() + self.assertEqual('unix\n{\n full-coredump \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_dev(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_dev('0000:00:00.0') + self.assertEqual('dpdk\n{\n dev 0000:00:00.0 \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_cryptodev(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_cryptodev(2, '0000:00:00.0') + self.assertEqual( + 'dpdk\n{\n dev 0000:00:01.0 \n dev 0000:00:01.1 \n uio-driver igb_uio\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_sw_cryptodev(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_sw_cryptodev('aesni_gcm', 0, 2) + self.assertEqual( + 'dpdk\n{\n vdev cryptodev_aesni_gcm_pmd,socket_id=0 \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_dev_default_rxq(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_dev_default_rxq(1) + self.assertEqual( + 'dpdk\n{\n dev default\n {\n num-rx-queues 1\n }\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_dev_default_rxd(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_dev_default_rxd(2048) + self.assertEqual( + 
'dpdk\n{\n dev default\n {\n num-rx-desc 2048\n }\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_dev_default_txd(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_dev_default_txd(2048) + self.assertEqual( + 'dpdk\n{\n dev default\n {\n num-tx-desc 2048\n }\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_log_level(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_log_level('debug') + self.assertEqual('dpdk\n{\n log-level debug\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_socketmem(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_socketmem('1024,1024') + self.assertEqual('dpdk\n{\n socket-mem 1024,1024\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_num_mbufs(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_num_mbufs(32768) + self.assertEqual('dpdk\n{\n num-mbufs 32768\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_uio_driver(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_uio_driver('igb_uio') + self.assertEqual('dpdk\n{\n uio-driver igb_uio\n}\n', + vpp_config_generator.dump_config()) + + def test_add_cpu_main_core(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_cpu_main_core('1,2') + self.assertEqual('cpu\n{\n main-core 1,2\n}\n', + vpp_config_generator.dump_config()) + + def test_add_cpu_corelist_workers(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_cpu_corelist_workers('1,2') + self.assertEqual('cpu\n{\n corelist-workers 1,2\n}\n', + vpp_config_generator.dump_config()) + + def test_add_heapsize(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_heapsize('4G') + self.assertEqual('heapsize 4G\n', vpp_config_generator.dump_config()) + + def test_add_ip6_hash_buckets(self): + vpp_config_generator = 
VppConfigGenerator() + vpp_config_generator.add_ip6_hash_buckets(2000000) + self.assertEqual('ip6\n{\n hash-buckets 2000000\n}\n', + vpp_config_generator.dump_config()) + + def test_add_ip6_heap_size(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_ip6_heap_size('4G') + self.assertEqual('ip6\n{\n heap-size 4G\n}\n', + vpp_config_generator.dump_config()) + + def test_add_ip_heap_size(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_ip_heap_size('4G') + self.assertEqual('ip\n{\n heap-size 4G\n}\n', + vpp_config_generator.dump_config()) + + def test_add_statseg_size(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_statseg_size('4G') + self.assertEqual('statseg\n{\n size 4G\n}\n', + vpp_config_generator.dump_config()) + + def test_add_plugin(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_plugin('enable', ['dpdk_plugin.so']) + self.assertEqual( + 'plugins\n{\n plugin [\'dpdk_plugin.so\']\n {\n enable \n }\n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_no_multi_seg(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_no_multi_seg() + self.assertEqual('dpdk\n{\n no-multi-seg \n}\n', + vpp_config_generator.dump_config()) + + def test_add_dpdk_no_tx_checksum_offload(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_dpdk_no_tx_checksum_offload() + self.assertEqual('dpdk\n{\n no-tx-checksum-offload \n}\n', + vpp_config_generator.dump_config()) + + def test_dump_config(self): + vpp_config_generator = VppConfigGenerator() + vpp_config_generator.add_unix_log() + self.assertEqual('unix\n{\n log /tmp/vpe.log\n}\n', + vpp_config_generator.dump_config()) + + def test_pci_dev_check(self): + self.assertTrue(VppConfigGenerator.pci_dev_check('0000:00:00.0')) + + def test_pci_dev_check_error(self): + with self.assertRaises(ValueError) as raised: + 
VppConfigGenerator.pci_dev_check('0000:00:0.0') + self.assertIn( + 'PCI address 0000:00:0.0 is not in valid format xxxx:xx:xx.x', + str(raised.exception)) + + +class TestVppSetupEnvHelper(unittest.TestCase): + VNFD_0 = { + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "connection-point": [ + { + "name": "xe0", + "type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "VPP IPsec", + "id": "VipsecApproxVnf", + "mgmt-interface": { + "ip": "10.10.10.101", + "password": "r00t", + "user": "root", + "vdu-id": "ipsecvnf-baremetal" + }, + "name": "IpsecVnf", + "short-name": "IpsecVnf", + "vdu": [ + { + "description": "VPP Ipsec", + "external-interface": [ + { + "name": "xe0", + "virtual-interface": { + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:ff:06.0" + }, + "vnfd-connection-point-ref": "xe0" + }, + { + "name": "xe1", + "virtual-interface": { + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe1", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + 
"local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:ff:07.0" + }, + "vnfd-connection-point-ref": "xe1" + } + ], + "id": "ipsecvnf-baremetal", + "name": "ipsecvnf-baremetal", + "routing_table": [] + } + ] + } + + VNFD_1 = { + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "connection-point": [ + { + "name": "xe0", + "type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "VPP IPsec", + "id": "VipsecApproxVnf", + "mgmt-interface": { + "ip": "10.10.10.101", + "password": "r00t", + "user": "root", + "vdu-id": "ipsecvnf-baremetal" + }, + "name": "IpsecVnf", + "short-name": "IpsecVnf", + "vdu": [ + { + "description": "VPP Ipsec", + "external-interface": [ + { + "name": "xe0", + "virtual-interface": { + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:ff:06.0" + }, + "vnfd-connection-point-ref": "xe0" + }, + { + "name": "xe1", + "virtual-interface": { + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": 
"255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe1", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + "local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:ff:07.0" + }, + "vnfd-connection-point-ref": "xe1" + } + ], + "id": "ipsecvnf-baremetal", + "name": "ipsecvnf-baremetal", + "routing_table": [] + } + ] + } + + VNFD_2 = { + "benchmark": { + "kpi": [ + "packets_in", + "packets_fwd", + "packets_dropped" + ] + }, + "connection-point": [ + { + "name": "xe0", + "type": "VPORT" + }, + { + "name": "xe1", + "type": "VPORT" + } + ], + "description": "VPP IPsec", + "id": "VipsecApproxVnf", + "mgmt-interface": { + "ip": "10.10.10.101", + "password": "r00t", + "user": "root", + "vdu-id": "ipsecvnf-baremetal" + }, + "name": "IpsecVnf", + "short-name": "IpsecVnf", + "vdu": [ + { + "description": "VPP Ipsec", + "external-interface": [ + { + "name": "xe0", + "virtual-interface": { + "driver": "igb_uio", + "dst_ip": "192.168.100.1", + "dst_mac": "90:e2:ba:7c:30:e8", + "ifname": "xe0", + "local_ip": "192.168.100.2", + "local_mac": "90:e2:ba:7c:41:a8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe0", + "peer_intf": { + "dpdk_port_num": 0, + "driver": "igb_uio", + "dst_ip": "192.168.100.2", + "dst_mac": "90:e2:ba:7c:41:a8", + "ifname": "xe0", + "local_ip": "192.168.100.1", + "local_mac": "90:e2:ba:7c:30:e8", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "tg__0", + "peer_ifname": "xe0", + "peer_name": "vnf__0", + "vld_id": "uplink_0", + "vpci": "0000:81:00.0" + }, + "peer_name": "tg__0", + "vld_id": "uplink_0", + "vpci": "0000:ff:06.0" + }, + "vnfd-connection-point-ref": "xe0" + 
}, + { + "name": "xe1", + "virtual-interface": { + "driver": "igb_uio", + "dst_ip": "1.1.1.2", + "dst_mac": "0a:b1:ec:fd:a2:66", + "ifname": "xe1", + "local_ip": "1.1.1.1", + "local_mac": "4e:90:85:d3:c5:13", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__0", + "peer_ifname": "xe1", + "peer_intf": { + "driver": "igb_uio", + "dst_ip": "1.1.1.1", + "dst_mac": "4e:90:85:d3:c5:13", + "ifname": "xe1", + "local_ip": "1.1.1.2", + "local_mac": "0a:b1:ec:fd:a2:66", + "netmask": "255.255.255.0", + "network": {}, + "node_name": "vnf__1", + "peer_ifname": "xe1", + "peer_name": "vnf__0", + "vld_id": "ciphertext", + "vpci": "0000:00:07.0" + }, + "peer_name": "vnf__1", + "vld_id": "ciphertext", + "vpci": "0000:ff:07.0" + }, + "vnfd-connection-point-ref": "xe1" + } + ], + "id": "ipsecvnf-baremetal", + "name": "ipsecvnf-baremetal", + "routing_table": [] + } + ] + } + + VNFD = { + 'vnfd:vnfd-catalog': { + 'vnfd': [ + VNFD_0, + ], + }, + } + + VPP_INTERFACES_DUMP = [ + { + "sw_if_index": 0, + "sup_sw_if_index": 0, + "l2_address_length": 0, + "l2_address": [0, 0, 0, 0, 0, 0, 0, 0], + "interface_name": "local0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 0, + "link_speed": 0, + "mtu": 0, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }, + { + "sw_if_index": 1, + "sup_sw_if_index": 1, + "l2_address_length": 6, + "l2_address": [144, 226, 186, 124, 65, 168, 0, 0], + "interface_name": "TenGigabitEthernetff/6/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9202, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + 
"sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }, + { + "sw_if_index": 2, + "sup_sw_if_index": 2, + "l2_address_length": 6, + "l2_address": [78, 144, 133, 211, 197, 19, 0, 0], + "interface_name": "VirtualFunctionEthernetff/7/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9206, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + } + ] + + VPP_INTERFACES_DUMP_MAC_ERR = [ + { + "sw_if_index": 0, + "sup_sw_if_index": 0, + "l2_address_length": 0, + "l2_address": [0, 0, 0, 0, 0, 0, 0, 0], + "interface_name": "local0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 0, + "link_speed": 0, + "mtu": 0, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }, + { + "sw_if_index": 1, + "sup_sw_if_index": 1, + "l2_address_length": 6, + "l2_address": [144, 226, 186, 124, 65, 169, 0, 0], + "interface_name": "TenGigabitEthernetff/6/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9202, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }, + { + "sw_if_index": 2, + "sup_sw_if_index": 2, + "l2_address_length": 6, + "l2_address": [78, 144, 133, 211, 197, 20, 0, 0], + "interface_name": "VirtualFunctionEthernetff/7/0", + 
"admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9206, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + } + ] + + CPU_LAYOUT = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 1, 1, 0]]} + CPU_SMT = {'cpuinfo': [[0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 1, 1, 0], + [2, 1, 0, 0, 0, 2, 2, 1], + [3, 1, 0, 0, 0, 3, 3, 1], + [4, 2, 0, 0, 0, 4, 4, 2], + [5, 2, 0, 0, 0, 5, 5, 2], + [6, 3, 0, 0, 0, 6, 6, 3], + [7, 3, 0, 0, 0, 7, 7, 3], + [8, 4, 0, 0, 0, 8, 8, 4], + [9, 5, 0, 1, 0, 0, 0, 0], + [10, 6, 0, 1, 0, 1, 1, 0], + [11, 6, 0, 1, 0, 2, 2, 1], + [12, 7, 0, 1, 0, 3, 3, 1], + [13, 7, 0, 1, 0, 4, 4, 2], + [14, 8, 0, 1, 0, 5, 5, 2], + [15, 8, 0, 1, 0, 6, 6, 3], + [16, 9, 0, 1, 0, 7, 7, 3], + [17, 9, 0, 1, 0, 8, 8, 4]]} + + def test_kill_vnf(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, 0, 0 + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.kill_vnf() + + def test_kill_vnf_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 1, 0, 0 + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with self.assertRaises(RuntimeError) as raised: + vpp_setup_env_helper.kill_vnf() + + self.assertIn('Failed to stop service vpp', str(raised.exception)) + + def test_tear_down(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.tear_down() + + def 
test_start_vpp_service(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, 0, 0 + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.start_vpp_service() + + def test_start_vpp_service_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 1, 0, 0 + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with self.assertRaises(RuntimeError) as raised: + vpp_setup_env_helper.start_vpp_service() + + self.assertIn('Failed to start service vpp', str(raised.exception)) + + def test__update_vnfd_helper(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper._update_vnfd_helper( + {'vpp-data': {'vpp-key': 'vpp-value'}}) + + self.assertEqual({'vpp-key': 'vpp-value'}, + vpp_setup_env_helper.vnfd_helper.get('vpp-data', {})) + + def test__update_vnfd_helper_with_key(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper._update_vnfd_helper({'driver': 'qat'}, 'xe0') + + self.assertEqual('qat', + vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'driver')) + + def test__update_vnfd_helper_dict_without_key(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper._update_vnfd_helper( + {'mgmt-interface': {'name': 'net'}}) + + self.assertEqual({'ip': '10.10.10.101', + 'name': 'net', + 'password': 'r00t', + 'user': 'root', + 'vdu-id': 
'ipsecvnf-baremetal'}, + vpp_setup_env_helper.vnfd_helper.get('mgmt-interface', + {})) + + def test_get_value_by_interface_key(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper._update_vnfd_helper( + {'vpp-data': {'vpp-key': 'vpp-value'}}, 'xe0') + + self.assertEqual({'vpp-key': 'vpp-value'}, + vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'vpp-data')) + + def test_get_value_by_interface_key_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper._update_vnfd_helper( + {'vpp-data': {'vpp-key': 'vpp-value'}}, 'xe0') + + self.assertIsNone(vpp_setup_env_helper.get_value_by_interface_key( + 'xe2', 'vpp-err')) + + def test_crypto_device_init(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.dpdk_bind_helper.load_dpdk_driver = mock.Mock() + vpp_setup_env_helper.dpdk_bind_helper.bind = mock.Mock() + + vpp_setup_env_helper.kill_vnf = mock.Mock() + vpp_setup_env_helper.pci_driver_unbind = mock.Mock() + + with mock.patch.object(vpp_setup_env_helper, 'get_pci_dev_driver') as \ + mock_get_pci_dev_driver, \ + mock.patch.object(vpp_setup_env_helper, 'set_sriov_numvfs') as \ + mock_set_sriov_numvfs: + mock_get_pci_dev_driver.return_value = 'igb_uio' + self.assertIsNone( + vpp_setup_env_helper.crypto_device_init('0000:ff:06.0', 32)) + mock_set_sriov_numvfs.assert_called() + + def test_get_sriov_numvfs(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '32', '' + + scenario_helper = mock.Mock() + 
vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertEqual(32, + vpp_setup_env_helper.get_sriov_numvfs('0000:ff:06.0')) + + def test_get_sriov_numvfs_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, 'err', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertEqual(0, + vpp_setup_env_helper.get_sriov_numvfs('0000:ff:06.0')) + + def test_set_sriov_numvfs(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.set_sriov_numvfs('0000:ff:06.0') + self.assertEqual(ssh_helper.execute.call_count, 1) + + def test_pci_driver_unbind(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.pci_driver_unbind('0000:ff:06.0') + self.assertEqual(ssh_helper.execute.call_count, 1) + + def test_get_pci_dev_driver(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = \ + 0, 'Slot: ff:07.0\n' \ + 'Class: Ethernet controller\n' \ + 'Vendor: Intel Corporation\n' \ + 'Device: 82599 Ethernet Controller Virtual Function\n' \ + 'SVendor: Intel Corporation\n' \ + 'SDevice: 82599 Ethernet Controller Virtual Function\n' \ + 'Rev: 01\n' \ + 'Driver: igb_uio\n' \ + 'Module: ixgbevf', '' + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertEqual('igb_uio', vpp_setup_env_helper.get_pci_dev_driver( + '0000:ff:06.0')) + + def test_get_pci_dev_driver_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + 
ssh_helper.execute.return_value = 1, 'err', '' + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with self.assertRaises(RuntimeError) as raised: + vpp_setup_env_helper.get_pci_dev_driver( + '0000:ff:06.0') + + self.assertIn("'lspci -vmmks 0000:ff:06.0' failed", + str(raised.exception)) + + def test_get_pci_dev_driver_output_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = \ + 0, 'Slot: ff:07.0\n' \ + '\n\t' \ + 'Vendor: Intel Corporation\n' \ + 'Device: 82599 Ethernet Controller Virtual Function\n' \ + 'SVendor: Intel Corporation\n' \ + 'SDevice: 82599 Ethernet Controller Virtual Function\n' \ + 'Rev: 01\n' \ + 'Driver_err: igb_uio\n' \ + 'Module: ixgbevf', '' + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertIsNone( + vpp_setup_env_helper.get_pci_dev_driver('0000:ff:06.0')) + + def test_vpp_create_ipsec_tunnels(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + self.assertIsNone( + vpp_setup_env_helper.vpp_create_ipsec_tunnels('10.10.10.2', + '10.10.10.1', 'xe0', + 1, 1, mock.Mock(), + 'crypto_key', + mock.Mock(), + 'integ_key', + '20.20.20.0')) + self.assertGreaterEqual(ssh_helper.execute.call_count, 2) + + def test_vpp_create_ipsec_1000_tunnels(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + self.assertIsNone( + vpp_setup_env_helper.vpp_create_ipsec_tunnels('10.10.10.2', + '10.10.10.1', 'xe0', + 1000, 128000, + mock.Mock(), + 'crypto_key', + mock.Mock(), + 
'integ_key', + '20.20.20.0')) + self.assertGreaterEqual(ssh_helper.execute.call_count, 2) + + def test_apply_config(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertIsNone(vpp_setup_env_helper.apply_config(mock.Mock())) + self.assertGreaterEqual(ssh_helper.execute.call_count, 2) + + def test_apply_config_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 1, '', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with self.assertRaises(RuntimeError) as raised: + vpp_setup_env_helper.apply_config(mock.Mock()) + + self.assertIn('Writing config file failed', str(raised.exception)) + + def test_vpp_route_add(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertIsNone( + vpp_setup_env_helper.vpp_route_add('xe0', '10.10.10.1', 24)) + + def test_vpp_route_add_without_index(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertIsNone( + vpp_setup_env_helper.vpp_route_add('xe0', '10.10.10.1', 24, + interface='xe0', + use_sw_index=False)) + 
+ def test_add_arp_on_dut(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertEqual('', vpp_setup_env_helper.add_arp_on_dut('xe0', + '10.10.10.1', + '00:00:00:00:00:00')) + + def test_set_ip(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertEqual('', + vpp_setup_env_helper.set_ip('xe0', '10.10.10.1', + 24)) + + def test_set_interface_state(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertEqual('', + vpp_setup_env_helper.set_interface_state('xe0', + 'up')) + + def test_set_interface_state_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + with 
self.assertRaises(ValueError) as raised: + vpp_setup_env_helper.set_interface_state('xe0', 'error') + self.assertIn('Unexpected interface state: error', + str(raised.exception)) + + def test_set_interface_down_state(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertEqual('', + vpp_setup_env_helper.set_interface_state('xe0', + 'down')) + + def test_vpp_set_interface_mtu(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = '' + self.assertIsNone( + vpp_setup_env_helper.vpp_set_interface_mtu('xe0', 9200)) + + def test_vpp_interfaces_ready_wait(self): + json_output = [self.VPP_INTERFACES_DUMP] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + self.assertIsNone(vpp_setup_env_helper.vpp_interfaces_ready_wait()) + + def test_vpp_interfaces_ready_wait_timeout(self): + json_output = [[ + { + "sw_if_index": 0, + "sup_sw_if_index": 0, + "l2_address_length": 0, + "l2_address": [0, 0, 0, 0, 0, 0, 0, 0], + "interface_name": "xe0", + "admin_up_down": 
1, + "link_up_down": 0, + "link_duplex": 0, + "link_speed": 0, + "mtu": 0, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }]] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + with self.assertRaises(RuntimeError) as raised: + vpp_setup_env_helper.vpp_interfaces_ready_wait(5) + self.assertIn('timeout, not up [\'xe0\']', str(raised.exception)) + + def test_vpp_get_interface_data(self): + json_output = [self.VPP_INTERFACES_DUMP] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + self.assertEqual(json_output[0], + vpp_setup_env_helper.vpp_get_interface_data()) + + def test_vpp_get_interface_data_ifname(self): + json_output = [self.VPP_INTERFACES_DUMP] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + 
self.assertEqual(json_output[0][2], + vpp_setup_env_helper.vpp_get_interface_data( + 'VirtualFunctionEthernetff/7/0')) + + def test_vpp_get_interface_data_ifname_error(self): + json_output = [self.VPP_INTERFACES_DUMP] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + self.assertEqual({}, vpp_setup_env_helper.vpp_get_interface_data( + 'error')) + + def test_vpp_get_interface_data_ifindex(self): + json_output = [self.VPP_INTERFACES_DUMP] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + self.assertEqual(json_output[0][1], + vpp_setup_env_helper.vpp_get_interface_data(1)) + + def test_vpp_get_interface_data_error(self): + json_output = [self.VPP_INTERFACES_DUMP] + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + + with mock.patch.object(vpp_helpers.VatTerminal, + 'vat_terminal_exec_cmd_from_template') as \ + mock_vat_terminal_exec_cmd_from_template: + mock_vat_terminal_exec_cmd_from_template.return_value = json_output + with self.assertRaises(TypeError) as raised: + vpp_setup_env_helper.vpp_get_interface_data(1.0) + self.assertEqual('', str(raised.exception)) + + def test_update_vpp_interface_data(self): + output = 
'{}\n{}'.format(self.VPP_INTERFACES_DUMP, + 'dump_interface_table:6019: JSON output ' \ + 'supported only for VPE API calls and dump_stats_table\n' \ + '/opt/nsb_bin/vpp/templates/dump_interfaces.vat(2): \n' \ + 'dump_interface_table error: Misc') + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, output.replace("\'", "\""), '' + ssh_helper.join_bin_path.return_value = '/opt/nsb_bin/vpp/templates' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertIsNone(vpp_setup_env_helper.update_vpp_interface_data()) + self.assertGreaterEqual(ssh_helper.execute.call_count, 1) + self.assertEqual('TenGigabitEthernetff/6/0', + vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'vpp_name')) + self.assertEqual(1, vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'vpp_sw_index')) + self.assertEqual('VirtualFunctionEthernetff/7/0', + vpp_setup_env_helper.get_value_by_interface_key( + 'xe1', 'vpp_name')) + self.assertEqual(2, vpp_setup_env_helper.get_value_by_interface_key( + 'xe1', 'vpp_sw_index')) + + def test_update_vpp_interface_data_error(self): + output = '{}\n{}'.format(self.VPP_INTERFACES_DUMP_MAC_ERR, + 'dump_interface_table:6019: JSON output ' \ + 'supported only for VPE API calls and dump_stats_table\n' \ + '/opt/nsb_bin/vpp/templates/dump_interfaces.vat(2): \n' \ + 'dump_interface_table error: Misc') + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, output.replace("\'", "\""), '' + ssh_helper.join_bin_path.return_value = '/opt/nsb_bin/vpp/templates' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertIsNone(vpp_setup_env_helper.update_vpp_interface_data()) + self.assertGreaterEqual(ssh_helper.execute.call_count, 1) + + def test_iface_update_numa(self): + vnfd_helper = 
VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '0', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertIsNone(vpp_setup_env_helper.iface_update_numa()) + self.assertGreaterEqual(ssh_helper.execute.call_count, 2) + self.assertEqual(0, vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertEqual(0, vpp_setup_env_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + + def test_iface_update_numa_error(self): + vnfd_helper = VnfdHelper(self.VNFD_1) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '-1', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout: + mock_get_cpu_layout.return_value = self.CPU_LAYOUT + sys_cores = cpu.CpuSysCores(ssh_helper) + vpp_setup_env_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + self.assertIsNone(vpp_setup_env_helper.iface_update_numa()) + self.assertGreaterEqual(ssh_helper.execute.call_count, 2) + self.assertEqual(0, vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertEqual(0, vpp_setup_env_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + + def test_iface_update_without_numa(self): + vnfd_helper = VnfdHelper(self.VNFD_2) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, '-1', '' + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with mock.patch.object(cpu.CpuSysCores, 'get_cpu_layout') as \ + mock_get_cpu_layout: + mock_get_cpu_layout.return_value = self.CPU_SMT + sys_cores = cpu.CpuSysCores(ssh_helper) + vpp_setup_env_helper._update_vnfd_helper( + sys_cores.get_cpu_layout()) + self.assertIsNone(vpp_setup_env_helper.iface_update_numa()) + 
self.assertGreaterEqual(ssh_helper.execute.call_count, 2) + self.assertIsNone(vpp_setup_env_helper.get_value_by_interface_key( + 'xe0', 'numa_node')) + self.assertIsNone(vpp_setup_env_helper.get_value_by_interface_key( + 'xe1', 'numa_node')) + + def test_execute_script(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + vpp_setup_env_helper.execute_script('dump_interfaces.vat', True, True) + self.assertGreaterEqual(ssh_helper.put_file.call_count, 1) + self.assertGreaterEqual(ssh_helper.execute.call_count, 1) + + def test_execute_script_error(self): + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.side_effect = Exception + + scenario_helper = mock.Mock() + vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + with self.assertRaises(Exception) as raised: + vpp_setup_env_helper.execute_script('dump_interfaces.vat', True, + True) + self.assertIn( + 'VAT script execution failed: vpp_api_test json in dump_interfaces.vat script', + str(raised.exception)) + self.assertGreaterEqual(ssh_helper.put_file.call_count, 1) + + def test_execute_script_json_out(self): + json_output = [ + { + "sw_if_index": 0, + "sup_sw_if_index": 0 + }, + { + "l2_address_length": 6, + "l2_address": [144, 226, 186, 124, 65, 168, 0, 0] + }, + { + "interface_name": "VirtualFunctionEthernetff/7/0", + "admin_up_down": 0 + } + ] + output = '{}\n{}'.format(json_output, + 'dump_interface_table:6019: JSON output ' \ + 'supported only for VPE API calls and dump_stats_table\n' \ + '/opt/nsb_bin/vpp/templates/dump_interfaces.vat(2): \n' \ + 'dump_interface_table error: Misc') + vnfd_helper = VnfdHelper(self.VNFD_0) + ssh_helper = mock.Mock() + ssh_helper.execute.return_value = 0, output, '' + ssh_helper.join_bin_path.return_value = '/opt/nsb_bin/vpp/templates' + scenario_helper = mock.Mock() + 
vpp_setup_env_helper = VppSetupEnvHelper(vnfd_helper, ssh_helper, + scenario_helper) + self.assertEqual(str(json_output), + vpp_setup_env_helper.execute_script_json_out( + 'dump_interfaces.vat')) + + def test_self_cleanup_vat_json_output(self): + json_output = [ + { + "sw_if_index": 0, + "sup_sw_if_index": 0 + }, + { + "l2_address_length": 6, + "l2_address": [144, 226, 186, 124, 65, 168, 0, 0] + }, + { + "interface_name": "VirtualFunctionEthernetff/7/0", + "admin_up_down": 0 + } + ] + + output = '{}\n{}'.format(json_output, + 'dump_interface_table:6019: JSON output ' \ + 'supported only for VPE API calls and dump_stats_table\n' \ + '/opt/nsb_bin/vpp/templates/dump_interfaces.vat(2): \n' \ + 'dump_interface_table error: Misc') + self.assertEqual(str(json_output), + VppSetupEnvHelper.cleanup_vat_json_output(output, + '/opt/nsb_bin/vpp/templates/dump_interfaces.vat')) + + def test__convert_mac_to_number_list(self): + self.assertEqual([144, 226, 186, 124, 65, 168], + VppSetupEnvHelper._convert_mac_to_number_list( + '90:e2:ba:7c:41:a8')) + + def test_get_vpp_interface_by_mac(self): + mac_address = '90:e2:ba:7c:41:a8' + self.assertEqual({'admin_up_down': 0, + 'interface_name': 'TenGigabitEthernetff/6/0', + 'l2_address': [144, 226, 186, 124, 65, 168, 0, 0], + 'l2_address_length': 6, + 'link_duplex': 2, + 'link_speed': 32, + 'link_up_down': 0, + 'mtu': 9202, + 'sub_default': 0, + 'sub_dot1ad': 0, + 'sub_exact_match': 0, + 'sub_id': 0, + 'sub_inner_vlan_id': 0, + 'sub_inner_vlan_id_any': 0, + 'sub_number_of_tags': 0, + 'sub_outer_vlan_id': 0, + 'sub_outer_vlan_id_any': 0, + 'sup_sw_if_index': 1, + 'sw_if_index': 1, + 'vtr_op': 0, + 'vtr_push_dot1q': 0, + 'vtr_tag1': 0, + 'vtr_tag2': 0}, + VppSetupEnvHelper.get_vpp_interface_by_mac( + self.VPP_INTERFACES_DUMP, mac_address)) + + def test_get_vpp_interface_by_mac_error(self): + mac_address = '90:e2:ba:7c:41:a9' + with self.assertRaises(ValueError) as raised: + VppSetupEnvHelper.get_vpp_interface_by_mac( + [{ + "sw_if_index": 
1, + "sup_sw_if_index": 1, + "l2_address_length": 7, + "l2_address": [144, 226, 186, 124, 65, 169, 0, 0], + "interface_name": "TenGigabitEthernetff/6/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9202, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }], mac_address) + + self.assertIn('l2_address_length value is not 6.', + str(raised.exception)) + + def test_get_vpp_interface_by_mac_l2_error(self): + mac_address = '90:e2:ba:7c:41:a7' + with self.assertRaises(KeyError) as raised: + VppSetupEnvHelper.get_vpp_interface_by_mac( + [{ + "sw_if_index": 1, + "sup_sw_if_index": 1, + "l2_address_length": 6, + "l2_address_err": [144, 226, 186, 124, 65, 167, 0, 0], + "interface_name": "TenGigabitEthernetff/6/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 2, + "link_speed": 32, + "mtu": 9202, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }], mac_address) + + self.assertIn( + 'key l2_address not found in interface dict.Probably input list ' \ + 'is not parsed from correct VAT json output.', + str(raised.exception)) + + def test_get_vpp_interface_by_mac_l2_length_error(self): + mac_address = '90:e2:ba:7c:41:a6' + with self.assertRaises(KeyError) as raised: + VppSetupEnvHelper.get_vpp_interface_by_mac( + [{ + "sw_if_index": 1, + "sup_sw_if_index": 1, + "l2_address_length_err": 6, + "l2_address": [144, 226, 186, 124, 65, 166, 0, 0], + "interface_name": "TenGigabitEthernetff/6/0", + "admin_up_down": 0, + "link_up_down": 0, + "link_duplex": 
2, + "link_speed": 32, + "mtu": 9202, + "sub_id": 0, + "sub_dot1ad": 0, + "sub_number_of_tags": 0, + "sub_outer_vlan_id": 0, + "sub_inner_vlan_id": 0, + "sub_exact_match": 0, + "sub_default": 0, + "sub_outer_vlan_id_any": 0, + "sub_inner_vlan_id_any": 0, + "vtr_op": 0, + "vtr_push_dot1q": 0, + "vtr_tag1": 0, + "vtr_tag2": 0 + }], mac_address) + + self.assertIn( + 'key l2_address_length not found in interface dict. Probably ' \ + 'input list is not parsed from correct VAT json output.', + str(raised.exception)) + + def test_get_prefix_length(self): + start_ip = '10.10.10.0' + end_ip = '10.10.10.127' + ips = [ipaddress.ip_address(ip) for ip in + [str(ipaddress.ip_address(start_ip)), str(end_ip)]] + lowest_ip, highest_ip = min(ips), max(ips) + + self.assertEqual(25, + VppSetupEnvHelper.get_prefix_length(int(lowest_ip), + int(highest_ip), + lowest_ip.max_prefixlen)) + + def test_get_prefix_length_zero_prefix(self): + start_ip = '10.0.0.0' + end_ip = '10.0.0.0' + ips = [ipaddress.ip_address(ip) for ip in + [str(ipaddress.ip_address(start_ip)), str(end_ip)]] + lowest_ip, highest_ip = min(ips), max(ips) + + self.assertEqual(0, + VppSetupEnvHelper.get_prefix_length(int(lowest_ip), + int(highest_ip), + 0)) + + +class TestVatTerminal(unittest.TestCase): + + def test___init___error(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_open.side_effect = exceptions.SSHTimeout + + with self.assertRaises(RuntimeError) as raised: + VatTerminal(ssh_helper, json_param=True) + self.assertIn('Cannot open interactive terminal', + str(raised.exception)) + + def test___init___exec_error(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.side_effect = exceptions.SSHTimeout + VatTerminal(ssh_helper, json_param=True) + + def test_vat_terminal_exec_cmd(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.return_value = str( + {'empty': 'value'}).replace("\'", "\"") + vat_terminal = VatTerminal(ssh_helper, 
json_param=True) + + self.assertEqual({'empty': 'value'}, + vat_terminal.vat_terminal_exec_cmd( + "hw_interface_set_mtu sw_if_index 1 mtu 9200")) + + def test_vat_terminal_exec_cmd_array(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.return_value = str( + [{'empty': 'value'}]).replace("\'", "\"") + vat_terminal = VatTerminal(ssh_helper, json_param=True) + + self.assertEqual([{'empty': 'value'}], + vat_terminal.vat_terminal_exec_cmd( + "hw_interface_set_mtu sw_if_index 1 mtu 9200")) + + def test_vat_terminal_exec_cmd_without_output(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.return_value = str( + {'empty': 'value'}).replace("\'", "\"") + vat_terminal = VatTerminal(ssh_helper, json_param=False) + + self.assertIsNone(vat_terminal.vat_terminal_exec_cmd( + "hw_interface_set_mtu sw_if_index 1 mtu 9200")) + + def test_vat_terminal_exec_cmd_error(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.return_value = str( + {'empty': 'value'}).replace("\'", "\"") + ssh_helper.interactive_terminal_exec_command.side_effect = exceptions.SSHTimeout + + vat_terminal = VatTerminal(ssh_helper, json_param=True) + + with self.assertRaises(RuntimeError) as raised: + vat_terminal.vat_terminal_exec_cmd( + "hw_interface_set_mtu sw_if_index 1 mtu 9200") + self.assertIn( + 'VPP is not running on node. 
VAT command hw_interface_set_mtu ' \ + 'sw_if_index 1 mtu 9200 execution failed', + str(raised.exception)) + + def test_vat_terminal_exec_cmd_output_error(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.return_value = str( + 'empty: value').replace("\'", "\"") + + vat_terminal = VatTerminal(ssh_helper, json_param=True) + + with self.assertRaises(RuntimeError) as raised: + vat_terminal.vat_terminal_exec_cmd( + "hw_interface_set_mtu sw_if_index 1 mtu 9200") + self.assertIn( + 'VAT command hw_interface_set_mtu sw_if_index 1 mtu 9200: no JSON data.', + str(raised.exception)) + + def test_vat_terminal_close(self): + ssh_helper = mock.Mock() + vat_terminal = VatTerminal(ssh_helper, json_param=False) + self.assertIsNone(vat_terminal.vat_terminal_close()) + + def test_vat_terminal_close_error(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_exec_command.side_effect = exceptions.SSHTimeout + vat_terminal = VatTerminal(ssh_helper, json_param=False) + with self.assertRaises(RuntimeError) as raised: + vat_terminal.vat_terminal_close() + self.assertIn('Failed to close VAT console', str(raised.exception)) + + def test_vat_terminal_close_vat_error(self): + ssh_helper = mock.Mock() + ssh_helper.interactive_terminal_close.side_effect = exceptions.SSHTimeout + vat_terminal = VatTerminal(ssh_helper, json_param=False) + with self.assertRaises(RuntimeError) as raised: + vat_terminal.vat_terminal_close() + self.assertIn('Cannot close interactive terminal', + str(raised.exception)) + + def test_vat_terminal_exec_cmd_from_template(self): + ssh_helper = mock.Mock() + vat_terminal = VatTerminal(ssh_helper, json_param=False) + + with mock.patch.object(vat_terminal, 'vat_terminal_exec_cmd') as \ + mock_vat_terminal_exec_cmd: + mock_vat_terminal_exec_cmd.return_value = 'empty' + self.assertEqual(['empty'], + vat_terminal.vat_terminal_exec_cmd_from_template( + "hw_interface_set_mtu.vat", sw_if_index=1, + mtu=9200)) diff --git 
a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py index 71929f1a2..374fb6644 100644 --- a/yardstick/tests/unit/test_ssh.py +++ b/yardstick/tests/unit/test_ssh.py @@ -286,6 +286,48 @@ class SSHTestCase(unittest.TestCase): mock_paramiko_exec_command.assert_called_once_with('cmd', get_pty=True) + @mock.patch("yardstick.ssh.paramiko") + def test_interactive_terminal_open(self, mock_paramiko): + fake_client = mock.Mock() + fake_session = mock.Mock() + fake_session.recv.return_value = ":~# " + fake_transport = mock.Mock() + fake_transport.open_session.return_value = fake_session + fake_client.get_transport.return_value = fake_transport + mock_paramiko.SSHClient.return_value = fake_client + + test_ssh = ssh.SSH("admin", "example.net", pkey="key") + result = test_ssh.interactive_terminal_open() + self.assertEqual(fake_session, result) + + @mock.patch("yardstick.ssh.paramiko") + def test_interactive_terminal_exec_command(self, mock_paramiko): + fake_client = mock.Mock() + fake_session = mock.Mock() + fake_session.recv.return_value = "stdout fake data" + fake_transport = mock.Mock() + fake_transport.open_session.return_value = fake_session + fake_client.get_transport.return_value = fake_transport + mock_paramiko.SSHClient.return_value = fake_client + + test_ssh = ssh.SSH("admin", "example.net", pkey="key") + with mock.patch.object(fake_session, "sendall") \ + as mock_paramiko_send_command: + result = test_ssh.interactive_terminal_exec_command(fake_session, + 'cmd', "vat# ") + self.assertEqual("stdout fake data", result) + mock_paramiko_send_command.assert_called_once_with('cmd\n') + + @mock.patch("yardstick.ssh.paramiko") + def test_interactive_terminal_close(self, _): + fake_session = mock.Mock() + paramiko_sshclient = self.test_client._get_client() + paramiko_sshclient.get_transport.open_session.return_value = fake_session + with mock.patch.object(fake_session, "close") \ + as mock_paramiko_terminal_close: + 
self.test_client.interactive_terminal_close(fake_session) + mock_paramiko_terminal_close.assert_called_once_with() + class SSHRunTestCase(unittest.TestCase): """Test SSH.run method in different aspects. |