Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/model.py                              |  11
-rw-r--r--  yardstick/benchmark/contexts/standalone/model.py                   | 107
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py                |  19
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py                   |  21
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate.py              |   4
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash  |  20
-rw-r--r--  yardstick/common/ansible_common.py                                 |   5
-rw-r--r--  yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py  |  99
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py              |   4
-rw-r--r--  yardstick/network_services/utils.py                                |  52
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py         | 190
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py           |   4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_trex.py              |  32
-rw-r--r--  yardstick/orchestrator/heat.py                                     |  28
14 files changed, 389 insertions(+), 207 deletions(-)
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index 97560c9f6..ae56066ee 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -239,6 +239,7 @@ class Server(Object): # pragma: no cover
self._flavor = attrs["flavor"]
self.user_data = attrs.get('user_data', '')
+ self.availability_zone = attrs.get('availability_zone')
Server.list.append(self)
@@ -355,12 +356,10 @@ class Server(Object): # pragma: no cover
mountpoint=self.volume_mountpoint)
template.add_server(server_name, self.image, flavor=self.flavor_name,
- flavors=self.context.flavors,
- ports=port_name_list,
- user=self.user,
- key_name=self.keypair_name,
- user_data=self.user_data,
- scheduler_hints=scheduler_hints)
+ flavors=self.context.flavors, ports=port_name_list,
+ scheduler_hints=scheduler_hints, user=self.user,
+ key_name=self.keypair_name, user_data=self.user_data,
+ availability_zone=self.availability_zone)
def add_to_template(self, template, networks, scheduler_hints=None):
"""adds to the template one or more servers (instances)"""
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index ffd8858d9..a8943c3f6 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -94,33 +94,63 @@ class Libvirt(object):
cmd_template = "virsh list --name | grep -i %s"
status = connection.execute(cmd_template % vm_name)[0]
if status == 0:
- LOG.info("VM '%s' is already present.. destroying" % vm_name)
+ LOG.info("VM '%s' is already present... destroying", vm_name)
connection.execute("virsh destroy %s" % vm_name)
@staticmethod
def virsh_create_vm(connection, cfg):
err = connection.execute("virsh create %s" % cfg)[0]
- LOG.info("VM create status: %s" % (err))
+ LOG.info("VM create status: %s", err)
@staticmethod
def virsh_destroy_vm(vm_name, connection):
connection.execute("virsh destroy %s" % vm_name)
@staticmethod
- def add_interface_address(interface, pci_address):
+ def _add_interface_address(interface, pci_address):
+ """Add a PCI 'address' XML node
+
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
+ function='0x0'/>
+
+ Reference: https://software.intel.com/en-us/articles/
+ configure-sr-iov-network-virtual-functions-in-linux-kvm
+ """
vm_pci = ET.SubElement(interface, 'address')
vm_pci.set('type', 'pci')
- vm_pci.set('domain', '0x%s' % pci_address.domain)
- vm_pci.set('bus', '0x%s' % pci_address.bus)
- vm_pci.set('slot', '0x%s' % pci_address.slot)
- vm_pci.set('function', '0x%s' % pci_address.function)
+ vm_pci.set('domain', '0x{}'.format(pci_address.domain))
+ vm_pci.set('bus', '0x{}'.format(pci_address.bus))
+ vm_pci.set('slot', '0x{}'.format(pci_address.slot))
+ vm_pci.set('function', '0x{}'.format(pci_address.function))
return vm_pci
@classmethod
def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
- vhost_path = '{0}/var/run/openvswitch/dpdkvhostuser{1}'
+ """Add a DPDK OVS 'interface' XML node in 'devices' node
+
+ <devices>
+ <interface type='vhostuser'>
+ <mac address='00:00:00:00:00:01'/>
+ <source type='unix' path='/usr/local/var/run/openvswitch/
+ dpdkvhostuser0' mode='client'/>
+ <model type='virtio'/>
+ <driver queues='4'>
+ <host mrg_rxbuf='off'/>
+ </driver>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x0'/>
+ </interface>
+ ...
+ </devices>
+
+ Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
+ vhost-user/
+ """
+
+ vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
+ format(vpath, port_num))
root = ET.parse(xml)
- pci_address = PciAddress.parse_address(vpci.strip(), multi_line=True)
+ pci_address = PciAddress(vpci.strip())
device = root.find('devices')
interface = ET.SubElement(device, 'interface')
@@ -130,7 +160,7 @@ class Libvirt(object):
source = ET.SubElement(interface, 'source')
source.set('type', 'unix')
- source.set('path', vhost_path.format(vpath, port_num))
+ source.set('path', vhost_path)
source.set('mode', 'client')
model = ET.SubElement(interface, 'model')
@@ -142,14 +172,35 @@ class Libvirt(object):
host = ET.SubElement(driver, 'host')
host.set('mrg_rxbuf', 'off')
- cls.add_interface_address(interface, pci_address)
+ cls._add_interface_address(interface, pci_address)
root.write(xml)
@classmethod
- def add_sriov_interfaces(cls, vm_pci, vf_pci, vfmac, xml):
+ def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
+ """Add a SR-IOV 'interface' XML node in 'devices' node
+
+ <devices>
+ <interface type='hostdev' managed='yes'>
+ <source>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x0'/>
+ </source>
+ <mac address='52:54:00:6d:90:02'/>
+ <address type='pci' domain='0x0000' bus='0x02' slot='0x04'
+ function='0x1'/>
+ </interface>
+ ...
+ </devices>
+
+ Reference: https://access.redhat.com/documentation/en-us/
+ red_hat_enterprise_linux/6/html/
+ virtualization_host_configuration_and_guest_installation_guide/
+ sect-virtualization_host_configuration_and_guest_installation_guide
+ -sr_iov-how_sr_iov_libvirt_works
+ """
+
root = ET.parse(xml)
- pci_address = PciAddress.parse_address(vf_pci.strip(), multi_line=True)
device = root.find('devices')
interface = ET.SubElement(device, 'interface')
@@ -157,18 +208,15 @@ class Libvirt(object):
interface.set('type', 'hostdev')
mac = ET.SubElement(interface, 'mac')
- mac.set('address', vfmac)
- source = ET.SubElement(interface, 'source')
+ mac.set('address', vf_mac)
- addr = ET.SubElement(source, "address")
- addr.set('domain', "0x0")
- addr.set('bus', "{0}".format(pci_address.bus))
- addr.set('function', "{0}".format(pci_address.function))
- addr.set('slot', "0x{0}".format(pci_address.slot))
- addr.set('type', "pci")
+ source = ET.SubElement(interface, 'source')
+ addr = ET.SubElement(source, 'address')
+ pci_address = PciAddress(vf_pci.strip())
+ cls._add_interface_address(addr, pci_address)
- pci_vm_address = PciAddress.parse_address(vm_pci.strip(), multi_line=True)
- cls.add_interface_address(interface, pci_vm_address)
+ pci_vm_address = PciAddress(vm_pci.strip())
+ cls._add_interface_address(interface, pci_vm_address)
root.write(xml)
@@ -192,7 +240,7 @@ class Libvirt(object):
vcpu = int(cpu) * int(threads)
numa_cpus = '0-%s' % (vcpu - 1)
hw_socket = flavor.get('hw_socket', '0')
- cpuset = Libvirt.pin_vcpu_for_perf(connection, vm_name, vcpu, hw_socket)
+ cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)
mac = StandaloneContextHelper.get_mac_address(0x00)
image = cls.create_snapshot_qemu(connection, index,
@@ -216,13 +264,13 @@ class Libvirt(object):
connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")
@classmethod
- def pin_vcpu_for_perf(cls, connection, vm_name, cpu, socket="0"):
+ def pin_vcpu_for_perf(cls, connection, socket='0'):
threads = ""
sys_obj = CpuSysCores(connection)
soc_cpu = sys_obj.get_core_socket()
sys_cpu = int(soc_cpu["cores_per_socket"])
cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
- if int(soc_cpu["thread_per_core"]):
+ if int(soc_cpu["thread_per_core"]) > 1:
threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
cpuset = "%s,%s" % (cores, threads)
return cpuset
@@ -244,9 +292,6 @@ class StandaloneContextHelper(object):
if connection.execute(cmd_template % pkg)[0]:
connection.execute("apt-get update")
connection.execute("apt-get -y install %s" % pkg)
- else:
- # all installed
- return
@staticmethod
def get_kernel_module(connection, pci, driver):
@@ -283,7 +328,7 @@ class StandaloneContextHelper(object):
'interface': str(interface),
'driver': driver
})
- LOG.info("{0}".format(networks))
+ LOG.info(networks)
return networks
@@ -352,7 +397,7 @@ class StandaloneContextHelper(object):
while not mgmtip and times:
connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
- LOG.info("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
+ LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
if out.strip():
mgmtip = str(out.split(" ")[0]).strip()
client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
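For reference, the renamed _add_interface_address() helper only builds the libvirt <address type='pci' .../> node with ElementTree. A self-contained sketch under the assumption that PciAddress exposes domain/bus/slot/function as bare hex strings (a namedtuple stands in for the real class here):

    import xml.etree.ElementTree as ET
    from collections import namedtuple

    # Stand-in for yardstick.network_services.utils.PciAddress.
    PciAddress = namedtuple('PciAddress', 'domain bus slot function')

    def add_interface_address(interface, pci_address):
        """Append a libvirt <address type='pci'/> child to 'interface'."""
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))
        return vm_pci

    interface = ET.Element('interface', type='vhostuser')
    add_interface_address(interface, PciAddress('0000', '00', '08', '0'))
    print(ET.tostring(interface).decode())
    # e.g. <interface type="vhostuser"><address type="pci" domain="0x0000"
    #      bus="0x00" slot="0x08" function="0x0" /></interface>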
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index fcb7bb66c..a6c35de53 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -118,7 +118,7 @@ class OvsDpdkContext(Context):
self.connection.execute(cmd)
bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
phy_driver = "vfio-pci"
- for key, port in self.networks.items():
+ for _, port in self.networks.items():
vpci = port.get("phy_port")
self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
driver=phy_driver, port=vpci))
@@ -170,7 +170,7 @@ class OvsDpdkContext(Context):
]
ordered_network = OrderedDict(self.networks)
- for index, (key, vnf) in enumerate(ordered_network.items()):
+ for index, vnf in enumerate(ordered_network.values()):
if ovs_ver >= [2, 7, 0]:
dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
dpdk_list.append(ovs_add_port.format(br='br0', port='dpdk%s' % vnf.get("port_num", 0),
@@ -263,7 +263,7 @@ class OvsDpdkContext(Context):
# Bind nics back to kernel
bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
- for key, port in self.networks.items():
+ for port in self.networks.values():
vpci = port.get("phy_port")
phy_driver = port.get("driver")
self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
@@ -328,17 +328,17 @@ class OvsDpdkContext(Context):
def configure_nics_for_ovs_dpdk(self):
portlist = OrderedDict(self.networks)
- for key, ports in portlist.items():
+ for key in portlist:
mac = StandaloneContextHelper.get_mac_address()
portlist[key].update({'mac': mac})
self.networks = portlist
- LOG.info("Ports %s" % self.networks)
+ LOG.info("Ports %s", self.networks)
def _enable_interfaces(self, index, vfs, cfg):
vpath = self.ovs_properties.get("vpath", "/usr/local")
vf = self.networks[vfs[0]]
port_num = vf.get('port_num', 0)
- vpci = PciAddress.parse_address(vf['vpci'].strip(), multi_line=True)
+ vpci = PciAddress(vf['vpci'].strip())
# Generate the vpci for the interfaces
slot = index + port_num + 10
vf['vpci'] = \
@@ -357,9 +357,10 @@ class OvsDpdkContext(Context):
# 1. Check and delete VM if already exists
Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection)
- vcpu, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor, cfg, vm_name, index)
+ _, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor,
+ cfg, vm_name, index)
# 2: Cleanup already available VMs
- for idx, (vkey, vfs) in enumerate(OrderedDict(vnf["network_ports"]).items()):
+ for vkey, vfs in OrderedDict(vnf["network_ports"]).items():
if vkey == "mgmt":
continue
self._enable_interfaces(index, vfs, cfg)
@@ -367,7 +368,7 @@ class OvsDpdkContext(Context):
# copy xml to target...
self.connection.put(cfg, cfg)
- # FIXME: launch through libvirt
+ # NOTE: launch through libvirt
LOG.info("virsh create ...")
Libvirt.virsh_create_vm(self.connection, cfg)
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 69825fbbf..9d8423b5f 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -110,7 +110,7 @@ class SriovContext(Context):
Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
# Bind nics back to kernel
- for key, ports in self.networks.items():
+ for ports in self.networks.values():
# enable VFs for given...
build_vfs = "echo 0 > /sys/bus/pci/devices/{0}/sriov_numvfs"
self.connection.execute(build_vfs.format(ports.get('phy_port')))
@@ -170,8 +170,7 @@ class SriovContext(Context):
def configure_nics_for_sriov(self):
vf_cmd = "ip link set {0} vf 0 mac {1}"
- for key, ports in self.networks.items():
- vf_pci = []
+ for ports in self.networks.values():
host_driver = ports.get('driver')
if host_driver not in self.drivers:
self.connection.execute("rmmod %svf" % host_driver)
@@ -187,19 +186,19 @@ class SriovContext(Context):
if interface is not None:
self.connection.execute(vf_cmd.format(interface, mac))
- vf_pci = self.get_vf_data('vf_pci', ports.get('phy_port'), mac, interface)
+ vf_pci = self._get_vf_data(ports.get('phy_port'), mac, interface)
ports.update({
'vf_pci': vf_pci,
'mac': mac
})
- LOG.info("Ports %s" % self.networks)
+ LOG.info('Ports %s', self.networks)
def _enable_interfaces(self, index, idx, vfs, cfg):
vf_spoofchk = "ip link set {0} vf 0 spoofchk off"
vf = self.networks[vfs[0]]
- vpci = PciAddress.parse_address(vf['vpci'].strip(), multi_line=True)
+ vpci = PciAddress(vf['vpci'].strip())
# Generate the vpci for the interfaces
slot = index + idx + 10
vf['vpci'] = \
@@ -222,7 +221,7 @@ class SriovContext(Context):
# 1. Check and delete VM if already exists
Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection)
- vcpu, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor, cfg, vm_name, index)
+ _, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor, cfg, vm_name, index)
# 2: Cleanup already available VMs
for idx, (vkey, vfs) in enumerate(OrderedDict(vnf["network_ports"]).items()):
if vkey == "mgmt":
@@ -232,7 +231,7 @@ class SriovContext(Context):
# copy xml to target...
self.connection.put(cfg, cfg)
- # FIXME: launch through libvirt
+ # NOTE: launch through libvirt
LOG.info("virsh create ...")
Libvirt.virsh_create_vm(self.connection, cfg)
@@ -246,15 +245,15 @@ class SriovContext(Context):
return nodes
- def get_vf_data(self, key, value, vfmac, pfif):
+ def _get_vf_data(self, value, vfmac, pfif):
vf_data = {
"mac": vfmac,
"pf_if": pfif
}
vfs = StandaloneContextHelper.get_virtual_devices(self.connection, value)
for k, v in vfs.items():
- m = PciAddress.parse_address(k.strip(), multi_line=True)
- m1 = PciAddress.parse_address(value.strip(), multi_line=True)
+ m = PciAddress(k.strip())
+ m1 = PciAddress(value.strip())
if m.bus == m1.bus:
vf_data.update({"vf_pci": str(v)})
break
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index 286d8cdaf..2de1270ef 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -113,11 +113,11 @@ class QemuMigrate(base.Scenario):
if status:
raise RuntimeError(stderr)
- parsed_data = jsonutils.loads(stdout)
+ result.update(jsonutils.loads(stdout))
if "sla" in self.scenario_cfg:
sla_error = ""
- for t, timevalue in parsed_data.items():
+ for t, timevalue in result.items():
if 'max_%s' % t not in self.scenario_cfg['sla']:
continue
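For reference, the scenario now loads the benchmark's JSON output straight into the shared result dict and checks each value against a max_<key> SLA entry. A minimal sketch of that flow; the comparison step and the key names other than "setuptime" are assumptions for illustration, not taken from this diff:

    import json

    # Illustrative benchmark output; "setuptime" appears in the bash script,
    # the other keys are assumed for the example.
    stdout = '{"totaltime": "12", "downtime": "8", "setuptime": "15"}'
    scenario_cfg = {'sla': {'max_totaltime': 10, 'max_downtime': 10}}

    result = {}
    result.update(json.loads(stdout))          # same pattern as the patch

    sla_error = ""
    for t, timevalue in result.items():
        if 'max_%s' % t not in scenario_cfg['sla']:
            continue                           # no SLA defined for this key
        sla_max = int(scenario_cfg['sla']['max_%s' % t])
        if int(timevalue) > sla_max:
            sla_error += "%s %s > max_%s %s; " % (t, timevalue, t, sla_max)

    print(sla_error or "SLA met")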
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash b/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash
index 757553e8b..d95e91425 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash
@@ -21,13 +21,6 @@ max_down_time=$6
OUTPUT_FILE=/tmp/output-qemu.log
-echo "To check the parameters:"
-echo "SRC: $src"
-echo "DST: $dst"
-echo "DST_IP: $dst_ip"
-echo "MIGRATE_PORT: $migrate_to_port"
-echo "DOWN_TIME: $max_down_time"
-
do_migrate()
{
echo "Execution of Live Migration"
@@ -49,7 +42,8 @@ do_migrate()
done
echo "End of Live Migration"
-}
+
+} > /dev/null
output_qemu()
{
@@ -74,14 +68,18 @@ echo -e "{ \
\"setuptime\":\"$setuptime\" \
}"
}
+
# main entry
main()
{
- echo "Perform LiveMigration"
+ # Perform LiveMigration
do_migrate
- echo "LiveMigration Status"
+
+ # LiveMigration Status
output_qemu
- echo "LiveMigration JSON output "
+
+ # LiveMigration JSON output
output_json
}
+
main
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index 0cafa9708..9a4426bf9 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -298,8 +298,9 @@ class AnsibleNode(MutableMapping):
def gen_inventory_line(self):
inventory_params = self.get_inventory_params()
# use format to convert ints
+ # sort to ensure consistent key value ordering
formatted_args = (u"{}={}".format(*entry) for entry in
- inventory_params.items())
+ sorted(inventory_params.items()))
line = u" ".join(chain([self['name']], formatted_args))
return line
@@ -472,6 +473,8 @@ class AnsibleCommon(object):
prefix = '_'.join([self.prefix, prefix, 'inventory'])
ini_temp_file = IniMapTemporaryFile(directory=directory, prefix=prefix)
inventory_config = ConfigParser.ConfigParser(allow_no_value=True)
+ # disable default lowercasing
+ inventory_config.optionxform = str
return ini_temp_file.make_context(self.inventory_dict, write_func,
descriptor='inventory')
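For reference, the two changes make the generated inventory deterministic and case-accurate: sorted() fixes the order of host variables on each line, and optionxform = str stops ConfigParser from lower-casing keys when the inventory is written. A small sketch of the lower-casing issue (section and key names are made up):

    try:
        import configparser as ConfigParser   # Python 3
    except ImportError:
        import ConfigParser                    # Python 2

    line = 'Node-1 ansible_host=10.0.0.5'      # illustrative inventory line

    default_cfg = ConfigParser.ConfigParser(allow_no_value=True)
    default_cfg.add_section('nodes')
    default_cfg.set('nodes', line, None)
    print(default_cfg.items('nodes'))          # key comes back lower-cased

    fixed_cfg = ConfigParser.ConfigParser(allow_no_value=True)
    fixed_cfg.optionxform = str                # same override as the patch
    fixed_cfg.add_section('nodes')
    fixed_cfg.set('nodes', line, None)
    print(fixed_cfg.items('nodes'))            # 'Node-1 ...' kept verbatim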
diff --git a/yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py b/yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py
deleted file mode 100644
index 0e1048b5d..000000000
--- a/yardstick/network_services/traffic_profile/prox_mpls_tag_untag.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Fixed traffic profile definitions """
-
-from __future__ import absolute_import
-
-import logging
-
-from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
-
-LOG = logging.getLogger(__name__)
-
-
-class ProxMplsTagUntagProfile(ProxProfile):
- """
- This profile adds a single stream at the beginning of the traffic session
- """
-
- def __init__(self, tp_config):
- super(ProxMplsTagUntagProfile, self).__init__(tp_config)
- self.current_lower = self.lower_bound
- self.current_upper = self.upper_bound
-
- @property
- def delta(self):
- return self.current_upper - self.current_lower
-
- @property
- def mid_point(self):
- return (self.current_lower + self.current_upper) / 2
-
- def bounds_iterator(self, logger=None):
- self.current_lower = self.lower_bound
- self.current_upper = self.upper_bound
-
- test_value = self.current_upper
- while abs(self.delta) >= self.precision:
- if logger:
- logger.debug("New interval [%s, %s), precision: %d", self.current_lower,
- self.current_upper, self.step_value)
- logger.info("Testing with value %s", test_value)
-
- yield test_value
- test_value = self.mid_point
-
- def run_test_with_pkt_size(self, traffic_gen, pkt_size, duration):
- """Run the test for a single packet size.
-
- :param traffic_gen: traffic generator instance
- :type traffic_gen: TrafficGen
- :param pkt_size: The packet size to test with.
- :type pkt_size: int
- :param duration: The duration for each try.
- :type duration: int
-
- """
-
- LOG.info("Testing with packet size %d", pkt_size)
-
- # Binary search assumes the lower value of the interval is
- # successful and the upper value is a failure.
- # The first value that is tested, is the maximum value. If that
- # succeeds, no more searching is needed. If it fails, a regular
- # binary search is performed.
- #
- # The test_value used for the first iteration of binary search
- # is adjusted so that the delta between this test_value and the
- # upper bound is a power-of-2 multiple of precision. In the
- # optimistic situation where this first test_value results in a
- # success, the binary search will complete on an integer multiple
- # of the precision, rather than on a fraction of it.
-
- # throughput and packet loss from the most recent successful test
- successful_pkt_loss = 0.0
- for test_value in self.bounds_iterator(LOG):
- result, port_samples = self._profile_helper.run_test(pkt_size, duration,
- test_value, self.tolerated_loss)
-
- if result.success:
- LOG.debug("Success! Increasing lower bound")
- self.current_lower = test_value
- successful_pkt_loss = result.pkt_loss
- else:
- LOG.debug("Failure... Decreasing upper bound")
- self.current_upper = test_value
-
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- self.queue.put(samples)
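For reference, the deleted profile drove a binary search over the transmit rate: start at the maximum, then narrow the [lower, upper] interval until it is smaller than the configured precision. A standalone sketch of that loop with the traffic-generator call stubbed out (the 42.5% pass threshold is invented for the example):

    def run_trial(rate):
        # Stub for the PROX run_test() call; pretend rates up to 42.5% pass.
        return rate <= 42.5

    lower, upper, precision = 0.0, 100.0, 1.0
    test_value = upper                        # first try the maximum rate
    while abs(upper - lower) >= precision:
        if run_trial(test_value):
            lower = test_value                # success: raise the lower bound
        else:
            upper = test_value                # failure: drop the upper bound
        test_value = (lower + upper) / 2.0    # next trial at the midpoint

    print("highest passing rate ~ %.2f%%" % lower)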
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 16e809b65..b1ca8a345 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -62,7 +62,7 @@ class RFC2544Profile(TrexProfile):
self.generator.rfc2544_helper.correlated_traffic:
continue
for intf in intfs:
- port = self.generator.vnfd_helper.port_num(intf)
+ port = self.generator.port_num(intf)
self.ports.append(port)
self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
@@ -170,7 +170,7 @@ class RFC2544Profile(TrexProfile):
self.generator.rfc2544_helper.correlated_traffic:
continue
for intf in intfs:
- port = self.generator.vnfd_helper.port_num(intf)
+ port = self.generator.port_num(intf)
self.ports.append(port)
self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
diff --git a/yardstick/network_services/utils.py b/yardstick/network_services/utils.py
index eac3c814f..7a1815eb9 100644
--- a/yardstick/network_services/utils.py
+++ b/yardstick/network_services/utils.py
@@ -22,6 +22,7 @@ from oslo_config import cfg
from oslo_config.cfg import NoSuchOptError
from oslo_utils import encodeutils
+
NSB_ROOT = "/opt/nsb_bin"
CONF = cfg.CONF
@@ -43,30 +44,37 @@ HEXADECIMAL = "[0-9a-zA-Z]"
class PciAddress(object):
+ """Class to handle PCI addresses.
- PCI_PATTERN_STR = HEXADECIMAL.join([
- "(",
- "{4}):(", # domain (4 bytes)
- "{2}):(", # bus (2 bytes)
- "{2}).(", # function (2 bytes)
- ")", # slot (1 byte)
- ])
+ A PCI address can be written in two ways:
+ - Simple BDF notation:
+ 00:00.0 # bus:device.function
+ - With domain extension:
+ 0000:00:00.0 # domain:bus:device.function
- PCI_PATTERN = re.compile(PCI_PATTERN_STR)
+ Note: in Libvirt, 'device' is called 'slot'.
- @classmethod
- def parse_address(cls, text, multi_line=False):
- if multi_line:
- text = text.replace(os.linesep, '')
- match = cls.PCI_PATTERN.search(text)
- return cls(match.group(0))
+ Reference: https://wiki.xenproject.org/wiki/
+ Bus:Device.Function_(BDF)_Notation
+ """
+ PCI_PATTERN_STR = (
+ r"((?P<domain>[0-9a-zA-Z]{4}):)?(?P<bus>[0-9a-zA-Z]{2}):"
+ r"(?P<slot>[0-9a-zA-Z]{2})\.(?P<function>[0-9a-zA-Z]{1})")
def __init__(self, address):
- super(PciAddress, self).__init__()
- match = self.PCI_PATTERN.match(address)
+ pci_pattern = re.compile(self.PCI_PATTERN_STR)
+ match = pci_pattern.search(address)
if not match:
raise ValueError('Invalid PCI address: {}'.format(address))
- self.address = address
+
+ self._domain = (match.group('domain') or '0000').lower()
+ self._bus = match.group('bus').lower()
+ self._slot = match.group('slot').lower()
+ self._function = match.group('function').lower()
+ self.address = '{:0>4}:{:0>2}:{:0>2}.{:1}'.format(self.domain,
+ self.bus,
+ self.slot,
+ self.function)
self.match = match
def __repr__(self):
@@ -74,22 +82,22 @@ class PciAddress(object):
@property
def domain(self):
- return self.match.group(1)
+ return self._domain
@property
def bus(self):
- return self.match.group(2)
+ return self._bus
@property
def slot(self):
- return self.match.group(3)
+ return self._slot
@property
def function(self):
- return self.match.group(4)
+ return self._function
def values(self):
- return [self.match.group(n) for n in range(1, 5)]
+ return [self._domain, self._bus, self._slot, self._function]
def get_nsb_option(option, default=None):
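For reference, the rewritten PciAddress accepts both notations and defaults a missing domain to 0000. A quick check of the named-group pattern from the patch, exercised outside the class:

    import re

    # Same pattern string as the new PCI_PATTERN_STR above.
    PCI_PATTERN = re.compile(
        r"((?P<domain>[0-9a-zA-Z]{4}):)?(?P<bus>[0-9a-zA-Z]{2}):"
        r"(?P<slot>[0-9a-zA-Z]{2})\.(?P<function>[0-9a-zA-Z]{1})")

    for text in ("0000:06:00.0", "06:00.0"):
        match = PCI_PATTERN.search(text)
        domain = (match.group('domain') or '0000').lower()
        print(domain, match.group('bus'), match.group('slot'),
              match.group('function'))
    # prints "0000 06 00 0" for both forms: the bare BDF address gets the
    # default domain.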
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index ac5abfbcb..d9acae2f2 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -1490,7 +1490,6 @@ class ProxVpeProfileHelper(ProxProfileHelper):
if item_key != 'name':
continue
- for item_key, item_value in section:
if item_value.startswith("cpe"):
cpe_ports.append(tx_port_no)
@@ -1595,3 +1594,192 @@ class ProxVpeProfileHelper(ProxProfileHelper):
data_helper.latency = self.get_latency()
return data_helper.result_tuple, data_helper.samples
+
+
+class ProxlwAFTRProfileHelper(ProxProfileHelper):
+
+ __prox_profile_type__ = "lwAFTR gen"
+
+ def __init__(self, resource_helper):
+ super(ProxlwAFTRProfileHelper, self).__init__(resource_helper)
+ self._cores_tuple = None
+ self._ports_tuple = None
+ self.step_delta = 5
+ self.step_time = 0.5
+
+ @property
+ def _lwaftr_cores(self):
+ if not self._cores_tuple:
+ self._cores_tuple = self._get_cores_gen_lwaftr()
+ return self._cores_tuple
+
+ @property
+ def tun_cores(self):
+ return self._lwaftr_cores[0]
+
+ @property
+ def inet_cores(self):
+ return self._lwaftr_cores[1]
+
+ @property
+ def _lwaftr_ports(self):
+ if not self._ports_tuple:
+ self._ports_tuple = self._get_ports_gen_lw_aftr()
+ return self._ports_tuple
+
+ @property
+ def tun_ports(self):
+ return self._lwaftr_ports[0]
+
+ @property
+ def inet_ports(self):
+ return self._lwaftr_ports[1]
+
+ @property
+ def all_rx_cores(self):
+ return self.latency_cores
+
+ def _get_cores_gen_lwaftr(self):
+ tun_cores = []
+ inet_cores = []
+ for section_name, section in self.resource_helper.setup_helper.prox_config_data:
+ if not section_name.startswith("core"):
+ continue
+
+ if all(key != "mode" or value != self.PROX_CORE_GEN_MODE for key, value in section):
+ continue
+
+ core_tuple = CoreSocketTuple(section_name)
+ core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ for item_value in (v for k, v in section if k == 'name'):
+ if item_value.startswith('tun'):
+ tun_cores.append(core_tag)
+ elif item_value.startswith('inet'):
+ inet_cores.append(core_tag)
+
+ return tun_cores, inet_cores
+
+ def _get_ports_gen_lw_aftr(self):
+ tun_ports = []
+ inet_ports = []
+
+ re_port = re.compile(r'port (\d+)')
+ for section_name, section in self.resource_helper.setup_helper.prox_config_data:
+ match = re_port.search(section_name)
+ if not match:
+ continue
+
+ tx_port_no = int(match.group(1))
+ for item_value in (v for k, v in section if k == 'name'):
+ if item_value.startswith('lwB4'):
+ tun_ports.append(tx_port_no)
+ elif item_value.startswith('inet'):
+ inet_ports.append(tx_port_no)
+
+ return tun_ports, inet_ports
+
+ @staticmethod
+ def _resize(len1, len2):
+ if len1 == len2:
+ return 1.0
+ return 1.0 * len1 / len2
+
+ @contextmanager
+ def traffic_context(self, pkt_size, value):
+ # Tester is sending packets at the required speed already after
+ # setup_test(). Just get the current statistics, sleep the required
+ # amount of time and calculate packet loss.
+ tun_pkt_size = pkt_size
+ inet_pkt_size = pkt_size - 40
+ ratio = 1.0 * (tun_pkt_size + 20) / (inet_pkt_size + 20)
+
+ curr_up_speed = curr_down_speed = 0
+ max_up_speed = max_down_speed = value
+
+ max_up_speed = value / ratio
+
+ # Adjust speed when multiple cores per port are used to generate traffic
+ if len(self.tun_ports) != len(self.tun_cores):
+ max_down_speed *= self._resize(len(self.tun_ports), len(self.tun_cores))
+ if len(self.inet_ports) != len(self.inet_cores):
+ max_up_speed *= self._resize(len(self.inet_ports), len(self.inet_cores))
+
+ # Initialize cores
+ self.sut.stop_all()
+ time.sleep(0.5)
+
+ # Flush any packets in the NIC RX buffers, otherwise the stats will be
+ # wrong.
+ self.sut.start(self.all_rx_cores)
+ time.sleep(0.5)
+ self.sut.stop(self.all_rx_cores)
+ time.sleep(0.5)
+ self.sut.reset_stats()
+
+ self.sut.set_pkt_size(self.inet_cores, inet_pkt_size)
+ self.sut.set_pkt_size(self.tun_cores, tun_pkt_size)
+
+ self.sut.reset_values(self.tun_cores)
+ self.sut.reset_values(self.inet_cores)
+
+ # Set correct IP and UDP lengths in packet headers
+ # tun
+ # IPv6 length (byte 18): 58 for MAC(12), EthType(2), IPv6(40) , CRC(4)
+ self.sut.set_value(self.tun_cores, 18, tun_pkt_size - 58, 2)
+ # IP length (byte 56): 58 for MAC(12), EthType(2), CRC(4)
+ self.sut.set_value(self.tun_cores, 56, tun_pkt_size - 58, 2)
+ # UDP length (byte 78): 78 for MAC(12), EthType(2), IP(20), UDP(8), CRC(4)
+ self.sut.set_value(self.tun_cores, 78, tun_pkt_size - 78, 2)
+
+ # INET
+ # IP length (byte 20): 22 for MAC(12), EthType(2), CRC(4)
+ self.sut.set_value(self.inet_cores, 16, inet_pkt_size - 18, 2)
+ # UDP length (byte 42): 42 for MAC(12), EthType(2), IP(20), UDP(8), CRC(4)
+ self.sut.set_value(self.inet_cores, 38, inet_pkt_size - 38, 2)
+
+ LOG.info("Initializing SUT: sending lwAFTR packets")
+ self.sut.set_speed(self.inet_cores, curr_up_speed)
+ self.sut.set_speed(self.tun_cores, curr_down_speed)
+ time.sleep(4)
+
+ # Ramp up the transmission speed. First go to the common speed, then
+ # increase steps for the faster one.
+ self.sut.start(self.tun_cores + self.inet_cores + self.latency_cores)
+
+ LOG.info("Ramping up speed to %s up, %s down", max_up_speed, max_down_speed)
+
+ while (curr_up_speed < max_up_speed) or (curr_down_speed < max_down_speed):
+ # The min(..., ...) takes care of 1) floating point rounding errors
+ # that could make curr_*_speed to be slightly greater than
+ # max_*_speed and 2) max_*_speed not being an exact multiple of
+ # self.step_delta.
+ if curr_up_speed < max_up_speed:
+ curr_up_speed = min(curr_up_speed + self.step_delta, max_up_speed)
+ if curr_down_speed < max_down_speed:
+ curr_down_speed = min(curr_down_speed + self.step_delta, max_down_speed)
+
+ self.sut.set_speed(self.inet_cores, curr_up_speed)
+ self.sut.set_speed(self.tun_cores, curr_down_speed)
+ time.sleep(self.step_time)
+
+ LOG.info("Target speeds reached. Starting real test.")
+
+ yield
+
+ self.sut.stop(self.tun_cores + self.inet_cores)
+ LOG.info("Test ended. Flushing NIC buffers")
+ self.sut.start(self.all_rx_cores)
+ time.sleep(3)
+ self.sut.stop(self.all_rx_cores)
+
+ def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
+ data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)
+
+ with data_helper, self.traffic_context(pkt_size, value):
+ with data_helper.measure_tot_stats():
+ time.sleep(duration)
+ # Get statistics to calculate PPS at the right speed.
+ data_helper.capture_tsc_hz()
+ data_helper.latency = self.get_latency()
+
+ return data_helper.result_tuple, data_helper.samples
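For reference, the new traffic_context() ramps the tun and inet generator speeds in fixed steps until both reach their targets. A minimal sketch of just the ramp loop, with the SUT calls replaced by prints and illustrative target speeds:

    import time

    step_delta, step_time = 5, 0.0            # step_time 0.0 keeps this instant
    curr_up = curr_down = 0.0
    max_up, max_down = 80.0, 100.0            # illustrative targets

    while curr_up < max_up or curr_down < max_down:
        # min() absorbs float rounding and targets that are not an exact
        # multiple of step_delta, exactly as in the helper above.
        if curr_up < max_up:
            curr_up = min(curr_up + step_delta, max_up)
        if curr_down < max_down:
            curr_down = min(curr_down + step_delta, max_down)
        print("inet cores -> %5.1f%%, tun cores -> %5.1f%%" % (curr_up, curr_down))
        time.sleep(step_time)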
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 08ec44f65..5599c0a3b 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -433,6 +433,10 @@ class ClientResourceHelper(ResourceHelper):
self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.downlink_ports)
self.all_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.all_ports)
+ def port_num(self, intf):
+ # by default return port num
+ return self.vnfd_helper.port_num(intf)
+
def get_stats(self, *args, **kwargs):
try:
return self.client.get_stats(*args, **kwargs)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
index 458f1b844..93ba8557a 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -48,27 +48,38 @@ class TrexResourceHelper(ClientResourceHelper):
ASYNC_PORT = 4500
SYNC_PORT = 4501
+ def __init__(self, setup_helper):
+ super(TrexResourceHelper, self).__init__(setup_helper)
+ self.port_map = {}
+ self.dpdk_to_trex_port_map = {}
+
def generate_cfg(self):
port_names = self.vnfd_helper.port_pairs.all_ports
vpci_list = []
port_list = []
+ self.port_map = {}
+ self.dpdk_to_trex_port_map = {}
- port_nums = sorted(self.vnfd_helper.port_nums(port_names))
- for port_num in port_nums:
- interface = self.vnfd_helper.find_interface_by_port(port_num)
+ sorted_ports = sorted((self.vnfd_helper.port_num(port_name), port_name) for port_name in
+ port_names)
+ for index, (port_num, port_name) in enumerate(sorted_ports):
+ interface = self.vnfd_helper.find_interface(name=port_name)
virtual_interface = interface['virtual-interface']
dst_mac = virtual_interface["dst_mac"]
- # why skip?, ordering is based on DPDK port number so we can't skip
+ # skip unused ports: every port used in the topology
+ # will always have dst_mac set
if not dst_mac:
continue
- # TRex ports must be in DPDK port number, so order of append matters
+ # TRex ports are in logical order roughly based on DPDK port number sorting
vpci_list.append(virtual_interface["vpci"])
local_mac = virtual_interface["local_mac"]
port_list.append({
"src_mac": mac_address_to_hex_list(local_mac),
"dest_mac": mac_address_to_hex_list(dst_mac),
})
+ self.port_map[port_name] = index
+ self.dpdk_to_trex_port_map[port_num] = index
trex_cfg = {
'interfaces': vpci_list,
'port_info': port_list,
@@ -80,6 +91,17 @@ class TrexResourceHelper(ClientResourceHelper):
cfg_str = yaml.safe_dump(cfg_file, default_flow_style=False, explicit_start=True)
self.ssh_helper.upload_config_file(os.path.basename(self.CONF_FILE), cfg_str)
+ def _build_ports(self):
+ super(TrexResourceHelper, self)._build_ports()
+ # override with TRex logic port number
+ self.uplink_ports = [self.dpdk_to_trex_port_map[p] for p in self.uplink_ports]
+ self.downlink_ports = [self.dpdk_to_trex_port_map[p] for p in self.downlink_ports]
+ self.all_ports = [self.dpdk_to_trex_port_map[p] for p in self.all_ports]
+
+ def port_num(self, intf):
+ # return logical TRex port
+ return self.port_map[intf]
+
def check_status(self):
status, _, _ = self.ssh_helper.execute("sudo lsof -i:%s" % self.SYNC_PORT)
return status
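For reference, generate_cfg() now records two mappings: interface name to TRex logical port (port_map) and DPDK port number to TRex logical port (dpdk_to_trex_port_map), so _build_ports() and port_num() can translate consistently. A sketch of the index assignment over ports sorted by DPDK number; the interface names and numbers are made up:

    # Hypothetical interfaces: name -> DPDK port number.
    dpdk_port_nums = {'xe0': 2, 'xe1': 0, 'xe2': 5}

    port_map = {}                 # interface name   -> TRex logical port
    dpdk_to_trex_port_map = {}    # DPDK port number -> TRex logical port

    sorted_ports = sorted((num, name) for name, num in dpdk_port_nums.items())
    for index, (port_num, port_name) in enumerate(sorted_ports):
        port_map[port_name] = index
        dpdk_to_trex_port_map[port_num] = index

    print(port_map)                 # {'xe1': 0, 'xe0': 1, 'xe2': 2}
    print(dpdk_to_trex_port_map)    # {0: 0, 2: 1, 5: 2}

    # _build_ports() then rewrites DPDK port lists into TRex logical ports:
    uplink_ports = [2]
    print([dpdk_to_trex_port_map[p] for p in uplink_ports])   # [1]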
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 455ddc34e..d58ae5618 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -497,7 +497,7 @@ name (i.e. %s).\
'type': 'OS::Neutron::SecurityGroup',
'properties': {
'name': name,
- 'description': "Group allowing icmp and upd/tcp on all ports",
+ 'description': "Group allowing IPv4 and IPv6 for icmp and udp/tcp on all ports",
'rules': [
{'remote_ip_prefix': '0.0.0.0/0',
'protocol': 'tcp',
@@ -508,7 +508,20 @@ name (i.e. %s).\
'port_range_min': '1',
'port_range_max': '65535'},
{'remote_ip_prefix': '0.0.0.0/0',
- 'protocol': 'icmp'}
+ 'protocol': 'icmp'},
+ {'remote_ip_prefix': '::/0',
+ 'ethertype': 'IPv6',
+ 'protocol': 'tcp',
+ 'port_range_min': '1',
+ 'port_range_max': '65535'},
+ {'remote_ip_prefix': '::/0',
+ 'ethertype': 'IPv6',
+ 'protocol': 'udp',
+ 'port_range_min': '1',
+ 'port_range_max': '65535'},
+ {'remote_ip_prefix': '::/0',
+ 'ethertype': 'IPv6',
+ 'protocol': 'ipv6-icmp'}
]
}
}
@@ -518,11 +531,10 @@ name (i.e. %s).\
'value': {'get_resource': name}
}
- def add_server(self, name, image, flavor, flavors, ports=None,
- networks=None, scheduler_hints=None, user=None,
- key_name=None, user_data=None, metadata=None,
- additional_properties=None):
- """add to the template a Nova Server"""
+ def add_server(self, name, image, flavor, flavors, ports=None, networks=None,
+ scheduler_hints=None, user=None, key_name=None, user_data=None, metadata=None,
+ additional_properties=None, availability_zone=None):
+ """add to the template a Nova Server """
log.debug("adding Nova::Server '%s', image '%s', flavor '%s', "
"ports %s", name, image, flavor, ports)
@@ -537,6 +549,8 @@ name (i.e. %s).\
'flavor': {},
'networks': [] # list of dictionaries
}
+ if availability_zone:
+ server_properties["availability_zone"] = availability_zone
if flavor in flavors:
self.resources[name]['depends_on'].append(flavor)
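For reference, add_server() only emits the availability_zone key when one was supplied, so templates that do not set it render exactly as before. A minimal sketch of that conditional property assembly (names and zone are illustrative; the flavor dict is filled in later by add_server itself):

    def build_server_properties(image, availability_zone=None):
        # Mirrors the relevant part of add_server(): the key is only present
        # when an availability zone was requested.
        server_properties = {
            'name': 'trafficgen_1',     # illustrative
            'image': image,
            'flavor': {},
            'networks': [],
        }
        if availability_zone:
            server_properties['availability_zone'] = availability_zone
        return server_properties

    print(build_server_properties('yardstick-image'))
    print(build_server_properties('yardstick-image',
                                  availability_zone='zone-01'))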